/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * apic_introp.c:
 *	Has code for Advanced DDI interrupt framework support.
 */

#include <sys/cpuvar.h>
#include <sys/psm.h>
#include "apic.h"
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>

extern struct av_head autovect[];

/*
 * Local Function Prototypes
 */
int apic_pci_msi_enable_vector(dev_info_t *, int, int,
    int, int, int);
apic_irq_t *apic_find_irq(dev_info_t *, struct intrspec *, int);
static int apic_get_pending(apic_irq_t *, int);
static void apic_clear_mask(apic_irq_t *);
static void apic_set_mask(apic_irq_t *);
static uchar_t apic_find_multi_vectors(int, int);
int apic_navail_vector(dev_info_t *, int);
int apic_alloc_vectors(dev_info_t *, int, int, int, int);
void apic_free_vectors(dev_info_t *, int, int, int, int);
int apic_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);

extern int intr_clear(void);
extern void intr_restore(uint_t);
extern uchar_t apic_bind_intr(dev_info_t *, int, uchar_t, uchar_t);
extern int apic_allocate_irq(int);
extern int apic_introp_xlate(dev_info_t *, struct intrspec *, int);
extern int apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe);
extern boolean_t apic_cpu_in_range(int cpu);

/*
 * MSI support flag:
 * reflects whether MSI is supported at APIC level
 * it can also be patched through /etc/system
 *
 * 0 = default value - don't know and need to call apic_check_msi_support()
 *     to find out then set it accordingly
 * 1 = supported
 * -1 = not supported
 */
int	apic_support_msi = 0;

/* Multiple vector support for MSI */
int	apic_multi_msi_enable = 1;
int	apic_multi_msi_max = 2;

extern uchar_t apic_ipltopri[MAXIPL+1];
extern uchar_t apic_vector_to_irq[APIC_MAX_VECTOR+1];
extern int apic_max_device_irq;
extern int apic_min_device_irq;
extern apic_irq_t *apic_irq_table[APIC_MAX_VECTOR+1];
extern volatile uint32_t *apicadr;	/* virtual addr of local APIC */
extern volatile int32_t *apicioadr[MAX_IO_APIC];
extern lock_t apic_ioapic_lock;
extern kmutex_t airq_mutex;
extern apic_cpus_info_t *apic_cpus;
extern int apic_first_avail_irq;


/*
 * apic_pci_msi_enable_vector:
 *	Set the address/data fields in the MSI/X capability structure
 *	XXX: MSI-X support
 */
/* ARGSUSED */
int
apic_pci_msi_enable_vector(dev_info_t *dip, int type, int inum, int vector,
    int count, int target_apic_id)
{
	uint64_t	msi_addr, msi_data;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
	    ddi_driver_name(dip), inum, vector, target_apic_id));

	/* MSI Address */
	msi_addr = (MSI_ADDR_HDR | (target_apic_id << MSI_ADDR_DEST_SHIFT));
	msi_addr |= ((MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = ((MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) | vector);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));

	if (pci_msi_configure(dip, type, count, inum, msi_addr, msi_data) !=
	    DDI_SUCCESS) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: "
		    "pci_msi_configure failed\n"));
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}


/*
 * This function returns the number of vectors available for the given pri.
 * dip is not used at this moment.  If we really don't need it, it will be
 * removed.
 */
/*ARGSUSED*/
int
apic_navail_vector(dev_info_t *dip, int pri)
{
	int	lowest, highest, i, navail, count;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
	    (void *)dip, pri));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = count = 0;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		count = 0;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if ((i == T_FASTTRAP) || (i == APIC_SPUR_INTR))
				break;
			count++;
			i++;
		}
		if (count > navail)
			navail = count;
	}
	return (navail);
}

static uchar_t
apic_find_multi_vectors(int pri, int count)
{
	int	lowest, highest, i, navail, start;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
	    pri, count));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = 0;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		navail = 0;
		start = i;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if ((i == T_FASTTRAP) || (i == APIC_SPUR_INTR))
				break;
			navail++;
			if (navail >= count)
				return (start);
			i++;
		}
	}
	return (0);
}


/*
 * It finds the apic_irq_t associated with the dip, ispec and type.
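 * Returns NULL if no matching entry is found in apic_irq_table.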
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
	apic_irq_t	*irqp;
	int		i;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
	    "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
	    ispec->intrspec_pri, type));

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irqp = apic_irq_table[i]) == NULL)
			continue;
		if ((irqp->airq_dip == dip) &&
		    (irqp->airq_origirq == ispec->intrspec_vec) &&
		    (irqp->airq_ipl == ispec->intrspec_pri)) {
			if (DDI_INTR_IS_MSI_OR_MSIX(type)) {
				if (APIC_IS_MSI_OR_MSIX_INDEX(irqp->
				    airq_mps_intr_index))
					return (irqp);
			} else
				return (irqp);
		}
	}
	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
	return (NULL);
}


/*
 * This function will return the pending bit of the irqp.
 * It either comes from the IRR register of the local APIC or the RDT
 * entry of the I/O APIC.
 * For the IRR to work, it needs to be read on its bound CPU.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
	int			bit, index, irr, pending;
	int			intin_no;
	volatile int32_t	*ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
	    "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
	    type));

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

	index = irqp->airq_vector / 32;
	bit = irqp->airq_vector % 32;
	irr = apicadr[APIC_IRR_REG + index];

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
		/* check I/O APIC for fixed interrupt */
		intin_no = irqp->airq_intin_no;
		ioapic = apicioadr[irqp->airq_ioapicindex];
		pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) &
		    AV_PENDING) ? 1 : 0;
	}
	return (pending);
}


/*
 * This function will clear the mask for the interrupt on the I/O APIC.
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
	int			intin_no;
	int			iflag;
	int32_t			rdt_entry;
	volatile int32_t	*ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
	    (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* clear mask */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    ((~AV_MASK) & rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function will mask the interrupt on the I/O APIC.
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
	int			intin_no;
	volatile int32_t	*ioapic;
	int			iflag;
	int32_t			rdt_entry;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();

	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* mask it */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    (AV_MASK | rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function allocates "count" vector(s) for the given "dip/pri/type".
 */
int
apic_alloc_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int		rcount, i;
	uchar_t		start, irqno, cpu;
	short		idx;
	major_t		major;
	apic_irq_t	*irqptr;

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return (0);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: dip=0x%p type=%d "
	    "inum=0x%x pri=0x%x count=0x%x\n",
	    (void *)dip, type, inum, pri, count));

	if (count > 1) {
		if (apic_multi_msi_enable == 0)
			count = 1;
		else if (count > apic_multi_msi_max)
			count = apic_multi_msi_max;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;

	mutex_enter(&airq_mutex);

	for (start = 0; rcount > 0; rcount--) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	idx = (short)((type == DDI_INTR_TYPE_MSI) ? MSI_INDEX : MSIX_INDEX);
	major = (dip != NULL) ?
	    ddi_name_to_major(ddi_get_name(dip)) : 0;
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_vector = start + i;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = idx;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0)	/* they are all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);
	return (rcount);
}


void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int		i;
	apic_irq_t	*irqptr;
	struct intrspec	ispec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
	    "count: %x pri: %x type: %x\n",
	    (void *)dip, inum, count, pri, type));

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return;

	for (i = 0; i < count; i++) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
		    "pri=0x%x count=0x%x\n", inum, pri, count));
		ispec.intrspec_vec = inum + i;
		ispec.intrspec_pri = pri;
		if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
			DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
			    "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
			    "failed\n", (void *)dip, inum, pri));
			continue;
		}
		irqptr->airq_mps_intr_index = FREE_INDEX;
		apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
	}
}


/*
 * check whether the system supports MSI
 *
 * If PCI-E capability is found, then this must be a PCI-E system.
 * Since MSI is required for PCI-E systems, it returns PSM_SUCCESS
 * to indicate this system supports MSI.
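 * Otherwise it returns PSM_FAILURE, and the caller masks MSI/X out of
 * the supported interrupt types.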
 */
int
apic_check_msi_support()
{
	dev_info_t	*cdip;
	char		dev_type[16];
	int		dev_len;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support:\n"));

	/*
	 * check whether the first level children of root_node have
	 * PCI-E capability
	 */
	for (cdip = ddi_get_child(ddi_root_node()); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: cdip: 0x%p,"
		    " driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
		    ddi_driver_name(cdip), ddi_binding_name(cdip),
		    ddi_node_name(cdip)));
		dev_len = sizeof (dev_type);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "device_type", (caddr_t)dev_type, &dev_len)
		    != DDI_PROP_SUCCESS)
			continue;
		if (strcmp(dev_type, "pciex") == 0)
			return (PSM_SUCCESS);
	}

	/* MSI is not supported on this system */
	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
	    "device_type found\n"));
	return (PSM_FAILURE);
}

int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
	struct autovec	*av_dev;
	uchar_t		irqno;
	int		i;
	apic_irq_t	*irq_p;

	/* Sanity check the vector/irq argument. */
	ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

	mutex_enter(&airq_mutex);

	/*
	 * Convert the vecirq arg to an irq using vector_to_irq table
	 * if the arg is a vector.  Pass thru if already an irq.
	 */
	if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
	    PSMGI_INTRBY_VEC)
		irqno = apic_vector_to_irq[vecirq];
	else
		irqno = vecirq;

	irq_p = apic_irq_table[irqno];

	if ((irq_p == NULL) ||
	    (irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
	    (irq_p->airq_temp_cpu == IRQ_UNINIT)) {
		mutex_exit(&airq_mutex);
		return (PSM_FAILURE);
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

		/* Get the (temp) cpu from apic_irq table, indexed by irq. */
		intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR) {
		intr_params_p->avgi_vector = irq_p->airq_vector;
	}

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS)) {
		/* Get number of devices from apic_irq table shared field. */
		intr_params_p->avgi_num_devs = irq_p->airq_share;
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip.  Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			intr_params_p->avgi_num_devs =
			    MIN(intr_params_p->avgi_num_devs, i);
		}

		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0)
			intr_params_p->avgi_dip_list = NULL;

		else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_SLEEP);

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
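			 * The array allocated above holds exactly
			 * avgi_num_devs entries.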
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips.  These will be
			 * ignored.
			 */
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
		}
	}

	mutex_exit(&airq_mutex);

	return (PSM_SUCCESS);
}

/*
 * This function provides an external interface to the nexus for all
 * functionalities related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int		cap, ret;
	int		count_vec;
	int		cpu;
	int		old_priority;
	int		new_priority;
	apic_irq_t	*irqp;
	struct intrspec	*ispec, intr_spec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
	    "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level and
		 * mask off the MSI/X bits in hdlp->ih_type if it is not
		 * supported before returning.  If MSI/X is supported,
		 * leave ih_type unchanged and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
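		 * The probe result is cached in apic_support_msi so that
		 * apic_check_msi_support() is only called once.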
		 */
		if (apic_support_msi == 0) {
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1)
			*result = hdlp->ih_type;
		else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		*result = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		*result = apic_navail_vector(dip, hdlp->ih_pri);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
		break;
	case PSM_INTR_OP_GET_PENDING:
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = apic_get_pending(irqp, hdlp->ih_type);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
		if (irqp == NULL)
			return (PSM_FAILURE);
		apic_clear_mask(irqp);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		apic_set_mask(irqp);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = irqp->airq_share ? 1 : 0;
		break;
	case PSM_INTR_OP_SET_PRI:
		old_priority = hdlp->ih_pri;	/* save old value */
		new_priority = *(int *)result;	/* try the new value */

		/*
		 * First, check whether "hdlp->ih_scratch1" vectors are
		 * available at the new priority.
		 */
		if (apic_navail_vector(dip, new_priority) < hdlp->ih_scratch1)
			return (PSM_FAILURE);

		/* Now allocate the vectors */
		count_vec = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, new_priority, hdlp->ih_type);

		/* Did we get fewer vectors? */
		if (count_vec != hdlp->ih_scratch1) {
			apic_free_vectors(dip, hdlp->ih_inum, count_vec,
			    new_priority, hdlp->ih_type);
			return (PSM_FAILURE);
		}

		/* Finally, free the previously allocated vectors */
		apic_free_vectors(dip, hdlp->ih_inum, count_vec,
		    old_priority, hdlp->ih_type);
		hdlp->ih_pri = new_priority;	/* set the new value */
		break;
	case PSM_INTR_OP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		cpu = (int)(intptr_t)hdlp->ih_private;

		if (!apic_cpu_in_range(cpu)) {
			*result = EINVAL;
			return (PSM_FAILURE);
		}

		mutex_enter(&airq_mutex);

		/*
		 * Convert the vector to the irq using the vector_to_irq table.
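		 * hdlp->ih_vector carries the vector to be retargeted.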
		 */
		irqp = apic_irq_table[apic_vector_to_irq[hdlp->ih_vector]];
		if (irqp == NULL) {
			mutex_exit(&airq_mutex);
			*result = ENXIO;
			return (PSM_FAILURE);
		}
		ret = apic_rebind_all(irqp, cpu, 1);
		mutex_exit(&airq_mutex);
		if (ret) {
			*result = EIO;
			return (PSM_FAILURE);
		}
		*result = 0;
		break;
	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
		 */
		if (apic_get_vector_intr_info(
		    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}