/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * apic_introp.c:
 *	Has code for Advanced DDI interrupt framework support.
 */

#include <sys/cpuvar.h>
#include <sys/psm.h>
#include "apic.h"
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>

extern struct av_head autovect[];

/*
 * Local Function Prototypes
 */
int	apic_pci_msi_enable_vector(dev_info_t *, int, int, int, int, int);
apic_irq_t *apic_find_irq(dev_info_t *, struct intrspec *, int);
static int apic_get_pending(apic_irq_t *, int);
static void apic_clear_mask(apic_irq_t *);
static void apic_set_mask(apic_irq_t *);
static uchar_t apic_find_multi_vectors(int, int);
int apic_navail_vector(dev_info_t *, int);
int apic_alloc_vectors(dev_info_t *, int, int, int, int);
void apic_free_vectors(dev_info_t *, int, int, int, int);
int apic_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
    int *);

extern int intr_clear(void);
extern void intr_restore(uint_t);
extern uchar_t apic_bind_intr(dev_info_t *, int, uchar_t, uchar_t);
extern int apic_allocate_irq(int);
extern int apic_introp_xlate(dev_info_t *, struct intrspec *, int);
extern int apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe);
extern boolean_t apic_cpu_in_range(int cpu);

/*
 * MSI support flag:
 *	Reflects whether MSI is supported at the APIC level.
 *	It can also be patched through /etc/system.
 *
 *	 0 = default value - not yet known; apic_check_msi_support() must be
 *	     called to find out, and the flag is then set accordingly
 *	 1 = supported
 *	-1 = not supported
 */
int	apic_support_msi = 0;

/* Multiple vector support for MSI */
int	apic_multi_msi_enable = 1;
int	apic_multi_msi_max = 2;

extern uchar_t	apic_ipltopri[MAXIPL+1];
extern uchar_t	apic_vector_to_irq[APIC_MAX_VECTOR+1];
extern int	apic_max_device_irq;
extern int	apic_min_device_irq;
extern apic_irq_t *apic_irq_table[APIC_MAX_VECTOR+1];
extern volatile uint32_t *apicadr;	/* virtual addr of local APIC */
extern volatile int32_t *apicioadr[MAX_IO_APIC];
extern lock_t	apic_ioapic_lock;
extern kmutex_t	airq_mutex;
extern apic_cpus_info_t	*apic_cpus;
extern int	apic_first_avail_irq;
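
/*
 * Illustrative /etc/system settings for the MSI tunables defined above.
 * This is a sketch only; the "pcplusmp" module name is an assumption and
 * should be verified for the platform in question:
 *
 *	set pcplusmp:apic_support_msi = -1	(force MSI off)
 *	set pcplusmp:apic_multi_msi_enable = 0	(disable multi-vector MSI)
 *	set pcplusmp:apic_multi_msi_max = 1	(at most one vector per device)
 */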

/*
 * apic_pci_msi_enable_vector:
 *	Set the address/data fields in the MSI/X capability structure
 *	XXX: MSI-X support
 */
/* ARGSUSED */
int
apic_pci_msi_enable_vector(dev_info_t *dip, int type, int inum, int vector,
    int count, int target_apic_id)
{
	uint64_t	msi_addr, msi_data;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
	    ddi_driver_name(dip), inum, vector, target_apic_id));

	/* MSI Address */
	msi_addr = (MSI_ADDR_HDR | (target_apic_id << MSI_ADDR_DEST_SHIFT));
	msi_addr |= ((MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = ((MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) | vector);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));

	if (pci_msi_configure(dip, type, count, inum, msi_addr, msi_data) !=
	    DDI_SUCCESS) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: "
		    "pci_msi_configure failed\n"));
		return (PSM_FAILURE);
	}

	return (PSM_SUCCESS);
}


/*
 * Returns the number of vectors available for the given priority, i.e. the
 * length of the largest contiguous run of free vectors in that priority's
 * vector range.
 * dip is not used at this time; it will be removed if it turns out not to
 * be needed.
 */
/* ARGSUSED */
int
apic_navail_vector(dev_info_t *dip, int pri)
{
	int	lowest, highest, i, navail, count;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
	    (void *)dip, pri));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = count = 0;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		count = 0;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if ((i == T_FASTTRAP) || (i == APIC_SPUR_INTR))
				break;
			count++;
			i++;
		}
		if (count > navail)
			navail = count;
	}
	return (navail);
}

/*
 * Find a contiguous run of "count" free vectors at the given priority.
 * Returns the starting vector of the run, or 0 if no such run is available.
 */
static uchar_t
apic_find_multi_vectors(int pri, int count)
{
	int	lowest, highest, i, navail, start;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
	    pri, count));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = 0;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		navail = 0;
		start = i;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if ((i == T_FASTTRAP) || (i == APIC_SPUR_INTR))
				break;
			navail++;
			if (navail >= count)
				return (start);
			i++;
		}
	}
	return (0);
}


/*
 * Finds the apic_irq_t associated with the dip, ispec and type.
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
	apic_irq_t	*irqp;
	int		i;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
	    "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
	    ispec->intrspec_pri, type));

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irqp = apic_irq_table[i]) == NULL)
			continue;
		if ((irqp->airq_dip == dip) &&
		    (irqp->airq_origirq == ispec->intrspec_vec) &&
		    (irqp->airq_ipl == ispec->intrspec_pri)) {
			if (DDI_INTR_IS_MSI_OR_MSIX(type)) {
				if (APIC_IS_MSI_OR_MSIX_INDEX(irqp->
				    airq_mps_intr_index))
					return (irqp);
			} else
				return (irqp);
		}
	}
	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
	return (NULL);
}


/*
 * Returns the pending bit for the given irqp.
 * It comes either from the IRR register of the local APIC or from the RDT
 * entry of the I/O APIC.
 * For the IRR to be read, the code must run on the interrupt's bound CPU.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
	int			bit, index, irr, pending;
	int			intin_no;
	volatile int32_t	*ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
	    "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
	    type));

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

	index = irqp->airq_vector / 32;
	bit = irqp->airq_vector % 32;
	irr = apicadr[APIC_IRR_REG + index];

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
		/* check I/O APIC for fixed interrupt */
		intin_no = irqp->airq_intin_no;
		ioapic = apicioadr[irqp->airq_ioapicindex];
		pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) &
		    AV_PENDING) ? 1 : 0;
	}
	return (pending);
}


/*
 * This function clears the mask for the interrupt on the I/O APIC
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
	int			intin_no;
	int			iflag;
	int32_t			rdt_entry;
	volatile int32_t	*ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
	    (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* clear mask */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    ((~AV_MASK) & rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function masks the interrupt on the I/O APIC
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
	int			intin_no;
	volatile int32_t	*ioapic;
	int			iflag;
	int32_t			rdt_entry;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();

	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* mask it */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    (AV_MASK | rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function allocates "count" vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int	rcount, i;
	uchar_t	start, irqno, cpu;
	short	idx;
	major_t	major;
	apic_irq_t	*irqptr;

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return (0);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: dip=0x%p type=%d "
	    "inum=0x%x pri=0x%x count=0x%x\n",
	    (void *)dip, type, inum, pri, count));

	if (count > 1) {
		if (apic_multi_msi_enable == 0)
			count = 1;
		else if (count > apic_multi_msi_max)
			count = apic_multi_msi_max;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;

	mutex_enter(&airq_mutex);

	for (start = 0; rcount > 0; rcount--) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	idx = (short)((type == DDI_INTR_TYPE_MSI) ? MSI_INDEX : MSIX_INDEX);
	major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0;
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef	DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_vector = start + i;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = idx;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0) /* they are all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);
	return (rcount);
}


void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int i;
	apic_irq_t *irqptr;
	struct intrspec ispec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
	    "count: %x pri: %x type: %x\n",
	    (void *)dip, inum, count, pri, type));

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return;

	for (i = 0; i < count; i++) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
		    "pri=0x%x count=0x%x\n", inum, pri, count));
		ispec.intrspec_vec = inum + i;
		ispec.intrspec_pri = pri;
		if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
			DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
			    "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
			    "failed\n", (void *)dip, inum, pri));
			continue;
		}
		irqptr->airq_mps_intr_index = FREE_INDEX;
		apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
	}
}

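
/*
 * Minimal usage sketch (illustrative only, not a code path in this file) of
 * how the routines above fit together when MSI vectors are set up for a
 * device.  Locking and error handling are omitted, and "inum", "count",
 * "pri", "ispec" and "cpu_apic_id" are hypothetical placeholders:
 *
 *	nvec = apic_alloc_vectors(dip, inum, count, pri, DDI_INTR_TYPE_MSI);
 *	irqp = apic_find_irq(dip, &ispec, DDI_INTR_TYPE_MSI);
 *	(void) apic_pci_msi_enable_vector(dip, DDI_INTR_TYPE_MSI, inum,
 *	    irqp->airq_vector, nvec, cpu_apic_id);
 *	...
 *	apic_free_vectors(dip, inum, nvec, pri, DDI_INTR_TYPE_MSI);
 */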

/*
 * Check whether the system supports MSI.
 *
 * If PCI-E capability is found, then this must be a PCI-E system.
 * Since MSI is required for PCI-E systems, return PSM_SUCCESS
 * to indicate that this system supports MSI.
 */
int
apic_check_msi_support(dev_info_t *dip)
{
	dev_info_t	*rootdip;
	char		dev_type[16];
	int		dev_len;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: dip: 0x%p\n",
	    (void *)dip));

	/* check whether the device or its ancestors have PCI-E capability */
	for (rootdip = ddi_root_node(); dip != rootdip;
	    dip = ddi_get_parent(dip)) {

		DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: dip: 0x%p,"
		    " driver: %s, binding: %s, nodename: %s\n", (void *)dip,
		    ddi_driver_name(dip), ddi_binding_name(dip),
		    ddi_node_name(dip)));
		dev_len = sizeof (dev_type);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "device_type", (caddr_t)dev_type, &dev_len)
		    != DDI_PROP_SUCCESS)
			continue;
		if (strcmp(dev_type, "pciex") == 0)
			return (PSM_SUCCESS);
	}

	/* MSI is not supported on this system */
	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
	    "device_type found\n"));
	return (PSM_FAILURE);
}

/*
 * Return information about the interrupt identified by vecirq (a vector or
 * an irq, depending on the request flags): CPU binding, vector, number of
 * sharing devices and, if requested, the list of attached dips.
 */
int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
	struct autovec	*av_dev;
	uchar_t		irq;
	int		i;

	/* Sanity check the vector/irq argument. */
	ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

	mutex_enter(&airq_mutex);

	/*
	 * Convert the vecirq arg to an irq using vector_to_irq table
	 * if the arg is a vector.  Pass through if already an irq.
	 */
	if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
	    PSMGI_INTRBY_VEC)
		irq = apic_vector_to_irq[vecirq];
	else
		irq = vecirq;

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

		/* Get the (temp) cpu from apic_irq table, indexed by irq. */
		intr_params_p->avgi_cpu_id = apic_irq_table[irq]->airq_temp_cpu;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR) {
		intr_params_p->avgi_vector = apic_irq_table[irq]->airq_vector;
	}

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS)) {
		/* Get number of devices from apic_irq table shared field. */
		intr_params_p->avgi_num_devs = apic_irq_table[irq]->airq_share;
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip.  Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = autovect[irq].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			intr_params_p->avgi_num_devs =
			    MIN(intr_params_p->avgi_num_devs, i);
		}

		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0)
			intr_params_p->avgi_dip_list = NULL;

		else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_SLEEP);

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips.  These will be
			 * ignored.
			 */
			for (i = 0, av_dev = autovect[irq].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
		}
	}

	mutex_exit(&airq_mutex);

	return (PSM_SUCCESS);
}

/*
 * This function provides the external interface to the nexus for all
 * functionality related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int		cap, ret;
	int		count_vec;
	int		cpu;
	int		old_priority;
	int		new_priority;
	apic_irq_t	*irqp;
	struct intrspec *ispec, intr_spec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
	    "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level; if it
		 * is not, mask off the MSI/X bits in hdlp->ih_type before
		 * returning.  If MSI/X is supported, leave ih_type unchanged
		 * and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support(dip) == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1)
			*result = hdlp->ih_type;
		else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		*result = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		*result = apic_navail_vector(dip, hdlp->ih_pri);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
		break;
	case PSM_INTR_OP_GET_PENDING:
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = apic_get_pending(irqp, hdlp->ih_type);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
		if (irqp == NULL)
			return (PSM_FAILURE);
		apic_clear_mask(irqp);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		apic_set_mask(irqp);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = irqp->airq_share ? 1 : 0;
		break;
	case PSM_INTR_OP_SET_PRI:
		old_priority = hdlp->ih_pri;	/* save old value */
		new_priority = *(int *)result;	/* try the new value */

		/* First, check whether "hdlp->ih_scratch1" vectors exist */
		if (apic_navail_vector(dip, new_priority) < hdlp->ih_scratch1)
			return (PSM_FAILURE);

		/* Now allocate the vectors */
		count_vec = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, new_priority, hdlp->ih_type);

		/* Did we get fewer vectors? */
		if (count_vec != hdlp->ih_scratch1) {
			apic_free_vectors(dip, hdlp->ih_inum, count_vec,
			    new_priority, hdlp->ih_type);
			return (PSM_FAILURE);
		}

		/* Finally, free the previously allocated vectors */
		apic_free_vectors(dip, hdlp->ih_inum, count_vec,
		    old_priority, hdlp->ih_type);
		hdlp->ih_pri = new_priority; /* set the new value */
		break;
	case PSM_INTR_OP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		cpu = (int)(intptr_t)hdlp->ih_private;

		if (!apic_cpu_in_range(cpu)) {
			*result = EINVAL;
			return (PSM_FAILURE);
		}

		mutex_enter(&airq_mutex);

		/* Convert the vector to the irq using vector_to_irq table. */
		irqp = apic_irq_table[apic_vector_to_irq[hdlp->ih_vector]];
		if (irqp == NULL) {
			mutex_exit(&airq_mutex);
			*result = ENXIO;
			return (PSM_FAILURE);
		}
		ret = apic_rebind_all(irqp, cpu, 1);
		mutex_exit(&airq_mutex);
		if (ret) {
			*result = EIO;
			return (PSM_FAILURE);
		}
		*result = 0;
		break;
	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
		 */
		if (apic_get_vector_intr_info(
		    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}
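
/*
 * Illustrative sketch (not part of this file's interface) of how a nexus
 * driver typically reaches apic_intr_ops(): the PSM exports it through its
 * ops vector, and the nexus calls it with an opcode and a result pointer.
 * The "psm_intr_ops" function-pointer name below is an assumption used
 * only for illustration:
 *
 *	int	types;
 *
 *	if ((*psm_intr_ops)(dip, hdlp, PSM_INTR_OP_CHECK_MSI, &types) ==
 *	    PSM_SUCCESS)
 *		hdlp->ih_type = types;	(MSI/X bits cleared if unsupported)
 */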