/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * apic_introp.c:
 *	Has code for Advanced DDI interrupt framework support.
 */

#include <sys/cpuvar.h>
#include <sys/psm.h>
#include "apic.h"
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>

extern struct av_head autovect[];

/*
 * Local Function Prototypes
 */
int		apic_pci_msi_enable_vector(dev_info_t *, int, int,
		    int, int, int);
apic_irq_t	*apic_find_irq(dev_info_t *, struct intrspec *, int);
static int	apic_get_pending(apic_irq_t *, int);
static void	apic_clear_mask(apic_irq_t *);
static void	apic_set_mask(apic_irq_t *);
static uchar_t	apic_find_multi_vectors(int, int);
int		apic_navail_vector(dev_info_t *, int);
int		apic_alloc_vectors(dev_info_t *, int, int, int, int, int);
void		apic_free_vectors(dev_info_t *, int, int, int, int);
int		apic_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);

extern int	intr_clear(void);
extern void	intr_restore(uint_t);

/*
 * MSI support flag:
 * reflects whether MSI is supported at APIC level
 * it can also be patched through /etc/system
 *
 *	0 = default value - don't know and need to call apic_check_msi_support()
 *	    to find out then set it accordingly
 *	1 = supported
 *	-1 = not supported
 */
int	apic_support_msi = 0;

/* Multiple vector support for MSI */
int	apic_multi_msi_enable = 1;
int	apic_multi_msi_max = 2;

extern uchar_t		apic_ipltopri[MAXIPL+1];
extern uchar_t		apic_vector_to_irq[APIC_MAX_VECTOR+1];
extern int		apic_max_device_irq;
extern int		apic_min_device_irq;
extern apic_irq_t	*apic_irq_table[APIC_MAX_VECTOR+1];
extern volatile uint32_t *apicadr;	/* virtual addr of local APIC */
extern volatile int32_t	*apicioadr[MAX_IO_APIC];
extern lock_t		apic_ioapic_lock;
extern kmutex_t		airq_mutex;
extern apic_cpus_info_t	*apic_cpus;
extern int		apic_first_avail_irq;


/*
 * apic_pci_msi_enable_vector:
 *	Set the address/data fields in the MSI/X capability structure
 *	XXX: MSI-X support
 */
/* ARGSUSED */
int
apic_pci_msi_enable_vector(dev_info_t *dip, int type, int inum, int vector,
    int count, int target_apic_id)
{
	uint64_t		msi_addr, msi_data;
	ushort_t		msi_ctrl;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(dip);

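	/*
	 * Compute the MSI address (destination local APIC ID, physical
	 * destination mode, fixed redirection hint) and the MSI data
	 * (edge trigger mode plus the vector), then program them either
	 * into the MSI capability registers or into the "inum"th entry
	 * of the MSI-X table, depending on "type".
	 */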
	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
	    ddi_driver_name(dip), inum, vector, target_apic_id));

	if (handle == NULL)
		return (PSM_FAILURE);

	/* MSI Address */
	msi_addr = (MSI_ADDR_HDR | (target_apic_id << MSI_ADDR_DEST_SHIFT));
	msi_addr |= ((MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = ((MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) | vector);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

		/* Set the bits to inform how many MSIs are enabled */
		msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

		pci_config_put32(handle,
		    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);

		if (msi_ctrl & PCI_MSI_64BIT_MASK) {
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, msi_data);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, msi_data);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(dip);

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
		ddi_put64(msix_p->msix_tbl_hdl,
		    (uint64_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
	}

	return (PSM_SUCCESS);
}


/*
 * This function returns the number of vectors available at the given pri.
 * dip is not used at the moment; if it turns out we really don't need it,
 * it will be removed.
 */
/*ARGSUSED*/
int
apic_navail_vector(dev_info_t *dip, int pri)
{
	int	lowest, highest, i, navail, count;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
	    (void *)dip, pri));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = count = 0;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		count = 0;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if (APIC_CHECK_RESERVE_VECTORS(i))
				break;
			count++;
			i++;
		}
		if (count > navail)
			navail = count;
	}
	return (navail);
}

/*
 * Finds "count" contiguous MSI vectors, starting at the proper alignment,
 * at "pri".
 * The caller must make sure that count is a power of 2 and is not < 1.
 */
static uchar_t
apic_find_multi_vectors(int pri, int count)
{
	int	lowest, highest, i, navail, start, msibits;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
	    pri, count));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = 0;

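	/*
	 * With multiple MSIs a device modifies the low-order log2(count)
	 * bits of the message data to distinguish its messages, so the
	 * block of vectors returned here must begin on a count-aligned
	 * vector (e.g. for count = 4 the starting vector is rounded up
	 * to a multiple of 4 below).
	 */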
	/*
	 * msibits is the no. of lower order message data bits for the
	 * allocated MSI vectors and is used to calculate the aligned
	 * starting vector
	 */
	msibits = count - 1;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		navail = 0;

		/*
		 * starting vector has to be aligned accordingly for
		 * multiple MSIs
		 */
		if (msibits)
			i = (i + msibits) & ~msibits;
		start = i;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if (APIC_CHECK_RESERVE_VECTORS(i))
				break;
			navail++;
			if (navail >= count)
				return (start);
			i++;
		}
	}
	return (0);
}

/*
 * Finds the apic_irq_t associated with the dip, ispec and type.
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
	apic_irq_t	*irqp;
	int		i;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
	    "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
	    ispec->intrspec_pri, type));

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irqp = apic_irq_table[i]) == NULL)
			continue;
		if ((irqp->airq_dip == dip) &&
		    (irqp->airq_origirq == ispec->intrspec_vec) &&
		    (irqp->airq_ipl == ispec->intrspec_pri)) {
			if (DDI_INTR_IS_MSI_OR_MSIX(type)) {
				if (APIC_IS_MSI_OR_MSIX_INDEX(irqp->
				    airq_mps_intr_index))
					return (irqp);
			} else
				return (irqp);
		}
	}
	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
	return (NULL);
}


/*
 * This function returns the pending bit of the irqp.
 * It comes either from the IRR register of the local APIC or from the RDT
 * entry of the I/O APIC.
 * For the IRR check to work, it has to be done on the CPU the interrupt
 * is bound to.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
	int			bit, index, irr, pending;
	int			intin_no;
	volatile int32_t	*ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
	    "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
	    type));

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

	index = irqp->airq_vector / 32;
	bit = irqp->airq_vector % 32;
	irr = apicadr[APIC_IRR_REG + index];

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
		/* check I/O APIC for fixed interrupt */
		intin_no = irqp->airq_intin_no;
		ioapic = apicioadr[irqp->airq_ioapicindex];
		pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) &
		    AV_PENDING) ? 1 : 0;
	}
	return (pending);
}


/*
 * This function will clear the mask for the interrupt on the I/O APIC
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
	int			intin_no;
	int			iflag;
	int32_t			rdt_entry;
	volatile int32_t	*ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
	    (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* clear mask */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    ((~AV_MASK) & rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function will mask the interrupt on the I/O APIC
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
	int			intin_no;
	volatile int32_t	*ioapic;
	int			iflag;
	int32_t			rdt_entry;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();

	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* mask it */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    (AV_MASK | rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function allocates "count" vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_vectors(dev_info_t *dip, int inum, int count, int pri, int type,
    int behavior)
{
	int		rcount, i;
	uchar_t		start, irqno, cpu;
	major_t		major;
	apic_irq_t	*irqptr;

	/* only supports MSI at the moment, will add MSI-X support later */
	if (type != DDI_INTR_TYPE_MSI)
		return (0);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: dip=0x%p type=%d "
	    "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
	    (void *)dip, type, inum, pri, count, behavior));

	if (count > 1) {
		if (behavior == DDI_INTR_ALLOC_STRICT &&
		    (apic_multi_msi_enable == 0 || count > apic_multi_msi_max))
			return (0);

		if (apic_multi_msi_enable == 0)
			count = 1;
		else if (count > apic_multi_msi_max)
			count = apic_multi_msi_max;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT))
		return (0);

	/* if not ISP2, then round it down */
	if (!ISP2(rcount))
		rcount = 1 << (highbit(rcount) - 1);

	mutex_enter(&airq_mutex);

	for (start = 0; rcount > 0; rcount >>= 1) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
		    behavior == DDI_INTR_ALLOC_STRICT)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0;
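	/*
	 * Set up an irq table entry for each of the rcount vectors just
	 * found: vector start + i is paired with origirq inum + i, and
	 * every entry is bound to the CPU chosen for the first one.
	 */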
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef	DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_vector = start + i;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSI_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0)	/* they are all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);
	return (rcount);
}


void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int		i;
	apic_irq_t	*irqptr;
	struct intrspec	ispec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
	    "count: %x pri: %x type: %x\n",
	    (void *)dip, inum, count, pri, type));

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return;

	for (i = 0; i < count; i++) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
		    "pri=0x%x count=0x%x\n", inum, pri, count));
		ispec.intrspec_vec = inum + i;
		ispec.intrspec_pri = pri;
		if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
			DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
			    "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
			    "failed\n", (void *)dip, inum, pri));
			continue;
		}
		irqptr->airq_mps_intr_index = FREE_INDEX;
		apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
	}
}


/*
 * check whether the system supports MSI
 *
 * If PCI-E capability is found, then this must be a PCI-E system.
 * Since MSI is required for a PCI-E system, PSM_SUCCESS is returned
 * to indicate that this system supports MSI.
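 *
 * If none of the first-level children of the root node has a "pciex"
 * device_type, PSM_FAILURE is returned and the caller treats MSI as
 * unsupported.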
 */
int
apic_check_msi_support()
{
	dev_info_t *cdip;
	char dev_type[16];
	int dev_len;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support:\n"));

	/*
	 * check whether the first level children of root_node have
	 * PCI-E capability
	 */
	for (cdip = ddi_get_child(ddi_root_node()); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: cdip: 0x%p,"
		    " driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
		    ddi_driver_name(cdip), ddi_binding_name(cdip),
		    ddi_node_name(cdip)));
		dev_len = sizeof (dev_type);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "device_type", (caddr_t)dev_type, &dev_len)
		    != DDI_PROP_SUCCESS)
			continue;
		if (strcmp(dev_type, "pciex") == 0)
			return (PSM_SUCCESS);
	}

	/* MSI is not supported on this system */
	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
	    "device_type found\n"));
	return (PSM_FAILURE);
}

int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
	struct autovec	*av_dev;
	uchar_t		irqno;
	int		i;
	apic_irq_t	*irq_p;

	/* Sanity check the vector/irq argument. */
	ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

	mutex_enter(&airq_mutex);

	/*
	 * Convert the vecirq arg to an irq using vector_to_irq table
	 * if the arg is a vector.  Pass thru if already an irq.
	 */
	if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
	    PSMGI_INTRBY_VEC)
		irqno = apic_vector_to_irq[vecirq];
	else
		irqno = vecirq;

	irq_p = apic_irq_table[irqno];

	if ((irq_p == NULL) ||
	    (irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
	    (irq_p->airq_temp_cpu == IRQ_UNINIT)) {
		mutex_exit(&airq_mutex);
		return (PSM_FAILURE);
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

		/* Get the (temp) cpu from apic_irq table, indexed by irq. */
		intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR) {
		intr_params_p->avgi_vector = irq_p->airq_vector;
	}

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS)) {
		/* Get number of devices from apic_irq table shared field. */
		intr_params_p->avgi_num_devs = irq_p->airq_share;
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip.  Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			intr_params_p->avgi_num_devs =
			    MIN(intr_params_p->avgi_num_devs, i);
		}

		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0)
			intr_params_p->avgi_dip_list = NULL;

		else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_SLEEP);

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
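			 * The array was sized from avgi_num_devs above, i.e.
			 * the smaller of the irq's share count and the number
			 * of autovect entries that have both a handler and a
			 * non-NULL dip.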
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips.  These will be
			 * ignored.
			 */
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
		}
	}

	mutex_exit(&airq_mutex);

	return (PSM_SUCCESS);
}

/*
 * apic_pci_msi_unconfigure:
 *
 * This and the next two interfaces are copied from pci_intr_lib.c.
 * Do ensure that the two files stay in sync.
 * These needed to be copied over here to avoid a deadlock situation on
 * certain mp systems that use MSI interrupts.
 *
 * IMPORTANT regarding the next three interfaces:
 * i) they are called only for MSI/X interrupts.
 * ii) they are called with interrupts disabled, and must not block
 */
int
apic_pci_msi_unconfigure(dev_info_t *rdip, int type, int inum)
{
	ushort_t	msi_ctrl;
	int		cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	if (handle == NULL)
		return (PSM_FAILURE);

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		msi_ctrl &= (~PCI_MSI_MME_MASK);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
		pci_config_put32(handle, cap_ptr + PCI_MSI_ADDR_OFFSET, 0);

		if (msi_ctrl & PCI_MSI_64BIT_MASK) {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, 0);
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, 0);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, 0);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(rdip);

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		/* Reset the "data" and "addr" bits */
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), 0);
		ddi_put64(msix_p->msix_tbl_hdl, (uint64_t *)off, 0);
	}

	return (PSM_SUCCESS);
}


/*
 * apic_pci_msi_enable_mode:
 */
int
apic_pci_msi_enable_mode(dev_info_t *rdip, int type, int inum)
{
	ushort_t	msi_ctrl;
	int		cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	if (handle == NULL)
		return (PSM_FAILURE);

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
			return (PSM_SUCCESS);

		msi_ctrl |= PCI_MSI_ENABLE_BIT;
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		ddi_intr_msix_t	*msix_p;

		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

		if (msi_ctrl & PCI_MSIX_ENABLE_BIT)
			return (PSM_SUCCESS);

		msi_ctrl |= PCI_MSIX_ENABLE_BIT;
		pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL, msi_ctrl);

		msix_p = i_ddi_get_msix(rdip);

		/* Offset into "inum"th entry in the MSI-X table & clear mask */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, 0);
	}

	return (PSM_SUCCESS);
}
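
/*
 * Note the asymmetry in the MSI-X case: apic_pci_msi_enable_mode() above
 * sets the function-wide MSI-X enable bit and unmasks the "inum"th table
 * entry only if MSI-X was not already enabled, while
 * apic_pci_msi_disable_mode() below leaves the enable bit set and simply
 * masks that individual table entry.
 */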

/*
 * apic_pci_msi_disable_mode:
 */
int
apic_pci_msi_disable_mode(dev_info_t *rdip, int type, int inum)
{
	ushort_t	msi_ctrl;
	int		cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	if (handle == NULL)
		return (PSM_FAILURE);

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		if (!(msi_ctrl & PCI_MSI_ENABLE_BIT))
			return (PSM_SUCCESS);

		msi_ctrl &= ~PCI_MSI_ENABLE_BIT;	/* MSI disable */
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		ddi_intr_msix_t	*msix_p;

		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

		if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT))
			return (PSM_SUCCESS);

		msix_p = i_ddi_get_msix(rdip);

		/* Offset into "inum"th entry in the MSI-X table & mask it */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, 0x1);
	}

	return (PSM_SUCCESS);
}

/*
 * This function provides the external interface to the nexus for all
 * functionality related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if the return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int		cap, ret;
	int		count_vec;
	int		cpu;
	int		old_priority;
	int		new_priority;
	int		iflag;
	apic_irq_t	*irqp;
	struct intrspec *ispec, intr_spec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
	    "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level and
		 * mask off the MSI/X bits in hdlp->ih_type if it is not
		 * supported before returning.  If MSI/X is supported,
		 * leave ih_type unchanged and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() first to check whether
			 * msi is supported
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1)
			*result = hdlp->ih_type;
		else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		*result = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, hdlp->ih_pri, hdlp->ih_type,
		    (int)(uintptr_t)hdlp->ih_scratch2);
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		*result = apic_navail_vector(dip, hdlp->ih_pri);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
		break;
	case PSM_INTR_OP_GET_PENDING:
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = apic_get_pending(irqp, hdlp->ih_type);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
		if (irqp == NULL)
			return (PSM_FAILURE);
		apic_clear_mask(irqp);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		apic_set_mask(irqp);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = irqp->airq_share ? 1 : 0;
		break;
	case PSM_INTR_OP_SET_PRI:
		old_priority = hdlp->ih_pri;	/* save old value */
		new_priority = *(int *)result;	/* try the new value */

		/* First, check whether "hdlp->ih_scratch1" vectors exist */
		if (apic_navail_vector(dip, new_priority) < hdlp->ih_scratch1)
			return (PSM_FAILURE);

		/* Now allocate the vectors */
		count_vec = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, new_priority, hdlp->ih_type,
		    DDI_INTR_ALLOC_STRICT);

		/* Did we get the new vectors? */
		if (!count_vec)
			return (PSM_FAILURE);

		/* Finally, free the previously allocated vectors */
		apic_free_vectors(dip, hdlp->ih_inum, count_vec,
		    old_priority, hdlp->ih_type);
		hdlp->ih_pri = new_priority;	/* set the new value */
		break;
	case PSM_INTR_OP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		cpu = (int)(intptr_t)hdlp->ih_private;

		if (!apic_cpu_in_range(cpu)) {
			*result = EINVAL;
			return (PSM_FAILURE);
		}

		/* Convert the vector to the irq using vector_to_irq table. */
		mutex_enter(&airq_mutex);
		irqp = apic_irq_table[apic_vector_to_irq[hdlp->ih_vector]];
		mutex_exit(&airq_mutex);

		if (irqp == NULL) {
			*result = ENXIO;
			return (PSM_FAILURE);
		}

		iflag = intr_clear();
		lock_set(&apic_ioapic_lock);

		ret = apic_rebind_all(irqp, cpu);

		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);

		if (ret) {
			*result = EIO;
			return (PSM_FAILURE);
		}
		*result = 0;
		break;
	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
		 */
		if (apic_get_vector_intr_info(
		    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}