/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * apic_introp.c:
 *	Has code for Advanced DDI interrupt framework support.
 */

#include <sys/cpuvar.h>
#include <sys/psm.h>
#include "apic.h"
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>

extern struct av_head autovect[];

/*
 * Local Function Prototypes
 */
int	apic_pci_msi_enable_vector(dev_info_t *, int, int,
	    int, int, int);
apic_irq_t *apic_find_irq(dev_info_t *, struct intrspec *, int);
static int apic_get_pending(apic_irq_t *, int);
static void apic_clear_mask(apic_irq_t *);
static void apic_set_mask(apic_irq_t *);
static uchar_t apic_find_multi_vectors(int, int);
int	apic_navail_vector(dev_info_t *, int);
int	apic_alloc_vectors(dev_info_t *, int, int, int, int, int);
void	apic_free_vectors(dev_info_t *, int, int, int, int);
int	apic_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
	    psm_intr_op_t, int *);

extern int	intr_clear(void);
extern void	intr_restore(uint_t);
extern uchar_t	apic_bind_intr(dev_info_t *, int, uchar_t, uchar_t);
extern int	apic_allocate_irq(int);
extern int	apic_introp_xlate(dev_info_t *, struct intrspec *, int);
extern int	apic_rebind_all(apic_irq_t *irq_ptr, int bind_cpu, int safe);
extern boolean_t apic_cpu_in_range(int cpu);

/*
 * MSI support flag:
 * reflects whether MSI is supported at APIC level
 * it can also be patched through /etc/system
 *
 *	0 = default value - don't know and need to call
 *	    apic_check_msi_support() to find out then set it accordingly
 *	1 = supported
 *	-1 = not supported
 */
int	apic_support_msi = 0;

/* Multiple vector support for MSI */
int	apic_multi_msi_enable = 1;
int	apic_multi_msi_max = 2;

extern uchar_t	apic_ipltopri[MAXIPL+1];
extern uchar_t	apic_vector_to_irq[APIC_MAX_VECTOR+1];
extern int	apic_max_device_irq;
extern int	apic_min_device_irq;
extern apic_irq_t *apic_irq_table[APIC_MAX_VECTOR+1];
extern volatile uint32_t *apicadr;	/* virtual addr of local APIC */
extern volatile int32_t *apicioadr[MAX_IO_APIC];
extern lock_t	apic_ioapic_lock;
extern kmutex_t	airq_mutex;
extern apic_cpus_info_t	*apic_cpus;
extern int	apic_first_avail_irq;


/*
 * apic_pci_msi_enable_vector:
 *	Set the address/data fields in the MSI/X capability structure
 *	XXX: MSI-X support
 */
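/*
 * For illustration (assuming the standard x86 MSI encoding that the
 * MSI_ADDR_* and MSI_DATA_* constants describe): programming vector 0x60
 * for the CPU whose local APIC ID is 2 would produce
 * msi_addr = 0xFEE02000 (0xFEE00000 header | destination ID 2 << 12,
 * fixed redirection hint, physical destination mode) and
 * msi_data = 0x0060 (edge trigger mode | vector).
 */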
/* ARGSUSED */
int
apic_pci_msi_enable_vector(dev_info_t *dip, int type, int inum, int vector,
    int count, int target_apic_id)
{
	uint64_t msi_addr, msi_data;
	ushort_t msi_ctrl;
	int cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(dip);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
	    ddi_driver_name(dip), inum, vector, target_apic_id));

	if (handle == NULL)
		return (PSM_FAILURE);

	/* MSI Address */
	msi_addr = (MSI_ADDR_HDR | (target_apic_id << MSI_ADDR_DEST_SHIFT));
	msi_addr |= ((MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = ((MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) | vector);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

		/* Set the bits to inform how many MSIs are enabled */
		msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

		pci_config_put32(handle,
		    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);

		if (msi_ctrl & PCI_MSI_64BIT_MASK) {
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, msi_data);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, msi_data);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t off;
		ddi_intr_msix_t *msix_p = i_ddi_get_msix(dip);

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
		ddi_put64(msix_p->msix_tbl_hdl,
		    (uint64_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
	}

	return (PSM_SUCCESS);
}


/*
 * This function returns the number of interrupt vectors available for
 * the given priority (pri).  dip is not used at this moment; if we
 * really don't need it, it will be removed.
 */
/*ARGSUSED*/
int
apic_navail_vector(dev_info_t *dip, int pri)
{
	int lowest, highest, i, navail, count;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
	    (void *)dip, pri));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = count = 0;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		count = 0;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if (APIC_CHECK_RESERVE_VECTORS(i))
				break;
			count++;
			i++;
		}
		if (count > navail)
			navail = count;
	}
	return (navail);
}

/*
 * Finds "count" contiguous MSI vectors starting at the proper alignment
 * at "pri".
 * The caller must ensure that count is a power of 2 and is not less than 1.
 */
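/*
 * For example, count = 4 implies msibits = 3 below: the low two bits of the
 * MSI message data select one of the four interrupts, so the block of
 * vectors must start on a multiple of 4.  A candidate start vector of 0x66
 * is therefore rounded up to (0x66 + 3) & ~3 == 0x68 before the contiguity
 * check.
 */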
static uchar_t
apic_find_multi_vectors(int pri, int count)
{
	int lowest, highest, i, navail, start, msibits;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
	    pri, count));

	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
	navail = 0;

	/*
	 * msibits is the no. of lower order message data bits for the
	 * allocated MSI vectors and is used to calculate the aligned
	 * starting vector
	 */
	msibits = count - 1;

	/* It has to be contiguous */
	for (i = lowest; i < highest; i++) {
		navail = 0;

		/*
		 * starting vector has to be aligned accordingly for
		 * multiple MSIs
		 */
		if (msibits)
			i = (i + msibits) & ~msibits;
		start = i;
		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
		    (i < highest)) {
			if (APIC_CHECK_RESERVE_VECTORS(i))
				break;
			navail++;
			if (navail >= count)
				return (start);
			i++;
		}
	}
	return (0);
}

/*
 * Finds the apic_irq_t associated with the dip, ispec and type.
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
	apic_irq_t *irqp;
	int i;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
	    "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
	    ispec->intrspec_pri, type));

	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
		if ((irqp = apic_irq_table[i]) == NULL)
			continue;
		if ((irqp->airq_dip == dip) &&
		    (irqp->airq_origirq == ispec->intrspec_vec) &&
		    (irqp->airq_ipl == ispec->intrspec_pri)) {
			if (DDI_INTR_IS_MSI_OR_MSIX(type)) {
				if (APIC_IS_MSI_OR_MSIX_INDEX(irqp->
				    airq_mps_intr_index))
					return (irqp);
			} else
				return (irqp);
		}
	}
	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
	return (NULL);
}


/*
 * This function returns the pending bit for the given irqp.
 * The state comes either from the IRR register of the local APIC or from
 * the RDT entry of the I/O APIC.
 * To read the IRR, we need to be running on the CPU to which the interrupt
 * is bound.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
	int bit, index, irr, pending;
	int intin_no;
	volatile int32_t *ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
	    "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
	    type));

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

	index = irqp->airq_vector / 32;
	bit = irqp->airq_vector % 32;
	irr = apicadr[APIC_IRR_REG + index];

	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
		/* check I/O APIC for fixed interrupt */
		intin_no = irqp->airq_intin_no;
		ioapic = apicioadr[irqp->airq_ioapicindex];
		pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no) &
		    AV_PENDING) ? 1 : 0;
	}
	return (pending);
}


/*
 * This function will clear the mask for the interrupt on the I/O APIC
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
	int intin_no;
	int iflag;
	int32_t rdt_entry;
	volatile int32_t *ioapic;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
	    (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* clear mask */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    ((~AV_MASK) & rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function will mask the interrupt on the I/O APIC
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
	int intin_no;
	volatile int32_t *ioapic;
	int iflag;
	int32_t rdt_entry;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

	intin_no = irqp->airq_intin_no;
	ioapic = apicioadr[irqp->airq_ioapicindex];

	iflag = intr_clear();

	lock_set(&apic_ioapic_lock);

	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no);

	/* mask it */
	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(ioapic, intin_no,
	    (AV_MASK | rdt_entry));

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);
}


/*
 * This function allocates "count" vector(s) for the given "dip/pri/type"
 */
int
apic_alloc_vectors(dev_info_t *dip, int inum, int count, int pri, int type,
    int behavior)
{
	int rcount, i;
	uchar_t start, irqno, cpu;
	major_t major;
	apic_irq_t *irqptr;

	/* only supports MSI at the moment, will add MSI-X support later */
	if (type != DDI_INTR_TYPE_MSI)
		return (0);

	DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: dip=0x%p type=%d "
	    "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
	    (void *)dip, type, inum, pri, count, behavior));

	if (count > 1) {
		if (behavior == DDI_INTR_ALLOC_STRICT &&
		    (apic_multi_msi_enable == 0 || count > apic_multi_msi_max))
			return (0);

		if (apic_multi_msi_enable == 0)
			count = 1;
		else if (count > apic_multi_msi_max)
			count = apic_multi_msi_max;
	}

	if ((rcount = apic_navail_vector(dip, pri)) > count)
		rcount = count;
	else if (rcount == 0 || (rcount < count &&
	    behavior == DDI_INTR_ALLOC_STRICT))
		return (0);
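
	/*
	 * For example, a request for 3 vectors when at least 3 are free at
	 * this priority is rounded down below to 2, the largest power of
	 * two not exceeding the request; if no suitably aligned block of
	 * that size is found, the search loop keeps halving rcount until a
	 * block is found or rcount reaches 0, unless the allocation is
	 * DDI_INTR_ALLOC_STRICT (which gives up after the first attempt).
	 */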

	/* if not ISP2, then round it down */
	if (!ISP2(rcount))
		rcount = 1 << (highbit(rcount) - 1);

	mutex_enter(&airq_mutex);

	for (start = 0; rcount > 0; rcount >>= 1) {
		if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
		    behavior == DDI_INTR_ALLOC_STRICT)
			break;
	}

	if (start == 0) {
		/* no vector available */
		mutex_exit(&airq_mutex);
		return (0);
	}

	major = (dip != NULL) ? ddi_name_to_major(ddi_get_name(dip)) : 0;
	for (i = 0; i < rcount; i++) {
		if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
		    (uchar_t)-1) {
			mutex_exit(&airq_mutex);
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_allocate_irq failed\n"));
			return (i);
		}
		apic_max_device_irq = max(irqno, apic_max_device_irq);
		apic_min_device_irq = min(irqno, apic_min_device_irq);
		irqptr = apic_irq_table[irqno];
#ifdef	DEBUG
		if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
			DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: "
			    "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
#endif
		apic_vector_to_irq[start + i] = (uchar_t)irqno;

		irqptr->airq_vector = (uchar_t)(start + i);
		irqptr->airq_ioapicindex = (uchar_t)inum;	/* start */
		irqptr->airq_intin_no = (uchar_t)rcount;
		irqptr->airq_ipl = pri;
		irqptr->airq_vector = start + i;
		irqptr->airq_origirq = (uchar_t)(inum + i);
		irqptr->airq_share_id = 0;
		irqptr->airq_mps_intr_index = MSI_INDEX;
		irqptr->airq_dip = dip;
		irqptr->airq_major = major;
		if (i == 0) /* they all bound to the same cpu */
			cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
			    0xff, 0xff);
		else
			irqptr->airq_cpu = cpu;
		DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_vectors: irq=0x%x "
		    "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
		    (void *)irqptr->airq_dip, irqptr->airq_vector,
		    irqptr->airq_origirq, pri));
	}
	mutex_exit(&airq_mutex);
	return (rcount);
}


void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
	int i;
	apic_irq_t *irqptr;
	struct intrspec ispec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
	    "count: %x pri: %x type: %x\n",
	    (void *)dip, inum, count, pri, type));

	/* for MSI/X only */
	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
		return;

	for (i = 0; i < count; i++) {
		DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
		    "pri=0x%x count=0x%x\n", inum, pri, count));
		ispec.intrspec_vec = inum + i;
		ispec.intrspec_pri = pri;
		if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
			DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
			    "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
			    "failed\n", (void *)dip, inum, pri));
			continue;
		}
		irqptr->airq_mps_intr_index = FREE_INDEX;
		apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
	}
}


/*
 * check whether the system supports MSI
 *
 * If PCI-E capability is found, then this must be a PCI-E system.
 * Since MSI is required for PCI-E systems, return PSM_SUCCESS
 * to indicate that this system supports MSI.
 */
int
apic_check_msi_support()
{
	dev_info_t *cdip;
	char dev_type[16];
	int dev_len;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support:\n"));

	/*
	 * check whether the first level children of root_node have
	 * PCI-E capability
	 */
	for (cdip = ddi_get_child(ddi_root_node()); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: cdip: 0x%p,"
		    " driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
		    ddi_driver_name(cdip), ddi_binding_name(cdip),
		    ddi_node_name(cdip)));
		dev_len = sizeof (dev_type);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "device_type", (caddr_t)dev_type, &dev_len)
		    != DDI_PROP_SUCCESS)
			continue;
		if (strcmp(dev_type, "pciex") == 0)
			return (PSM_SUCCESS);
	}

	/* MSI is not supported on this system */
	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
	    "device_type found\n"));
	return (PSM_FAILURE);
}

int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
	struct autovec *av_dev;
	uchar_t irqno;
	int i;
	apic_irq_t *irq_p;

	/* Sanity check the vector/irq argument. */
	ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

	mutex_enter(&airq_mutex);

	/*
	 * Convert the vecirq arg to an irq using vector_to_irq table
	 * if the arg is a vector.  Pass thru if already an irq.
	 */
	if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
	    PSMGI_INTRBY_VEC)
		irqno = apic_vector_to_irq[vecirq];
	else
		irqno = vecirq;

	irq_p = apic_irq_table[irqno];

	if ((irq_p == NULL) ||
	    (irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
	    (irq_p->airq_temp_cpu == IRQ_UNINIT)) {
		mutex_exit(&airq_mutex);
		return (PSM_FAILURE);
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

		/* Get the (temp) cpu from apic_irq table, indexed by irq. */
		intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;

		/* Return user bound info for intrd. */
		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
		}
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR) {
		intr_params_p->avgi_vector = irq_p->airq_vector;
	}

	if (intr_params_p->avgi_req_flags &
	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS)) {
		/* Get number of devices from apic_irq table shared field. */
		intr_params_p->avgi_num_devs = irq_p->airq_share;
	}

	if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

		intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

		/* Some devices have NULL dip.  Don't count these. */
		if (intr_params_p->avgi_num_devs > 0) {
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					i++;
			intr_params_p->avgi_num_devs =
			    MIN(intr_params_p->avgi_num_devs, i);
		}

		/* There are no viable dips to return. */
		if (intr_params_p->avgi_num_devs == 0)
			intr_params_p->avgi_dip_list = NULL;

		else {	/* Return list of dips */

			/* Allocate space in array for that number of devs. */
			intr_params_p->avgi_dip_list = kmem_zalloc(
			    intr_params_p->avgi_num_devs *
			    sizeof (dev_info_t *),
			    KM_SLEEP);

			/*
			 * Loop through the device list of the autovec table
			 * filling in the dip array.
			 *
			 * Note that the autovect table may have some special
			 * entries which contain NULL dips.  These will be
			 * ignored.
			 */
			for (i = 0, av_dev = autovect[irqno].avh_link;
			    av_dev; av_dev = av_dev->av_link)
				if (av_dev->av_vector && av_dev->av_dip)
					intr_params_p->avgi_dip_list[i++] =
					    av_dev->av_dip;
		}
	}

	mutex_exit(&airq_mutex);

	return (PSM_SUCCESS);
}

/*
 * apic_pci_msi_unconfigure:
 *
 * This and the next two interfaces are copied from pci_intr_lib.c
 * Do ensure that these two files stay in sync.
 * These needed to be copied over here to avoid a deadlock situation on
 * certain mp systems that use MSI interrupts.
 *
 * IMPORTANT regarding the next three interfaces:
 * i) they are called only for MSI/X interrupts.
 * ii) they are called with interrupts disabled, and must not block
 */
int
apic_pci_msi_unconfigure(dev_info_t *rdip, int type, int inum)
{
	ushort_t msi_ctrl;
	int cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	if (handle == NULL)
		return (PSM_FAILURE);

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		msi_ctrl &= (~PCI_MSI_MME_MASK);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
		pci_config_put32(handle, cap_ptr + PCI_MSI_ADDR_OFFSET, 0);

		if (msi_ctrl & PCI_MSI_64BIT_MASK) {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, 0);
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, 0);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, 0);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t off;
		ddi_intr_msix_t *msix_p = i_ddi_get_msix(rdip);

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		/* Reset the "data" and "addr" bits */
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), 0);
		ddi_put64(msix_p->msix_tbl_hdl, (uint64_t *)off, 0);
	}

	return (PSM_SUCCESS);
}


/*
 * apic_pci_msi_enable_mode:
 */
int
apic_pci_msi_enable_mode(dev_info_t *rdip, int type, int inum)
{
	ushort_t msi_ctrl;
	int cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	if (handle == NULL)
		return (PSM_FAILURE);

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
			return (PSM_SUCCESS);

		msi_ctrl |= PCI_MSI_ENABLE_BIT;
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t off;
		ddi_intr_msix_t *msix_p;

		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

		if (msi_ctrl & PCI_MSIX_ENABLE_BIT)
			return (PSM_SUCCESS);

		msi_ctrl |= PCI_MSIX_ENABLE_BIT;
		pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL, msi_ctrl);

		msix_p = i_ddi_get_msix(rdip);

		/* Offset into "inum"th entry in the MSI-X table & clear mask */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, 0);
	}

	return (PSM_SUCCESS);
}

/*
 * apic_pci_msi_disable_mode:
 */
int
apic_pci_msi_disable_mode(dev_info_t *rdip, int type, int inum)
{
	ushort_t msi_ctrl;
	int cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(rdip);

	if (handle == NULL)
		return (PSM_FAILURE);

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		if (!(msi_ctrl & PCI_MSI_ENABLE_BIT))
			return (PSM_SUCCESS);

		msi_ctrl &= ~PCI_MSI_ENABLE_BIT;	/* MSI disable */
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t off;
		ddi_intr_msix_t *msix_p;

		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

		if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT))
			return (PSM_SUCCESS);

		msix_p = i_ddi_get_msix(rdip);

		/* Offset into "inum"th entry in the MSI-X table & mask it */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, 0x1);
	}

	return (PSM_SUCCESS);
}

/*
 * This function provides an external interface to the nexus for all
 * functionality related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int cap, ret;
	int count_vec;
	int cpu;
	int old_priority;
	int new_priority;
	apic_irq_t *irqp;
	struct intrspec *ispec, intr_spec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
	    "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check whether MSI/X is supported at the APIC level and,
		 * if it is not, mask off the MSI/X bits in hdlp->ih_type
		 * before returning.  If MSI/X is supported, leave ih_type
		 * unchanged and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1)
			*result = hdlp->ih_type;
		else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		*result = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, hdlp->ih_pri, hdlp->ih_type,
		    (int)(uintptr_t)hdlp->ih_scratch2);
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		*result = apic_navail_vector(dip, hdlp->ih_pri);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
		break;
	case PSM_INTR_OP_GET_PENDING:
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = apic_get_pending(irqp, hdlp->ih_type);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
		if (irqp == NULL)
			return (PSM_FAILURE);
		apic_clear_mask(irqp);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		apic_set_mask(irqp);
		break;
	case PSM_INTR_OP_GET_CAP:
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = irqp->airq_share ? 1 : 0;
		break;
	case PSM_INTR_OP_SET_PRI:
		old_priority = hdlp->ih_pri;	/* save old value */
		new_priority = *(int *)result;	/* try the new value */

		/* First, check whether "hdlp->ih_scratch1" vectors exist */
		if (apic_navail_vector(dip, new_priority) < hdlp->ih_scratch1)
			return (PSM_FAILURE);

		/* Now allocate the vectors */
		count_vec = apic_alloc_vectors(dip, hdlp->ih_inum,
		    hdlp->ih_scratch1, new_priority, hdlp->ih_type,
		    DDI_INTR_ALLOC_STRICT);

		/* Did we get the new vectors? */
		if (!count_vec)
			return (PSM_FAILURE);

		/* Finally, free the previously allocated vectors */
		apic_free_vectors(dip, hdlp->ih_inum, count_vec,
		    old_priority, hdlp->ih_type);
		hdlp->ih_pri = new_priority;	/* set the new value */
		break;
	case PSM_INTR_OP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		cpu = (int)(intptr_t)hdlp->ih_private;

		if (!apic_cpu_in_range(cpu)) {
			*result = EINVAL;
			return (PSM_FAILURE);
		}

		mutex_enter(&airq_mutex);

		/* Convert the vector to the irq using vector_to_irq table. */
		irqp = apic_irq_table[apic_vector_to_irq[hdlp->ih_vector]];
		if (irqp == NULL) {
			mutex_exit(&airq_mutex);
			*result = ENXIO;
			return (PSM_FAILURE);
		}
		ret = apic_rebind_all(irqp, cpu, 1);
		mutex_exit(&airq_mutex);
		if (ret) {
			*result = EIO;
			return (PSM_FAILURE);
		}
		*result = 0;
		break;
	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to an apic_get_intr_t.
		 */
		if (apic_get_vector_intr_info(
		    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}