/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * apic_introp.c:
 *	Has code for Advanced DDI interrupt framework support.
 */

#include <sys/cpuvar.h>
#include <sys/psm.h>
#include <sys/archsystm.h>
#include <sys/apic.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/mach_intr.h>
#include <sys/sysmacros.h>
#include <sys/trap.h>
#include <sys/pci.h>
#include <sys/pci_intr_lib.h>
#include <sys/apic_common.h>

extern struct av_head autovect[];

/*
 * Local Function Prototypes
 */
apic_irq_t *apic_find_irq(dev_info_t *, struct intrspec *, int);

/*
 * apic_pci_msi_enable_vector:
 *	Set the address/data fields in the MSI/X capability structure
 *	XXX: MSI-X support
 */
/* ARGSUSED */
void
apic_pci_msi_enable_vector(apic_irq_t *irq_ptr, int type, int inum, int vector,
    int count, int target_apic_id)
{
        uint64_t                msi_addr, msi_data;
        ushort_t                msi_ctrl;
        dev_info_t              *dip = irq_ptr->airq_dip;
        int                     cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
        ddi_acc_handle_t        handle = i_ddi_get_pci_config_handle(dip);
        msi_regs_t              msi_regs;
        int                     irqno, i;
        void                    *intrmap_tbl[PCI_MSI_MAX_INTRS];

        DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
            "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
            ddi_driver_name(dip), inum, vector, target_apic_id));

        ASSERT((handle != NULL) && (cap_ptr != 0));

        msi_regs.mr_data = vector;
        msi_regs.mr_addr = target_apic_id;

        intrmap_tbl[0] = irq_ptr->airq_intrmap_private;
        apic_vt_ops->apic_intrmap_alloc_entry(intrmap_tbl, dip, type,
            count, 0xff);
        for (i = 0; i < count; i++) {
                irqno = apic_vector_to_irq[vector + i];
                apic_irq_table[irqno]->airq_intrmap_private =
                    intrmap_tbl[i];
        }

        apic_vt_ops->apic_intrmap_map_entry(irq_ptr->airq_intrmap_private,
            (void *)&msi_regs, type, count);
        apic_vt_ops->apic_intrmap_record_msi(irq_ptr->airq_intrmap_private,
            &msi_regs);

        /* MSI Address */
        msi_addr = msi_regs.mr_addr;

        /* MSI Data: MSI is edge triggered according to spec */
        msi_data = msi_regs.mr_data;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
            "data=0x%lx\n", (long)msi_addr, (long)msi_data));

        if (type == DDI_INTR_TYPE_MSI) {
                msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

                /* Set the bits to inform how many MSIs are enabled */
                msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
                pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

                /*
                 * Only set vector if not on hypervisor
                 */
                pci_config_put32(handle,
                    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);

                if (msi_ctrl & PCI_MSI_64BIT_MASK) {
                        pci_config_put32(handle,
                            cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
                        pci_config_put16(handle,
                            cap_ptr + PCI_MSI_64BIT_DATA, msi_data);
                } else {
                        pci_config_put16(handle,
                            cap_ptr + PCI_MSI_32BIT_DATA, msi_data);
                }

        } else if (type == DDI_INTR_TYPE_MSIX) {
                uintptr_t       off;
                ddi_intr_msix_t *msix_p = i_ddi_get_msix(dip);

                ASSERT(msix_p != NULL);

                /* Offset into the "inum"th entry in the MSI-X table */
                off = (uintptr_t)msix_p->msix_tbl_addr +
                    (inum * PCI_MSIX_VECTOR_SIZE);

                ddi_put32(msix_p->msix_tbl_hdl,
                    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
                ddi_put64(msix_p->msix_tbl_hdl,
                    (uint64_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
        }
}

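/*
 * Illustrative note (not part of the original source): on x86, the MSI
 * address/data pair programmed above conventionally follows the local
 * APIC MSI format, where the address selects the destination and the
 * data carries the vector. A minimal sketch of that encoding, assuming
 * no interrupt remapping is active:
 *
 *	msi_addr = 0xFEE00000 | (target_apic_id << 12);   (destination ID)
 *	msi_data = vector;                                (edge, fixed)
 *
 * Here the actual values come back through apic_vt_ops, which may
 * rewrite them to reference an IOMMU interrupt-remapping table entry.
 */
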
/*
 * Returns the number of vectors available for the given priority.
 * dip is not used at this moment; it will be removed if it turns out
 * to be unnecessary.
 */
/*ARGSUSED*/
int
apic_navail_vector(dev_info_t *dip, int pri)
{
        int     lowest, highest, i, navail, count;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
            (void *)dip, pri));

        highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
        lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
        navail = count = 0;

        if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
                lowest -= APIC_VECTOR_PER_IPL;

        /* It has to be contiguous */
        for (i = lowest; i <= highest; i++) {
                count = 0;
                while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
                    (i <= highest)) {
                        if (APIC_CHECK_RESERVE_VECTORS(i))
                                break;
                        count++;
                        i++;
                }
                if (count > navail)
                        navail = count;
        }
        return (navail);
}

/*
 * Finds "count" contiguous MSI vectors starting at the proper alignment
 * at "pri".
 * The caller must ensure that count is a power of 2 and at least 1.
 */
uchar_t
apic_find_multi_vectors(int pri, int count)
{
        int     lowest, highest, i, navail, start, msibits;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
            pri, count));

        highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
        lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
        navail = 0;

        if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
                lowest -= APIC_VECTOR_PER_IPL;

        /*
         * msibits is the number of lower order message data bits for the
         * allocated MSI vectors and is used to calculate the aligned
         * starting vector
         */
        msibits = count - 1;

        /* It has to be contiguous */
        for (i = lowest; i <= highest; i++) {
                navail = 0;

                /*
                 * starting vector has to be aligned accordingly for
                 * multiple MSIs
                 */
                if (msibits)
                        i = (i + msibits) & ~msibits;
                start = i;
                while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
                    (i <= highest)) {
                        if (APIC_CHECK_RESERVE_VECTORS(i))
                                break;
                        navail++;
                        if (navail >= count)
                                return (start);
                        i++;
                }
        }
        return (0);
}

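/*
 * Illustrative example (not part of the original source): with multiple
 * MSI, the device ORs the low log2(count) bits of its message number
 * into the message data value, so the base vector must be aligned on a
 * count-vector boundary. For count = 4, msibits = 3 and each candidate
 * start is rounded up with i = (i + 3) & ~3; a search reaching vector
 * 0x62 would thus first try 0x64, and a successful call returns 0x64 so
 * that the device's messages land within the contiguous range 0x64-0x67.
 */
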
/*
 * Finds the apic_irq_t associated with the dip, ispec and type.
 */
apic_irq_t *
apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
{
        apic_irq_t      *irqp;
        int i;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
            "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
            ispec->intrspec_pri, type));

        for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
                for (irqp = apic_irq_table[i]; irqp; irqp = irqp->airq_next) {
                        if ((irqp->airq_dip == dip) &&
                            (irqp->airq_origirq == ispec->intrspec_vec) &&
                            (irqp->airq_ipl == ispec->intrspec_pri)) {
                                if (type == DDI_INTR_TYPE_MSI) {
                                        if (irqp->airq_mps_intr_index ==
                                            MSI_INDEX)
                                                return (irqp);
                                } else if (type == DDI_INTR_TYPE_MSIX) {
                                        if (irqp->airq_mps_intr_index ==
                                            MSIX_INDEX)
                                                return (irqp);
                                } else
                                        return (irqp);
                        }
                }
        }
        DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
        return (NULL);
}

/*
 * Returns the pending bit of the irqp. It comes either from the IRR
 * register of the local APIC or from the RDT entry of the I/O APIC.
 * For the IRR read to be meaningful, it must be done on the CPU the
 * interrupt is bound to.
 */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
        int     bit, index, irr, pending;
        int     intin_no;
        int     apic_ix;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
            "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
            type));

        /* need to get on the bound cpu */
        mutex_enter(&cpu_lock);
        affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

        index = irqp->airq_vector / 32;
        bit = irqp->airq_vector % 32;
        irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

        affinity_clear();
        mutex_exit(&cpu_lock);

        pending = (irr & (1 << bit)) ? 1 : 0;
        if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
                /* check I/O APIC for fixed interrupt */
                intin_no = irqp->airq_intin_no;
                apic_ix = irqp->airq_ioapicindex;
                pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix,
                    intin_no) & AV_PENDING) ? 1 : 0;
        }
        return (pending);
}

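/*
 * Illustrative example (not part of the original source): the local APIC
 * IRR is a 256-bit register exposed as eight 32-bit words, which is why
 * the vector is split by dividing by 32. For airq_vector = 0x83 (131):
 *
 *	index = 131 / 32 = 4	->	read APIC_IRR_REG + 4
 *	bit   = 131 % 32 = 3	->	test (irr & (1 << 3))
 */
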
/*
 * This function will clear the mask for the interrupt on the I/O APIC
 */
static void
apic_clear_mask(apic_irq_t *irqp)
{
        int             intin_no;
        ulong_t         iflag;
        int32_t         rdt_entry;
        int             apic_ix;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
            (void *)irqp));

        intin_no = irqp->airq_intin_no;
        apic_ix = irqp->airq_ioapicindex;

        iflag = intr_clear();
        lock_set(&apic_ioapic_lock);

        rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);

        /* clear mask */
        WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
            ((~AV_MASK) & rdt_entry));

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);
}

/*
 * This function will mask the interrupt on the I/O APIC
 */
static void
apic_set_mask(apic_irq_t *irqp)
{
        int             intin_no;
        int             apic_ix;
        ulong_t         iflag;
        int32_t         rdt_entry;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));

        intin_no = irqp->airq_intin_no;
        apic_ix = irqp->airq_ioapicindex;

        iflag = intr_clear();

        lock_set(&apic_ioapic_lock);

        rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);

        /* mask it */
        WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
            (AV_MASK | rdt_entry));

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);
}

/*
 * Free the "count" vectors allocated to dip starting at inum (MSI/X only).
 */
void
apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
{
        int i;
        apic_irq_t *irqptr;
        struct intrspec ispec;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
            "count: %x pri: %x type: %x\n",
            (void *)dip, inum, count, pri, type));

        /* for MSI/X only */
        if (!DDI_INTR_IS_MSI_OR_MSIX(type))
                return;

        for (i = 0; i < count; i++) {
                DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
                    "pri=0x%x count=0x%x\n", inum, pri, count));
                ispec.intrspec_vec = inum + i;
                ispec.intrspec_pri = pri;
                if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
                        DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
                            "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
                            "failed\n", (void *)dip, inum, pri));
                        continue;
                }
                irqptr->airq_mps_intr_index = FREE_INDEX;
                apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
        }
}

/*
 * apic_pci_msi_enable_mode:
 *	Enable the MSI/MSI-X capability of the device; for MSI-X, also
 *	unmask the "inum"th table entry.
 */
void
apic_pci_msi_enable_mode(dev_info_t *rdip, int type, int inum)
{
        ushort_t                msi_ctrl;
        int                     cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
        ddi_acc_handle_t        handle = i_ddi_get_pci_config_handle(rdip);

        ASSERT((handle != NULL) && (cap_ptr != 0));

        if (type == DDI_INTR_TYPE_MSI) {
                msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
                if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
                        return;

                msi_ctrl |= PCI_MSI_ENABLE_BIT;
                pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

        } else if (type == DDI_INTR_TYPE_MSIX) {
                uintptr_t       off;
                uint32_t        mask;
                ddi_intr_msix_t *msix_p;

                msix_p = i_ddi_get_msix(rdip);

                ASSERT(msix_p != NULL);

                /* Offset into "inum"th entry in the MSI-X table & clear mask */
                off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
                    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;

                mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);

                ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask & ~1));

                msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);

                if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
                        msi_ctrl |= PCI_MSIX_ENABLE_BIT;
                        pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
                            msi_ctrl);
                }
        }
}

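/*
 * Illustrative note (not part of the original source): each MSI-X table
 * entry is 16 bytes, which is what the offset arithmetic above assumes:
 *
 *	offset 0x0	Message Address (low 32 bits)
 *	offset 0x4	Message Address (high 32 bits)
 *	offset 0x8	Message Data
 *	offset 0xc	Vector Control (bit 0 = per-vector mask)
 *
 * Clearing bit 0 of Vector Control, as apic_pci_msi_enable_mode does,
 * unmasks the individual vector; the MSI-X enable bit in the message
 * control register then enables the function globally.
 */
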
/*
 * Rebind the interrupt identified by irqno to the given cpu. On failure,
 * sets *result to an errno value and returns PSM_FAILURE.
 */
static int
apic_set_cpu(int irqno, int cpu, int *result)
{
        apic_irq_t *irqp;
        ulong_t iflag;
        int ret;

        DDI_INTR_IMPLDBG((CE_CONT, "APIC_SET_CPU\n"));

        mutex_enter(&airq_mutex);
        irqp = apic_irq_table[irqno];
        mutex_exit(&airq_mutex);

        if (irqp == NULL) {
                *result = ENXIO;
                return (PSM_FAILURE);
        }

        /* Fail if this is an MSI intr and is part of a group. */
        if ((irqp->airq_mps_intr_index == MSI_INDEX) &&
            (irqp->airq_intin_no > 1)) {
                *result = ENXIO;
                return (PSM_FAILURE);
        }

        iflag = intr_clear();
        lock_set(&apic_ioapic_lock);

        ret = apic_rebind_all(irqp, cpu);

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);

        if (ret) {
                *result = EIO;
                return (PSM_FAILURE);
        }
        /*
         * keep tracking the default interrupt cpu binding
         */
        irqp->airq_cpu = cpu;

        *result = 0;
        return (PSM_SUCCESS);
}

/*
 * Rebind an entire group of MSI vectors, identified by the irq of its
 * base vector, to new_cpu.
 */
static int
apic_grp_set_cpu(int irqno, int new_cpu, int *result)
{
        dev_info_t *orig_dip;
        uint32_t orig_cpu;
        ulong_t iflag;
        apic_irq_t *irqps[PCI_MSI_MAX_INTRS];
        int i;
        int cap_ptr;
        int msi_mask_off;
        ushort_t msi_ctrl;
        uint32_t msi_pvm;
        ddi_acc_handle_t handle;
        int num_vectors = 0;
        uint32_t vector;

        DDI_INTR_IMPLDBG((CE_CONT, "APIC_GRP_SET_CPU\n"));

        /*
         * Take mutex to ensure that the table doesn't change out from
         * underneath us while we're playing with it.
         */
        mutex_enter(&airq_mutex);
        irqps[0] = apic_irq_table[irqno];
        orig_cpu = irqps[0]->airq_temp_cpu;
        orig_dip = irqps[0]->airq_dip;
        num_vectors = irqps[0]->airq_intin_no;
        vector = irqps[0]->airq_vector;

        /* A "group" of 1 */
        if (num_vectors == 1) {
                mutex_exit(&airq_mutex);
                return (apic_set_cpu(irqno, new_cpu, result));
        }

        *result = ENXIO;

        if (irqps[0]->airq_mps_intr_index != MSI_INDEX) {
                mutex_exit(&airq_mutex);
                DDI_INTR_IMPLDBG((CE_CONT, "set_grp: intr not MSI\n"));
                goto set_grp_intr_done;
        }
        if ((num_vectors < 1) || ((num_vectors - 1) & vector)) {
                mutex_exit(&airq_mutex);
                DDI_INTR_IMPLDBG((CE_CONT,
                    "set_grp: base vec not part of a grp or not aligned: "
                    "vec:0x%x, num_vec:0x%x\n", vector, num_vectors));
                goto set_grp_intr_done;
        }
        DDI_INTR_IMPLDBG((CE_CONT, "set_grp: num intrs in grp: %d\n",
            num_vectors));

        ASSERT((num_vectors + vector) < APIC_MAX_VECTOR);

        *result = EIO;

        /*
         * All IRQ entries in the table for the given device will not be
         * shared. Since they are not shared, the dip in the table will
         * be true to the device of interest.
         */
        for (i = 1; i < num_vectors; i++) {
                irqps[i] = apic_irq_table[apic_vector_to_irq[vector + i]];
                if (irqps[i] == NULL) {
                        mutex_exit(&airq_mutex);
                        goto set_grp_intr_done;
                }
#ifdef DEBUG
                /* Sanity check: CPU and dip are the same for all entries. */
                if ((irqps[i]->airq_dip != orig_dip) ||
                    (irqps[i]->airq_temp_cpu != orig_cpu)) {
                        mutex_exit(&airq_mutex);
                        DDI_INTR_IMPLDBG((CE_CONT,
                            "set_grp: cpu or dip for vec 0x%x difft than for "
                            "vec 0x%x\n", vector, vector + i));
                        DDI_INTR_IMPLDBG((CE_CONT,
                            "  cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
                            irqps[i]->airq_temp_cpu, (void *)orig_dip,
                            (void *)irqps[i]->airq_dip));
                        goto set_grp_intr_done;
                }
#endif /* DEBUG */
        }
        mutex_exit(&airq_mutex);

        cap_ptr = i_ddi_get_msi_msix_cap_ptr(orig_dip);
        handle = i_ddi_get_pci_config_handle(orig_dip);
        msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

        /* MSI Per vector masking is supported. */
        if (msi_ctrl & PCI_MSI_PVM_MASK) {
                if (msi_ctrl & PCI_MSI_64BIT_MASK)
                        msi_mask_off = cap_ptr + PCI_MSI_64BIT_MASKBITS;
                else
                        msi_mask_off = cap_ptr + PCI_MSI_32BIT_MASK;
                msi_pvm = pci_config_get32(handle, msi_mask_off);
                pci_config_put32(handle, msi_mask_off, (uint32_t)-1);
                DDI_INTR_IMPLDBG((CE_CONT,
                    "set_grp: pvm supported. Mask set to 0x%x\n",
                    pci_config_get32(handle, msi_mask_off)));
        }

        iflag = intr_clear();
        lock_set(&apic_ioapic_lock);

        /*
         * Do the first rebind and check for errors. apic_rebind_all
         * returns an error if the CPU is not accepting interrupts. If
         * the first one succeeds they all will.
         */
        if (apic_rebind_all(irqps[0], new_cpu))
                (void) apic_rebind_all(irqps[0], orig_cpu);
        else {
                irqps[0]->airq_cpu = new_cpu;

                for (i = 1; i < num_vectors; i++) {
                        (void) apic_rebind_all(irqps[i], new_cpu);
                        irqps[i]->airq_cpu = new_cpu;
                }
                *result = 0;    /* SUCCESS */
        }

        lock_clear(&apic_ioapic_lock);
        intr_restore(iflag);

        /* Reenable vectors if per vector masking is supported. */
        if (msi_ctrl & PCI_MSI_PVM_MASK) {
                pci_config_put32(handle, msi_mask_off, msi_pvm);
                DDI_INTR_IMPLDBG((CE_CONT,
                    "set_grp: pvm supported. Mask restored to 0x%x\n",
                    pci_config_get32(handle, msi_mask_off)));
        }

set_grp_intr_done:
        if (*result != 0)
                return (PSM_FAILURE);

        return (PSM_SUCCESS);
}

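/*
 * Illustrative example (not part of the original source): the test
 * ((num_vectors - 1) & vector) above rejects a base vector that is not
 * aligned on the group size. For a group of 4 vectors, a base vector of
 * 0x60 passes (3 & 0x60 == 0), while 0x62 is rejected (3 & 0x62 == 2),
 * since the device would then generate messages outside the allocated
 * contiguous range.
 */
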
int
apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
{
        struct autovec *av_dev;
        uchar_t irqno;
        int i;
        apic_irq_t *irq_p;

        /* Sanity check the vector/irq argument. */
        ASSERT((vecirq >= 0) && (vecirq <= APIC_MAX_VECTOR));

        mutex_enter(&airq_mutex);

        /*
         * Convert the vecirq arg to an irq using vector_to_irq table
         * if the arg is a vector. Pass thru if already an irq.
         */
        if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
            PSMGI_INTRBY_VEC)
                irqno = apic_vector_to_irq[vecirq];
        else
                irqno = vecirq;

        irq_p = apic_irq_table[irqno];

        if ((irq_p == NULL) ||
            ((irq_p->airq_mps_intr_index != RESERVE_INDEX) &&
            ((irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
            (irq_p->airq_temp_cpu == IRQ_UNINIT)))) {
                mutex_exit(&airq_mutex);
                return (PSM_FAILURE);
        }

        if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {

                /* Get the (temp) cpu from apic_irq table, indexed by irq. */
                intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;

                /* Return user bound info for intrd. */
                if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
                        intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
                        intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
                }
        }

        if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR)
                intr_params_p->avgi_vector = irq_p->airq_vector;

        if (intr_params_p->avgi_req_flags &
            (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS))
                /* Get number of devices from apic_irq table shared field. */
                intr_params_p->avgi_num_devs = irq_p->airq_share;

        if (intr_params_p->avgi_req_flags & PSMGI_REQ_GET_DEVS) {

                intr_params_p->avgi_req_flags |= PSMGI_REQ_NUM_DEVS;

                /* Some devices have NULL dip. Don't count these. */
                if (intr_params_p->avgi_num_devs > 0) {
                        for (i = 0, av_dev = autovect[irqno].avh_link;
                            av_dev; av_dev = av_dev->av_link)
                                if (av_dev->av_vector && av_dev->av_dip)
                                        i++;
                        intr_params_p->avgi_num_devs =
                            MIN(intr_params_p->avgi_num_devs, i);
                }

                /* There are no viable dips to return. */
                if (intr_params_p->avgi_num_devs == 0)
                        intr_params_p->avgi_dip_list = NULL;

                else {  /* Return list of dips */

                        /* Allocate space in array for that number of devs. */
                        intr_params_p->avgi_dip_list = kmem_zalloc(
                            intr_params_p->avgi_num_devs *
                            sizeof (dev_info_t *),
                            KM_SLEEP);

                        /*
                         * Loop through the device list of the autovec table
                         * filling in the dip array.
                         *
                         * Note that the autovect table may have some special
                         * entries which contain NULL dips. These will be
                         * ignored.
                         */
                        for (i = 0, av_dev = autovect[irqno].avh_link;
                            av_dev; av_dev = av_dev->av_link)
                                if (av_dev->av_vector && av_dev->av_dip)
                                        intr_params_p->avgi_dip_list[i++] =
                                            av_dev->av_dip;
                }
        }

        mutex_exit(&airq_mutex);

        return (PSM_SUCCESS);
}

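/*
 * Illustrative usage sketch (not part of the original source; the caller
 * shown is hypothetical): a consumer such as the PSM_INTR_OP_GET_INTR
 * handler fills in avgi_req_flags and passes either a vector or an irq:
 *
 *	apic_get_intr_t info;
 *
 *	info.avgi_req_flags = PSMGI_REQ_CPUID | PSMGI_REQ_VECTOR |
 *	    PSMGI_INTRBY_VEC;
 *	if (apic_get_vector_intr_info(vector, &info) == PSM_SUCCESS)
 *		... info.avgi_cpu_id and info.avgi_vector are now valid.
 *
 * A caller that sets PSMGI_REQ_GET_DEVS gets back a kmem_zalloc'd
 * avgi_dip_list of avgi_num_devs entries that it must later free.
 */
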
/*
 * This function provides the external interface to the nexus for all
 * functionality related to the new DDI interrupt framework.
 *
 * Input:
 * dip     - pointer to the dev_info structure of the requested device
 * hdlp    - pointer to the internal interrupt handle structure for the
 *	     requested interrupt
 * intr_op - opcode for this call
 * result  - pointer to the integer that will hold the result to be
 *	     passed back if return value is PSM_SUCCESS
 *
 * Output:
 * return value is either PSM_SUCCESS or PSM_FAILURE
 */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
        int             cap;
        int             count_vec;
        int             old_priority;
        int             new_priority;
        int             new_cpu;
        apic_irq_t      *irqp;
        struct intrspec *ispec, intr_spec;

        DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
            "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

        ispec = &intr_spec;
        ispec->intrspec_pri = hdlp->ih_pri;
        ispec->intrspec_vec = hdlp->ih_inum;
        ispec->intrspec_func = hdlp->ih_cb_func;

        switch (intr_op) {
        case PSM_INTR_OP_CHECK_MSI:
                /*
                 * Check whether MSI/X is supported at the APIC level; if
                 * not, mask off the MSI/X bits in hdlp->ih_type before
                 * returning. If MSI/X is supported, leave ih_type
                 * unchanged.
                 *
                 * hdlp->ih_type passed in from the nexus has all the
                 * interrupt types supported by the device.
                 */
                if (apic_support_msi == 0) {
                        /*
                         * if apic_support_msi is not set, call
                         * apic_check_msi_support() to check whether msi
                         * is supported first
                         */
                        if (apic_check_msi_support() == PSM_SUCCESS)
                                apic_support_msi = 1;
                        else
                                apic_support_msi = -1;
                }
                if (apic_support_msi == 1) {
                        if (apic_msix_enable)
                                *result = hdlp->ih_type;
                        else
                                *result = hdlp->ih_type &
                                    ~DDI_INTR_TYPE_MSIX;
                } else
                        *result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
                            DDI_INTR_TYPE_MSIX);
                break;
777 */ 778 if (apic_support_msi == 0) { 779 /* 780 * if apic_support_msi is not set, call 781 * apic_check_msi_support() to check whether msi 782 * is supported first 783 */ 784 if (apic_check_msi_support() == PSM_SUCCESS) 785 apic_support_msi = 1; 786 else 787 apic_support_msi = -1; 788 } 789 if (apic_support_msi == 1) { 790 if (apic_msix_enable) 791 *result = hdlp->ih_type; 792 else 793 *result = hdlp->ih_type & ~DDI_INTR_TYPE_MSIX; 794 } else 795 *result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI | 796 DDI_INTR_TYPE_MSIX); 797 break; 798 case PSM_INTR_OP_ALLOC_VECTORS: 799 if (hdlp->ih_type == DDI_INTR_TYPE_MSI) 800 *result = apic_alloc_msi_vectors(dip, hdlp->ih_inum, 801 hdlp->ih_scratch1, hdlp->ih_pri, 802 (int)(uintptr_t)hdlp->ih_scratch2); 803 else 804 *result = apic_alloc_msix_vectors(dip, hdlp->ih_inum, 805 hdlp->ih_scratch1, hdlp->ih_pri, 806 (int)(uintptr_t)hdlp->ih_scratch2); 807 break; 808 case PSM_INTR_OP_FREE_VECTORS: 809 apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1, 810 hdlp->ih_pri, hdlp->ih_type); 811 break; 812 case PSM_INTR_OP_NAVAIL_VECTORS: 813 *result = apic_navail_vector(dip, hdlp->ih_pri); 814 break; 815 case PSM_INTR_OP_XLATE_VECTOR: 816 ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp; 817 *result = apic_introp_xlate(dip, ispec, hdlp->ih_type); 818 if (*result == -1) 819 return (PSM_FAILURE); 820 break; 821 case PSM_INTR_OP_GET_PENDING: 822 if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL) 823 return (PSM_FAILURE); 824 *result = apic_get_pending(irqp, hdlp->ih_type); 825 break; 826 case PSM_INTR_OP_CLEAR_MASK: 827 if (hdlp->ih_type != DDI_INTR_TYPE_FIXED) 828 return (PSM_FAILURE); 829 irqp = apic_find_irq(dip, ispec, hdlp->ih_type); 830 if (irqp == NULL) 831 return (PSM_FAILURE); 832 apic_clear_mask(irqp); 833 break; 834 case PSM_INTR_OP_SET_MASK: 835 if (hdlp->ih_type != DDI_INTR_TYPE_FIXED) 836 return (PSM_FAILURE); 837 if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL) 838 return (PSM_FAILURE); 839 apic_set_mask(irqp); 840 break; 841 case PSM_INTR_OP_GET_CAP: 842 cap = DDI_INTR_FLAG_PENDING; 843 if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) 844 cap |= DDI_INTR_FLAG_MASKABLE; 845 *result = cap; 846 break; 847 case PSM_INTR_OP_GET_SHARED: 848 if (hdlp->ih_type != DDI_INTR_TYPE_FIXED) 849 return (PSM_FAILURE); 850 ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp; 851 if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL) 852 return (PSM_FAILURE); 853 *result = (irqp->airq_share > 1) ? 1: 0; 854 break; 855 case PSM_INTR_OP_SET_PRI: 856 old_priority = hdlp->ih_pri; /* save old value */ 857 new_priority = *(int *)result; /* try the new value */ 858 859 if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) { 860 return (PSM_SUCCESS); 861 } 862 863 /* Now allocate the vectors */ 864 if (hdlp->ih_type == DDI_INTR_TYPE_MSI) { 865 /* SET_PRI does not support the case of multiple MSI */ 866 if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) > 1) 867 return (PSM_FAILURE); 868 869 count_vec = apic_alloc_msi_vectors(dip, hdlp->ih_inum, 870 1, new_priority, 871 DDI_INTR_ALLOC_STRICT); 872 } else { 873 count_vec = apic_alloc_msix_vectors(dip, hdlp->ih_inum, 874 1, new_priority, 875 DDI_INTR_ALLOC_STRICT); 876 } 877 878 /* Did we get new vectors? 
*/ 879 if (!count_vec) 880 return (PSM_FAILURE); 881 882 /* Finally, free the previously allocated vectors */ 883 apic_free_vectors(dip, hdlp->ih_inum, count_vec, 884 old_priority, hdlp->ih_type); 885 break; 886 case PSM_INTR_OP_SET_CPU: 887 case PSM_INTR_OP_GRP_SET_CPU: 888 /* 889 * The interrupt handle given here has been allocated 890 * specifically for this command, and ih_private carries 891 * a CPU value. 892 */ 893 new_cpu = (int)(intptr_t)hdlp->ih_private; 894 if (!apic_cpu_in_range(new_cpu)) { 895 DDI_INTR_IMPLDBG((CE_CONT, 896 "[grp_]set_cpu: cpu out of range: %d\n", new_cpu)); 897 *result = EINVAL; 898 return (PSM_FAILURE); 899 } 900 if (hdlp->ih_vector > APIC_MAX_VECTOR) { 901 DDI_INTR_IMPLDBG((CE_CONT, 902 "[grp_]set_cpu: vector out of range: %d\n", 903 hdlp->ih_vector)); 904 *result = EINVAL; 905 return (PSM_FAILURE); 906 } 907 if ((hdlp->ih_flags & PSMGI_INTRBY_FLAGS) == PSMGI_INTRBY_VEC) 908 hdlp->ih_vector = apic_vector_to_irq[hdlp->ih_vector]; 909 if (intr_op == PSM_INTR_OP_SET_CPU) { 910 if (apic_set_cpu(hdlp->ih_vector, new_cpu, result) != 911 PSM_SUCCESS) 912 return (PSM_FAILURE); 913 } else { 914 if (apic_grp_set_cpu(hdlp->ih_vector, new_cpu, 915 result) != PSM_SUCCESS) 916 return (PSM_FAILURE); 917 } 918 break; 919 case PSM_INTR_OP_GET_INTR: 920 /* 921 * The interrupt handle given here has been allocated 922 * specifically for this command, and ih_private carries 923 * a pointer to a apic_get_intr_t. 924 */ 925 if (apic_get_vector_intr_info( 926 hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS) 927 return (PSM_FAILURE); 928 break; 929 case PSM_INTR_OP_APIC_TYPE: 930 ((apic_get_type_t *)(hdlp->ih_private))->avgi_type = 931 apic_get_apic_type(); 932 ((apic_get_type_t *)(hdlp->ih_private))->avgi_num_intr = 933 APIC_MAX_VECTOR; 934 ((apic_get_type_t *)(hdlp->ih_private))->avgi_num_cpu = 935 boot_ncpus; 936 hdlp->ih_ver = apic_get_apic_version(); 937 break; 938 case PSM_INTR_OP_SET_CAP: 939 default: 940 return (PSM_FAILURE); 941 } 942 return (PSM_SUCCESS); 943 } 944
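
/*
 * Illustrative usage sketch (not part of the original source): a nexus
 * driver reaches this code through the PSM ops vector rather than by
 * calling apic_intr_ops() directly. A hypothetical retarget of an
 * interrupt to CPU 2 would look roughly like:
 *
 *	ddi_intr_handle_impl_t hdl;	(allocated just for this op)
 *
 *	hdl.ih_vector = vector;
 *	hdl.ih_flags = PSMGI_INTRBY_VEC;
 *	hdl.ih_private = (void *)(intptr_t)2;	(target CPU)
 *	(void) apic_intr_ops(dip, &hdl, PSM_INTR_OP_SET_CPU, &err);
 *
 * On PSM_FAILURE, err carries an errno-style code (EINVAL, ENXIO or
 * EIO) as set by apic_set_cpu()/apic_grp_set_cpu() above.
 */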