/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PX nexus interrupt handling:
 *	PX device interrupt handler wrapper
 *	PIL lookup routine
 *	PX device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include "px_obj.h"
#include <sys/ontrap.h>
#include <sys/membar.h>
#include <sys/clock.h>

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to the interrupt
 * wrapper (px_intx_intr() or px_msiq_intr()), which normally idles the ino's
 * state machine and would thus precipitate another trip round the loop.
 *
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering. See the comment above
 * px_spurintr() explaining how the 'interrupt jabber protection' code
 * does this.
 */

/*LINTLIBRARY*/
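
/*
 * The jabber protection below is governed by three px driver variables
 * that are referenced in this file but defined elsewhere in the driver:
 * px_unclaimed_intr_max (the unclaimed-count limit), px_spurintr_duration
 * (the time window within which that limit must be hit) and
 * px_unclaimed_intr_block (which enables blocking at all). Being plain
 * globals, they can be patched (e.g. with mdb) when debugging a
 * jabbering device.
 */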

/*
 * If the unclaimed interrupt count has reached the limit set by
 * px_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
static int
px_spurintr(px_ino_pil_t *ipil_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	char		*err_fmt_str;
	boolean_t	blocked = B_FALSE;
	int		i;

	if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= px_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > px_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(px_p->px_dip), ino_p->ino_ino);
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");

	/* Clear the pending state */
	if (blocked == B_FALSE) {
		if (px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (DDI_INTR_UNCLAIMED);
	}

	return (DDI_INTR_CLAIMED);
}
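
/*
 * Worked example of the window check above, using hypothetical tunable
 * values px_unclaimed_intr_max = 20 and px_spurintr_duration = 10000000
 * (ten seconds, in microseconds): the 21st consecutive unclaimed
 * interrupt on an ino is compared against the lbolt timestamp recorded
 * at the 1st one. If fewer than ten seconds have elapsed, the ino is
 * declared jabbering and left non-idle (blocked); otherwise the count
 * simply restarts and the ino is idled as usual.
 */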

extern uint64_t intr_get_time(void);

/*
 * px_intx_intr (INTx or legacy interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_intx_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	ushort_t	pil = ipil_p->ipil_pil;
	uint_t		result = 0, r = DDI_INTR_UNCLAIMED;
	int		i;

	DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
	    "ino=%x sysino=%llx pil=%x ih_size=%x ih_lst=%x\n",
	    ino_p->ino_ino, ino_p->ino_sysino, ipil_p->ipil_pil,
	    ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		if (ih_p->ih_intr_state == PX_INTR_STATE_DISABLE) {
			DBG(DBG_INTX_INTR, px_p->px_dip,
			    "px_intx_intr: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
		    "ino=%x handler=%p arg1=%p arg2=%p\n",
		    ino_p->ino_ino, handler, arg1, arg2);

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (px_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (result)
		ino_p->ino_claimed |= (1 << pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_claimed) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(px_p->px_dip,
	    ino_p->ino_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}
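
/*
 * A note on the ino_claimed bookkeeping above: an ino may be shared by
 * handlers registered at different PILs, and the wrapper runs once per
 * PIL. Each invocation that sees a claim sets its PIL's bit in the
 * ino_claimed bitmask, but only the invocation at the lowest PIL on the
 * ino (ino_lopil) may idle the state machine. For example, if handlers
 * at PIL 6 and PIL 4 share an ino, the PIL 6 pass may set bit 6 and
 * return; the PIL 4 pass then checks the accumulated mask and makes the
 * idle-vs-spurious decision for the whole ino.
 */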

/*
 * px_msiq_intr (MSI/X or PCIe MSG interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_msiq_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_msiq_t	*msiq_p = ino_p->ino_msiq_p;
	dev_info_t	*dip = px_p->px_dip;
	ushort_t	pil = ipil_p->ipil_pil;
	msiq_rec_t	msiq_rec, *msiq_rec_p = &msiq_rec;
	msiqhead_t	*curr_head_p;
	msiqtail_t	curr_tail_index;
	msgcode_t	msg_code;
	px_ih_t		*ih_p;
	uint_t		ret = DDI_INTR_UNCLAIMED;
	int		i, j;

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: msiq_id=%x ino=%x pil=%x "
	    "ih_size=%x ih_lst=%x\n", msiq_p->msiq_id, ino_p->ino_ino,
	    ipil_p->ipil_pil, ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	/*
	 * px_msiq_intr() handles multiple interrupt priorities, and it sets
	 * msiq_p->msiq_recs2process to the number of MSIQ records to process
	 * while handling the highest priority interrupt. Subsequent lower
	 * priority interrupts will just process any unprocessed MSIQ records
	 * or will return immediately.
	 */
	if (msiq_p->msiq_recs2process == 0) {
		/* Read current MSIQ tail index */
		px_lib_msiq_gettail(dip, msiq_p->msiq_id, &curr_tail_index);
		msiq_p->msiq_new_head_index = msiq_p->msiq_curr_head_index;

		if (curr_tail_index < msiq_p->msiq_curr_head_index)
			curr_tail_index += msiq_state_p->msiq_rec_cnt;

		msiq_p->msiq_recs2process = curr_tail_index -
		    msiq_p->msiq_curr_head_index;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "recs2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* If all MSIQ records are already processed, return immediately */
	if ((msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index)
	    == msiq_p->msiq_recs2process)
		goto intr_done;

	curr_head_p = (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p +
	    (msiq_p->msiq_curr_head_index * sizeof (msiq_rec_t)));

	/*
	 * Calculate the number of recs to process by taking the difference
	 * between the head and tail pointers. For all records we always
	 * verify that we have a valid record type before we do any processing.
	 * If triggered, we should always have at least one valid record.
	 */
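	/*
	 * Example of the head/tail arithmetic above (values hypothetical):
	 * with msiq_rec_cnt = 8, msiq_curr_head_index = 6 and a fetched
	 * tail index of 2, the tail has wrapped, so it is biased to
	 * 2 + 8 = 10 and msiq_recs2process = 10 - 6 = 4 records, read
	 * starting at index 6 and wrapping back to index 0 after index 7.
	 */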
	for (i = 0; i < msiq_p->msiq_recs2process; i++) {
		/* Read next MSIQ record */
		px_lib_get_msiq_rec(dip, curr_head_p, msiq_rec_p);

		DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSIQ RECORD, "
		    "msiq_rec_type 0x%llx msiq_rec_rid 0x%llx\n",
		    msiq_rec_p->msiq_rec_type, msiq_rec_p->msiq_rec_rid);

		if (!msiq_rec_p->msiq_rec_type)
			goto next_rec;

		/* Check MSIQ record type */
		switch (msiq_rec_p->msiq_rec_type) {
		case MSG_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msg.msg_code;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: PCIE MSG "
			    "record, msg type 0x%x\n", msg_code);
			break;
		case MSI32_REC:
		case MSI64_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msi.msi_data;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSI record, "
			    "msi 0x%x\n", msg_code);

			/* Clear MSI state */
			px_lib_msi_setstate(dip, (msinum_t)msg_code,
			    PCI_MSI_STATE_IDLE);
			break;
		default:
			msg_code = 0;
			cmn_err(CE_WARN, "%s%d: px_msiq_intr: 0x%x MSIQ "
			    "record type is not supported",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    msiq_rec_p->msiq_rec_type);

			goto next_rec;
		}

		/*
		 * Scan through the px_ih_t linked list, searching for the
		 * px_ih_t whose registration matches this MSIQ record's data.
		 */
		for (j = 0, ih_p = ipil_p->ipil_ih_start;
		    ih_p && (j < ipil_p->ipil_ih_size) &&
		    ((ih_p->ih_msg_code != msg_code) ||
		    (ih_p->ih_rec_type != msiq_rec_p->msiq_rec_type));
		    ih_p = ih_p->ih_next, j++)
			;

		if ((ih_p->ih_msg_code == msg_code) &&
		    (ih_p->ih_rec_type == msiq_rec_p->msiq_rec_type)) {
			dev_info_t *dip = ih_p->ih_dip;
			uint_t (*handler)() = ih_p->ih_handler;
			caddr_t arg1 = ih_p->ih_handler_arg1;
			caddr_t arg2 = ih_p->ih_handler_arg2;

			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: ino=%x data=%x "
			    "handler=%p arg1=%p arg2=%p\n", ino_p->ino_ino,
			    msg_code, handler, arg1, arg2);

			DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, caddr_t, arg2);

			/*
			 * Special case for PCIe error messages.
			 * The current framework doesn't fit PCIe error
			 * messages; this should be fixed when PCIe messages
			 * as a whole are architected correctly.
			 */
			if ((msg_code == PCIE_MSG_CODE_ERR_COR) ||
			    (msg_code == PCIE_MSG_CODE_ERR_NONFATAL) ||
			    (msg_code == PCIE_MSG_CODE_ERR_FATAL)) {
				ret = px_err_fabric_intr(px_p, msg_code,
				    msiq_rec_p->msiq_rec_rid);
			} else
				ret = (*handler)(arg1, arg2);
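
			/*
			 * intr_get_time() (declared extern above) comes
			 * from the sun4 interrupt code; it is understood
			 * to return the ticks consumed by the current
			 * interrupt thread since the previous call, which
			 * tick2ns() later converts when the kstat is read.
			 */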
			/*
			 * Account for time used by this interrupt. Protect
			 * against conflicting writes to ih_ticks from
			 * ib_intr_dist_all() by using atomic ops.
			 */

			if (pil <= LOCK_LEVEL)
				atomic_add_64(&ih_p->ih_ticks, intr_get_time());

			DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, int, ret);

			msiq_p->msiq_new_head_index++;
			px_lib_clr_msiq_rec(dip, curr_head_p);
		} else {
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr:"
			    "No matching MSIQ record found\n");
		}
next_rec:
		/* Get the pointer to the next EQ record */
		curr_head_p = (msiqhead_t *)
		    ((caddr_t)curr_head_p + sizeof (msiq_rec_t));

		/* Check for wraparound at the end of the MSIQ */
		if (curr_head_p >= (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p
		    + (msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t))))
			curr_head_p = (msiqhead_t *)msiq_p->msiq_base_p;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: no. of MSIQ recs processed "
	    "%x\n",
	    (msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index));

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "recs2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* ino_claimed is used just for debugging purposes */
	if (ret)
		ino_p->ino_claimed |= (1 << pil);

intr_done:
	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (msiq_p->msiq_new_head_index <= msiq_p->msiq_curr_head_index) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	/* Update the MSIQ head index with the no. of MSIQ records processed */
	if (msiq_p->msiq_new_head_index >= msiq_state_p->msiq_rec_cnt)
		msiq_p->msiq_new_head_index -= msiq_state_p->msiq_rec_cnt;

	msiq_p->msiq_curr_head_index = msiq_p->msiq_new_head_index;
	px_lib_msiq_sethead(dip, msiq_p->msiq_id, msiq_p->msiq_new_head_index);

	msiq_p->msiq_new_head_index = 0;
	msiq_p->msiq_recs2process = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}

dev_info_t *
px_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

/* Default class to pil value mapping */
px_class_val_t px_default_pil [] = {
	{0x000000, 0xff0000, 0x1},	/* Class code for pre-2.0 devices */
	{0x010000, 0xff0000, 0x4},	/* Mass Storage Controller */
	{0x020000, 0xff0000, 0x6},	/* Network Controller */
	{0x030000, 0xff0000, 0x9},	/* Display Controller */
	{0x040000, 0xff0000, 0x8},	/* Multimedia Controller */
	{0x050000, 0xff0000, 0x9},	/* Memory Controller */
	{0x060000, 0xff0000, 0x9},	/* Bridge Controller */
	{0x0c0000, 0xffff00, 0x9},	/* Serial Bus, FireWire (IEEE 1394) */
	{0x0c0100, 0xffff00, 0x4},	/* Serial Bus, ACCESS.bus */
	{0x0c0200, 0xffff00, 0x4},	/* Serial Bus, SSA */
	{0x0c0300, 0xffff00, 0x9},	/* Serial Bus, Universal Serial Bus */
	{0x0c0400, 0xffff00, 0x6},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 0x6}	/* Serial Bus, InfiniBand */
};
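
/*
 * Like the interrupt weights below, the pil table above can be overridden
 * with a driver.conf(4) property of (class-code, mask, value) triples; a
 * hypothetical entry on or above the px node that raises network devices
 * to PIL 9 would look like:
 *
 *	pci-class-priorities = 0x020000, 0xff0000, 9;
 */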
/*
 * Default class to intr_weight value mapping (% of CPU). A driver.conf
 * entry on or above the pci node like
 *
 *	pci-class-intr-weights= 0x020000, 0xff0000, 30;
 *
 * can be used to augment or override entries in the default table below.
 *
 * NB: The values below give NICs preference on redistribution, and provide
 * NICs some isolation from other interrupt sources. We need better interfaces
 * that allow the NIC driver to identify a specific NIC instance as high
 * bandwidth, and thus deserving of separation from other low bandwidth
 * NICs and of additional isolation from other interrupt sources.
 *
 * NB: We treat InfiniBand like a NIC.
 */
px_class_val_t px_default_intr_weight [] = {
	{0x020000, 0xff0000, 35},	/* Network Controller */
	{0x010000, 0xff0000, 10},	/* Mass Storage Controller */
	{0x0c0400, 0xffff00, 10},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 50}	/* Serial Bus, InfiniBand */
};

static uint32_t
px_match_class_val(uint32_t key, px_class_val_t *rec_p, int nrec,
    uint32_t default_val)
{
	int	i;

	for (i = 0; i < nrec; rec_p++, i++) {
		if ((rec_p->class_code & rec_p->class_mask) ==
		    (key & rec_p->class_mask))
			return (rec_p->class_val);
	}

	return (default_val);
}
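
/*
 * Matching example: an ethernet device exposes class-code 0x020000
 * (base class 0x02, network). Masking both the table key and the device
 * code with 0xff0000 leaves 0x020000 == 0x020000, so the lookup returns
 * pil 0x6 from px_default_pil and weight 35 from px_default_intr_weight.
 */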

/*
 * px_class_to_val
 *
 * Return the configuration value, based on class code and sub class code,
 * from the specified property-based or default px_class_val_t table.
 */
uint32_t
px_class_to_val(dev_info_t *rdip, char *property_name, px_class_val_t *rec_p,
    int nrec, uint32_t default_val)
{
	int property_len;
	uint32_t class_code;
	px_class_val_t *conf;
	uint32_t val = default_val;

	/*
	 * Use the "class-code" property to get the base and sub class
	 * codes for the requesting device.
	 */
	class_code = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS, "class-code", -1);

	if (class_code == -1)
		return (val);

	/* look up the val from the default table */
	val = px_match_class_val(class_code, rec_p, nrec, val);

	/* see if there is a more specific property specified value */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_NOTPROM,
	    property_name, (caddr_t)&conf, &property_len))
		return (val);

	if ((property_len % sizeof (px_class_val_t)) == 0)
		val = px_match_class_val(class_code, conf,
		    property_len / sizeof (px_class_val_t), val);
	kmem_free(conf, property_len);
	return (val);
}

/* px_class_to_pil: return the pil for a given device. */
uint32_t
px_class_to_pil(dev_info_t *rdip)
{
	uint32_t pil;

	/* Default pil is 1 */
	pil = px_class_to_val(rdip,
	    "pci-class-priorities", px_default_pil,
	    sizeof (px_default_pil) / sizeof (px_class_val_t), 1);

	/* Range check the result */
	if (pil >= 0xf)
		pil = 1;

	return (pil);
}

/* px_class_to_intr_weight: return the intr_weight for a given device. */
static int32_t
px_class_to_intr_weight(dev_info_t *rdip)
{
	int32_t intr_weight;

	/* default weight is 0% */
	intr_weight = px_class_to_val(rdip,
	    "pci-class-intr-weights", px_default_intr_weight,
	    sizeof (px_default_intr_weight) / sizeof (px_class_val_t), 0);

	/* range check the result */
	if (intr_weight < 0)
		intr_weight = 0;
	if (intr_weight > 1000)
		intr_weight = 1000;

	return (intr_weight);
}

/* ARGSUSED */
int
px_intx_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	int	ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intx_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_intx_ops: SetCap is not "
		    "supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : px_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = px_add_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = px_rem_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_ENABLE, 0, 0);
		break;
	case DDI_INTROP_DISABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_DISABLE, 0, 0);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
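
/*
 * The framework typically drives the ops above in sequence for a fixed
 * interrupt: DDI_INTROP_ALLOC and DDI_INTROP_GETPRI while the driver
 * calls ddi_intr_alloc(9F), DDI_INTROP_ADDISR for ddi_intr_add_handler(9F)
 * and DDI_INTROP_ENABLE for ddi_intr_enable(9F); teardown reverses the
 * order. This is the generic DDI interrupt framework contract rather
 * than anything px specific.
 */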

/* ARGSUSED */
int
px_msix_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t			*px_p = DIP_TO_STATE(dip);
	px_msi_state_t		*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	msiq_rec_type_t		msiq_rec_type;
	msi_type_t		msi_type;
	uint64_t		msi_addr;
	msinum_t		msi_num;
	msiqid_t		msiq_id;
	uint_t			nintrs;
	int			i, ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_msix_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	/* Check for MSI64 support */
	if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
		msiq_rec_type = MSI64_REC;
		msi_type = MSI64_TYPE;
		msi_addr = msi_state_p->msi_addr64;
	} else {
		msiq_rec_type = MSI32_REC;
		msi_type = MSI32_TYPE;
		msi_addr = msi_state_p->msi_addr32;
	}

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_msi_get_cap(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_msix_ops: SetCap is not "
		    "supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * We need to restrict this allocation in the future
		 * based on Resource Management policies.
		 */
		if ((ret = px_msi_alloc(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1, (uintptr_t)hdlp->ih_scratch2, &msi_num,
		    (int *)result)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: allocation "
			    "failed, rdip 0x%p type 0x%d inum 0x%x "
			    "count 0x%x\n", rdip, hdlp->ih_type, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (ret);
		}

		if ((hdlp->ih_type == DDI_INTR_TYPE_MSIX) &&
		    (i_ddi_get_msix(rdip) == NULL)) {
			ddi_intr_msix_t		*msix_p;

			if (msix_p = pci_msix_init(rdip)) {
				i_ddi_set_msix(rdip, msix_p);
				break;
			}

			DBG(DBG_INTROPS, dip, "px_msix_ops: MSI-X allocation "
			    "failed, rdip 0x%p inum 0x%x\n", rdip,
			    hdlp->ih_inum);

			(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (DDI_FAILURE);
		}

		break;
	case DDI_INTROP_FREE:
		(void) pci_msi_disable_mode(rdip, hdlp->ih_type, NULL);
		(void) pci_msi_unconfigure(rdip, hdlp->ih_type, hdlp->ih_inum);

		if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
			goto msi_free;

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if (((i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1) == 0) &&
		    (i_ddi_get_msix(rdip))) {
			pci_msix_fini(i_ddi_get_msix(rdip));
			i_ddi_set_msix(rdip, NULL);
		}
msi_free:
		(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : px_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		if ((ret = px_msi_get_msinum(px_p, hdlp->ih_dip,
		    hdlp->ih_inum, &msi_num)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
		    msiq_rec_type, msi_num, &msiq_id)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: Add MSI handler "
			    "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
			return (ret);
		}

		DBG(DBG_INTROPS, dip, "px_msix_ops: msiq used 0x%x\n",
		    msiq_id);

		if ((ret = px_lib_msi_setmsiq(dip, msi_num,
		    msiq_id, msi_type)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		hdlp->ih_vector = msi_num;
		break;
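	/*
	 * To summarize the ADDISR case above: the MSI number backing this
	 * handle is looked up, the handler is registered on an MSIQ (which
	 * also picks the EQ/ino), the MSI is bound to that MSIQ in the
	 * hardware, and the MSI is parked in the idle state. Any failure
	 * after the MSIQ registration unwinds it via px_rem_msiq_intr().
	 */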
	case DDI_INTROP_DUPVEC:
		DBG(DBG_INTROPS, dip, "px_msix_ops: dupisr - inum: %x, "
		    "new_vector: %x\n", hdlp->ih_inum, hdlp->ih_scratch1);

		ret = pci_msix_dup(hdlp->ih_dip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_REMISR:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS)
			return (ret);

		ret = px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		hdlp->ih_vector = 0;
		break;
	case DDI_INTROP_ENABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_VALID)) != DDI_SUCCESS)
			return (ret);

		if ((pci_is_msi_enabled(rdip, hdlp->ih_type) != DDI_SUCCESS) ||
		    (hdlp->ih_type == DDI_INTR_TYPE_MSIX)) {
			nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);

			if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
			    nintrs, hdlp->ih_inum, msi_addr,
			    hdlp->ih_type == DDI_INTR_TYPE_MSIX ?
			    msi_num : msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
				return (ret);

			if ((ret = pci_msi_enable_mode(rdip, hdlp->ih_type))
			    != DDI_SUCCESS)
				return (ret);
		}

		if ((ret = pci_msi_clr_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
		    PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num);

		break;
	case DDI_INTROP_DISABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_set_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_INVALID)) != DDI_SUCCESS)
			return (ret);

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip,
		    hdlp->ih_inum, px_msiqid_to_devino(px_p, msiq_id),
		    hdlp->ih_pri, PX_INTR_STATE_DISABLE, msiq_rec_type,
		    msi_num);

		break;
	case DDI_INTROP_BLOCKENABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
		    nintrs, hdlp->ih_inum, msi_addr,
		    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_VALID)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_lib_msi_getmsiq(dip, msi_num,
			    &msiq_id)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_ib_update_intr_state(px_p, rdip,
			    hdlp->ih_inum + i, px_msiqid_to_devino(px_p,
			    msiq_id), hdlp->ih_pri, PX_INTR_STATE_ENABLE,
			    msiq_rec_type, msi_num)) != DDI_SUCCESS)
				return (ret);
		}

		ret = pci_msi_enable_mode(rdip, hdlp->ih_type);
		break;
	case DDI_INTROP_BLOCKDISABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type,
		    hdlp->ih_cap & DDI_INTR_FLAG_BLOCK)) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_INVALID)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_lib_msi_getmsiq(dip, msi_num,
			    &msiq_id)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_ib_update_intr_state(px_p, rdip,
			    hdlp->ih_inum + i, px_msiqid_to_devino(px_p,
			    msiq_id), hdlp->ih_pri, PX_INTR_STATE_DISABLE,
			    msiq_rec_type, msi_num)) != DDI_SUCCESS)
				return (ret);
		}

		break;
	case DDI_INTROP_SETMASK:
		ret = pci_msi_set_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_msi_get_pending(rdip, hdlp->ih_type,
		    hdlp->ih_inum, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_NAVAIL:
		/* XXX - a new interface may be needed */
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
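
/*
 * Block-mode example: for a device granted nintrs = 4 MSIs, the PCI MSI
 * protocol requires a power-of-two aligned block, so the base data value
 * programmed is msi_num & ~(nintrs - 1), i.e. msi_num & ~3, and the four
 * vectors msi_num .. msi_num + 3 are then validated and tracked
 * individually in the loops above.
 */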

static struct {
	kstat_named_t pxintr_ks_name;
	kstat_named_t pxintr_ks_type;
	kstat_named_t pxintr_ks_cpu;
	kstat_named_t pxintr_ks_pil;
	kstat_named_t pxintr_ks_time;
	kstat_named_t pxintr_ks_ino;
	kstat_named_t pxintr_ks_cookie;
	kstat_named_t pxintr_ks_devpath;
	kstat_named_t pxintr_ks_buspath;
} pxintr_ks_template = {
	{ "name",	KSTAT_DATA_CHAR },
	{ "type",	KSTAT_DATA_CHAR },
	{ "cpu",	KSTAT_DATA_UINT64 },
	{ "pil",	KSTAT_DATA_UINT64 },
	{ "time",	KSTAT_DATA_UINT64 },
	{ "ino",	KSTAT_DATA_UINT64 },
	{ "cookie",	KSTAT_DATA_UINT64 },
	{ "devpath",	KSTAT_DATA_STRING },
	{ "buspath",	KSTAT_DATA_STRING },
};

static uint32_t pxintr_ks_instance;
static char ih_devpath[MAXPATHLEN];
static char ih_buspath[MAXPATHLEN];
kmutex_t pxintr_ks_template_lock;

int
px_ks_update(kstat_t *ksp, int rw)
{
	px_ih_t *ih_p = ksp->ks_private;
	int maxlen = sizeof (pxintr_ks_template.pxintr_ks_name.value.c);
	px_ino_pil_t *ipil_p = ih_p->ih_ipil_p;
	px_ino_t *ino_p = ipil_p->ipil_ino_p;
	px_t *px_p = ino_p->ino_ib_p->ib_px_p;
	devino_t ino;
	sysino_t sysino;

	ino = ino_p->ino_ino;
	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, &sysino) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "px_ks_update: px_lib_intr_devino_to_sysino "
		    "failed");
	}

	(void) snprintf(pxintr_ks_template.pxintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(px_p->px_dip, ih_buspath);
	kstat_named_setstr(&pxintr_ks_template.pxintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pxintr_ks_template.pxintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PX_INTR_STATE_ENABLE) {

		switch (i_ddi_intr_get_current_type(ih_p->ih_dip)) {
		case DDI_INTR_TYPE_MSI:
			(void) strcpy(
			    pxintr_ks_template.pxintr_ks_type.value.c, "msi");
			break;
		case DDI_INTR_TYPE_MSIX:
			(void) strcpy(
			    pxintr_ks_template.pxintr_ks_type.value.c, "msix");
			break;
		default:
			(void) strcpy(
			    pxintr_ks_template.pxintr_ks_type.value.c,
			    "fixed");
			break;
		}

		pxintr_ks_template.pxintr_ks_cpu.value.ui64 = ino_p->ino_cpuid;
		pxintr_ks_template.pxintr_ks_pil.value.ui64 = ipil_p->ipil_pil;
		pxintr_ks_template.pxintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
		    ino_p->ino_cpuid);
		pxintr_ks_template.pxintr_ks_ino.value.ui64 = ino;
		pxintr_ks_template.pxintr_ks_cookie.value.ui64 = sysino;
	} else {
		(void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c,
		    "disabled");
		pxintr_ks_template.pxintr_ks_cpu.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_pil.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_time.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_ino.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_cookie.value.ui64 = 0;
	}
	return (0);
}
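
/*
 * Note that all pci_intrs kstats share the single static template (and
 * the static path buffers) above; each kstat is created with
 * KSTAT_FLAG_VIRTUAL and points its ks_data at the template. This is
 * safe because every kstat also shares pxintr_ks_template_lock as its
 * ks_lock, so the framework serializes each px_ks_update() call with the
 * snapshot of the template that follows it.
 */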

void
px_create_intr_kstats(px_ih_t *ih_p)
{
	msiq_rec_type_t rec_type = ih_p->ih_rec_type;

	ASSERT(ih_p->ih_ksp == NULL);

	/*
	 * Create pci_intrs::: kstats for all ih types except messages,
	 * which represent unusual conditions and don't need to be tracked.
	 */
	if (rec_type == 0 || rec_type == MSI32_REC || rec_type == MSI64_REC) {
		ih_p->ih_ksp = kstat_create("pci_intrs",
		    atomic_inc_32_nv(&pxintr_ks_instance), "config",
		    "interrupts", KSTAT_TYPE_NAMED,
		    sizeof (pxintr_ks_template) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);
	}
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pxintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pxintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = px_ks_update;
	}
}
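
/*
 * A rec_type of 0 above denotes a fixed/INTx handler (px_add_intx_intr()
 * registers its handlers with rec_type 0), so fixed, MSI and MSI-X
 * handlers all get kstats while MSG_REC handlers are skipped. The
 * resulting data can be inspected from userland with, for example,
 * kstat -m pci_intrs.
 */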

/*
 * px_add_intx_intr:
 *
 * This function is called to register INTx and legacy hardware
 * interrupt pin interrupts.
 */
int
px_add_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: rdip=%s%d ino=%x "
	    "handler=%x arg1=%x arg2=%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), ino, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, 0, 0);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0)) {
			DBG(DBG_A_INTX, dip, "px_add_intx_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = px_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_intx_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = px_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}
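
/*
 * A note on the handler swap in px_add_intx_intr() above: the system
 * interrupt vector table must dispatch to the px wrapper (px_intx_intr()
 * with the px_ino_pil_t as its argument), so the wrapper is temporarily
 * assigned into the handle for i_ddi_add_ivintr() and the driver's own
 * handler and arguments are then restored, since the handle remains the
 * DDI framework's record of what the driver registered.
 */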

/*
 * px_rem_intx_intr:
 *
 * This function is called to unregister INTx and legacy hardware
 * interrupt pin interrupts.
 */
int
px_rem_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_R_INTX, dip, "px_rem_intx_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}
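
/*
 * Teardown in px_rem_intx_intr() above is layered the same way as setup:
 * the handler is unlinked from its px_ino_pil_t, the pil level (and its
 * system ivintr) is deleted once its last handler is gone, and the
 * px_ino_t itself is freed only when no pil levels remain; otherwise the
 * mapping register is re-enabled at its previous cpu target because the
 * ino is still shared.
 */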

/*
 * px_add_msiq_intr:
 *
 * This function is called to register MSI/Xs and PCIe message interrupts.
 */
int
px_add_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t *msiq_id_p)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_msiq_state_t	*msiq_state_p = &ib_p->ib_msiq_state;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: rdip=%s%d handler=%x "
	    "arg1=%x arg2=%x\n", ddi_driver_name(rdip), ddi_get_instance(rdip),
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if ((ret = px_msiq_alloc(px_p, rec_type, msiq_id_p)) != DDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
		    "msiq allocation failed\n");
		return (ret);
	}

	ino = px_msiqid_to_devino(px_p, *msiq_id_p);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rec_type, msg_code);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip,
		    hdlp->ih_inum, rec_type, msg_code)) {
			DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = px_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	ino_p->ino_msiq_p = msiq_state_p->msiq_p +
	    (*msiq_id_p - msiq_state_p->msiq_1st_msiq_id);

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_msiq_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable MSIQ */
		px_lib_msiq_setstate(dip, *msiq_id_p, PCI_MSIQ_STATE_IDLE);
		px_lib_msiq_setvalid(dip, *msiq_id_p, PCI_MSIQ_VALID);

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = px_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: Failed! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_msiq_intr:
 *
 * This function is called to unregister MSI/Xs and PCIe message interrupts.
 */
int
px_rem_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t msiq_id)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino = px_msiqid_to_devino(px_p, msiq_id);
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_rem_msiq_intr: rdip=%s%d msiq_id=%x ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), msiq_id, ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, rec_type,
	    msg_code);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);

		if (ino_p->ino_ipil_size == 0)
			px_lib_msiq_setvalid(dip,
			    px_devino_to_msiqid(px_p, ino), PCI_MSIQ_INVALID);

		(void) px_msiq_free(px_p, msiq_id);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}