/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PX nexus interrupt handling:
 *	PX device interrupt handler wrapper
 *	PIL lookup routine
 *	PX device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include "px_obj.h"
#include <sys/ontrap.h>
#include <sys/membar.h>
#include <sys/clock.h>

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to the interrupt
 * wrapper (px_intx_intr() or px_msiq_intr()), which normally idles the ino's
 * state machine and would thus precipitate another trip round the loop.
 *
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering. See the comment above
 * px_spurintr() explaining how the 'interrupt jabber protection' code
 * does this.
 */

/*LINTLIBRARY*/
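/*
 * In outline, the jabber loop and its protection look like this (an
 * illustrative sketch, not an exact trace of px_spurintr() below):
 *
 *	mondo -> wrapper runs -> no handler claims it -> ino idled
 *	    -> jabbering line resends the mondo -> wrapper runs -> ...
 *
 * Once more than px_unclaimed_intr_max consecutive unclaimed mondos arrive
 * within px_spurintr_duration, px_spurintr() leaves the ino in the pending
 * state ("blocked"), so no further mondos are generated until a handler is
 * added or removed.
 */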
/*
 * If the unclaimed interrupt count has reached the limit set by
 * px_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
static int
px_spurintr(px_ino_pil_t *ipil_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	char		*err_fmt_str;
	boolean_t	blocked = B_FALSE;
	int		i;

	if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= px_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > px_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(px_p->px_dip), ino_p->ino_ino);
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");

	/* Clear the pending state */
	if (blocked == B_FALSE) {
		if (px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (DDI_INTR_UNCLAIMED);
	}

	return (DDI_INTR_CLAIMED);
}

extern uint64_t intr_get_time(void);
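/*
 * A worked example of the px_spurintr() bookkeeping (the thresholds are
 * tunables; the value below is made up for illustration). Suppose
 * px_unclaimed_intr_max is 20: the first unclaimed mondo records
 * ino_spurintr_begin and counts 1, mondos 2 through 20 only bump the count
 * and still idle the ino, and mondo 21 triggers the duration check. If it
 * arrived within px_spurintr_duration of the first, the ino is reported
 * blocked and deliberately left non-idle; otherwise the count restarts
 * from zero.
 */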
/*
 * px_intx_intr (INTx or legacy interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset to zero
 * and the interrupt state machine is idled. If no handler claims the
 * interrupt then the counter is incremented by one and the state machine
 * is idled.
 * If the count ever exceeds the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any
 * further interrupts on that ino. The state machine will only be idled
 * again if a handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_intx_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	ushort_t	pil = ipil_p->ipil_pil;
	uint_t		result = 0, r = DDI_INTR_UNCLAIMED;
	int		i;

	DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr: "
	    "ino=%x sysino=%llx pil=%x ih_size=%x ih_lst=%x\n",
	    ino_p->ino_ino, ino_p->ino_sysino, ipil_p->ipil_pil,
	    ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		if (ih_p->ih_intr_state == PX_INTR_STATE_DISABLE) {
			DBG(DBG_INTX_INTR, px_p->px_dip,
			    "px_intx_intr: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr: "
		    "ino=%x handler=%p arg1=%p arg2=%p\n",
		    ino_p->ino_ino, handler, arg1, arg2);

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (px_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (result)
		ino_p->ino_claimed |= (1 << pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_claimed) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(px_p->px_dip,
	    ino_p->ino_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}
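/*
 * A note on the per-PIL bookkeeping above: each ino carries a px_ino_pil_t
 * per PIL sharing it, and ino_claimed is a bitmask indexed by PIL. With
 * made-up numbers, if handlers at PIL 9 and PIL 6 share an ino, the PIL 9
 * dispatch sets bit 9 and returns early because it is not ino_lopil; only
 * the PIL 6 (lowest) dispatch inspects ino_claimed, resets the counters,
 * and idles the ino, so the mondo is not re-armed until every PIL level
 * has run.
 */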
/*
 * px_msiq_intr (MSI/X or PCIe MSG interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset to zero
 * and the interrupt state machine is idled. If no handler claims the
 * interrupt then the counter is incremented by one and the state machine
 * is idled.
 * If the count ever exceeds the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any
 * further interrupts on that ino. The state machine will only be idled
 * again if a handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_msiq_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_msiq_t	*msiq_p = ino_p->ino_msiq_p;
	dev_info_t	*dip = px_p->px_dip;
	ushort_t	pil = ipil_p->ipil_pil;
	msiq_rec_t	msiq_rec, *msiq_rec_p = &msiq_rec;
	msiqhead_t	*curr_head_p;
	msiqtail_t	curr_tail_index;
	msgcode_t	msg_code;
	px_ih_t		*ih_p;
	uint_t		ret = DDI_INTR_UNCLAIMED;
	int		i, j;

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: msiq_id=%x ino=%x pil=%x "
	    "ih_size=%x ih_lst=%x\n", msiq_p->msiq_id, ino_p->ino_ino,
	    ipil_p->ipil_pil, ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	/*
	 * px_msiq_intr() handles multiple interrupt priorities and sets
	 * msiq_p->msiq_recs2process to the number of MSIQ records to process
	 * while handling the highest priority interrupt. Subsequent lower
	 * priority interrupts will just process any unprocessed MSIQ records
	 * or will just return immediately.
	 */
	if (msiq_p->msiq_recs2process == 0) {
		/* Read current MSIQ tail index */
		px_lib_msiq_gettail(dip, msiq_p->msiq_id, &curr_tail_index);
		msiq_p->msiq_new_head_index = msiq_p->msiq_curr_head_index;

		if (curr_tail_index < msiq_p->msiq_curr_head_index)
			curr_tail_index += msiq_state_p->msiq_rec_cnt;

		msiq_p->msiq_recs2process = curr_tail_index -
		    msiq_p->msiq_curr_head_index;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "rec2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* If all MSIQ records are already processed, just return immediately */
	if ((msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index)
	    == msiq_p->msiq_recs2process)
		goto intr_done;

	curr_head_p = (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p +
	    (msiq_p->msiq_curr_head_index * sizeof (msiq_rec_t)));

	/*
	 * Calculate the number of recs to process by taking the difference
	 * between the head and tail pointers. For all records we always
	 * verify that we have a valid record type before we do any processing.
	 * If triggered, we should always have at least one valid record.
	 */
	for (i = 0; i < msiq_p->msiq_recs2process; i++) {
		msiq_rec_type_t rec_type;

		/* Read next MSIQ record */
		px_lib_get_msiq_rec(dip, curr_head_p, msiq_rec_p);

		rec_type = msiq_rec_p->msiq_rec_type;

		DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSIQ RECORD, "
		    "msiq_rec_type 0x%llx msiq_rec_rid 0x%llx\n",
		    rec_type, msiq_rec_p->msiq_rec_rid);

		if (!rec_type)
			goto next_rec;

		/* Check MSIQ record type */
		switch (rec_type) {
		case MSG_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msg.msg_code;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: PCIE MSG "
			    "record, msg type 0x%x\n", msg_code);
			break;
		case MSI32_REC:
		case MSI64_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msi.msi_data;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSI record, "
			    "msi 0x%x\n", msg_code);

			/* Clear MSI state */
			px_lib_msi_setstate(dip, (msinum_t)msg_code,
			    PCI_MSI_STATE_IDLE);
			break;
		default:
			msg_code = 0;
			cmn_err(CE_WARN, "%s%d: px_msiq_intr: 0x%x MSIQ "
			    "record type is not supported",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    rec_type);

			goto next_rec;
		}

		/*
		 * Scan through the px_ih_t linked list, searching for the
		 * right px_ih_t, matching the MSIQ record data.
		 */
		for (j = 0, ih_p = ipil_p->ipil_ih_start;
		    ih_p && (j < ipil_p->ipil_ih_size) &&
		    ((ih_p->ih_msg_code != msg_code) ||
		    (ih_p->ih_rec_type != rec_type));
		    ih_p = ih_p->ih_next, j++)
			;

		if ((ih_p->ih_msg_code == msg_code) &&
		    (ih_p->ih_rec_type == rec_type)) {
			dev_info_t *dip = ih_p->ih_dip;
			uint_t (*handler)() = ih_p->ih_handler;
			caddr_t arg1 = ih_p->ih_handler_arg1;
			caddr_t arg2 = ih_p->ih_handler_arg2;

			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: ino=%x data=%x "
			    "handler=%p arg1=%p arg2=%p\n", ino_p->ino_ino,
			    msg_code, handler, arg1, arg2);

			DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, caddr_t, arg2);

			ih_p->ih_retarget_flag = B_FALSE;

			/*
			 * Special case for PCIe error messages. The current
			 * framework doesn't fit PCIe error messages; this
			 * should be fixed when PCIe MESSAGES as a whole are
			 * architected correctly.
			 */
			if ((rec_type == MSG_REC) &&
			    ((msg_code == PCIE_MSG_CODE_ERR_COR) ||
			    (msg_code == PCIE_MSG_CODE_ERR_NONFATAL) ||
			    (msg_code == PCIE_MSG_CODE_ERR_FATAL))) {
				ret = px_err_fabric_intr(px_p, msg_code,
				    msiq_rec_p->msiq_rec_rid);
			} else
				ret = (*handler)(arg1, arg2);

			/*
			 * Account for time used by this interrupt. Protect
			 * against conflicting writes to ih_ticks from
			 * ib_intr_dist_all() by using atomic ops.
			 */

			if (pil <= LOCK_LEVEL)
				atomic_add_64(&ih_p->ih_ticks, intr_get_time());

			DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, int, ret);

			msiq_p->msiq_new_head_index++;
			px_lib_clr_msiq_rec(dip, curr_head_p);
		} else {
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: "
			    "No matching MSIQ record found\n");
		}
next_rec:
		/* Get the pointer to the next EQ record */
		curr_head_p = (msiqhead_t *)
		    ((caddr_t)curr_head_p + sizeof (msiq_rec_t));

		/* Check for overflow condition */
		if (curr_head_p >= (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p
		    + (msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t))))
			curr_head_p = (msiqhead_t *)msiq_p->msiq_base_p;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: No of MSIQ recs processed %x\n",
	    (msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index));

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "rec2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* ino_claimed is used just for debugging purposes */
	if (ret)
		ino_p->ino_claimed |= (1 << pil);

intr_done:
	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (msiq_p->msiq_new_head_index <= msiq_p->msiq_curr_head_index) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	/* Update MSIQ head index with the number of MSIQ records processed */
	if (msiq_p->msiq_new_head_index >= msiq_state_p->msiq_rec_cnt)
		msiq_p->msiq_new_head_index -= msiq_state_p->msiq_rec_cnt;

	msiq_p->msiq_curr_head_index = msiq_p->msiq_new_head_index;
	px_lib_msiq_sethead(dip, msiq_p->msiq_id, msiq_p->msiq_new_head_index);

	msiq_p->msiq_new_head_index = 0;
	msiq_p->msiq_recs2process = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}
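/*
 * A worked example of the circular-queue arithmetic in px_msiq_intr()
 * (the queue size is illustrative): with msiq_rec_cnt = 128, a current
 * head of 120 and a raw hardware tail of 8, the tail has wrapped, so it
 * is biased to 8 + 128 = 136 and recs2process = 136 - 120 = 16. While
 * records are handled, msiq_new_head_index counts up past the queue size
 * (to 136 here) and is reduced modulo msiq_rec_cnt (back to 8) before
 * being written to the hardware via px_lib_msiq_sethead().
 */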
dev_info_t *
px_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

/* ARGSUSED */
int
px_intx_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	int	ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intx_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_intx_ops: SetCap is not supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = px_add_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = px_rem_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_GETTARGET:
		ret = px_ib_get_intr_target(px_p, hdlp->ih_vector,
		    (cpuid_t *)result);
		break;
	case DDI_INTROP_SETTARGET:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ENABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_ENABLE, 0, 0);
		break;
	case DDI_INTROP_DISABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_DISABLE, 0, 0);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
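/*
 * For reference, the DDI interrupt framework typically drives the ops
 * above in roughly this order on behalf of a child driver (a sketch of
 * the common case, not a contract):
 *
 *	ddi_intr_alloc()		DDI_INTROP_ALLOC, DDI_INTROP_GETPRI
 *	ddi_intr_add_handler()		DDI_INTROP_ADDISR
 *	ddi_intr_enable()		DDI_INTROP_ENABLE
 *		... device operates ...
 *	ddi_intr_disable()		DDI_INTROP_DISABLE
 *	ddi_intr_remove_handler()	DDI_INTROP_REMISR
 *	ddi_intr_free()			DDI_INTROP_FREE
 */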
/* ARGSUSED */
int
px_msix_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	msiq_rec_type_t	msiq_rec_type;
	msi_type_t	msi_type;
	uint64_t	msi_addr;
	msinum_t	msi_num;
	msiqid_t	msiq_id;
	uint_t		nintrs;
	int		ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_msix_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	/* Check for MSI64 support */
	if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
		msiq_rec_type = MSI64_REC;
		msi_type = MSI64_TYPE;
		msi_addr = msi_state_p->msi_addr64;
	} else {
		msiq_rec_type = MSI32_REC;
		msi_type = MSI32_TYPE;
		msi_addr = msi_state_p->msi_addr32;
	}

	(void) px_msi_get_msinum(px_p, hdlp->ih_dip,
	    (hdlp->ih_flags & DDI_INTR_MSIX_DUP) ? hdlp->ih_main->ih_inum :
	    hdlp->ih_inum, &msi_num);

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_msi_get_cap(rdip, hdlp->ih_type, (int *)result);
		if (ret == DDI_SUCCESS)
			*(int *)result |= DDI_INTR_FLAG_RETARGETABLE;
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_msix_ops: SetCap is not supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * We need to restrict this allocation in the future
		 * based on Resource Management policies.
		 */
		if ((ret = px_msi_alloc(px_p, rdip, hdlp->ih_type,
		    hdlp->ih_inum, hdlp->ih_scratch1,
		    (uintptr_t)hdlp->ih_scratch2,
		    (int *)result)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: allocation "
			    "failed, rdip 0x%p type 0x%d inum 0x%x "
			    "count 0x%x\n", rdip, hdlp->ih_type, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (ret);
		}

		if ((hdlp->ih_type == DDI_INTR_TYPE_MSIX) &&
		    (i_ddi_get_msix(rdip) == NULL)) {
			ddi_intr_msix_t		*msix_p;

			if (msix_p = pci_msix_init(rdip)) {
				i_ddi_set_msix(rdip, msix_p);
				break;
			}

			DBG(DBG_INTROPS, dip, "px_msix_ops: MSI-X allocation "
			    "failed, rdip 0x%p inum 0x%x\n", rdip,
			    hdlp->ih_inum);

			(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (DDI_FAILURE);
		}

		break;
	case DDI_INTROP_FREE:
		(void) pci_msi_unconfigure(rdip, hdlp->ih_type, hdlp->ih_inum);

		if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
			goto msi_free;

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if (((i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1) == 0) &&
		    (i_ddi_get_msix(rdip))) {
			pci_msix_fini(i_ddi_get_msix(rdip));
			i_ddi_set_msix(rdip, NULL);
		}
msi_free:
		(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
		    msiq_rec_type, msi_num, -1, &msiq_id)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: Add MSI handler "
			    "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
			return (ret);
		}

		DBG(DBG_INTROPS, dip, "px_msix_ops: msiq used 0x%x\n", msiq_id);

		if ((ret = px_lib_msi_setmsiq(dip, msi_num,
		    msiq_id, msi_type)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_VALID)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
		    PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num);

		break;
	case DDI_INTROP_DUPVEC:
		DBG(DBG_INTROPS, dip, "px_msix_ops: dupisr - inum: %x, "
		    "new_vector: %x\n", hdlp->ih_inum, hdlp->ih_scratch1);

		ret = pci_msix_dup(hdlp->ih_dip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_REMISR:
		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_ib_update_intr_state(px_p, rdip,
		    hdlp->ih_inum, px_msiqid_to_devino(px_p, msiq_id),
		    hdlp->ih_pri, PX_INTR_STATE_DISABLE, msiq_rec_type,
		    msi_num)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_INVALID)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS)
			return (ret);

		ret = px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		break;
	case DDI_INTROP_GETTARGET:
		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_get_intr_target(px_p,
		    px_msiqid_to_devino(px_p, msiq_id), (cpuid_t *)result);
		break;
	case DDI_INTROP_SETTARGET:
		ret = px_ib_set_msix_target(px_p, hdlp, msi_num,
		    *(cpuid_t *)result);
		break;
	case DDI_INTROP_ENABLE:
		/*
		 * curr_nenables will be greater than 0 if rdip is using
		 * MSI-X and is also using the DUP interface. If
		 * curr_nenables is > 0, return after clearing the mask bit.
		 */
		if ((pci_is_msi_enabled(rdip, hdlp->ih_type) == DDI_SUCCESS) &&
		    (i_ddi_intr_get_current_nenables(rdip) > 0)) {
			return (pci_msi_clr_mask(rdip, hdlp->ih_type,
			    hdlp->ih_inum));
		}

		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);

		if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
		    nintrs, hdlp->ih_inum, msi_addr,
		    hdlp->ih_type == DDI_INTR_TYPE_MSIX ? msi_num :
		    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
			return (ret);

		if ((ret = pci_msi_enable_mode(rdip,
		    hdlp->ih_type)) != DDI_SUCCESS)
			return (ret);

		if ((ret = pci_msi_clr_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		break;
	case DDI_INTROP_DISABLE:
		if ((ret = pci_msi_set_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		/*
		 * curr_nenables will be greater than 1 if rdip is using
		 * MSI-X and is also using the DUP interface. If
		 * curr_nenables is > 1, return after setting the mask bit.
		 */
		if (i_ddi_intr_get_current_nenables(rdip) > 1)
			return (DDI_SUCCESS);

		if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type))
		    != DDI_SUCCESS)
			return (ret);

		break;
	case DDI_INTROP_BLOCKENABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);

		if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
		    nintrs, hdlp->ih_inum, msi_addr,
		    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
			return (ret);

		ret = pci_msi_enable_mode(rdip, hdlp->ih_type);
		break;
	case DDI_INTROP_BLOCKDISABLE:
		ret = pci_msi_disable_mode(rdip, hdlp->ih_type);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_msi_set_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_msi_get_pending(rdip, hdlp->ih_type,
		    hdlp->ih_inum, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_NAVAIL:
		/* XXX - a new interface may be needed */
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_GETPOOL:
		if (msi_state_p->msi_pool_p == NULL) {
			*(ddi_irm_pool_t **)result = NULL;
			return (DDI_ENOTSUP);
		}
		*(ddi_irm_pool_t **)result = msi_state_p->msi_pool_p;
		ret = DDI_SUCCESS;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
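/*
 * The msi_num & ~(nintrs - 1) arithmetic in the ENABLE and BLOCKENABLE
 * cases relies on MSI vectors being granted in power-of-two blocks: the
 * base vector of the block is programmed and the device ORs in the
 * low-order bits itself. With illustrative numbers, nintrs = 4 and
 * msi_num = 0x25 give a base of 0x25 & ~3 = 0x24, covering vectors
 * 0x24 through 0x27. MSI-X has no such alignment requirement, so the
 * ENABLE case programs msi_num unmodified.
 */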
static struct {
	kstat_named_t pxintr_ks_name;
	kstat_named_t pxintr_ks_type;
	kstat_named_t pxintr_ks_cpu;
	kstat_named_t pxintr_ks_pil;
	kstat_named_t pxintr_ks_time;
	kstat_named_t pxintr_ks_ino;
	kstat_named_t pxintr_ks_cookie;
	kstat_named_t pxintr_ks_devpath;
	kstat_named_t pxintr_ks_buspath;
} pxintr_ks_template = {
	{ "name",	KSTAT_DATA_CHAR },
	{ "type",	KSTAT_DATA_CHAR },
	{ "cpu",	KSTAT_DATA_UINT64 },
	{ "pil",	KSTAT_DATA_UINT64 },
	{ "time",	KSTAT_DATA_UINT64 },
	{ "ino",	KSTAT_DATA_UINT64 },
	{ "cookie",	KSTAT_DATA_UINT64 },
	{ "devpath",	KSTAT_DATA_STRING },
	{ "buspath",	KSTAT_DATA_STRING },
};

static uint32_t pxintr_ks_instance;
static char ih_devpath[MAXPATHLEN];
static char ih_buspath[MAXPATHLEN];
kmutex_t pxintr_ks_template_lock;

int
px_ks_update(kstat_t *ksp, int rw)
{
	px_ih_t		*ih_p = ksp->ks_private;
	int		maxlen =
	    sizeof (pxintr_ks_template.pxintr_ks_name.value.c);
	px_ino_pil_t	*ipil_p = ih_p->ih_ipil_p;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	devino_t	ino;
	sysino_t	sysino;

	ino = ino_p->ino_ino;
	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, &sysino) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "px_ks_update: px_lib_intr_devino_to_sysino "
		    "failed");
	}

	(void) snprintf(pxintr_ks_template.pxintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(px_p->px_dip, ih_buspath);
	kstat_named_setstr(&pxintr_ks_template.pxintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pxintr_ks_template.pxintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PX_INTR_STATE_ENABLE) {

		switch (i_ddi_intr_get_current_type(ih_p->ih_dip)) {
		case DDI_INTR_TYPE_MSI:
			(void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c,
			    "msi");
			break;
		case DDI_INTR_TYPE_MSIX:
			(void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c,
			    "msix");
			break;
		default:
			(void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c,
			    "fixed");
			break;
		}

		pxintr_ks_template.pxintr_ks_cpu.value.ui64 = ino_p->ino_cpuid;
		pxintr_ks_template.pxintr_ks_pil.value.ui64 = ipil_p->ipil_pil;
		pxintr_ks_template.pxintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
		    ino_p->ino_cpuid);
		pxintr_ks_template.pxintr_ks_ino.value.ui64 = ino;
		pxintr_ks_template.pxintr_ks_cookie.value.ui64 = sysino;
	} else {
		(void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c,
		    "disabled");
		pxintr_ks_template.pxintr_ks_cpu.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_pil.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_time.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_ino.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_cookie.value.ui64 = 0;
	}
	return (0);
}

void
px_create_intr_kstats(px_ih_t *ih_p)
{
	msiq_rec_type_t rec_type = ih_p->ih_rec_type;

	ASSERT(ih_p->ih_ksp == NULL);

	/*
	 * Create pci_intrs::: kstats for all ih types except messages,
	 * which represent unusual conditions and don't need to be tracked.
	 */
	if (rec_type == 0 || rec_type == MSI32_REC || rec_type == MSI64_REC) {
		ih_p->ih_ksp = kstat_create("pci_intrs",
		    atomic_inc_32_nv(&pxintr_ks_instance), "config",
		    "interrupts", KSTAT_TYPE_NAMED,
		    sizeof (pxintr_ks_template) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);
	}
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pxintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pxintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = px_ks_update;
	}
}
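/*
 * Note that all interrupt kstats created above share the single static
 * pxintr_ks_template as their ks_data: px_ks_update() repopulates the
 * template for whichever kstat is being read, and pxintr_ks_template_lock
 * (the common ks_lock) keeps concurrent snapshots from interleaving.
 * These kstats can be inspected from userland with standard kstat(1M)
 * usage, e.g.:
 *
 *	# kstat -m pci_intrs
 */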
/*
 * px_add_intx_intr:
 *
 * This function is called to register interrupts for INTx and legacy
 * hardware interrupt pins.
 */
int
px_add_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: rdip=%s%d ino=%x "
	    "handler=%x arg1=%x arg2=%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), ino, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, 0, 0);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0)) {
			DBG(DBG_A_INTX, dip, "px_add_intx_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_intx_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		if (ino_p->ino_cpuid == -1)
			ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	hdlp->ih_target = ino_p->ino_cpuid;

	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}
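/*
 * The DDI_INTR_ASSIGN_HDLR_N_ARGS() pair above is a common nexus idiom:
 * the handle is temporarily pointed at the px wrapper (px_intx_intr with
 * the ipil as its argument) so that i_ddi_add_ivintr() installs the
 * wrapper in the interrupt table, and the child's original handler and
 * arguments are then restored so the handle still describes what the
 * child registered.
 */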
/*
 * px_rem_intx_intr:
 *
 * This function is called to unregister interrupts for INTx and legacy
 * hardware interrupt pins.
 */
int
px_rem_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_R_INTX, dip, "px_rem_intx_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}
/*
 * px_add_msiq_intr:
 *
 * This function is called to register MSI/X and PCIe message interrupts.
 */
int
px_add_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, cpuid_t cpu_id, msiqid_t *msiq_id_p)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_msiq_state_t	*msiq_state_p = &ib_p->ib_msiq_state;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: rdip=%s%d handler=0x%x "
	    "arg1=0x%x arg2=0x%x cpu=0x%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), hdlp->ih_cb_func, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, cpu_id);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rec_type, msg_code);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ret = (cpu_id == -1) ? px_msiq_alloc(px_p, rec_type, msiq_id_p) :
	    px_msiq_alloc_based_on_cpuid(px_p, rec_type, cpu_id, msiq_id_p);

	if (ret != DDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
		    "msiq allocation failed\n");
		goto fail;
	}

	ino = px_msiqid_to_devino(px_p, *msiq_id_p);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip,
		    hdlp->ih_inum, rec_type, msg_code)) {
			DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	ino_p->ino_msiq_p = msiq_state_p->msiq_p +
	    (*msiq_id_p - msiq_state_p->msiq_1st_msiq_id);

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_msiq_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		/* Enable MSIQ */
		px_lib_msiq_setstate(dip, *msiq_id_p, PCI_MSIQ_STATE_IDLE);
		px_lib_msiq_setvalid(dip, *msiq_id_p, PCI_MSIQ_VALID);

		if (ino_p->ino_cpuid == -1)
			ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	hdlp->ih_target = ino_p->ino_cpuid;

	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	(void) px_msiq_free(px_p, *msiq_id_p);
fail:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: Failed! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}
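/*
 * The cpu_id argument to px_add_msiq_intr() selects the MSIQ placement
 * policy: -1 lets px_msiq_alloc() pick any available MSIQ, while an
 * explicit cpu id asks px_msiq_alloc_based_on_cpuid() for an MSIQ bound
 * to that cpu (presumably the interrupt retargeting path; the common
 * DDI_INTROP_ADDISR path above always passes -1).
 */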
/*
 * px_rem_msiq_intr:
 *
 * This function is called to unregister MSI/X and PCIe message interrupts.
 */
int
px_rem_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t msiq_id)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino = px_msiqid_to_devino(px_p, msiq_id);
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_rem_msiq_intr: rdip=%s%d msiq_id=%x ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), msiq_id, ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, rec_type,
	    msg_code);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);

		if (ino_p->ino_ipil_size == 0)
			px_lib_msiq_setvalid(dip,
			    px_devino_to_msiqid(px_p, ino), PCI_MSIQ_INVALID);
	}

	(void) px_msiq_free(px_p, msiq_id);

	if (ino_p->ino_ipil_size) {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}