1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * PX nexus interrupt handling: 28 * PX device interrupt handler wrapper 29 * PIL lookup routine 30 * PX device interrupt related initchild code 31 */ 32 33 #include <sys/types.h> 34 #include <sys/kmem.h> 35 #include <sys/async.h> 36 #include <sys/spl.h> 37 #include <sys/sunddi.h> 38 #include <sys/fm/protocol.h> 39 #include <sys/fm/util.h> 40 #include <sys/machsystm.h> /* e_ddi_nodeid_to_dip() */ 41 #include <sys/ddi_impldefs.h> 42 #include <sys/sdt.h> 43 #include <sys/atomic.h> 44 #include "px_obj.h" 45 #include <sys/ontrap.h> 46 #include <sys/membar.h> 47 #include <sys/clock.h> 48 49 /* 50 * interrupt jabber: 51 * 52 * When an interrupt line is jabbering, every time the state machine for the 53 * associated ino is idled, a new mondo will be sent and the ino will go into 54 * the pending state again. The mondo will cause a new call to 55 * px_intr_wrapper() which normally idles the ino's state machine which would 56 * precipitate another trip round the loop. 
57 * 58 * The loop can be broken by preventing the ino's state machine from being 59 * idled when an interrupt line is jabbering. See the comment at the 60 * beginning of px_intr_wrapper() explaining how the 'interrupt jabber 61 * protection' code does this. 62 */ 63 64 /*LINTLIBRARY*/ 65 66 /* 67 * If the unclaimed interrupt count has reached the limit set by 68 * pci_unclaimed_intr_max within the time limit, then all interrupts 69 * on this ino is blocked by not idling the interrupt state machine. 70 */ 71 static int 72 px_spurintr(px_ino_pil_t *ipil_p) 73 { 74 px_ino_t *ino_p = ipil_p->ipil_ino_p; 75 px_ih_t *ih_p = ipil_p->ipil_ih_start; 76 px_t *px_p = ino_p->ino_ib_p->ib_px_p; 77 char *err_fmt_str; 78 boolean_t blocked = B_FALSE; 79 int i; 80 81 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) 82 return (DDI_INTR_CLAIMED); 83 84 if (!ino_p->ino_unclaimed_intrs) 85 ino_p->ino_spurintr_begin = ddi_get_lbolt(); 86 87 ino_p->ino_unclaimed_intrs++; 88 89 if (ino_p->ino_unclaimed_intrs <= px_unclaimed_intr_max) 90 goto clear; 91 92 if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin) 93 > px_spurintr_duration) { 94 ino_p->ino_unclaimed_intrs = 0; 95 goto clear; 96 } 97 err_fmt_str = "%s%d: ino 0x%x blocked"; 98 blocked = B_TRUE; 99 goto warn; 100 clear: 101 err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x"; 102 warn: 103 cmn_err(CE_WARN, err_fmt_str, NAMEINST(px_p->px_dip), ino_p->ino_ino); 104 for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) 105 cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip), 106 ih_p->ih_inum); 107 cmn_err(CE_CONT, "!\n"); 108 109 /* Clear the pending state */ 110 if (blocked == B_FALSE) { 111 if (px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino, 112 INTR_IDLE_STATE) != DDI_SUCCESS) 113 return (DDI_INTR_UNCLAIMED); 114 } 115 116 return (DDI_INTR_CLAIMED); 117 } 118 119 extern uint64_t intr_get_time(void); 120 121 /* 122 * px_intx_intr (INTx or legacy interrupt handler) 123 * 124 * This routine is 
 * used as wrapper around interrupt handlers installed by child
 * device drivers. This routine invokes the driver interrupt handlers and
 * examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is halved and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by pci_unclaimed_intr_max
 * then the interrupt state machine is not idled thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_intx_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	ushort_t	pil = ipil_p->ipil_pil;
	uint_t		result = 0, r = DDI_INTR_UNCLAIMED;
	int		i;

	DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
	    "ino=%x sysino=%llx pil=%x ih_size=%x ih_lst=%x\n",
	    ino_p->ino_ino, ino_p->ino_sysino, ipil_p->ipil_pil,
	    ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	/* Walk every handler sharing this ino at this PIL. */
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		/* Skip handlers the framework has disabled. */
		if (ih_p->ih_intr_state == PX_INTR_STATE_DISABLE) {
			DBG(DBG_INTX_INTR, px_p->px_dip,
			    "px_intx_intr: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
		    "ino=%x handler=%p arg1 =%p arg2 = %p\n",
		    ino_p->ino_ino, handler, arg1, arg2);

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		/* Tunable: keep calling every handler even after a claim. */
		if (px_check_all_handlers)
			continue;
		if (result)
			break;
	}

	/* Record a claim at this PIL for the cross-PIL accounting below. */
	if (result)
		ino_p->ino_claimed |= (1 << pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	/* Lowest PIL and nothing claimed: run the jabber protection logic. */
	if (!ino_p->ino_claimed) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(px_p->px_dip,
	    ino_p->ino_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}

/*
 * px_msiq_intr (MSI/X or PCIe MSG interrupt handler)
 *
 * This routine is used as wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is halved and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
234 * If the count ever reaches the limit value set by pci_unclaimed_intr_max 235 * then the interrupt state machine is not idled thus preventing any further 236 * interrupts on that ino. The state machine will only be idled again if a 237 * handler is subsequently added or removed. 238 * 239 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt, 240 * DDI_INTR_UNCLAIMED otherwise. 241 */ 242 uint_t 243 px_msiq_intr(caddr_t arg) 244 { 245 px_ino_pil_t *ipil_p = (px_ino_pil_t *)arg; 246 px_ino_t *ino_p = ipil_p->ipil_ino_p; 247 px_t *px_p = ino_p->ino_ib_p->ib_px_p; 248 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 249 px_msiq_t *msiq_p = ino_p->ino_msiq_p; 250 dev_info_t *dip = px_p->px_dip; 251 ushort_t pil = ipil_p->ipil_pil; 252 msiq_rec_t msiq_rec, *msiq_rec_p = &msiq_rec; 253 msiqhead_t *curr_head_p; 254 msiqtail_t curr_tail_index; 255 msgcode_t msg_code; 256 px_ih_t *ih_p; 257 uint_t ret = DDI_INTR_UNCLAIMED; 258 int i, j; 259 260 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: msiq_id =%x ino=%x pil=%x " 261 "ih_size=%x ih_lst=%x\n", msiq_p->msiq_id, ino_p->ino_ino, 262 ipil_p->ipil_pil, ipil_p->ipil_ih_size, ipil_p->ipil_ih_head); 263 264 /* 265 * The px_msiq_intr() handles multiple interrupt priorities and it 266 * will set msiq->msiq_rec2process to the number of MSIQ records to 267 * process while handling the highest priority interrupt. Subsequent 268 * lower priority interrupts will just process any unprocessed MSIQ 269 * records or will just return immediately. 
270 */ 271 if (msiq_p->msiq_recs2process == 0) { 272 /* Read current MSIQ tail index */ 273 px_lib_msiq_gettail(dip, msiq_p->msiq_id, &curr_tail_index); 274 msiq_p->msiq_new_head_index = msiq_p->msiq_curr_head_index; 275 276 if (curr_tail_index < msiq_p->msiq_curr_head_index) 277 curr_tail_index += msiq_state_p->msiq_rec_cnt; 278 279 msiq_p->msiq_recs2process = curr_tail_index - 280 msiq_p->msiq_curr_head_index; 281 } 282 283 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x " 284 "rec2process %x\n", msiq_p->msiq_curr_head_index, 285 msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process); 286 287 /* If all MSIQ records are already processed, just return immediately */ 288 if ((msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index) 289 == msiq_p->msiq_recs2process) 290 goto intr_done; 291 292 curr_head_p = (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p + 293 (msiq_p->msiq_curr_head_index * sizeof (msiq_rec_t))); 294 295 /* 296 * Calculate the number of recs to process by taking the difference 297 * between the head and tail pointers. For all records we always 298 * verify that we have a valid record type before we do any processing. 299 * If triggered, we should always have at least one valid record. 
300 */ 301 for (i = 0; i < msiq_p->msiq_recs2process; i++) { 302 /* Read next MSIQ record */ 303 px_lib_get_msiq_rec(dip, curr_head_p, msiq_rec_p); 304 305 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSIQ RECORD, " 306 "msiq_rec_type 0x%llx msiq_rec_rid 0x%llx\n", 307 msiq_rec_p->msiq_rec_type, msiq_rec_p->msiq_rec_rid); 308 309 if (!msiq_rec_p->msiq_rec_type) 310 goto next_rec; 311 312 /* Check MSIQ record type */ 313 switch (msiq_rec_p->msiq_rec_type) { 314 case MSG_REC: 315 msg_code = msiq_rec_p->msiq_rec_data.msg.msg_code; 316 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: PCIE MSG " 317 "record, msg type 0x%x\n", msg_code); 318 break; 319 case MSI32_REC: 320 case MSI64_REC: 321 msg_code = msiq_rec_p->msiq_rec_data.msi.msi_data; 322 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSI record, " 323 "msi 0x%x\n", msg_code); 324 325 /* Clear MSI state */ 326 px_lib_msi_setstate(dip, (msinum_t)msg_code, 327 PCI_MSI_STATE_IDLE); 328 break; 329 default: 330 msg_code = 0; 331 cmn_err(CE_WARN, "%s%d: px_msiq_intr: 0x%x MSIQ " 332 "record type is not supported", 333 ddi_driver_name(dip), ddi_get_instance(dip), 334 msiq_rec_p->msiq_rec_type); 335 336 goto next_rec; 337 } 338 339 /* 340 * Scan through px_ih_t linked list, searching for the 341 * right px_ih_t, matching MSIQ record data. 
342 */ 343 for (j = 0, ih_p = ipil_p->ipil_ih_start; 344 ih_p && (j < ipil_p->ipil_ih_size) && 345 ((ih_p->ih_msg_code != msg_code) || 346 (ih_p->ih_rec_type != msiq_rec_p->msiq_rec_type)); 347 ih_p = ih_p->ih_next, j++) 348 ; 349 350 if ((ih_p->ih_msg_code == msg_code) && 351 (ih_p->ih_rec_type == msiq_rec_p->msiq_rec_type)) { 352 dev_info_t *dip = ih_p->ih_dip; 353 uint_t (*handler)() = ih_p->ih_handler; 354 caddr_t arg1 = ih_p->ih_handler_arg1; 355 caddr_t arg2 = ih_p->ih_handler_arg2; 356 357 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: ino=%x data=%x " 358 "handler=%p arg1 =%p arg2=%p\n", ino_p->ino_ino, 359 msg_code, handler, arg1, arg2); 360 361 DTRACE_PROBE4(interrupt__start, dev_info_t, dip, 362 void *, handler, caddr_t, arg1, caddr_t, arg2); 363 364 /* 365 * Special case for PCIE Error Messages. 366 * The current frame work doesn't fit PCIE Err Msgs 367 * This should be fixed when PCIE MESSAGES as a whole 368 * is architected correctly. 369 */ 370 if ((msg_code == PCIE_MSG_CODE_ERR_COR) || 371 (msg_code == PCIE_MSG_CODE_ERR_NONFATAL) || 372 (msg_code == PCIE_MSG_CODE_ERR_FATAL)) { 373 ret = px_err_fabric_intr(px_p, msg_code, 374 msiq_rec_p->msiq_rec_rid); 375 } else 376 ret = (*handler)(arg1, arg2); 377 378 /* 379 * Account for time used by this interrupt. Protect 380 * against conflicting writes to ih_ticks from 381 * ib_intr_dist_all() by using atomic ops. 
382 */ 383 384 if (pil <= LOCK_LEVEL) 385 atomic_add_64(&ih_p->ih_ticks, intr_get_time()); 386 387 DTRACE_PROBE4(interrupt__complete, dev_info_t, dip, 388 void *, handler, caddr_t, arg1, int, ret); 389 390 msiq_p->msiq_new_head_index++; 391 px_lib_clr_msiq_rec(dip, curr_head_p); 392 } else { 393 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr:" 394 "No matching MSIQ record found\n"); 395 } 396 next_rec: 397 /* Get the pointer next EQ record */ 398 curr_head_p = (msiqhead_t *) 399 ((caddr_t)curr_head_p + sizeof (msiq_rec_t)); 400 401 /* Check for overflow condition */ 402 if (curr_head_p >= (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p 403 + (msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t)))) 404 curr_head_p = (msiqhead_t *)msiq_p->msiq_base_p; 405 } 406 407 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: No of MSIQ recs processed %x\n", 408 (msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index)); 409 410 DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x " 411 "rec2process %x\n", msiq_p->msiq_curr_head_index, 412 msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process); 413 414 /* ino_claimed used just for debugging purpose */ 415 if (ret) 416 ino_p->ino_claimed |= (1 << pil); 417 418 intr_done: 419 /* Interrupt can only be cleared after all pil levels are handled */ 420 if (pil != ino_p->ino_lopil) 421 return (DDI_INTR_CLAIMED); 422 423 if (msiq_p->msiq_new_head_index <= msiq_p->msiq_curr_head_index) { 424 if (px_unclaimed_intr_block) 425 return (px_spurintr(ipil_p)); 426 } 427 428 /* Update MSIQ head index with no of MSIQ records processed */ 429 if (msiq_p->msiq_new_head_index >= msiq_state_p->msiq_rec_cnt) 430 msiq_p->msiq_new_head_index -= msiq_state_p->msiq_rec_cnt; 431 432 msiq_p->msiq_curr_head_index = msiq_p->msiq_new_head_index; 433 px_lib_msiq_sethead(dip, msiq_p->msiq_id, msiq_p->msiq_new_head_index); 434 435 msiq_p->msiq_new_head_index = 0; 436 msiq_p->msiq_recs2process = 0; 437 ino_p->ino_claimed = 0; 438 439 /* Clear the pending state */ 440 if 
(px_lib_intr_setstate(dip, ino_p->ino_sysino, 441 INTR_IDLE_STATE) != DDI_SUCCESS) 442 return (DDI_INTR_UNCLAIMED); 443 444 return (DDI_INTR_CLAIMED); 445 } 446 447 dev_info_t * 448 px_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip) 449 { 450 dev_info_t *cdip = rdip; 451 452 for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip)) 453 ; 454 455 return (cdip); 456 } 457 458 /* ARGSUSED */ 459 int 460 px_intx_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 461 ddi_intr_handle_impl_t *hdlp, void *result) 462 { 463 px_t *px_p = DIP_TO_STATE(dip); 464 int ret = DDI_SUCCESS; 465 466 DBG(DBG_INTROPS, dip, "px_intx_ops: dip=%x rdip=%x intr_op=%x " 467 "handle=%p\n", dip, rdip, intr_op, hdlp); 468 469 switch (intr_op) { 470 case DDI_INTROP_GETCAP: 471 ret = pci_intx_get_cap(rdip, (int *)result); 472 break; 473 case DDI_INTROP_SETCAP: 474 DBG(DBG_INTROPS, dip, "px_intx_ops: SetCap is not supported\n"); 475 ret = DDI_ENOTSUP; 476 break; 477 case DDI_INTROP_ALLOC: 478 *(int *)result = hdlp->ih_scratch1; 479 break; 480 case DDI_INTROP_FREE: 481 break; 482 case DDI_INTROP_GETPRI: 483 *(int *)result = hdlp->ih_pri ? 
484 hdlp->ih_pri : pci_class_to_pil(rdip); 485 break; 486 case DDI_INTROP_SETPRI: 487 break; 488 case DDI_INTROP_ADDISR: 489 ret = px_add_intx_intr(dip, rdip, hdlp); 490 break; 491 case DDI_INTROP_REMISR: 492 ret = px_rem_intx_intr(dip, rdip, hdlp); 493 break; 494 case DDI_INTROP_ENABLE: 495 ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 496 hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_ENABLE, 0, 0); 497 break; 498 case DDI_INTROP_DISABLE: 499 ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 500 hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_DISABLE, 0, 0); 501 break; 502 case DDI_INTROP_SETMASK: 503 ret = pci_intx_set_mask(rdip); 504 break; 505 case DDI_INTROP_CLRMASK: 506 ret = pci_intx_clr_mask(rdip); 507 break; 508 case DDI_INTROP_GETPENDING: 509 ret = pci_intx_get_pending(rdip, (int *)result); 510 break; 511 case DDI_INTROP_NINTRS: 512 case DDI_INTROP_NAVAIL: 513 *(int *)result = i_ddi_get_intx_nintrs(rdip); 514 break; 515 default: 516 ret = DDI_ENOTSUP; 517 break; 518 } 519 520 return (ret); 521 } 522 523 /* ARGSUSED */ 524 int 525 px_msix_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 526 ddi_intr_handle_impl_t *hdlp, void *result) 527 { 528 px_t *px_p = DIP_TO_STATE(dip); 529 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 530 msiq_rec_type_t msiq_rec_type; 531 msi_type_t msi_type; 532 uint64_t msi_addr; 533 msinum_t msi_num; 534 msiqid_t msiq_id; 535 uint_t nintrs; 536 int i, ret = DDI_SUCCESS; 537 538 DBG(DBG_INTROPS, dip, "px_msix_ops: dip=%x rdip=%x intr_op=%x " 539 "handle=%p\n", dip, rdip, intr_op, hdlp); 540 541 /* Check for MSI64 support */ 542 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) { 543 msiq_rec_type = MSI64_REC; 544 msi_type = MSI64_TYPE; 545 msi_addr = msi_state_p->msi_addr64; 546 } else { 547 msiq_rec_type = MSI32_REC; 548 msi_type = MSI32_TYPE; 549 msi_addr = msi_state_p->msi_addr32; 550 } 551 552 switch (intr_op) { 553 case DDI_INTROP_GETCAP: 554 ret = 
pci_msi_get_cap(rdip, hdlp->ih_type, (int *)result); 555 break; 556 case DDI_INTROP_SETCAP: 557 DBG(DBG_INTROPS, dip, "px_msix_ops: SetCap is not supported\n"); 558 ret = DDI_ENOTSUP; 559 break; 560 case DDI_INTROP_ALLOC: 561 /* 562 * We need to restrict this allocation in future 563 * based on Resource Management policies. 564 */ 565 if ((ret = px_msi_alloc(px_p, rdip, hdlp->ih_type, 566 hdlp->ih_inum, hdlp->ih_scratch1, 567 (uintptr_t)hdlp->ih_scratch2, 568 (int *)result)) != DDI_SUCCESS) { 569 DBG(DBG_INTROPS, dip, "px_msix_ops: allocation " 570 "failed, rdip 0x%p type 0x%d inum 0x%x " 571 "count 0x%x\n", rdip, hdlp->ih_type, hdlp->ih_inum, 572 hdlp->ih_scratch1); 573 574 return (ret); 575 } 576 577 if ((hdlp->ih_type == DDI_INTR_TYPE_MSIX) && 578 (i_ddi_get_msix(rdip) == NULL)) { 579 ddi_intr_msix_t *msix_p; 580 581 if (msix_p = pci_msix_init(rdip)) { 582 i_ddi_set_msix(rdip, msix_p); 583 break; 584 } 585 586 DBG(DBG_INTROPS, dip, "px_msix_ops: MSI-X allocation " 587 "failed, rdip 0x%p inum 0x%x\n", rdip, 588 hdlp->ih_inum); 589 590 (void) px_msi_free(px_p, rdip, hdlp->ih_inum, 591 hdlp->ih_scratch1); 592 593 return (DDI_FAILURE); 594 } 595 596 break; 597 case DDI_INTROP_FREE: 598 (void) pci_msi_disable_mode(rdip, hdlp->ih_type, NULL); 599 (void) pci_msi_unconfigure(rdip, hdlp->ih_type, hdlp->ih_inum); 600 601 if (hdlp->ih_type == DDI_INTR_TYPE_MSI) 602 goto msi_free; 603 604 if (hdlp->ih_flags & DDI_INTR_MSIX_DUP) 605 break; 606 607 if (((i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1) == 0) && 608 (i_ddi_get_msix(rdip))) { 609 pci_msix_fini(i_ddi_get_msix(rdip)); 610 i_ddi_set_msix(rdip, NULL); 611 } 612 msi_free: 613 (void) px_msi_free(px_p, rdip, hdlp->ih_inum, 614 hdlp->ih_scratch1); 615 break; 616 case DDI_INTROP_GETPRI: 617 *(int *)result = hdlp->ih_pri ? 
618 hdlp->ih_pri : pci_class_to_pil(rdip); 619 break; 620 case DDI_INTROP_SETPRI: 621 break; 622 case DDI_INTROP_ADDISR: 623 if ((ret = px_msi_get_msinum(px_p, hdlp->ih_dip, 624 hdlp->ih_inum, &msi_num)) != DDI_SUCCESS) 625 return (ret); 626 627 if ((ret = px_add_msiq_intr(dip, rdip, hdlp, 628 msiq_rec_type, msi_num, &msiq_id)) != DDI_SUCCESS) { 629 DBG(DBG_INTROPS, dip, "px_msix_ops: Add MSI handler " 630 "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num); 631 return (ret); 632 } 633 634 DBG(DBG_INTROPS, dip, "px_msix_ops: msiq used 0x%x\n", msiq_id); 635 636 if ((ret = px_lib_msi_setmsiq(dip, msi_num, 637 msiq_id, msi_type)) != DDI_SUCCESS) { 638 (void) px_rem_msiq_intr(dip, rdip, 639 hdlp, msiq_rec_type, msi_num, msiq_id); 640 return (ret); 641 } 642 643 if ((ret = px_lib_msi_setstate(dip, msi_num, 644 PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) { 645 (void) px_rem_msiq_intr(dip, rdip, 646 hdlp, msiq_rec_type, msi_num, msiq_id); 647 return (ret); 648 } 649 650 hdlp->ih_vector = msi_num; 651 break; 652 case DDI_INTROP_DUPVEC: 653 DBG(DBG_INTROPS, dip, "px_msix_ops: dupisr - inum: %x, " 654 "new_vector: %x\n", hdlp->ih_inum, hdlp->ih_scratch1); 655 656 ret = pci_msix_dup(hdlp->ih_dip, hdlp->ih_inum, 657 hdlp->ih_scratch1); 658 break; 659 case DDI_INTROP_REMISR: 660 msi_num = hdlp->ih_vector; 661 662 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 663 &msiq_id)) != DDI_SUCCESS) 664 return (ret); 665 666 if ((ret = px_lib_msi_setstate(dip, msi_num, 667 PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) 668 return (ret); 669 670 ret = px_rem_msiq_intr(dip, rdip, 671 hdlp, msiq_rec_type, msi_num, msiq_id); 672 673 hdlp->ih_vector = 0; 674 break; 675 case DDI_INTROP_ENABLE: 676 msi_num = hdlp->ih_vector; 677 678 if ((ret = px_lib_msi_setvalid(dip, msi_num, 679 PCI_MSI_VALID)) != DDI_SUCCESS) 680 return (ret); 681 682 if ((pci_is_msi_enabled(rdip, hdlp->ih_type) != DDI_SUCCESS) || 683 (hdlp->ih_type == DDI_INTR_TYPE_MSIX)) { 684 nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip); 685 686 if ((ret 
= pci_msi_configure(rdip, hdlp->ih_type, 687 nintrs, hdlp->ih_inum, msi_addr, 688 hdlp->ih_type == DDI_INTR_TYPE_MSIX ? 689 msi_num : msi_num & ~(nintrs - 1))) != DDI_SUCCESS) 690 return (ret); 691 692 if ((ret = pci_msi_enable_mode(rdip, hdlp->ih_type)) 693 != DDI_SUCCESS) 694 return (ret); 695 } 696 697 if ((ret = pci_msi_clr_mask(rdip, hdlp->ih_type, 698 hdlp->ih_inum)) != DDI_SUCCESS) 699 return (ret); 700 701 if (hdlp->ih_flags & DDI_INTR_MSIX_DUP) 702 break; 703 704 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 705 &msiq_id)) != DDI_SUCCESS) 706 return (ret); 707 708 ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 709 px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri, 710 PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num); 711 712 break; 713 case DDI_INTROP_DISABLE: 714 msi_num = hdlp->ih_vector; 715 716 if ((ret = pci_msi_set_mask(rdip, hdlp->ih_type, 717 hdlp->ih_inum)) != DDI_SUCCESS) 718 return (ret); 719 720 if ((ret = px_lib_msi_setvalid(dip, msi_num, 721 PCI_MSI_INVALID)) != DDI_SUCCESS) 722 return (ret); 723 724 if (hdlp->ih_flags & DDI_INTR_MSIX_DUP) 725 break; 726 727 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 728 &msiq_id)) != DDI_SUCCESS) 729 return (ret); 730 731 ret = px_ib_update_intr_state(px_p, rdip, 732 hdlp->ih_inum, px_msiqid_to_devino(px_p, msiq_id), 733 hdlp->ih_pri, PX_INTR_STATE_DISABLE, msiq_rec_type, 734 msi_num); 735 736 break; 737 case DDI_INTROP_BLOCKENABLE: 738 nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip); 739 msi_num = hdlp->ih_vector; 740 741 if ((ret = pci_msi_configure(rdip, hdlp->ih_type, 742 nintrs, hdlp->ih_inum, msi_addr, 743 msi_num & ~(nintrs - 1))) != DDI_SUCCESS) 744 return (ret); 745 746 for (i = 0; i < nintrs; i++, msi_num++) { 747 if ((ret = px_lib_msi_setvalid(dip, msi_num, 748 PCI_MSI_VALID)) != DDI_SUCCESS) 749 return (ret); 750 751 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 752 &msiq_id)) != DDI_SUCCESS) 753 return (ret); 754 755 if ((ret = px_ib_update_intr_state(px_p, rdip, 756 hdlp->ih_inum + i, 
px_msiqid_to_devino(px_p, 757 msiq_id), hdlp->ih_pri, PX_INTR_STATE_ENABLE, 758 msiq_rec_type, msi_num)) != DDI_SUCCESS) 759 return (ret); 760 } 761 762 ret = pci_msi_enable_mode(rdip, hdlp->ih_type); 763 break; 764 case DDI_INTROP_BLOCKDISABLE: 765 nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip); 766 msi_num = hdlp->ih_vector; 767 768 if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type, 769 hdlp->ih_cap & DDI_INTR_FLAG_BLOCK)) != DDI_SUCCESS) 770 return (ret); 771 772 for (i = 0; i < nintrs; i++, msi_num++) { 773 if ((ret = px_lib_msi_setvalid(dip, msi_num, 774 PCI_MSI_INVALID)) != DDI_SUCCESS) 775 return (ret); 776 777 if ((ret = px_lib_msi_getmsiq(dip, msi_num, 778 &msiq_id)) != DDI_SUCCESS) 779 return (ret); 780 781 if ((ret = px_ib_update_intr_state(px_p, rdip, 782 hdlp->ih_inum + i, px_msiqid_to_devino(px_p, 783 msiq_id), hdlp->ih_pri, PX_INTR_STATE_DISABLE, 784 msiq_rec_type, msi_num)) != DDI_SUCCESS) 785 return (ret); 786 } 787 788 break; 789 case DDI_INTROP_SETMASK: 790 ret = pci_msi_set_mask(rdip, hdlp->ih_type, hdlp->ih_inum); 791 break; 792 case DDI_INTROP_CLRMASK: 793 ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum); 794 break; 795 case DDI_INTROP_GETPENDING: 796 ret = pci_msi_get_pending(rdip, hdlp->ih_type, 797 hdlp->ih_inum, (int *)result); 798 break; 799 case DDI_INTROP_NINTRS: 800 ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result); 801 break; 802 case DDI_INTROP_NAVAIL: 803 /* XXX - a new interface may be needed */ 804 ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result); 805 break; 806 case DDI_INTROP_GETPOOL: 807 if (msi_state_p->msi_pool_p == NULL) { 808 *(ddi_irm_pool_t **)result = NULL; 809 return (DDI_ENOTSUP); 810 } 811 *(ddi_irm_pool_t **)result = msi_state_p->msi_pool_p; 812 ret = DDI_SUCCESS; 813 break; 814 default: 815 ret = DDI_ENOTSUP; 816 break; 817 } 818 819 return (ret); 820 } 821 822 static struct { 823 kstat_named_t pxintr_ks_name; 824 kstat_named_t pxintr_ks_type; 825 kstat_named_t pxintr_ks_cpu; 
826 kstat_named_t pxintr_ks_pil; 827 kstat_named_t pxintr_ks_time; 828 kstat_named_t pxintr_ks_ino; 829 kstat_named_t pxintr_ks_cookie; 830 kstat_named_t pxintr_ks_devpath; 831 kstat_named_t pxintr_ks_buspath; 832 } pxintr_ks_template = { 833 { "name", KSTAT_DATA_CHAR }, 834 { "type", KSTAT_DATA_CHAR }, 835 { "cpu", KSTAT_DATA_UINT64 }, 836 { "pil", KSTAT_DATA_UINT64 }, 837 { "time", KSTAT_DATA_UINT64 }, 838 { "ino", KSTAT_DATA_UINT64 }, 839 { "cookie", KSTAT_DATA_UINT64 }, 840 { "devpath", KSTAT_DATA_STRING }, 841 { "buspath", KSTAT_DATA_STRING }, 842 }; 843 844 static uint32_t pxintr_ks_instance; 845 static char ih_devpath[MAXPATHLEN]; 846 static char ih_buspath[MAXPATHLEN]; 847 kmutex_t pxintr_ks_template_lock; 848 849 int 850 px_ks_update(kstat_t *ksp, int rw) 851 { 852 px_ih_t *ih_p = ksp->ks_private; 853 int maxlen = sizeof (pxintr_ks_template.pxintr_ks_name.value.c); 854 px_ino_pil_t *ipil_p = ih_p->ih_ipil_p; 855 px_ino_t *ino_p = ipil_p->ipil_ino_p; 856 px_t *px_p = ino_p->ino_ib_p->ib_px_p; 857 devino_t ino; 858 sysino_t sysino; 859 860 ino = ino_p->ino_ino; 861 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, &sysino) != 862 DDI_SUCCESS) { 863 cmn_err(CE_WARN, "px_ks_update: px_lib_intr_devino_to_sysino " 864 "failed"); 865 } 866 867 (void) snprintf(pxintr_ks_template.pxintr_ks_name.value.c, maxlen, 868 "%s%d", ddi_driver_name(ih_p->ih_dip), 869 ddi_get_instance(ih_p->ih_dip)); 870 871 (void) ddi_pathname(ih_p->ih_dip, ih_devpath); 872 (void) ddi_pathname(px_p->px_dip, ih_buspath); 873 kstat_named_setstr(&pxintr_ks_template.pxintr_ks_devpath, ih_devpath); 874 kstat_named_setstr(&pxintr_ks_template.pxintr_ks_buspath, ih_buspath); 875 876 if (ih_p->ih_intr_state == PX_INTR_STATE_ENABLE) { 877 878 switch (i_ddi_intr_get_current_type(ih_p->ih_dip)) { 879 case DDI_INTR_TYPE_MSI: 880 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 881 "msi"); 882 break; 883 case DDI_INTR_TYPE_MSIX: 884 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 885 
"msix"); 886 break; 887 default: 888 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 889 "fixed"); 890 break; 891 } 892 893 pxintr_ks_template.pxintr_ks_cpu.value.ui64 = ino_p->ino_cpuid; 894 pxintr_ks_template.pxintr_ks_pil.value.ui64 = ipil_p->ipil_pil; 895 pxintr_ks_template.pxintr_ks_time.value.ui64 = ih_p->ih_nsec + 896 (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks, 897 ino_p->ino_cpuid); 898 pxintr_ks_template.pxintr_ks_ino.value.ui64 = ino; 899 pxintr_ks_template.pxintr_ks_cookie.value.ui64 = sysino; 900 } else { 901 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 902 "disabled"); 903 pxintr_ks_template.pxintr_ks_cpu.value.ui64 = 0; 904 pxintr_ks_template.pxintr_ks_pil.value.ui64 = 0; 905 pxintr_ks_template.pxintr_ks_time.value.ui64 = 0; 906 pxintr_ks_template.pxintr_ks_ino.value.ui64 = 0; 907 pxintr_ks_template.pxintr_ks_cookie.value.ui64 = 0; 908 } 909 return (0); 910 } 911 912 void 913 px_create_intr_kstats(px_ih_t *ih_p) 914 { 915 msiq_rec_type_t rec_type = ih_p->ih_rec_type; 916 917 ASSERT(ih_p->ih_ksp == NULL); 918 919 /* 920 * Create pci_intrs::: kstats for all ih types except messages, 921 * which represent unusual conditions and don't need to be tracked. 922 */ 923 if (rec_type == 0 || rec_type == MSI32_REC || rec_type == MSI64_REC) { 924 ih_p->ih_ksp = kstat_create("pci_intrs", 925 atomic_inc_32_nv(&pxintr_ks_instance), "config", 926 "interrupts", KSTAT_TYPE_NAMED, 927 sizeof (pxintr_ks_template) / sizeof (kstat_named_t), 928 KSTAT_FLAG_VIRTUAL); 929 } 930 if (ih_p->ih_ksp != NULL) { 931 ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2; 932 ih_p->ih_ksp->ks_lock = &pxintr_ks_template_lock; 933 ih_p->ih_ksp->ks_data = &pxintr_ks_template; 934 ih_p->ih_ksp->ks_private = ih_p; 935 ih_p->ih_ksp->ks_update = px_ks_update; 936 } 937 } 938 939 /* 940 * px_add_intx_intr: 941 * 942 * This function is called to register INTx and legacy hardware 943 * interrupt pins interrupts. 
 */
int
px_add_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: rdip=%s%d ino=%x "
	    "handler=%x arg1=%x arg2=%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), ino, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, 0, 0);

	/* ib_ino_lst_mutex protects the ino/ipil lists until we are done. */
	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		/* Reject a duplicate registration of the same inum. */
		if (px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0)) {
			DBG(DBG_A_INTX, dip, "px_add_intx_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	/* No PIL specified: derive one from the device's class code. */
	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	/* Temporarily point the handle at the px wrapper for i_ddi_add_ivintr. */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_intx_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	/* Undo px_ib_new_ino_pil() before releasing the handler. */
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_intx_intr:
 *
 * This function is called to unregister INTx and legacy hardware
 * interrupt pins interrupts.
 */
int
px_rem_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	/* ih_vector carries the device ino on entry */
	ino = hdlp->ih_vector;

	DBG(DBG_R_INTX, dip, "px_rem_intx_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/*
	 * Look up the ino, the pil group at this priority, and the handler
	 * itself (INTx handlers were registered with rec_type/msg_code 0).
	 * NOTE(review): the results are used unchecked — a lookup failure
	 * would dereference NULL below; confirm callers only remove
	 * interrupts they previously registered.
	 */
	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	/* Drop this device's contribution to the target CPU's intr weight */
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	/* Last handler at this pil: remove the system vector and the pil */
	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);
	}

	if (ino_p->ino_ipil_size == 0) {
		/* No pil groups left; the ino structure itself goes away */
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}

/*
 * px_add_msiq_intr:
 *
 * This function is called to register MSI/Xs and PCIe message interrupts.
 */
int
px_add_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t *msiq_id_p)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_msiq_state_t	*msiq_state_p = &ib_p->ib_msiq_state;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: rdip=%s%d handler=%x "
	    "arg1=%x arg2=%x\n", ddi_driver_name(rdip), ddi_get_instance(rdip),
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	/* Allocate (or share) an MSI queue for this record type first */
	if ((ret = px_msiq_alloc(px_p, rec_type, msiq_id_p)) != DDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
		    "msiq allocation failed\n");
		return (ret);
	}

	/* The MSIQ determines the device ino used for the rest of the setup */
	ino = px_msiqid_to_devino(px_p, *msiq_id_p);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rec_type, msg_code);

	/* ino/ipil lists and kstat creation are serialized by this mutex */
	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	/* Remember whether the ino pre-existed: decides CPU/MSIQ enable below */
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		/* Reject a duplicate (rdip, inum, rec_type, msg_code) entry */
		if (px_ib_intr_locate_ih(ipil_p, rdip,
		    hdlp->ih_inum, rec_type, msg_code)) {
			DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	/* No priority supplied: derive one from the device's PCI class */
	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	/* Link the ino to its MSIQ state (index relative to the first msiq) */
	ino_p->ino_msiq_p = msiq_state_p->msiq_p +
	    (*msiq_id_p - msiq_state_p->msiq_1st_msiq_id);

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	/*
	 * Temporarily point the handle at the px_msiq_intr() wrapper (with
	 * the ipil as its argument) so the system vector dispatches through
	 * the nexus; the driver's real handler is restored right after.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_msiq_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable MSIQ */
		px_lib_msiq_setstate(dip, *msiq_id_p, PCI_MSIQ_STATE_IDLE);
		px_lib_msiq_setvalid(dip, *msiq_id_p, PCI_MSIQ_VALID);

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	/* Publish the back-pointer and per-handler kstat under the mutex */
	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	/*
	 * ih_p was allocated above and never linked in: tear it down here.
	 * NOTE(review): the msiq allocated by px_msiq_alloc() does not
	 * appear to be released on this path — confirm whether that is
	 * intentional (e.g. reclaimed elsewhere) or a leak.
	 */
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: Failed! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_msiq_intr:
 *
 * This function is called to unregister MSI/Xs and PCIe message interrupts.
 */
int
px_rem_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t msiq_id)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino = px_msiqid_to_devino(px_p, msiq_id);
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_rem_msiq_intr: rdip=%s%d msiq_id=%x ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), msiq_id, ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/*
	 * Look up the ino, the pil group at this priority, and the handler
	 * matching (rdip, inum, rec_type, msg_code).
	 * NOTE(review): the results are used unchecked — a lookup failure
	 * would dereference NULL below; confirm callers only remove
	 * interrupts they previously registered.
	 */
	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, rec_type,
	    msg_code);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	/* Drop this device's contribution to the target CPU's intr weight */
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	/* Last handler at this pil: remove the system vector and the pil */
	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);

		/* Last pil on the ino: the MSIQ can be invalidated ... */
		if (ino_p->ino_ipil_size == 0)
			px_lib_msiq_setvalid(dip,
			    px_devino_to_msiqid(px_p, ino), PCI_MSIQ_INVALID);

		/* ... and the queue returned to the free pool */
		(void) px_msiq_free(px_p, msiq_id);
	}

	if (ino_p->ino_ipil_size == 0) {
		/* No pil groups left; the ino structure itself goes away */
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}