/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PX nexus interrupt handling:
 *	PX device interrupt handler wrapper
 *	PIL lookup routine
 *	PX device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include "px_obj.h"
#include <sys/ontrap.h>
#include <sys/membar.h>
#include <sys/clock.h>

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to
 * px_intr_wrapper() which normally idles the ino's state machine which would
 * precipitate another trip round the loop.
 *
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering. See the comment at the
 * beginning of px_intr_wrapper() explaining how the 'interrupt jabber
 * protection' code does this.
 */

/*LINTLIBRARY*/

/*
 * If the unclaimed interrupt count has reached the limit set by
 * px_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
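/*
 * Illustrative walk-through of px_spurintr() below (the limit value is
 * made up; the real tunables are px_unclaimed_intr_max and
 * px_spurintr_duration): assuming px_unclaimed_intr_max = 20, the first
 * 20 consecutive unclaimed interrupts each log a spurious-interrupt
 * warning and re-idle the ino. On the 21st, if the whole burst arrived
 * within px_spurintr_duration, the ino is left in the pending state,
 * blocking further mondos, and subsequent calls return immediately.
 * If the window had already expired, the count is reset and the ino is
 * idled as usual.
 */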
static int
px_spurintr(px_ino_pil_t *ipil_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	char		*err_fmt_str;
	boolean_t	blocked = B_FALSE;
	int		i;

	if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= px_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > px_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(px_p->px_dip), ino_p->ino_ino);
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");

	/* Clear the pending state */
	if (blocked == B_FALSE) {
		if (px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (DDI_INTR_UNCLAIMED);
	}

	return (DDI_INTR_CLAIMED);
}

extern uint64_t intr_get_time(void);

/*
 * px_intx_intr (INTx or legacy interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset to zero
 * and the interrupt state machine is idled. If no handler claims the
 * interrupt then the counter is incremented by one and the state machine
 * is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
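/*
 * Illustrative example (not code from this driver): the handlers invoked
 * below are whatever a child driver registered through
 * ddi_intr_add_handler(9F). A minimal sketch of such a handler, with a
 * hypothetical xx_softc soft state and device check, would be:
 *
 *	static uint_t
 *	xx_intr(caddr_t arg1, caddr_t arg2)
 *	{
 *		struct xx_softc *softc = (struct xx_softc *)arg1;
 *
 *		if (!xx_device_raised_intr(softc))
 *			return (DDI_INTR_UNCLAIMED);
 *
 *		... service and acknowledge the device ...
 *		return (DDI_INTR_CLAIMED);
 *	}
 *
 * A DDI_INTR_UNCLAIMED return from every handler sharing the ino is what
 * feeds the unclaimed-interrupt accounting in px_spurintr() above.
 */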
uint_t
px_intx_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	ushort_t	pil = ipil_p->ipil_pil;
	uint_t		result = 0, r = DDI_INTR_UNCLAIMED;
	int		i;

	DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr: "
	    "ino=%x sysino=%llx pil=%x ih_size=%x ih_lst=%x\n",
	    ino_p->ino_ino, ino_p->ino_sysino, ipil_p->ipil_pil,
	    ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		if (ih_p->ih_intr_state == PX_INTR_STATE_DISABLE) {
			DBG(DBG_INTX_INTR, px_p->px_dip,
			    "px_intx_intr: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr: "
		    "ino=%x handler=%p arg1=%p arg2=%p\n",
		    ino_p->ino_ino, handler, arg1, arg2);

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (px_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (result)
		ino_p->ino_claimed |= (1 << pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_claimed) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(px_p->px_dip,
	    ino_p->ino_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}

/*
 * px_msiq_intr (MSI/X or PCIe MSG interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset to zero
 * and the interrupt state machine is idled. If no handler claims the
 * interrupt then the counter is incremented by one and the state machine
 * is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
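/*
 * Context for the routine below (summarized from this file's usage of the
 * px_lib/px_ib interfaces; the concrete numbers are made up): an MSI is
 * bound to an event queue with px_lib_msi_setmsiq(), the queue maps to a
 * device ino via px_msiqid_to_devino(), and the ino maps to the system
 * interrupt number via px_lib_intr_devino_to_sysino(). So a device MSI,
 * say msi_num 12, might be steered msiq_id 3 -> devino 27 -> sysino, and
 * it is that sysino whose wrapper handler is px_msiq_intr().
 */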
uint_t
px_msiq_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_msiq_t	*msiq_p = ino_p->ino_msiq_p;
	dev_info_t	*dip = px_p->px_dip;
	ushort_t	pil = ipil_p->ipil_pil;
	msiq_rec_t	msiq_rec, *msiq_rec_p = &msiq_rec;
	msiqhead_t	*curr_head_p;
	msiqtail_t	curr_tail_index;
	msgcode_t	msg_code;
	px_ih_t		*ih_p;
	uint_t		ret = DDI_INTR_UNCLAIMED;
	int		i, j;

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: msiq_id=%x ino=%x pil=%x "
	    "ih_size=%x ih_lst=%x\n", msiq_p->msiq_id, ino_p->ino_ino,
	    ipil_p->ipil_pil, ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	/*
	 * px_msiq_intr() handles multiple interrupt priorities and it
	 * will set msiq_p->msiq_recs2process to the number of MSIQ records to
	 * process while handling the highest priority interrupt. Subsequent
	 * lower priority interrupts will just process any unprocessed MSIQ
	 * records or will just return immediately.
	 */
	if (msiq_p->msiq_recs2process == 0) {
		/* Read current MSIQ tail index */
		px_lib_msiq_gettail(dip, msiq_p->msiq_id, &curr_tail_index);
		msiq_p->msiq_new_head_index = msiq_p->msiq_curr_head_index;

		if (curr_tail_index < msiq_p->msiq_curr_head_index)
			curr_tail_index += msiq_state_p->msiq_rec_cnt;

		msiq_p->msiq_recs2process = curr_tail_index -
		    msiq_p->msiq_curr_head_index;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "recs2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* If all MSIQ records are already processed, return immediately */
	if ((msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index)
	    == msiq_p->msiq_recs2process)
		goto intr_done;

	curr_head_p = (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p +
	    (msiq_p->msiq_curr_head_index * sizeof (msiq_rec_t)));

	/*
	 * Calculate the number of recs to process by taking the difference
	 * between the head and tail pointers. For all records we always
	 * verify that we have a valid record type before we do any processing.
	 * If triggered, we should always have at least one valid record.
	 */
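	/*
	 * Worked example of the ring arithmetic (queue size made up): with
	 * msiq_rec_cnt = 8, msiq_curr_head_index = 6 and a hardware tail
	 * of 2, the tail has wrapped, so it is biased to 10 and
	 * recs2process = 10 - 6 = 4, i.e. records 6, 7, 0 and 1. The loop
	 * below walks exactly that many records, wrapping curr_head_p back
	 * to msiq_base_p at the end of the ring.
	 */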
	for (i = 0; i < msiq_p->msiq_recs2process; i++) {
		/* Read next MSIQ record */
		px_lib_get_msiq_rec(dip, curr_head_p, msiq_rec_p);

		DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSIQ RECORD, "
		    "msiq_rec_type 0x%llx msiq_rec_rid 0x%llx\n",
		    msiq_rec_p->msiq_rec_type, msiq_rec_p->msiq_rec_rid);

		if (!msiq_rec_p->msiq_rec_type)
			goto next_rec;

		/* Check MSIQ record type */
		switch (msiq_rec_p->msiq_rec_type) {
		case MSG_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msg.msg_code;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: PCIE MSG "
			    "record, msg type 0x%x\n", msg_code);
			break;
		case MSI32_REC:
		case MSI64_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msi.msi_data;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSI record, "
			    "msi 0x%x\n", msg_code);

			/* Clear MSI state */
			px_lib_msi_setstate(dip, (msinum_t)msg_code,
			    PCI_MSI_STATE_IDLE);
			break;
		default:
			msg_code = 0;
			cmn_err(CE_WARN, "%s%d: px_msiq_intr: 0x%x MSIQ "
			    "record type is not supported",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    msiq_rec_p->msiq_rec_type);

			goto next_rec;
		}

		/*
		 * Scan through px_ih_t linked list, searching for the
		 * right px_ih_t, matching MSIQ record data.
		 */
		for (j = 0, ih_p = ipil_p->ipil_ih_start;
		    ih_p && (j < ipil_p->ipil_ih_size) &&
		    ((ih_p->ih_msg_code != msg_code) ||
		    (ih_p->ih_rec_type != msiq_rec_p->msiq_rec_type));
		    ih_p = ih_p->ih_next, j++)
			;

		if (ih_p && (ih_p->ih_msg_code == msg_code) &&
		    (ih_p->ih_rec_type == msiq_rec_p->msiq_rec_type)) {
			dev_info_t *dip = ih_p->ih_dip;
			uint_t (*handler)() = ih_p->ih_handler;
			caddr_t arg1 = ih_p->ih_handler_arg1;
			caddr_t arg2 = ih_p->ih_handler_arg2;

			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: ino=%x data=%x "
			    "handler=%p arg1=%p arg2=%p\n", ino_p->ino_ino,
			    msg_code, handler, arg1, arg2);

			DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, caddr_t, arg2);

			/*
			 * Special case for PCIE Error Messages.
			 * The current framework doesn't fit PCIE Err Msgs.
			 * This should be fixed when PCIE MESSAGES as a whole
			 * is architected correctly.
			 */
			if ((msg_code == PCIE_MSG_CODE_ERR_COR) ||
			    (msg_code == PCIE_MSG_CODE_ERR_NONFATAL) ||
			    (msg_code == PCIE_MSG_CODE_ERR_FATAL)) {
				ret = px_err_fabric_intr(px_p, msg_code,
				    msiq_rec_p->msiq_rec_rid);
			} else
				ret = (*handler)(arg1, arg2);

			/*
			 * Account for time used by this interrupt. Protect
			 * against conflicting writes to ih_ticks from
			 * ib_intr_dist_all() by using atomic ops.
			 */
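			/*
			 * (As in px_intx_intr(), intr_get_time() accounting
			 * is done only at or below LOCK_LEVEL: high-level
			 * interrupts do not run as interrupt threads and
			 * are not timed here.)
			 */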
			if (pil <= LOCK_LEVEL)
				atomic_add_64(&ih_p->ih_ticks, intr_get_time());

			DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, int, ret);

			msiq_p->msiq_new_head_index++;
			px_lib_clr_msiq_rec(dip, curr_head_p);
		} else {
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: "
			    "No matching MSIQ record found\n");
		}
next_rec:
		/* Get the pointer to the next EQ record */
		curr_head_p = (msiqhead_t *)
		    ((caddr_t)curr_head_p + sizeof (msiq_rec_t));

		/* Check for overflow condition */
		if (curr_head_p >= (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p
		    + (msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t))))
			curr_head_p = (msiqhead_t *)msiq_p->msiq_base_p;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: No. of MSIQ recs processed "
	    "%x\n",
	    (msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index));

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "recs2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* ino_claimed is used just for debugging purposes */
	if (ret)
		ino_p->ino_claimed |= (1 << pil);

intr_done:
	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (msiq_p->msiq_new_head_index <= msiq_p->msiq_curr_head_index) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	/* Update MSIQ head index with the number of MSIQ records processed */
	if (msiq_p->msiq_new_head_index >= msiq_state_p->msiq_rec_cnt)
		msiq_p->msiq_new_head_index -= msiq_state_p->msiq_rec_cnt;

	msiq_p->msiq_curr_head_index = msiq_p->msiq_new_head_index;
	px_lib_msiq_sethead(dip, msiq_p->msiq_id, msiq_p->msiq_new_head_index);

	msiq_p->msiq_new_head_index = 0;
	msiq_p->msiq_recs2process = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}

dev_info_t *
px_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}
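/*
 * Context for the two bus_intr_op implementations below (standard DDI
 * usage, not code from this file; xx_intr and softc are placeholders):
 * a leaf driver calls, for example,
 *
 *	ddi_intr_alloc(dip, htable, DDI_INTR_TYPE_FIXED, 0, 1, &actual, 0);
 *	ddi_intr_add_handler(htable[0], xx_intr, softc, NULL);
 *	ddi_intr_enable(htable[0]);
 *
 * and the framework routes each call up the devinfo tree to this nexus,
 * where it arrives as a DDI_INTROP_* command: ALLOC, ADDISR and ENABLE
 * respectively for the calls above. px_intx_ops() services fixed/INTx
 * interrupts and px_msix_ops() services MSI and MSI-X.
 */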
/* ARGSUSED */
int
px_intx_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	int	ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intx_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_intx_ops: SetCap is not "
		    "supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = px_add_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = px_rem_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_ENABLE, 0, 0);
		break;
	case DDI_INTROP_DISABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_DISABLE, 0, 0);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

/* ARGSUSED */
int
px_msix_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	msiq_rec_type_t	msiq_rec_type;
	msi_type_t	msi_type;
	uint64_t	msi_addr;
	msinum_t	msi_num;
	msiqid_t	msiq_id;
	uint_t		nintrs;
	int		i, ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_msix_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	/* Check for MSI64 support */
	if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
		msiq_rec_type = MSI64_REC;
		msi_type = MSI64_TYPE;
		msi_addr = msi_state_p->msi_addr64;
	} else {
		msiq_rec_type = MSI32_REC;
		msi_type = MSI32_TYPE;
		msi_addr = msi_state_p->msi_addr32;
	}
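	/*
	 * (If both the device, via DDI_INTR_FLAG_MSI64 in ih_cap, and the
	 * nexus, via a non-zero msi_addr64, support 64-bit message
	 * addresses, 64-bit MSI records and addressing are used for the
	 * operations below; otherwise everything falls back to 32-bit.)
	 */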
	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_msi_get_cap(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_msix_ops: SetCap is not "
		    "supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * We need to restrict this allocation in future
		 * based on Resource Management policies.
		 */
		if ((ret = px_msi_alloc(px_p, rdip, hdlp->ih_type,
		    hdlp->ih_inum, hdlp->ih_scratch1,
		    (uintptr_t)hdlp->ih_scratch2,
		    (int *)result)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: allocation "
			    "failed, rdip 0x%p type 0x%d inum 0x%x "
			    "count 0x%x\n", rdip, hdlp->ih_type, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (ret);
		}

		if ((hdlp->ih_type == DDI_INTR_TYPE_MSIX) &&
		    (i_ddi_get_msix(rdip) == NULL)) {
			ddi_intr_msix_t	*msix_p;

			if ((msix_p = pci_msix_init(rdip)) != NULL) {
				i_ddi_set_msix(rdip, msix_p);
				break;
			}

			DBG(DBG_INTROPS, dip, "px_msix_ops: MSI-X allocation "
			    "failed, rdip 0x%p inum 0x%x\n", rdip,
			    hdlp->ih_inum);

			(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (DDI_FAILURE);
		}

		break;
	case DDI_INTROP_FREE:
		(void) pci_msi_unconfigure(rdip, hdlp->ih_type, hdlp->ih_inum);

		if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
			goto msi_free;

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if (((i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1) == 0) &&
		    (i_ddi_get_msix(rdip))) {
			pci_msix_fini(i_ddi_get_msix(rdip));
			i_ddi_set_msix(rdip, NULL);
		}
msi_free:
		(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		if ((ret = px_msi_get_msinum(px_p, hdlp->ih_dip,
		    hdlp->ih_inum, &msi_num)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
		    msiq_rec_type, msi_num, &msiq_id)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: Add MSI handler "
			    "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
			return (ret);
		}

		DBG(DBG_INTROPS, dip, "px_msix_ops: msiq used 0x%x\n",
		    msiq_id);

		if ((ret = px_lib_msi_setmsiq(dip, msi_num,
		    msiq_id, msi_type)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		hdlp->ih_vector = msi_num;
		break;
	case DDI_INTROP_DUPVEC:
		DBG(DBG_INTROPS, dip, "px_msix_ops: dupisr - inum: %x, "
		    "new_vector: %x\n", hdlp->ih_inum, hdlp->ih_scratch1);

		ret = pci_msix_dup(hdlp->ih_dip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_REMISR:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS)
			return (ret);

		ret = px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		hdlp->ih_vector = 0;
		break;
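	/*
	 * Note on the msi_num & ~(nintrs - 1) expression used in the
	 * ENABLE and BLOCKENABLE paths below: for MSI (unlike MSI-X) a
	 * device is granted a power-of-two block of vectors that share
	 * one data register, so the programmed base data value must be
	 * aligned to the block size. For example (made-up numbers), with
	 * nintrs = 4 and msi_num = 22, the base written to the device is
	 * 22 & ~3 = 20, and the device signals 20..23 for its 4 vectors.
	 */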
	case DDI_INTROP_ENABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_VALID)) != DDI_SUCCESS)
			return (ret);

		if ((pci_is_msi_enabled(rdip, hdlp->ih_type) != DDI_SUCCESS) ||
		    (hdlp->ih_type == DDI_INTR_TYPE_MSIX)) {
			nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);

			if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
			    nintrs, hdlp->ih_inum, msi_addr,
			    hdlp->ih_type == DDI_INTR_TYPE_MSIX ?
			    msi_num : msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
				return (ret);

			if ((ret = pci_msi_enable_mode(rdip, hdlp->ih_type))
			    != DDI_SUCCESS)
				return (ret);
		}

		if ((ret = pci_msi_clr_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
		    PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num);

		break;
	case DDI_INTROP_DISABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type,
		    hdlp->ih_cap & DDI_INTR_FLAG_BLOCK)) != DDI_SUCCESS)
			return (ret);

		if ((ret = pci_msi_set_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_INVALID)) != DDI_SUCCESS)
			return (ret);

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip,
		    hdlp->ih_inum, px_msiqid_to_devino(px_p, msiq_id),
		    hdlp->ih_pri, PX_INTR_STATE_DISABLE, msiq_rec_type,
		    msi_num);

		break;
	case DDI_INTROP_BLOCKENABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
		    nintrs, hdlp->ih_inum, msi_addr,
		    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_VALID)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_lib_msi_getmsiq(dip, msi_num,
			    &msiq_id)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_ib_update_intr_state(px_p, rdip,
			    hdlp->ih_inum + i, px_msiqid_to_devino(px_p,
			    msiq_id), hdlp->ih_pri, PX_INTR_STATE_ENABLE,
			    msiq_rec_type, msi_num)) != DDI_SUCCESS)
				return (ret);
		}

		ret = pci_msi_enable_mode(rdip, hdlp->ih_type);
		break;
	case DDI_INTROP_BLOCKDISABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type,
		    hdlp->ih_cap & DDI_INTR_FLAG_BLOCK)) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_INVALID)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_lib_msi_getmsiq(dip, msi_num,
			    &msiq_id)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_ib_update_intr_state(px_p, rdip,
			    hdlp->ih_inum + i, px_msiqid_to_devino(px_p,
			    msiq_id), hdlp->ih_pri, PX_INTR_STATE_DISABLE,
			    msiq_rec_type, msi_num)) != DDI_SUCCESS)
				return (ret);
		}

		break;
	case DDI_INTROP_SETMASK:
		ret = pci_msi_set_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_msi_get_pending(rdip, hdlp->ih_type,
		    hdlp->ih_inum, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_NAVAIL:
		/* XXX - a new interface may be needed */
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
*)result); 808 break; 809 case DDI_INTROP_GETPOOL: 810 if (msi_state_p->msi_pool_p == NULL) { 811 *(ddi_irm_pool_t **)result = NULL; 812 return (DDI_ENOTSUP); 813 } 814 *(ddi_irm_pool_t **)result = msi_state_p->msi_pool_p; 815 ret = DDI_SUCCESS; 816 break; 817 default: 818 ret = DDI_ENOTSUP; 819 break; 820 } 821 822 return (ret); 823 } 824 825 static struct { 826 kstat_named_t pxintr_ks_name; 827 kstat_named_t pxintr_ks_type; 828 kstat_named_t pxintr_ks_cpu; 829 kstat_named_t pxintr_ks_pil; 830 kstat_named_t pxintr_ks_time; 831 kstat_named_t pxintr_ks_ino; 832 kstat_named_t pxintr_ks_cookie; 833 kstat_named_t pxintr_ks_devpath; 834 kstat_named_t pxintr_ks_buspath; 835 } pxintr_ks_template = { 836 { "name", KSTAT_DATA_CHAR }, 837 { "type", KSTAT_DATA_CHAR }, 838 { "cpu", KSTAT_DATA_UINT64 }, 839 { "pil", KSTAT_DATA_UINT64 }, 840 { "time", KSTAT_DATA_UINT64 }, 841 { "ino", KSTAT_DATA_UINT64 }, 842 { "cookie", KSTAT_DATA_UINT64 }, 843 { "devpath", KSTAT_DATA_STRING }, 844 { "buspath", KSTAT_DATA_STRING }, 845 }; 846 847 static uint32_t pxintr_ks_instance; 848 static char ih_devpath[MAXPATHLEN]; 849 static char ih_buspath[MAXPATHLEN]; 850 kmutex_t pxintr_ks_template_lock; 851 852 int 853 px_ks_update(kstat_t *ksp, int rw) 854 { 855 px_ih_t *ih_p = ksp->ks_private; 856 int maxlen = sizeof (pxintr_ks_template.pxintr_ks_name.value.c); 857 px_ino_pil_t *ipil_p = ih_p->ih_ipil_p; 858 px_ino_t *ino_p = ipil_p->ipil_ino_p; 859 px_t *px_p = ino_p->ino_ib_p->ib_px_p; 860 devino_t ino; 861 sysino_t sysino; 862 863 ino = ino_p->ino_ino; 864 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, &sysino) != 865 DDI_SUCCESS) { 866 cmn_err(CE_WARN, "px_ks_update: px_lib_intr_devino_to_sysino " 867 "failed"); 868 } 869 870 (void) snprintf(pxintr_ks_template.pxintr_ks_name.value.c, maxlen, 871 "%s%d", ddi_driver_name(ih_p->ih_dip), 872 ddi_get_instance(ih_p->ih_dip)); 873 874 (void) ddi_pathname(ih_p->ih_dip, ih_devpath); 875 (void) ddi_pathname(px_p->px_dip, ih_buspath); 876 kstat_named_setstr(&pxintr_ks_template.pxintr_ks_devpath, ih_devpath); 877 kstat_named_setstr(&pxintr_ks_template.pxintr_ks_buspath, ih_buspath); 878 879 if (ih_p->ih_intr_state == PX_INTR_STATE_ENABLE) { 880 881 switch (i_ddi_intr_get_current_type(ih_p->ih_dip)) { 882 case DDI_INTR_TYPE_MSI: 883 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 884 "msi"); 885 break; 886 case DDI_INTR_TYPE_MSIX: 887 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 888 "msix"); 889 break; 890 default: 891 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 892 "fixed"); 893 break; 894 } 895 896 pxintr_ks_template.pxintr_ks_cpu.value.ui64 = ino_p->ino_cpuid; 897 pxintr_ks_template.pxintr_ks_pil.value.ui64 = ipil_p->ipil_pil; 898 pxintr_ks_template.pxintr_ks_time.value.ui64 = ih_p->ih_nsec + 899 (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks, 900 ino_p->ino_cpuid); 901 pxintr_ks_template.pxintr_ks_ino.value.ui64 = ino; 902 pxintr_ks_template.pxintr_ks_cookie.value.ui64 = sysino; 903 } else { 904 (void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c, 905 "disabled"); 906 pxintr_ks_template.pxintr_ks_cpu.value.ui64 = 0; 907 pxintr_ks_template.pxintr_ks_pil.value.ui64 = 0; 908 pxintr_ks_template.pxintr_ks_time.value.ui64 = 0; 909 pxintr_ks_template.pxintr_ks_ino.value.ui64 = 0; 910 pxintr_ks_template.pxintr_ks_cookie.value.ui64 = 0; 911 } 912 return (0); 913 } 914 915 void 916 px_create_intr_kstats(px_ih_t *ih_p) 917 { 918 msiq_rec_type_t rec_type = ih_p->ih_rec_type; 919 920 ASSERT(ih_p->ih_ksp == NULL); 921 922 /* 923 * Create pci_intrs::: 
void
px_create_intr_kstats(px_ih_t *ih_p)
{
	msiq_rec_type_t rec_type = ih_p->ih_rec_type;

	ASSERT(ih_p->ih_ksp == NULL);

	/*
	 * Create pci_intrs::: kstats for all ih types except messages,
	 * which represent unusual conditions and don't need to be tracked.
	 */
	if (rec_type == 0 || rec_type == MSI32_REC || rec_type == MSI64_REC) {
		ih_p->ih_ksp = kstat_create("pci_intrs",
		    atomic_inc_32_nv(&pxintr_ks_instance), "config",
		    "interrupts", KSTAT_TYPE_NAMED,
		    sizeof (pxintr_ks_template) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);
	}
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pxintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pxintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = px_ks_update;
	}
}

/*
 * px_add_intx_intr:
 *
 * This function is called to register INTx and legacy hardware
 * interrupt pin interrupts.
 */
int
px_add_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: rdip=%s%d ino=%x "
	    "handler=%x arg1=%x arg2=%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), ino, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, 0, 0);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0)) {
			DBG(DBG_A_INTX, dip, "px_add_intx_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_intx_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_intx_intr:
 *
 * This function is called to unregister INTx and legacy hardware
 * interrupt pin interrupts.
 */
int
px_rem_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_R_INTX, dip, "px_rem_intx_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}

/*
 * px_add_msiq_intr:
 *
 * This function is called to register MSI/Xs and PCIe message interrupts.
 */
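/*
 * Layout of the bookkeeping that px_add_intx_intr() above and
 * px_add_msiq_intr() below maintain (reconstructed from the accessors
 * used in this file):
 *
 *	px_ino_t (one per devino; ino_lopil is the lowest registered PIL)
 *	    -> px_ino_pil_t list (one per PIL sharing the ino)
 *	        -> px_ih_t list (one per registered handler at that PIL,
 *	           ipil_ih_size entries starting at ipil_ih_start)
 *
 * A new px_ino_pil_t is created only when no handler at that PIL exists
 * for the ino yet; otherwise the handler is added to the existing list.
 */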
int
px_add_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t *msiq_id_p)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_msiq_state_t	*msiq_state_p = &ib_p->ib_msiq_state;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: rdip=%s%d handler=%x "
	    "arg1=%x arg2=%x\n", ddi_driver_name(rdip), ddi_get_instance(rdip),
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if ((ret = px_msiq_alloc(px_p, rec_type, msiq_id_p)) != DDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
		    "msiq allocation failed\n");
		return (ret);
	}

	ino = px_msiqid_to_devino(px_p, *msiq_id_p);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rec_type, msg_code);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip,
		    hdlp->ih_inum, rec_type, msg_code)) {
			DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	ino_p->ino_msiq_p = msiq_state_p->msiq_p +
	    (*msiq_id_p - msiq_state_p->msiq_1st_msiq_id);

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_msiq_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable MSIQ */
		px_lib_msiq_setstate(dip, *msiq_id_p, PCI_MSIQ_STATE_IDLE);
		px_lib_msiq_setvalid(dip, *msiq_id_p, PCI_MSIQ_VALID);

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: Failed! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_msiq_intr:
 *
 * This function is called to unregister MSI/Xs and PCIe message interrupts.
 */
int
px_rem_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t msiq_id)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino = px_msiqid_to_devino(px_p, msiq_id);
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_rem_msiq_intr: rdip=%s%d msiq_id=%x ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), msiq_id, ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, rec_type,
	    msg_code);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);

		if (ino_p->ino_ipil_size == 0)
			px_lib_msiq_setvalid(dip,
			    px_devino_to_msiqid(px_p, ino), PCI_MSIQ_INVALID);

		(void) px_msiq_free(px_p, msiq_id);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}