/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PX nexus interrupt handling:
 *	PX device interrupt handler wrapper
 *	PIL lookup routine
 *	PX device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include "px_obj.h"
#include <sys/ontrap.h>
#include <sys/membar.h>
#include <sys/clock.h>

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to
 * px_intr_wrapper() which normally idles the ino's state machine which would
 * precipitate another trip round the loop.
 *
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering. See the comment at the
 * beginning of px_intr_wrapper() explaining how the 'interrupt jabber
 * protection' code does this.
 */

/*LINTLIBRARY*/
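
/*
 * Editor's example (illustrative only; the values below are made up):
 * the jabber protection consults the module tunables used by
 * px_spurintr() below.  Assuming the tunable names stay
 * px_unclaimed_intr_max and px_spurintr_duration, they could be
 * adjusted from /etc/system like this:
 *
 *	set px:px_unclaimed_intr_max = 0x20
 *	set px:px_spurintr_duration = 0x100000
 *
 * A larger px_unclaimed_intr_max tolerates more unclaimed interrupts
 * before an ino is blocked; px_spurintr_duration bounds the time window
 * (in microseconds, as it is compared against drv_hztousec() of the
 * elapsed lbolt ticks) within which the unclaimed count must accumulate.
 */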

/*
 * If the unclaimed interrupt count has reached the limit set by
 * px_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
static int
px_spurintr(px_ino_pil_t *ipil_p)
{
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	char		*err_fmt_str;
	boolean_t	blocked = B_FALSE;
	int		i;

	if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= px_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > px_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(px_p->px_dip), ino_p->ino_ino);
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");

	/* Clear the pending state */
	if (blocked == B_FALSE) {
		if (px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
		    INTR_IDLE_STATE) != DDI_SUCCESS)
			return (DDI_INTR_UNCLAIMED);
	}

	return (DDI_INTR_CLAIMED);
}

extern uint64_t intr_get_time(void);
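
/*
 * Editor's sketch of the structures the wrappers below walk (field names
 * taken from their uses in this file; layout is schematic, not exact):
 * each px_ino_t describes one ino and carries a list of px_ino_pil_t,
 * one per PIL sharing that ino; each px_ino_pil_t in turn carries a
 * list of px_ih_t, one per registered handler.
 *
 *	px_ino_t (ino_ino, ino_sysino, ino_cpuid, ino_unclaimed_intrs)
 *	    +-- px_ino_pil_t (ipil_pil, ipil_ih_size)
 *	    |       +-- px_ih_t -> px_ih_t -> ...  (ih_handler, ih_ticks)
 *	    +-- px_ino_pil_t (ipil_pil, ipil_ih_size)
 *	            +-- px_ih_t -> ...
 */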

/*
 * px_intx_intr (INTx or legacy interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset to zero
 * and the interrupt state machine is idled. If no handler claims the
 * interrupt then the counter is incremented by one and the state machine
 * is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_intx_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_ih_t		*ih_p = ipil_p->ipil_ih_start;
	ushort_t	pil = ipil_p->ipil_pil;
	uint_t		result = 0, r = DDI_INTR_UNCLAIMED;
	int		i;

	DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
	    "ino=%x sysino=%llx pil=%x ih_size=%x ih_lst=%x\n",
	    ino_p->ino_ino, ino_p->ino_sysino, ipil_p->ipil_pil,
	    ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		if (ih_p->ih_intr_state == PX_INTR_STATE_DISABLE) {
			DBG(DBG_INTX_INTR, px_p->px_dip,
			    "px_intx_intr: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
		    "ino=%x handler=%p arg1=%p arg2=%p\n",
		    ino_p->ino_ino, handler, arg1, arg2);

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (px_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (result)
		ino_p->ino_claimed |= (1 << pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_claimed) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(px_p->px_dip,
	    ino_p->ino_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}
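
/*
 * Editor's example (illustrative only; mydrv, its softstate, and its
 * register helpers are hypothetical): a child driver's INTx handler of
 * the kind invoked through the loop above.  Returning DDI_INTR_UNCLAIMED
 * from every handler sharing the ino is what feeds the
 * unclaimed-interrupt accounting in px_spurintr().
 *
 *	static uint_t
 *	mydrv_intr(caddr_t arg1, caddr_t arg2)
 *	{
 *		mydrv_state_t *sp = (mydrv_state_t *)arg1;
 *
 *		if (!(mydrv_get_status(sp) & MYDRV_INT_PENDING))
 *			return (DDI_INTR_UNCLAIMED);	// not our device
 *
 *		mydrv_clear_status(sp);			// quiesce the source
 *		return (DDI_INTR_CLAIMED);
 *	}
 */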

/*
 * px_msiq_intr (MSI/X or PCIe MSG interrupt handler)
 *
 * This routine is used as a wrapper around interrupt handlers installed by
 * child device drivers. This routine invokes the driver interrupt handlers
 * and examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset to zero
 * and the interrupt state machine is idled. If no handler claims the
 * interrupt then the counter is incremented by one and the state machine
 * is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled, thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_msiq_intr(caddr_t arg)
{
	px_ino_pil_t	*ipil_p = (px_ino_pil_t *)arg;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_msiq_t	*msiq_p = ino_p->ino_msiq_p;
	dev_info_t	*dip = px_p->px_dip;
	ushort_t	pil = ipil_p->ipil_pil;
	msiq_rec_t	msiq_rec, *msiq_rec_p = &msiq_rec;
	msiqhead_t	*curr_head_p;
	msiqtail_t	curr_tail_index;
	msgcode_t	msg_code;
	px_ih_t		*ih_p;
	uint_t		ret = DDI_INTR_UNCLAIMED;
	int		i, j;

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: msiq_id=%x ino=%x pil=%x "
	    "ih_size=%x ih_lst=%x\n", msiq_p->msiq_id, ino_p->ino_ino,
	    ipil_p->ipil_pil, ipil_p->ipil_ih_size, ipil_p->ipil_ih_head);

	/*
	 * px_msiq_intr() handles multiple interrupt priorities and it
	 * will set msiq_p->msiq_recs2process to the number of MSIQ records
	 * to process while handling the highest priority interrupt.
	 * Subsequent lower priority interrupts will just process any
	 * unprocessed MSIQ records or will just return immediately.
	 */
	if (msiq_p->msiq_recs2process == 0) {
		/* Read current MSIQ tail index */
		px_lib_msiq_gettail(dip, msiq_p->msiq_id, &curr_tail_index);
		msiq_p->msiq_new_head_index = msiq_p->msiq_curr_head_index;

		if (curr_tail_index < msiq_p->msiq_curr_head_index)
			curr_tail_index += msiq_state_p->msiq_rec_cnt;

		msiq_p->msiq_recs2process = curr_tail_index -
		    msiq_p->msiq_curr_head_index;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "recs2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* If all MSIQ records are already processed, return immediately */
	if ((msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index)
	    == msiq_p->msiq_recs2process)
		goto intr_done;

	curr_head_p = (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p +
	    (msiq_p->msiq_curr_head_index * sizeof (msiq_rec_t)));

	/*
	 * Calculate the number of recs to process by taking the difference
	 * between the head and tail pointers. For all records we always
	 * verify that we have a valid record type before we do any processing.
	 * If triggered, we should always have at least one valid record.
	 */
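	/*
	 * Editor's worked example of the wraparound arithmetic above
	 * (numbers are hypothetical): with msiq_rec_cnt = 8,
	 * curr_head_index = 6 and a raw tail index of 2, the tail has
	 * wrapped, so it is biased to 2 + 8 = 10 and recs2process =
	 * 10 - 6 = 4; records at indices 6, 7, 0 and 1 are processed.
	 * The loop below likewise wraps curr_head_p back to msiq_base_p
	 * when it runs off the end of the queue.
	 */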
	for (i = 0; i < msiq_p->msiq_recs2process; i++) {
		/* Read next MSIQ record */
		px_lib_get_msiq_rec(dip, curr_head_p, msiq_rec_p);

		DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSIQ RECORD, "
		    "msiq_rec_type 0x%llx msiq_rec_rid 0x%llx\n",
		    msiq_rec_p->msiq_rec_type, msiq_rec_p->msiq_rec_rid);

		if (!msiq_rec_p->msiq_rec_type)
			goto next_rec;

		/* Check MSIQ record type */
		switch (msiq_rec_p->msiq_rec_type) {
		case MSG_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msg.msg_code;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: PCIE MSG "
			    "record, msg type 0x%x\n", msg_code);
			break;
		case MSI32_REC:
		case MSI64_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msi.msi_data;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSI record, "
			    "msi 0x%x\n", msg_code);

			/* Clear MSI state */
			px_lib_msi_setstate(dip, (msinum_t)msg_code,
			    PCI_MSI_STATE_IDLE);
			break;
		default:
			msg_code = 0;
			cmn_err(CE_WARN, "%s%d: px_msiq_intr: 0x%x MSIQ "
			    "record type is not supported",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    msiq_rec_p->msiq_rec_type);

			goto next_rec;
		}

		/*
		 * Scan through px_ih_t linked list, searching for the
		 * right px_ih_t, matching MSIQ record data.
		 */
		for (j = 0, ih_p = ipil_p->ipil_ih_start;
		    ih_p && (j < ipil_p->ipil_ih_size) &&
		    ((ih_p->ih_msg_code != msg_code) ||
		    (ih_p->ih_rec_type != msiq_rec_p->msiq_rec_type));
		    ih_p = ih_p->ih_next, j++)
			;

		if ((ih_p->ih_msg_code == msg_code) &&
		    (ih_p->ih_rec_type == msiq_rec_p->msiq_rec_type)) {
			dev_info_t *dip = ih_p->ih_dip;
			uint_t (*handler)() = ih_p->ih_handler;
			caddr_t arg1 = ih_p->ih_handler_arg1;
			caddr_t arg2 = ih_p->ih_handler_arg2;

			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: ino=%x data=%x "
			    "handler=%p arg1=%p arg2=%p\n", ino_p->ino_ino,
			    msg_code, handler, arg1, arg2);

			DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, caddr_t, arg2);

			/*
			 * Special case for PCIe error messages.
			 * The current framework doesn't fit PCIe error
			 * messages. This should be fixed when PCIe MESSAGES
			 * as a whole are architected correctly.
			 */
			if ((msg_code == PCIE_MSG_CODE_ERR_COR) ||
			    (msg_code == PCIE_MSG_CODE_ERR_NONFATAL) ||
			    (msg_code == PCIE_MSG_CODE_ERR_FATAL)) {
				ret = px_err_fabric_intr(px_p, msg_code,
				    msiq_rec_p->msiq_rec_rid);
			} else
				ret = (*handler)(arg1, arg2);

			/*
			 * Account for time used by this interrupt. Protect
			 * against conflicting writes to ih_ticks from
			 * ib_intr_dist_all() by using atomic ops.
			 */

			if (pil <= LOCK_LEVEL)
				atomic_add_64(&ih_p->ih_ticks,
				    intr_get_time());

			DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, int, ret);

			msiq_p->msiq_new_head_index++;
			px_lib_clr_msiq_rec(dip, curr_head_p);
		} else {
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr:"
			    "No matching MSIQ record found\n");
		}
next_rec:
		/* Get the pointer to the next EQ record */
		curr_head_p = (msiqhead_t *)
		    ((caddr_t)curr_head_p + sizeof (msiq_rec_t));

		/* Check for overflow condition */
		if (curr_head_p >= (msiqhead_t *)((caddr_t)msiq_p->msiq_base_p
		    + (msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t))))
			curr_head_p = (msiqhead_t *)msiq_p->msiq_base_p;
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: No of MSIQ recs processed %x\n",
	    (msiq_p->msiq_new_head_index - msiq_p->msiq_curr_head_index));

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: curr_head %x new_head %x "
	    "recs2process %x\n", msiq_p->msiq_curr_head_index,
	    msiq_p->msiq_new_head_index, msiq_p->msiq_recs2process);

	/* ino_claimed is used just for debugging purposes */
	if (ret)
		ino_p->ino_claimed |= (1 << pil);

intr_done:
	/* Interrupt can only be cleared after all pil levels are handled */
	if (pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (msiq_p->msiq_new_head_index <= msiq_p->msiq_curr_head_index) {
		if (px_unclaimed_intr_block)
			return (px_spurintr(ipil_p));
	}

	/* Update MSIQ head index with no of MSIQ records processed */
	if (msiq_p->msiq_new_head_index >= msiq_state_p->msiq_rec_cnt)
		msiq_p->msiq_new_head_index -= msiq_state_p->msiq_rec_cnt;

	msiq_p->msiq_curr_head_index = msiq_p->msiq_new_head_index;
	px_lib_msiq_sethead(dip, msiq_p->msiq_id, msiq_p->msiq_new_head_index);

	msiq_p->msiq_new_head_index = 0;
	msiq_p->msiq_recs2process = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}

dev_info_t *
px_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}
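
/*
 * Editor's example (illustrative; mydrv names are hypothetical): the
 * intr_ops entry points below are not called by leaf drivers directly.
 * A child driver uses the generic DDI interrupt interfaces, and the
 * framework routes each call to px_intx_ops()/px_msix_ops() as the
 * nexus backend:
 *
 *	ddi_intr_handle_t hdl;
 *	int nintrs, count;
 *
 *	(void) ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &nintrs);
 *	(void) ddi_intr_alloc(dip, &hdl, DDI_INTR_TYPE_FIXED, 0, 1,
 *	    &count, DDI_INTR_ALLOC_STRICT);	// -> DDI_INTROP_ALLOC
 *	(void) ddi_intr_add_handler(hdl, mydrv_intr,
 *	    (caddr_t)sp, NULL);			// -> DDI_INTROP_ADDISR
 *	(void) ddi_intr_enable(hdl);		// -> DDI_INTROP_ENABLE
 */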
484 hdlp->ih_pri : pci_class_to_pil(rdip); 485 break; 486 case DDI_INTROP_SETPRI: 487 break; 488 case DDI_INTROP_ADDISR: 489 ret = px_add_intx_intr(dip, rdip, hdlp); 490 break; 491 case DDI_INTROP_REMISR: 492 ret = px_rem_intx_intr(dip, rdip, hdlp); 493 break; 494 case DDI_INTROP_ENABLE: 495 ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 496 hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_ENABLE, 0, 0); 497 break; 498 case DDI_INTROP_DISABLE: 499 ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum, 500 hdlp->ih_vector, hdlp->ih_pri, PX_INTR_STATE_DISABLE, 0, 0); 501 break; 502 case DDI_INTROP_SETMASK: 503 ret = pci_intx_set_mask(rdip); 504 break; 505 case DDI_INTROP_CLRMASK: 506 ret = pci_intx_clr_mask(rdip); 507 break; 508 case DDI_INTROP_GETPENDING: 509 ret = pci_intx_get_pending(rdip, (int *)result); 510 break; 511 case DDI_INTROP_NINTRS: 512 case DDI_INTROP_NAVAIL: 513 *(int *)result = i_ddi_get_intx_nintrs(rdip); 514 break; 515 default: 516 ret = DDI_ENOTSUP; 517 break; 518 } 519 520 return (ret); 521 } 522 523 /* ARGSUSED */ 524 int 525 px_msix_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 526 ddi_intr_handle_impl_t *hdlp, void *result) 527 { 528 px_t *px_p = DIP_TO_STATE(dip); 529 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 530 msiq_rec_type_t msiq_rec_type; 531 msi_type_t msi_type; 532 uint64_t msi_addr; 533 msinum_t msi_num; 534 msiqid_t msiq_id; 535 uint_t nintrs; 536 int i, ret = DDI_SUCCESS; 537 538 DBG(DBG_INTROPS, dip, "px_msix_ops: dip=%x rdip=%x intr_op=%x " 539 "handle=%p\n", dip, rdip, intr_op, hdlp); 540 541 /* Check for MSI64 support */ 542 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) { 543 msiq_rec_type = MSI64_REC; 544 msi_type = MSI64_TYPE; 545 msi_addr = msi_state_p->msi_addr64; 546 } else { 547 msiq_rec_type = MSI32_REC; 548 msi_type = MSI32_TYPE; 549 msi_addr = msi_state_p->msi_addr32; 550 } 551 552 switch (intr_op) { 553 case DDI_INTROP_GETCAP: 554 ret = pci_msi_get_cap(rdip, hdlp->ih_type, (int *)result); 555 break; 556 case DDI_INTROP_SETCAP: 557 DBG(DBG_INTROPS, dip, "px_msix_ops: SetCap is not supported\n"); 558 ret = DDI_ENOTSUP; 559 break; 560 case DDI_INTROP_ALLOC: 561 /* 562 * We need to restrict this allocation in future 563 * based on Resource Management policies. 
		if ((ret = px_msi_alloc(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1, (uintptr_t)hdlp->ih_scratch2, &msi_num,
		    (int *)result)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: allocation "
			    "failed, rdip 0x%p type 0x%d inum 0x%x "
			    "count 0x%x\n", rdip, hdlp->ih_type, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (ret);
		}

		if ((hdlp->ih_type == DDI_INTR_TYPE_MSIX) &&
		    (i_ddi_get_msix(rdip) == NULL)) {
			ddi_intr_msix_t		*msix_p;

			if (msix_p = pci_msix_init(rdip)) {
				i_ddi_set_msix(rdip, msix_p);
				break;
			}

			DBG(DBG_INTROPS, dip, "px_msix_ops: MSI-X allocation "
			    "failed, rdip 0x%p inum 0x%x\n", rdip,
			    hdlp->ih_inum);

			(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
			    hdlp->ih_scratch1);

			return (DDI_FAILURE);
		}

		break;
	case DDI_INTROP_FREE:
		(void) pci_msi_disable_mode(rdip, hdlp->ih_type, NULL);
		(void) pci_msi_unconfigure(rdip, hdlp->ih_type, hdlp->ih_inum);

		if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
			goto msi_free;

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if (((i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1) == 0) &&
		    (i_ddi_get_msix(rdip))) {
			pci_msix_fini(i_ddi_get_msix(rdip));
			i_ddi_set_msix(rdip, NULL);
		}
msi_free:
		(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		if ((ret = px_msi_get_msinum(px_p, hdlp->ih_dip,
		    hdlp->ih_inum, &msi_num)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
		    msiq_rec_type, msi_num, &msiq_id)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: Add MSI handler "
			    "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
			return (ret);
		}

		DBG(DBG_INTROPS, dip, "px_msix_ops: msiq used 0x%x\n", msiq_id);

		if ((ret = px_lib_msi_setmsiq(dip, msi_num,
		    msiq_id, msi_type)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, msiq_rec_type, msi_num, msiq_id);
			return (ret);
		}

		hdlp->ih_vector = msi_num;
		break;
	case DDI_INTROP_DUPVEC:
		DBG(DBG_INTROPS, dip, "px_msix_ops: dupisr - inum: %x, "
		    "new_vector: %x\n", hdlp->ih_inum, hdlp->ih_scratch1);

		ret = pci_msix_dup(hdlp->ih_dip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_REMISR:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS)
			return (ret);

		ret = px_rem_msiq_intr(dip, rdip,
		    hdlp, msiq_rec_type, msi_num, msiq_id);

		hdlp->ih_vector = 0;
		break;
	case DDI_INTROP_ENABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_VALID)) != DDI_SUCCESS)
			return (ret);

		if ((pci_is_msi_enabled(rdip, hdlp->ih_type) != DDI_SUCCESS) ||
		    (hdlp->ih_type == DDI_INTR_TYPE_MSIX)) {
			nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);

			if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
			    nintrs, hdlp->ih_inum, msi_addr,
			    hdlp->ih_type == DDI_INTR_TYPE_MSIX ?
			    msi_num : msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
				return (ret);

			if ((ret = pci_msi_enable_mode(rdip, hdlp->ih_type))
			    != DDI_SUCCESS)
				return (ret);
		}

		if ((ret = pci_msi_clr_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
		    PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num);

		break;
	case DDI_INTROP_DISABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_set_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_INVALID)) != DDI_SUCCESS)
			return (ret);

		if (hdlp->ih_flags & DDI_INTR_MSIX_DUP)
			break;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		ret = px_ib_update_intr_state(px_p, rdip,
		    hdlp->ih_inum, px_msiqid_to_devino(px_p, msiq_id),
		    hdlp->ih_pri, PX_INTR_STATE_DISABLE, msiq_rec_type,
		    msi_num);

		break;
	case DDI_INTROP_BLOCKENABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
		    nintrs, hdlp->ih_inum, msi_addr,
		    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_VALID)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_lib_msi_getmsiq(dip, msi_num,
			    &msiq_id)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_ib_update_intr_state(px_p, rdip,
			    hdlp->ih_inum + i, px_msiqid_to_devino(px_p,
			    msiq_id), hdlp->ih_pri, PX_INTR_STATE_ENABLE,
			    msiq_rec_type, msi_num)) != DDI_SUCCESS)
				return (ret);
		}

		ret = pci_msi_enable_mode(rdip, hdlp->ih_type);
		break;
	case DDI_INTROP_BLOCKDISABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type,
		    hdlp->ih_cap & DDI_INTR_FLAG_BLOCK)) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_INVALID)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_lib_msi_getmsiq(dip, msi_num,
			    &msiq_id)) != DDI_SUCCESS)
				return (ret);

			if ((ret = px_ib_update_intr_state(px_p, rdip,
			    hdlp->ih_inum + i, px_msiqid_to_devino(px_p,
			    msiq_id), hdlp->ih_pri, PX_INTR_STATE_DISABLE,
			    msiq_rec_type, msi_num)) != DDI_SUCCESS)
				return (ret);
		}

		break;
	case DDI_INTROP_SETMASK:
		ret = pci_msi_set_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_msi_get_pending(rdip, hdlp->ih_type,
		    hdlp->ih_inum, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_NAVAIL:
		/* XXX - a new interface may be needed */
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
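
/*
 * Editor's worked example of the base-vector masking in the ENABLE and
 * BLOCKENABLE cases above (numbers are hypothetical): MSI vector blocks
 * come in power-of-two sizes, so with nintrs = 4 and msi_num = 0x26,
 * msi_num & ~(nintrs - 1) = 0x26 & ~3 = 0x24, the aligned base vector
 * of the block from which the device's MSI data value is offset.  MSI-X
 * vectors are programmed individually, so no masking is applied there.
 */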

static struct {
	kstat_named_t pxintr_ks_name;
	kstat_named_t pxintr_ks_type;
	kstat_named_t pxintr_ks_cpu;
	kstat_named_t pxintr_ks_pil;
	kstat_named_t pxintr_ks_time;
	kstat_named_t pxintr_ks_ino;
	kstat_named_t pxintr_ks_cookie;
	kstat_named_t pxintr_ks_devpath;
	kstat_named_t pxintr_ks_buspath;
} pxintr_ks_template = {
	{ "name", KSTAT_DATA_CHAR },
	{ "type", KSTAT_DATA_CHAR },
	{ "cpu", KSTAT_DATA_UINT64 },
	{ "pil", KSTAT_DATA_UINT64 },
	{ "time", KSTAT_DATA_UINT64 },
	{ "ino", KSTAT_DATA_UINT64 },
	{ "cookie", KSTAT_DATA_UINT64 },
	{ "devpath", KSTAT_DATA_STRING },
	{ "buspath", KSTAT_DATA_STRING },
};

static uint32_t	pxintr_ks_instance;
static char	ih_devpath[MAXPATHLEN];
static char	ih_buspath[MAXPATHLEN];
kmutex_t	pxintr_ks_template_lock;

int
px_ks_update(kstat_t *ksp, int rw)
{
	px_ih_t		*ih_p = ksp->ks_private;
	int		maxlen =
	    sizeof (pxintr_ks_template.pxintr_ks_name.value.c);
	px_ino_pil_t	*ipil_p = ih_p->ih_ipil_p;
	px_ino_t	*ino_p = ipil_p->ipil_ino_p;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	devino_t	ino;
	sysino_t	sysino;

	ino = ino_p->ino_ino;
	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino, &sysino) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN, "px_ks_update: px_lib_intr_devino_to_sysino "
		    "failed");
	}

	(void) snprintf(pxintr_ks_template.pxintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(px_p->px_dip, ih_buspath);
	kstat_named_setstr(&pxintr_ks_template.pxintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pxintr_ks_template.pxintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PX_INTR_STATE_ENABLE) {

		switch (i_ddi_intr_get_current_type(ih_p->ih_dip)) {
		case DDI_INTR_TYPE_MSI:
			(void) strcpy(
			    pxintr_ks_template.pxintr_ks_type.value.c, "msi");
			break;
		case DDI_INTR_TYPE_MSIX:
			(void) strcpy(
			    pxintr_ks_template.pxintr_ks_type.value.c, "msix");
			break;
		default:
			(void) strcpy(
			    pxintr_ks_template.pxintr_ks_type.value.c,
			    "fixed");
			break;
		}

		pxintr_ks_template.pxintr_ks_cpu.value.ui64 = ino_p->ino_cpuid;
		pxintr_ks_template.pxintr_ks_pil.value.ui64 = ipil_p->ipil_pil;
		pxintr_ks_template.pxintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
		    ino_p->ino_cpuid);
		pxintr_ks_template.pxintr_ks_ino.value.ui64 = ino;
		pxintr_ks_template.pxintr_ks_cookie.value.ui64 = sysino;
	} else {
		(void) strcpy(pxintr_ks_template.pxintr_ks_type.value.c,
		    "disabled");
		pxintr_ks_template.pxintr_ks_cpu.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_pil.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_time.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_ino.value.ui64 = 0;
		pxintr_ks_template.pxintr_ks_cookie.value.ui64 = 0;
	}
	return (0);
}

void
px_create_intr_kstats(px_ih_t *ih_p)
{
	msiq_rec_type_t rec_type = ih_p->ih_rec_type;

	ASSERT(ih_p->ih_ksp == NULL);

	/*
	 * Create pci_intrs::: kstats for all ih types except messages,
	 * which represent unusual conditions and don't need to be tracked.
	 */
	if (rec_type == 0 || rec_type == MSI32_REC || rec_type == MSI64_REC) {
		ih_p->ih_ksp = kstat_create("pci_intrs",
		    atomic_inc_32_nv(&pxintr_ks_instance), "config",
		    "interrupts", KSTAT_TYPE_NAMED,
		    sizeof (pxintr_ks_template) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);
	}
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pxintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pxintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = px_ks_update;
	}
}
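
/*
 * Editor's note (illustrative): the kstats created above can be read
 * from userland with kstat(1M), e.g. "kstat -m pci_intrs -n config",
 * which reports the name, type, cpu, pil, time, ino, cookie, devpath
 * and buspath fields filled in by px_ks_update() above.
 */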

/*
 * px_add_intx_intr:
 *
 * This function is called to register interrupts for INTx and legacy
 * hardware interrupt pins.
 */
int
px_add_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: rdip=%s%d ino=%x "
	    "handler=%x arg1=%x arg2=%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), ino, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, 0, 0);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0)) {
			DBG(DBG_A_INTX, dip, "px_add_intx_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_intx_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_intx_intr:
 *
 * This function is called to unregister interrupts for INTx and legacy
 * hardware interrupt pins.
 */
int
px_rem_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_R_INTX, dip, "px_rem_intx_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, 0, 0);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}

/*
 * px_add_msiq_intr:
 *
 * This function is called to register MSI/X and PCIe message interrupts.
 */
int
px_add_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t *msiq_id_p)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_msiq_state_t	*msiq_state_p = &ib_p->ib_msiq_state;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p, *ipil_list;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: rdip=%s%d handler=%x "
	    "arg1=%x arg2=%x\n", ddi_driver_name(rdip), ddi_get_instance(rdip),
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if ((ret = px_msiq_alloc(px_p, rec_type, msiq_id_p)) != DDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
		    "msiq allocation failed\n");
		return (ret);
	}

	ino = px_msiqid_to_devino(px_p, *msiq_id_p);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rec_type, msg_code);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (px_ib_intr_locate_ih(ipil_p, rdip,
		    hdlp->ih_inum, rec_type, msg_code)) {
			DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
			    "dup intr #%d\n", hdlp->ih_inum);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ipil_p,
		    ih_p)) != DDI_SUCCESS)
			goto fail1;

		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = px_ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	ino_p->ino_msiq_p = msiq_state_p->msiq_p +
	    (*msiq_id_p - msiq_state_p->msiq_1st_msiq_id);

	/* Save mondo value in hdlp */
	hdlp->ih_vector = ino_p->ino_sysino;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)px_msiq_intr, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail2;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* Select cpu, saving it for sharing and removal */
	if (ipil_list == NULL) {
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable MSIQ */
		px_lib_msiq_setstate(dip, *msiq_id_p, PCI_MSIQ_STATE_IDLE);
		px_lib_msiq_setvalid(dip, *msiq_id_p, PCI_MSIQ_VALID);

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

ino_done:
	/* Add weight to the cpu that we are already targeting */
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ipil_p = ipil_p;
	px_create_intr_kstats(ih_p);
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino_pil(ib_p, ipil_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: Failed! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}

/*
 * px_rem_msiq_intr:
 *
 * This function is called to unregister MSI/X and PCIe message interrupts.
 */
int
px_rem_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t msiq_id)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino = px_msiqid_to_devino(px_p, msiq_id);
	cpuid_t		curr_cpu;
	px_ino_t	*ino_p;
	px_ino_pil_t	*ipil_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_rem_msiq_intr: rdip=%s%d msiq_id=%x ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), msiq_id, ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ipil_p = px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = px_ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum, rec_type,
	    msg_code);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ipil_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ipil_p->ipil_ih_size == 0) {
		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino_pil(ib_p, ipil_p);

		if (ino_p->ino_ipil_size == 0)
			px_lib_msiq_setvalid(dip,
			    px_devino_to_msiqid(px_p, ino), PCI_MSIQ_INVALID);

		(void) px_msiq_free(px_p, msiq_id);
	}

	if (ino_p->ino_ipil_size == 0) {
		kmem_free(ino_p, sizeof (px_ino_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		PX_INTR_ENABLE(px_p->px_dip, ino_p->ino_sysino, curr_cpu);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}