/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PX nexus interrupt handling:
 *	PX device interrupt handler wrapper
 *	PIL lookup routine
 *	PX device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include "px_obj.h"

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to
 * px_intr_wrapper() which normally idles the ino's state machine which would
 * precipitate another trip round the loop.
 *
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering. See the comment at the
 * beginning of px_intr_wrapper() explaining how the 'interrupt jabber
 * protection' code does this.
 */

/*LINTLIBRARY*/
/*
 * If the unclaimed interrupt count has reached the limit set by
 * px_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
static int
px_spurintr(px_ib_ino_info_t *ino_p)
{
	px_ih_t		*ih_p = ino_p->ino_ih_start;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	char		*err_fmt_str;
	int		i;

	if (ino_p->ino_unclaimed > px_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_unclaimed)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed++;

	if (ino_p->ino_unclaimed <= px_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > px_spurintr_duration) {
		ino_p->ino_unclaimed = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	goto warn;
clear:
	/* Clear the pending state */
	if (px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(px_p->px_dip), ino_p->ino_ino);
	for (i = 0; i < ino_p->ino_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");
	return (DDI_INTR_CLAIMED);
}


extern uint64_t intr_get_time(void);
/*
 * px_intx_intr (legacy or intx interrupt handler)
 *
 * This routine is used as wrapper around interrupt handlers installed by child
 * device drivers. This routine invokes the driver interrupt handlers and
 * examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_intx_intr(caddr_t arg)
{
	px_ib_ino_info_t *ino_p = (px_ib_ino_info_t *)arg;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_ih_t		*ih_p = ino_p->ino_ih_start;
	uint_t		result = 0, r;
	int		i;

	DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
	    "ino=%x sysino=%llx pil=%x ih_size=%x ih_lst=%x\n",
	    ino_p->ino_ino, ino_p->ino_sysino, ino_p->ino_pil,
	    ino_p->ino_ih_size, ino_p->ino_ih_head);

	for (i = 0; i < ino_p->ino_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;

		if (ih_p->ih_intr_state == PX_INTR_STATE_DISABLE) {
			DBG(DBG_INTX_INTR, px_p->px_dip,
			    "px_intx_intr: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DBG(DBG_INTX_INTR, px_p->px_dip, "px_intx_intr:"
		    "ino=%x handler=%p arg1=%p arg2=%p\n",
		    ino_p->ino_ino, handler, arg1, arg2);

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (ino_p->ino_pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (px_check_all_handlers)
			continue;
		if (result)
			break;
	}

	if (!result && px_unclaimed_intr_block)
		return (px_spurintr(ino_p));

	ino_p->ino_unclaimed = 0;

	/* Clear the pending state */
	if (px_lib_intr_setstate(ino_p->ino_ib_p->ib_px_p->px_dip,
	    ino_p->ino_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}

/*
 * px_msiq_intr (MSI/MSIX/MSG interrupt handler)
 *
 * This routine is used as wrapper around interrupt handlers installed by child
 * device drivers. This routine invokes the driver interrupt handlers and
 * examines the return codes.
 *
 * There is a count of unclaimed interrupts kept on a per-ino basis. If at
 * least one handler claims the interrupt then the counter is reset and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by px_unclaimed_intr_max
 * then the interrupt state machine is not idled thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */
uint_t
px_msiq_intr(caddr_t arg)
{
	px_ib_ino_info_t *ino_p = (px_ib_ino_info_t *)arg;
	px_t		*px_p = ino_p->ino_ib_p->ib_px_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_msiq_t	*msiq_p = ino_p->ino_msiq_p;
	dev_info_t	*dip = px_p->px_dip;
	msiq_rec_t	msiq_rec, *msiq_rec_p = &msiq_rec;
	msiqhead_t	curr_msiq_rec_cnt, new_msiq_rec_cnt;
	msgcode_t	msg_code;
	px_ih_t		*ih_p;
	int		ret;

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: msiq_id=%x ino=%x pil=%x "
	    "ih_size=%x ih_lst=%x\n", msiq_p->msiq_id, ino_p->ino_ino,
	    ino_p->ino_pil, ino_p->ino_ih_size, ino_p->ino_ih_head);

	/* Read current MSIQ head index */
	px_lib_msiq_gethead(dip, msiq_p->msiq_id, &curr_msiq_rec_cnt);
	msiq_p->msiq_curr = (uint64_t)((caddr_t)msiq_p->msiq_base +
	    curr_msiq_rec_cnt * sizeof (msiq_rec_t));
	new_msiq_rec_cnt = curr_msiq_rec_cnt;

	/* Read next MSIQ record */
	px_lib_get_msiq_rec(dip, msiq_p, msiq_rec_p);

	/*
	 * Process current MSIQ record as long as request id
	 * field is non-zero.
	 */
	while (msiq_rec_p->msiq_rec_rid) {
		DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSIQ RECORD, "
		    "msiq_rec_type 0x%llx msiq_rec_rid 0x%llx\n",
		    msiq_rec_p->msiq_rec_type, msiq_rec_p->msiq_rec_rid);

		/* Get the pointer to the next EQ record */
		msiq_p->msiq_curr = (uint64_t)
		    ((caddr_t)msiq_p->msiq_curr + sizeof (msiq_rec_t));

		/* Check for overflow condition */
		if (msiq_p->msiq_curr >= (uint64_t)((caddr_t)msiq_p->msiq_base +
		    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t)))
			msiq_p->msiq_curr = msiq_p->msiq_base;

		/* Check MSIQ record type */
		switch (msiq_rec_p->msiq_rec_type) {
		case MSG_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msg.msg_code;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: PCIE MSG "
			    "record, msg type 0x%x\n", msg_code);
			break;
		case MSI32_REC:
		case MSI64_REC:
			msg_code = msiq_rec_p->msiq_rec_data.msi.msi_data;
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: MSI record, "
			    "msi 0x%x\n", msg_code);

			/* Clear MSI state */
			px_lib_msi_setstate(dip, (msinum_t)msg_code,
			    PCI_MSI_STATE_IDLE);
			break;
		default:
			msg_code = 0;
			cmn_err(CE_WARN, "%s%d: px_msiq_intr: 0x%x MSIQ "
			    "record type is not supported",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    msiq_rec_p->msiq_rec_type);
			goto next_rec;
		}

		ih_p = ino_p->ino_ih_start;

		/*
		 * Scan through px_ih_t linked list, searching for the
		 * right px_ih_t, matching MSIQ record data.
		 */
		while ((ih_p) && ((ih_p->ih_msg_code != msg_code) ||
		    (ih_p->ih_rec_type != msiq_rec_p->msiq_rec_type)))
			ih_p = ih_p->ih_next;

		if (ih_p) {
			dev_info_t *dip = ih_p->ih_dip;
			uint_t (*handler)() = ih_p->ih_handler;
			caddr_t arg1 = ih_p->ih_handler_arg1;
			caddr_t arg2 = ih_p->ih_handler_arg2;

			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: ino=%x data=%x "
			    "handler=%p arg1=%p arg2=%p\n", ino_p->ino_ino,
			    msg_code, handler, arg1, arg2);

			DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, caddr_t, arg2);

			if (msiq_rec_p->msiq_rec_type == MSG_REC)
				px_p->px_pec_p->pec_msiq_rec_p = msiq_rec_p;

			ret = (*handler)(arg1, arg2);

			/*
			 * Account for time used by this interrupt. Protect
			 * against conflicting writes to ih_ticks from
			 * ib_intr_dist_all() by using atomic ops.
			 */

			if (ino_p->ino_pil <= LOCK_LEVEL)
				atomic_add_64(&ih_p->ih_ticks, intr_get_time());

			DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
			    void *, handler, caddr_t, arg1, int, ret);
		} else {
			DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: "
			    "no matching MSIQ record found\n");

			/* px_spurintr(ino_p); */
			ino_p->ino_unclaimed++;
		}

next_rec:
		new_msiq_rec_cnt++;

		/* Zero out msiq_rec_rid field */
		msiq_rec_p->msiq_rec_rid = 0;

		/* Read next MSIQ record */
		px_lib_get_msiq_rec(dip, msiq_p, msiq_rec_p);
	}

	DBG(DBG_MSIQ_INTR, dip, "px_msiq_intr: number of MSIQ records "
	    "processed %x\n", (new_msiq_rec_cnt - curr_msiq_rec_cnt));

	/* Update MSIQ head index with number of MSIQ records processed */
	if (new_msiq_rec_cnt > curr_msiq_rec_cnt) {
		if (new_msiq_rec_cnt >= msiq_state_p->msiq_rec_cnt)
			new_msiq_rec_cnt -= msiq_state_p->msiq_rec_cnt;

		px_lib_msiq_sethead(dip, msiq_p->msiq_id, new_msiq_rec_cnt);
	}

	/* Clear the pending state */
	if (px_lib_intr_setstate(dip, ino_p->ino_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS)
		return (DDI_INTR_UNCLAIMED);

	return (DDI_INTR_CLAIMED);
}
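
/*
 * px_get_my_childs_dip:
 *	Walk up the device tree from rdip until the node whose parent is
 *	this nexus (dip) is found, i.e. the immediate child of dip through
 *	which the request from rdip arrived.
 */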
dev_info_t *
px_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

/* Default class to pil value mapping */
px_class_val_t px_default_pil [] = {
	{0x000000, 0xff0000, 0x1},	/* Class code for pre-2.0 devices */
	{0x010000, 0xff0000, 0x4},	/* Mass Storage Controller */
	{0x020000, 0xff0000, 0x6},	/* Network Controller */
	{0x030000, 0xff0000, 0x9},	/* Display Controller */
	{0x040000, 0xff0000, 0x9},	/* Multimedia Controller */
	{0x050000, 0xff0000, 0xb},	/* Memory Controller */
	{0x060000, 0xff0000, 0xb},	/* Bridge Controller */
	{0x0c0000, 0xffff00, 0x9},	/* Serial Bus, FireWire (IEEE 1394) */
	{0x0c0100, 0xffff00, 0x4},	/* Serial Bus, ACCESS.bus */
	{0x0c0200, 0xffff00, 0x4},	/* Serial Bus, SSA */
	{0x0c0300, 0xffff00, 0x9},	/* Serial Bus, Universal Serial Bus */
	{0x0c0400, 0xffff00, 0x6},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 0x6}	/* Serial Bus, Infiniband */
};
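
/*
 * Each px_class_val_t entry is matched as (key & mask) == (code & mask),
 * first match wins.  For example, a NIC whose "class-code" property is
 * 0x020000 matches the Network Controller entry above under the 0xff0000
 * mask and is assigned PIL 0x6, while serial bus devices are matched on
 * both base and sub class (mask 0xffff00).
 */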
/*
 * Default class to intr_weight value mapping (% of CPU).  A driver.conf
 * entry on or above the pci node like
 *
 *	pci-class-intr-weights= 0x020000, 0xff0000, 30;
 *
 * can be used to augment or override entries in the default table below.
 *
 * NB: The values below give NICs preference on redistribution, and provide
 * NICs some isolation from other interrupt sources. We need better interfaces
 * that allow the NIC driver to identify a specific NIC instance as high
 * bandwidth, and thus deserving of separation from other low bandwidth
 * NICs and additional isolation from other interrupt sources.
 *
 * NB: We treat Infiniband like a NIC.
 */
px_class_val_t px_default_intr_weight [] = {
	{0x020000, 0xff0000, 35},	/* Network Controller */
	{0x010000, 0xff0000, 10},	/* Mass Storage Controller */
	{0x0c0400, 0xffff00, 10},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 50}	/* Serial Bus, Infiniband */
};
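
/*
 * px_match_class_val:
 *	Return the class_val of the first table entry whose masked class
 *	code matches the masked key, or default_val if no entry matches.
 */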
static uint32_t
px_match_class_val(uint32_t key, px_class_val_t *rec_p, int nrec,
    uint32_t default_val)
{
	int	i;

	for (i = 0; i < nrec; rec_p++, i++) {
		if ((rec_p->class_code & rec_p->class_mask) ==
		    (key & rec_p->class_mask))
			return (rec_p->class_val);
	}

	return (default_val);
}

/*
 * px_class_to_val
 *
 * Return the configuration value, based on class code and sub class code,
 * from the specified property based or default px_class_val_t table.
 */
uint32_t
px_class_to_val(dev_info_t *rdip, char *property_name, px_class_val_t *rec_p,
    int nrec, uint32_t default_val)
{
	int		property_len;
	uint32_t	class_code;
	px_class_val_t	*conf;
	uint32_t	val = default_val;

	/*
	 * Use the "class-code" property to get the base and sub class
	 * codes for the requesting device.
	 */
	class_code = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
	    DDI_PROP_DONTPASS, "class-code", -1);

	if (class_code == -1)
		return (val);

	/* look up the val from the default table */
	val = px_match_class_val(class_code, rec_p, nrec, val);

	/* see if there is a more specific property specified value */
	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_NOTPROM,
	    property_name, (caddr_t)&conf, &property_len))
		return (val);

	if ((property_len % sizeof (px_class_val_t)) == 0)
		val = px_match_class_val(class_code, conf,
		    property_len / sizeof (px_class_val_t), val);
	kmem_free(conf, property_len);
	return (val);
}


/* px_class_to_pil: return the pil for a given device. */
uint32_t
px_class_to_pil(dev_info_t *rdip)
{
	uint32_t pil;

	/* default pil is 0 (uninitialized) */
	pil = px_class_to_val(rdip,
	    "pci-class-priorities", px_default_pil,
	    sizeof (px_default_pil) / sizeof (px_class_val_t), 0);

	/* range check the result */
	if (pil >= 0xf)
		pil = 0;

	return (pil);
}


/* px_class_to_intr_weight: return the intr_weight for a given device. */
static int32_t
px_class_to_intr_weight(dev_info_t *rdip)
{
	int32_t intr_weight;

	/* default weight is 0% */
	intr_weight = px_class_to_val(rdip,
	    "pci-class-intr-weights", px_default_intr_weight,
	    sizeof (px_default_intr_weight) / sizeof (px_class_val_t), 0);

	/* range check the result */
	if (intr_weight < 0)
		intr_weight = 0;
	if (intr_weight > 1000)
		intr_weight = 1000;

	return (intr_weight);
}
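
/*
 * px_intx_ops:
 *	bus_intr_op() entry point for fixed (INTx) interrupts.  Dispatches
 *	the DDI interrupt operations (get/set capability and priority,
 *	add/remove handler, enable/disable, mask and pending queries) for
 *	a child device rdip.
 */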
/* ARGSUSED */
int
px_intx_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	ddi_ispec_t	*ip = (ddi_ispec_t *)hdlp->ih_private;
	int		ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intx_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_intx_ops: SetCap is not supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = ip->is_pil ?
		    ip->is_pil : px_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		ip->is_pil = (*(int *)result);
		break;
	case DDI_INTROP_ADDISR:
		hdlp->ih_vector = *ip->is_intr;

		ret = px_add_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		hdlp->ih_vector = *ip->is_intr;

		ret = px_rem_intx_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    *ip->is_intr, PX_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
		    *ip->is_intr, PX_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		*(int *)result = DDI_INTR_TYPE_FIXED;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
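
/*
 * px_msix_ops:
 *	bus_intr_op() entry point for MSI/MSI-X interrupts.  Allocates and
 *	frees MSIs, binds them to MSIQs, and handles the enable/disable,
 *	mask and block operations for a child device rdip.
 */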
/* ARGSUSED */
int
px_msix_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	msinum_t	msi_num;
	msiqid_t	msiq_id;
	uint_t		nintrs;
	int		i, ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_msix_ops: dip=%x rdip=%x intr_op=%x "
	    "handle=%p\n", dip, rdip, intr_op, hdlp);

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		ret = pci_msi_get_cap(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		DBG(DBG_INTROPS, dip, "px_msix_ops: SetCap is not supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * We need to restrict this allocation in future
		 * based on Resource Management policies.
		 */
		if ((ret = px_msi_alloc(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1, hdlp->ih_scratch2, &msi_num,
		    (int *)result)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: MSI allocation "
			    "failed, rdip 0x%p inum 0x%x count 0x%x\n",
			    rdip, hdlp->ih_inum, hdlp->ih_scratch1);

			return (ret);
		}

		break;
	case DDI_INTROP_FREE:
		(void) pci_msi_disable_mode(rdip, hdlp->ih_type, hdlp->ih_inum);
		(void) pci_msi_unconfigure(rdip, hdlp->ih_type, hdlp->ih_inum);
		(void) px_msi_free(px_p, rdip, hdlp->ih_inum,
		    hdlp->ih_scratch1);
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : px_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		if ((ret = px_msi_get_msinum(px_p, hdlp->ih_dip,
		    hdlp->ih_inum, &msi_num)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
		    MSI32_REC, msi_num, &msiq_id)) != DDI_SUCCESS) {
			DBG(DBG_INTROPS, dip, "px_msix_ops: Add MSI handler "
			    "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
			return (ret);
		}

		DBG(DBG_INTROPS, dip, "px_msix_ops: msiq used 0x%x\n", msiq_id);

		if ((ret = px_lib_msi_setmsiq(dip, msi_num,
		    msiq_id, MSI32_TYPE)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, MSI32_REC, msi_num, msiq_id);
			return (ret);
		}

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_IDLE)) != DDI_SUCCESS) {
			(void) px_rem_msiq_intr(dip, rdip,
			    hdlp, MSI32_REC, msi_num, msiq_id);
			return (ret);
		}

		hdlp->ih_vector = msi_num;
		break;
	case DDI_INTROP_DUPVEC:
		DBG(DBG_INTROPS, dip, "px_msix_ops: DupIsr is not supported\n");
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_REMISR:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_getmsiq(dip, msi_num,
		    &msiq_id)) != DDI_SUCCESS)
			return (ret);

		if ((ret = px_lib_msi_setstate(dip, msi_num,
		    PCI_MSI_STATE_DELIVERED)) != DDI_SUCCESS)
			return (ret);

		ret = px_rem_msiq_intr(dip, rdip,
		    hdlp, MSI32_REC, msi_num, msiq_id);

		hdlp->ih_vector = 0;
		break;
	case DDI_INTROP_ENABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = px_lib_msi_setvalid(dip, msi_num,
		    PCI_MSI_VALID)) != DDI_SUCCESS)
			return (ret);

		if (pci_is_msi_enabled(rdip, hdlp->ih_type) != DDI_SUCCESS) {
			nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);

			if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
			    nintrs, hdlp->ih_inum, msi_state_p->msi_addr32,
			    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
				return (ret);

			if ((ret = pci_msi_enable_mode(rdip, hdlp->ih_type,
			    hdlp->ih_inum)) != DDI_SUCCESS)
				return (ret);
		}

		ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum);

		break;
	case DDI_INTROP_DISABLE:
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_set_mask(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		ret = px_lib_msi_setvalid(dip, msi_num, PCI_MSI_INVALID);
		break;
	case DDI_INTROP_BLOCKENABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_configure(rdip, hdlp->ih_type,
		    nintrs, hdlp->ih_inum, msi_state_p->msi_addr32,
		    msi_num & ~(nintrs - 1))) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_VALID)) != DDI_SUCCESS)
				return (ret);
		}

		ret = pci_msi_enable_mode(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_BLOCKDISABLE:
		nintrs = i_ddi_intr_get_current_nintrs(hdlp->ih_dip);
		msi_num = hdlp->ih_vector;

		if ((ret = pci_msi_disable_mode(rdip, hdlp->ih_type,
		    hdlp->ih_inum)) != DDI_SUCCESS)
			return (ret);

		for (i = 0; i < nintrs; i++, msi_num++) {
			if ((ret = px_lib_msi_setvalid(dip, msi_num,
			    PCI_MSI_INVALID)) != DDI_SUCCESS)
				return (ret);
		}

		break;
	case DDI_INTROP_SETMASK:
		ret = pci_msi_set_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_msi_clr_mask(rdip, hdlp->ih_type, hdlp->ih_inum);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_msi_get_pending(rdip, hdlp->ih_type,
		    hdlp->ih_inum, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_NAVAIL:
		/* XXX - a new interface may be needed */
		ret = pci_msi_get_nintrs(rdip, hdlp->ih_type, (int *)result);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		ret = pci_msi_get_supported_type(rdip, (int *)result);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
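
/*
 * px_add_intx_intr:
 *	Register a fixed interrupt handler for rdip on the given ino.  If
 *	the ino is already in use the handler is added to the shared list;
 *	otherwise a new ino is set up, px_intx_intr() is installed as the
 *	system-level wrapper, a target CPU is chosen and the interrupt is
 *	enabled.
 */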
int
px_add_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ib_ino_info_t *ino_p;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: rdip=%s%d ino=%x "
	    "handler=%x arg1=%x arg2=%x\n", ddi_driver_name(rdip),
	    ddi_get_instance(rdip), ino, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, 0, 0);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	if (ino_p = px_ib_locate_ino(ib_p, ino)) {	/* sharing ino */
		uint32_t intr_index = hdlp->ih_inum;
		if (px_ib_ino_locate_intr(ino_p, rdip, intr_index, 0, 0)) {
			DBG(DBG_A_INTX, dip, "px_add_intx_intr: "
			    "dup intr #%d\n", intr_index);

			ret = DDI_FAILURE;
			goto fail1;
		}

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		if ((ret = px_ib_ino_add_intr(px_p, ino_p, ih_p))
		    != DDI_SUCCESS)
			goto fail1;
	} else {
		ino_p = px_ib_new_ino(ib_p, ino, ih_p);

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = px_class_to_pil(rdip);

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		DBG(DBG_A_INTX, dip, "px_add_intx_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
		    (ddi_intr_handler_t *)px_intx_intr, (caddr_t)ino_p, NULL);

		ret = i_ddi_add_ivintr(hdlp);

		/*
		 * Restore original interrupt handler
		 * and arguments in interrupt handle.
		 */
		DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
		    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

		if (ret != DDI_SUCCESS)
			goto fail2;

		/* Save the pil for this ino */
		ino_p->ino_pil = hdlp->ih_pri;

		/* select cpu, saving it for sharing and removal */
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino);
	}

	/* add weight to the cpu that we are already targeting */
	weight = px_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ino_p = ino_p;
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino(ib_p, ino_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_A_INTX, dip, "px_add_intx_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}
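
/*
 * px_rem_intx_intr:
 *	Remove a fixed interrupt handler for rdip.  If this was the last
 *	handler on the ino the system-level wrapper is removed and the ino
 *	deleted; otherwise the interrupt is retargeted and revalidated for
 *	the remaining handlers.
 */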
int
px_rem_intx_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino;
	cpuid_t		curr_cpu;
	px_ib_ino_info_t *ino_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	ino = hdlp->ih_vector;

	DBG(DBG_R_INTX, dip, "px_rem_intx_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ih_p = px_ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum, 0, 0);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ino_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ino_p->ino_ih_size == 0) {
		if ((ret = px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
		    INTR_DELIVERED_STATE)) != DDI_SUCCESS)
			goto fail;

		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino(ib_p, ino_p);
		kmem_free(ino_p, sizeof (px_ib_ino_info_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		if ((ret = px_lib_intr_settarget(px_p->px_dip,
		    ino_p->ino_sysino, curr_cpu)) != DDI_SUCCESS)
			goto fail;

		ret = px_lib_intr_setvalid(px_p->px_dip, ino_p->ino_sysino,
		    INTR_VALID);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}
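
/*
 * px_add_msiq_intr:
 *	Register an MSI/MSI-X/MSG interrupt handler for rdip.  Allocates an
 *	MSIQ, maps it to a device ino, and either shares an existing ino or
 *	sets up a new one with px_msiq_intr() as the system-level wrapper.
 */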
int
px_add_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t *msiq_id_p)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_msiq_state_t	*msiq_state_p = &ib_p->ib_msiq_state;
	devino_t	ino;
	px_ih_t		*ih_p;
	px_ib_ino_info_t *ino_p;
	int32_t		weight;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: rdip=%s%d handler=%x "
	    "arg1=%x arg2=%x\n", ddi_driver_name(rdip), ddi_get_instance(rdip),
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if ((ret = px_msiq_alloc(px_p, rec_type, msiq_id_p)) != DDI_SUCCESS) {
		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
		    "msiq allocation failed\n");
		return (ret);
	}

	ino = px_msiqid_to_devino(px_p, *msiq_id_p);

	ih_p = px_ib_alloc_ih(rdip, hdlp->ih_inum, hdlp->ih_cb_func,
	    hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, rec_type, msg_code);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	if (ino_p = px_ib_locate_ino(ib_p, ino)) {	/* sharing ino */
		uint32_t intr_index = hdlp->ih_inum;
		if (px_ib_ino_locate_intr(ino_p, rdip,
		    intr_index, rec_type, msg_code)) {
			DBG(DBG_MSIQ, dip, "px_add_msiq_intr: "
			    "dup intr #%d\n", intr_index);

			ret = DDI_FAILURE;
			goto fail1;
		}

		if ((ret = px_ib_ino_add_intr(px_p, ino_p, ih_p))
		    != DDI_SUCCESS)
			goto fail1;
	} else {
		ino_p = px_ib_new_ino(ib_p, ino, ih_p);

		ino_p->ino_msiq_p = msiq_state_p->msiq_p +
		    (*msiq_id_p - msiq_state_p->msiq_1st_msiq_id);

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = px_class_to_pil(rdip);

		/* Save mondo value in hdlp */
		hdlp->ih_vector = ino_p->ino_sysino;

		DBG(DBG_MSIQ, dip, "px_add_msiq_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
		    (ddi_intr_handler_t *)px_msiq_intr, (caddr_t)ino_p, NULL);

		ret = i_ddi_add_ivintr(hdlp);

		/*
		 * Restore original interrupt handler
		 * and arguments in interrupt handle.
		 */
		DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
		    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

		if (ret != DDI_SUCCESS)
			goto fail2;

		/* Save the pil for this ino */
		ino_p->ino_pil = hdlp->ih_pri;

		/* Enable MSIQ */
		px_lib_msiq_setstate(dip, *msiq_id_p, PCI_MSIQ_STATE_IDLE);
		px_lib_msiq_setvalid(dip, *msiq_id_p, PCI_MSIQ_VALID);

		/* select cpu, saving it for sharing and removal */
		ino_p->ino_cpuid = intr_dist_cpuid();

		/* Enable interrupt */
		px_ib_intr_enable(px_p, ino_p->ino_cpuid, ino_p->ino_ino);
	}

	/* add weight to the cpu that we are already targeting */
	weight = px_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(ino_p->ino_cpuid, rdip, weight);

	ih_p->ih_ino_p = ino_p;
	if (ih_p->ih_ksp)
		kstat_install(ih_p->ih_ksp);
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: done! Interrupt 0x%x pil=%x\n",
	    ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
fail2:
	px_ib_delete_ino(ib_p, ino_p);
fail1:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (px_ih_t));

	DBG(DBG_MSIQ, dip, "px_add_msiq_intr: Failed! Interrupt 0x%x "
	    "pil=%x\n", ino_p->ino_sysino, hdlp->ih_pri);

	return (ret);
}
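
/*
 * px_rem_msiq_intr:
 *	Remove an MSI/MSI-X/MSG interrupt handler for rdip.  If this was
 *	the last handler on the ino the MSIQ is invalidated and freed;
 *	otherwise the interrupt is retargeted and revalidated for the
 *	remaining handlers.
 */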
int
px_rem_msiq_intr(dev_info_t *dip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, msiq_rec_type_t rec_type,
    msgcode_t msg_code, msiqid_t msiq_id)
{
	px_t		*px_p = INST_TO_STATE(ddi_get_instance(dip));
	px_ib_t		*ib_p = px_p->px_ib_p;
	devino_t	ino = px_msiqid_to_devino(px_p, msiq_id);
	cpuid_t		curr_cpu;
	px_ib_ino_info_t *ino_p;
	px_ih_t		*ih_p;
	int		ret = DDI_SUCCESS;

	DBG(DBG_MSIQ, dip, "px_rem_msiq_intr: rdip=%s%d msiq_id=%x ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), msiq_id, ino);

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	ino_p = px_ib_locate_ino(ib_p, ino);
	ih_p = px_ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum,
	    rec_type, msg_code);

	/* Get the current cpu */
	if ((ret = px_lib_intr_gettarget(px_p->px_dip, ino_p->ino_sysino,
	    &curr_cpu)) != DDI_SUCCESS)
		goto fail;

	if ((ret = px_ib_ino_rem_intr(px_p, ino_p, ih_p)) != DDI_SUCCESS)
		goto fail;

	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);

	if (ino_p->ino_ih_size == 0) {
		if ((ret = px_lib_intr_setstate(px_p->px_dip, ino_p->ino_sysino,
		    INTR_DELIVERED_STATE)) != DDI_SUCCESS)
			goto fail;

		px_lib_msiq_setvalid(dip, px_devino_to_msiqid(px_p, ino),
		    PCI_MSIQ_INVALID);

		hdlp->ih_vector = ino_p->ino_sysino;
		i_ddi_rem_ivintr(hdlp);

		px_ib_delete_ino(ib_p, ino_p);

		(void) px_msiq_free(px_p, msiq_id);
		kmem_free(ino_p, sizeof (px_ib_ino_info_t));
	} else {
		/* Re-enable interrupt only if mapping register still shared */
		if ((ret = px_lib_intr_settarget(px_p->px_dip,
		    ino_p->ino_sysino, curr_cpu)) != DDI_SUCCESS)
			goto fail;

		ret = px_lib_intr_setvalid(px_p->px_dip, ino_p->ino_sysino,
		    INTR_VALID);
	}

fail:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}