/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Logical domain channel devices are devices implemented entirely
 * in software; cnex is the nexus for channel-devices. They use
 * the HV channel interfaces via the LDC transport module to send
 * and receive data and to register callbacks.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/devops.h>
#include <sys/instance.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/systm.h>
#include <sys/mkdev.h>
#include <sys/machsystm.h>
#include <sys/intr.h>
#include <sys/ddi_intr_impl.h>
#include <sys/ivintr.h>
#include <sys/hypervisor_api.h>
#include <sys/ldc.h>
#include <sys/cnex.h>
#include <sys/mach_descrip.h>
#include <sys/hsvc.h>
#include <sys/sdt.h>

/*
 * Internal functions/information
 */
static struct cnex_pil_map cnex_class_to_pil[] = {
	{LDC_DEV_GENERIC, PIL_3},
	{LDC_DEV_BLK, PIL_4},
	{LDC_DEV_BLK_SVC, PIL_3},
	{LDC_DEV_NT, PIL_6},
	{LDC_DEV_NT_SVC, PIL_4},
	{LDC_DEV_SERIAL, PIL_6}
};
#define	CNEX_MAX_DEVS	(sizeof (cnex_class_to_pil) / \
			sizeof (cnex_class_to_pil[0]))

#define	SUN4V_REG_SPEC2CFG_HDL(x)	((x >> 32) & ~(0xfull << 28))

static clock_t cnex_wait_usecs = 1000;	/* wait time in usecs */
static int cnex_wait_retries = 3;
static void *cnex_state;

static void cnex_intr_redist(void *arg);
static uint_t cnex_intr_wrapper(caddr_t arg);
static dev_info_t *cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde);

/*
 * Debug info
 */
#ifdef DEBUG

/*
 * Print debug messages
 *
 * set cnexdbg to 0xf for enabling all msgs
 * 0x8 - Errors
 * 0x4 - Warnings
 * 0x2 - All debug messages
 * 0x1 - Minimal debug messages
 */

int cnexdbg = 0x8;

static void
cnexdebug(const char *fmt, ...)
{
	char buf[512];
	va_list ap;

	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);

	cmn_err(CE_CONT, "%s\n", buf);
}

#define	D1 \
if (cnexdbg & 0x01) \
	cnexdebug

#define	D2 \
if (cnexdbg & 0x02) \
	cnexdebug

#define	DWARN \
if (cnexdbg & 0x04) \
	cnexdebug

#define	DERR \
if (cnexdbg & 0x08) \
	cnexdebug

#else

#define	D1
#define	D2
#define	DWARN
#define	DERR

#endif

/*
 * Config information
 */
static int cnex_attach(dev_info_t *, ddi_attach_cmd_t);
static int cnex_detach(dev_info_t *, ddi_detach_cmd_t);
static int cnex_open(dev_t *, int, int, cred_t *);
static int cnex_close(dev_t, int, int, cred_t *);
static int cnex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cnex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
    void *);

static struct bus_ops cnex_bus_ops = {
	BUSO_REV,
	nullbusmap,		/* bus_map */
	NULL,			/* bus_get_intrspec */
	NULL,			/* bus_add_intrspec */
	NULL,			/* bus_remove_intrspec */
	i_ddi_map_fault,	/* bus_map_fault */
	ddi_no_dma_map,		/* bus_dma_map */
	ddi_no_dma_allochdl,	/* bus_dma_allochdl */
	NULL,			/* bus_dma_freehdl */
	NULL,			/* bus_dma_bindhdl */
	NULL,			/* bus_dma_unbindhdl */
	NULL,			/* bus_dma_flush */
	NULL,			/* bus_dma_win */
	NULL,			/* bus_dma_ctl */
	cnex_ctl,		/* bus_ctl */
	ddi_bus_prop_op,	/* bus_prop_op */
	0,			/* bus_get_eventcookie */
	0,			/* bus_add_eventcall */
	0,			/* bus_remove_eventcall */
	0,			/* bus_post_event */
	NULL,			/* bus_intr_ctl */
	NULL,			/* bus_config */
	NULL,			/* bus_unconfig */
	NULL,			/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	NULL			/* bus_intr_op */
};

static struct cb_ops cnex_cb_ops = {
	cnex_open,			/* open */
	cnex_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	cnex_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_MP | D_NEW | D_HOTPLUG	/* Driver compatibility flag */
};

static struct dev_ops cnex_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_getinfo_1to1,	/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	cnex_attach,		/* attach */
	cnex_detach,		/* detach */
	nodev,			/* reset */
	&cnex_cb_ops,		/* driver operations */
	&cnex_bus_ops,		/* bus operations */
	nulldev			/* power */
};

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,
	"sun4v channel-devices nexus %I%",
	&cnex_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

int
_init(void)
{
	int err;
	uint64_t majornum;
	uint64_t minornum;

	/*
	 * Check HV intr group api versioning.
	 * Note that cnex assumes interrupt cookies are
	 * in version 1.0 of the intr group api.
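	 *
	 * The check below accepts a negotiated major version of either
	 * 1 or 2; any other major version causes _init() to fail with
	 * ENOTSUP.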
	 */
	if ((err = hsvc_version(HSVC_GROUP_INTR, &majornum, &minornum)) != 0) {
		cmn_err(CE_WARN, "cnex: failed to get intr api "
		    "group versioning errno=%d", err);
		return (err);
	} else if ((majornum != 1) && (majornum != 2)) {
		cmn_err(CE_WARN, "cnex: unsupported intr api group: "
		    "maj:0x%lx, min:0x%lx", majornum, minornum);
		return (ENOTSUP);
	}

	if ((err = ddi_soft_state_init(&cnex_state,
	    sizeof (cnex_soft_state_t), 0)) != 0) {
		return (err);
	}
	if ((err = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&cnex_state);
		return (err);
	}
	return (0);
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0)
		return (err);
	ddi_soft_state_fini(&cnex_state);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Callback function invoked by the interrupt redistribution
 * framework. This will redirect interrupts at CPUs that are
 * currently available in the system.
 */
static void
cnex_intr_redist(void *arg)
{
	cnex_ldc_t		*cldcp;
	cnex_soft_state_t	*cnex_ssp = arg;
	int			intr_state;
	uint64_t		cpuid;
	int			rv, retries = 0;

	ASSERT(cnex_ssp != NULL);
	mutex_enter(&cnex_ssp->clist_lock);

	cldcp = cnex_ssp->clist;
	while (cldcp != NULL) {

		mutex_enter(&cldcp->lock);

		if (cldcp->tx.hdlr) {
			/*
			 * Don't do anything for disabled interrupts.
			 */
			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, &intr_state);
			if (rv) {
				DWARN("cnex_intr_redist: tx ino=0x%llx, "
				    "can't get valid\n", cldcp->tx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}
			if (intr_state == HV_INTR_NOTVALID) {
				mutex_exit(&cldcp->lock);
				cldcp = cldcp->next;
				continue;
			}

			cpuid = intr_dist_cpuid();

			/* disable interrupts */
			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, HV_INTR_NOTVALID);
			if (rv) {
				DWARN("cnex_intr_redist: tx ino=0x%llx, "
				    "can't set valid\n", cldcp->tx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}

			/*
			 * Make a best effort to wait for pending interrupts
			 * to finish. There is not much we can do if we timeout.
			 */
			retries = 0;

			do {
				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
				    cldcp->tx.ino, &intr_state);
				if (rv) {
					DWARN("cnex_intr_redist: tx ino=0x%llx,"
					    "can't get state\n", cldcp->tx.ino);
					mutex_exit(&cldcp->lock);
					mutex_exit(&cnex_ssp->clist_lock);
					return;
				}

				if (intr_state != HV_INTR_DELIVERED_STATE)
					break;

				drv_usecwait(cnex_wait_usecs);

			} while (!panicstr && ++retries <= cnex_wait_retries);

			cldcp->tx.cpuid = cpuid;
			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
			    cldcp->tx.ino, cpuid);
			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, HV_INTR_VALID);
		}

		if (cldcp->rx.hdlr) {
			/*
			 * Don't do anything for disabled interrupts.
			 */
			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, &intr_state);
			if (rv) {
				DWARN("cnex_intr_redist: rx ino=0x%llx, "
				    "can't get valid\n", cldcp->rx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}
			if (intr_state == HV_INTR_NOTVALID) {
				mutex_exit(&cldcp->lock);
				cldcp = cldcp->next;
				continue;
			}

			cpuid = intr_dist_cpuid();

			/* disable interrupts */
			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, HV_INTR_NOTVALID);
			if (rv) {
				DWARN("cnex_intr_redist: rx ino=0x%llx, "
				    "can't set valid\n", cldcp->rx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}

			/*
			 * Make a best effort to wait for pending interrupts
			 * to finish. There is not much we can do if we timeout.
			 */
			retries = 0;

			do {
				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
				    cldcp->rx.ino, &intr_state);
				if (rv) {
					DWARN("cnex_intr_redist: rx ino=0x%llx,"
					    "can't get state\n", cldcp->rx.ino);
					mutex_exit(&cldcp->lock);
					mutex_exit(&cnex_ssp->clist_lock);
					return;
				}

				if (intr_state != HV_INTR_DELIVERED_STATE)
					break;

				drv_usecwait(cnex_wait_usecs);

			} while (!panicstr && ++retries <= cnex_wait_retries);

			cldcp->rx.cpuid = cpuid;
			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
			    cldcp->rx.ino, cpuid);
			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, HV_INTR_VALID);
		}

		mutex_exit(&cldcp->lock);

		/* next channel */
		cldcp = cldcp->next;
	}

	mutex_exit(&cnex_ssp->clist_lock);
}

/*
 * Exported interface to register a LDC endpoint with
 * the channel nexus
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int		idx;
	cnex_ldc_t	*cldcp;
	int		listsz, num_nodes, num_channels;
	md_t		*mdp = NULL;
	mde_cookie_t	rootnode, *listp = NULL;
	uint64_t	tmp_id;
	uint64_t	rxino = (uint64_t)-1;
	uint64_t	txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int		status, instance;
	dev_info_t	*chan_dip = NULL;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel-endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: invalid channel id\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {

		/* Get the channel ID */
		status =
		    md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
		ASSERT(chan_dip != NULL);
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * Check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	cldcp = kmem_zalloc(sizeof (*cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	cldcp->id = id;
	cldcp->tx.ino = txino;
	cldcp->rx.ino = rxino;
	cldcp->devclass = devclass;
	cldcp->dip = chan_dip;

	/* add channel to nexus channel list */
	cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = cldcp;

	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}

/*
 * Add Tx/Rx interrupt handler for the channel
 */
static int
cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype,
    uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2)
{
	int		rv, idx, pil;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get channel lock */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_add_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* check if a handler is already added */
	if (iinfo->hdlr != 0) {
		DWARN("cnex_add_intr: interrupt handler exists\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* save interrupt handler info */
	iinfo->hdlr = hdlr;
	iinfo->arg1 = arg1;
	iinfo->arg2 = arg2;

	iinfo->cldcp = cldcp;

	/*
	 * FIXME - generate the interrupt cookie
	 * using the interrupt registry
	 */
	iinfo->icookie = cnex_ssp->cfghdl | iinfo->ino;

	D1("cnex_add_intr: add hdlr, "
	    "cfghdl=0x%llx, ino=0x%llx, "
	    "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
			pil = cnex_class_to_pil[idx].pil;
			break;
		}
	}

	/* add interrupt to solaris ivec table */
	VERIFY(add_ivintr(iinfo->icookie, pil, (intrfunc)cnex_intr_wrapper,
	    (caddr_t)iinfo, NULL, NULL) == 0);

	/* set the cookie in the HV */
	rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* pick next CPU in the domain for this channel */
	iinfo->cpuid = intr_dist_cpuid();

	/* set the target CPU and then enable interrupts */
	rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, iinfo->cpuid);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n",
		    iinfo->ino);
		goto hv_error;
	}

	mutex_exit(&cldcp->lock);
	return (0);

hv_error:
	(void) rem_ivintr(iinfo->icookie, pil);
	mutex_exit(&cldcp->lock);
	return (ENXIO);
}


/*
 * Exported interface to unregister a LDC endpoint with
 * the channel nexus
 */
static int
cnex_unreg_chan(dev_info_t *dip, uint64_t id)
{
	cnex_ldc_t	*cldcp, *prev_cldcp;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* find and remove channel from list */
	mutex_enter(&cnex_ssp->clist_lock);
	prev_cldcp = NULL;
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		prev_cldcp = cldcp;
		cldcp = cldcp->next;
	}

	if (cldcp == 0) {
		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}

	if (prev_cldcp)
		prev_cldcp->next = cldcp->next;
	else
		cnex_ssp->clist = cldcp->next;

	mutex_exit(&cnex_ssp->clist_lock);

	/* destroy mutex */
	mutex_destroy(&cldcp->lock);

	/* free channel */
	kmem_free(cldcp, sizeof (*cldcp));

	return (0);
}

/*
 * Remove Tx/Rx interrupt handler for the channel
 */
static int
cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int		rv, idx, pil;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance, istate;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get rid of the channel intr handler */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_rem_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
	    iinfo->ino, HV_INTR_NOTVALID);
	if (rv) {
		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/*
	 * Check if there are pending interrupts. If interrupts are
	 * pending return EAGAIN.
	 */
	rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
	if (rv) {
		DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/* if interrupts are still pending print warning */
	if (istate != HV_INTR_IDLE_STATE) {
		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EAGAIN);
	}

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
			pil = cnex_class_to_pil[idx].pil;
			break;
		}
	}

	/* remove interrupt */
	(void) rem_ivintr(iinfo->icookie, pil);

	/* clear interrupt info */
	bzero(iinfo, sizeof (*iinfo));

	mutex_exit(&cldcp->lock);

	return (0);
}


/*
 * Clear pending Tx/Rx interrupt
 */
static int
cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int		rv;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_clr_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_clr_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_clr_intr: cannot clear interrupt "
		    "state\n");
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	mutex_exit(&cldcp->lock);

	return (0);
}

/*
 * Channel nexus interrupt handler wrapper
 */
static uint_t
cnex_intr_wrapper(caddr_t arg)
{
	int res;
	uint_t (*handler)();
	caddr_t handler_arg1;
	caddr_t handler_arg2;
	cnex_intr_t *iinfo = (cnex_intr_t *)arg;

	ASSERT(iinfo != NULL);

	handler = iinfo->hdlr;
	handler_arg1 = iinfo->arg1;
	handler_arg2 = iinfo->arg2;

	/*
	 * The 'interrupt__start' and 'interrupt__complete' probes
	 * are provided to support the 'intrstat' command. These probes
	 * help monitor the interrupts on a per device basis only.
	 * In order to provide the ability to monitor the
	 * activity on a per channel basis, two additional
	 * probes ('channelintr__start', 'channelintr__complete')
	 * are provided here.
	 */
	DTRACE_PROBE4(channelintr__start, uint64_t, iinfo->cldcp->id,
	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);

	DTRACE_PROBE4(interrupt__start, dev_info_t, iinfo->cldcp->dip,
	    void *, handler, caddr_t, handler_arg1, caddr_t, handler_arg2);

	D1("cnex_intr_wrapper:ino=0x%llx invoke client handler\n", iinfo->ino);
	res = (*handler)(handler_arg1, handler_arg2);

	DTRACE_PROBE4(interrupt__complete, dev_info_t, iinfo->cldcp->dip,
	    void *, handler, caddr_t, handler_arg1, int, res);

	DTRACE_PROBE4(channelintr__complete, uint64_t, iinfo->cldcp->id,
	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);

	return (res);
}

/*ARGSUSED*/
static int
cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int		rv, instance, reglen;
	cnex_regspec_t	*reg_p;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the instance specific soft state structure.
	 * Save the devi for this instance in the soft_state data.
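	 *
	 * Note: the "devctl" minor node created later in this routine
	 * uses this instance number as its minor number, which is what
	 * cnex_open()/cnex_close()/cnex_ioctl() recover via getminor()
	 * to look the soft state back up.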
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	cnex_ssp->devi = devi;
	cnex_ssp->clist = NULL;

	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* get the sun4v config handle for this device */
	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
	kmem_free(reg_p, reglen);

	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);

	/* init channel list mutex */
	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);

	/* Register with LDC module */
	cinfo.dip = devi;
	cinfo.reg_chan = cnex_reg_chan;
	cinfo.unreg_chan = cnex_unreg_chan;
	cinfo.add_intr = cnex_add_intr;
	cinfo.rem_intr = cnex_rem_intr;
	cinfo.clr_intr = cnex_clr_intr;

	/*
	 * LDC registration will fail if a nexus instance has already
	 * registered with the LDC framework.
	 */
	rv = ldc_register(&cinfo);
	if (rv) {
		DWARN("cnex_attach: unable to register with LDC\n");
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(devi, NULL);
		mutex_destroy(&cnex_ssp->clist_lock);
		ddi_soft_state_free(cnex_state, instance);
		return (DDI_FAILURE);
	}

	/* Add interrupt redistribution callback. */
	intr_dist_add(cnex_intr_redist, cnex_ssp);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
cnex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int		instance;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devi);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* check if there are any channels still registered */
	if (cnex_ssp->clist) {
		cmn_err(CE_WARN, "?cnex_detach: channels registered %d\n",
		    ddi_get_instance(devi));
		return (DDI_FAILURE);
	}

	/* Unregister with LDC module */
	cinfo.dip = devi;
	(void) ldc_unregister(&cinfo);

	/*
	 * Remove interrupt redistribution callback.
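	 * (The callback was registered in cnex_attach() via intr_dist_add().)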
	 */
	intr_dist_rem(cnex_intr_redist, cnex_ssp);

	/* destroy mutex */
	mutex_destroy(&cnex_ssp->clist_lock);

	/* free soft state structure */
	ddi_soft_state_free(cnex_state, instance);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
cnex_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(*devp);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(dev);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_ioctl(dev_t dev,
    int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
{
	int instance;
	cnex_soft_state_t *cnex_ssp;

	instance = getminor(dev);
	if ((cnex_ssp = ddi_get_soft_state(cnex_state, instance)) == NULL)
		return (ENXIO);
	ASSERT(cnex_ssp->devi);
	return (ndi_devctl_ioctl(cnex_ssp->devi, cmd, arg, mode, 0));
}

static int
cnex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	char		name[MAXNAMELEN];
	uint32_t	reglen;
	int		*cnex_regspec;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == NULL)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?channel-device: %s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}

		(void) snprintf(name, sizeof (name), "%x", *cnex_regspec);
		ddi_set_name_addr(child, name);
		ddi_set_parent_data(child, NULL);
		ddi_prop_free(cnex_regspec);
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_UNINITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		NDI_CONFIG_DEBUG((CE_NOTE,
		    "DDI_CTLOPS_UNINITCHILD(%s, instance=%d)",
		    ddi_driver_name(child), DEVI(child)->devi_instance));

		ddi_set_name_addr(child, NULL);

		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_DMAPMAPC:
	case DDI_CTLOPS_REPORTINT:
	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_SLAVEONLY:
	case DDI_CTLOPS_AFFINITY:
	case DDI_CTLOPS_POKE:
	case DDI_CTLOPS_PEEK:
		/*
		 * These ops correspond to functions that "shouldn't" be called
		 * by a channel-device driver. So we whine when we're called.
		 */
		cmn_err(CE_WARN, "%s%d: invalid op (%d) from %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip), ctlop,
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_FAILURE);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_BTOP:
	case DDI_CTLOPS_BTOPR:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_DVMAPAGESIZE:
	case DDI_CTLOPS_IOMIN:
	case DDI_CTLOPS_POWER:
	case DDI_CTLOPS_PTOB:
	default:
		/*
		 * Everything else (e.g.
		 * PTOB/BTOP/BTOPR requests) we pass up.
		 */
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}

/*
 * cnex_find_chan_dip -- Find the dip of the device that corresponds
 * to the specified channel. Below are the details on how the dip
 * is derived.
 *
 * - In the MD, the cfg-handle is expected to be unique for
 *   virtual-device nodes that have the same 'name' property value.
 *   This value is expected to be the same as that of the "reg"
 *   property of the corresponding OBP device node.
 *
 * - The value of the 'name' property of a virtual-device node
 *   in the MD is expected to be the same as that of the
 *   corresponding OBP device node.
 *
 * - Find the virtual-device node corresponding to a channel-endpoint
 *   by walking backwards. Then obtain the values for the 'name' and
 *   'cfg-handle' properties.
 *
 * - Walk all the children of the cnex, and find a matching dip which
 *   has the same 'name' and 'reg' property values.
 *
 * - Channels that have no corresponding device driver are treated
 *   as if they correspond to the cnex driver, that is, the cnex dip
 *   is returned for them. In effect, cnex acts as an umbrella device
 *   driver. Note that this is for 'intrstat' statistics purposes
 *   only; as a result, 'intrstat' shows cnex as the device servicing
 *   the interrupts corresponding to these channels.
 *
 *   For now, only one such case is known, namely the channels that
 *   are used by the "domain-services".
 */
static dev_info_t *
cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde)
{
	int		listsz;
	int		num_nodes;
	int		num_devs;
	uint64_t	cfghdl;
	char		*md_name;
	mde_cookie_t	*listp;
	dev_info_t	*cdip = NULL;

	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "back"), listp);
	ASSERT(num_devs <= 1);
	if (num_devs <= 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "No virtual-device found\n", chan_id);
		goto fdip_exit;
	}
	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "name property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
	    "name property value = %s\n", chan_id, md_name);

	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
		    "cfg-handle property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
	    "property value = 0x%llx\n", chan_id, cfghdl);

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		int		*cnex_regspec;
		uint32_t	reglen;
		char		*dev_name;

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name",
		    &dev_name) != DDI_PROP_SUCCESS) {
			DWARN("cnex_find_chan_dip: name property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (strcmp(md_name, dev_name) != 0) {
			ddi_prop_free(dev_name);
			continue;
		}
		ddi_prop_free(dev_name);
		if
		    (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			DWARN("cnex_find_chan_dip: reg property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (*cnex_regspec == cfghdl) {
			D1("cnex_find_chan_dip:channel(0x%llx): found "
			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
			    ddi_driver_name(cdip));
			break;
		}
		ddi_prop_free(cnex_regspec);
	}

fdip_exit:
	if (cdip == NULL) {
		/*
		 * If a virtual-device node exists but no dip found,
		 * then for now print a DEBUG error message only.
		 */
		if (num_devs > 0) {
			DERR("cnex_find_chan_dip:channel(0x%llx): "
			    "No device found\n", chan_id);
		}

		/* If no dip was found, return cnex device's dip. */
		cdip = dip;
	}

	kmem_free(listp, listsz);
	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
	    chan_id, cdip);
	return (cdip);
}

/* -------------------------------------------------------------------------- */
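
/*
 * Illustrative sketch (not part of the driver): channel-device drivers do
 * not call the routines above directly. cnex_attach() hands them to the
 * LDC transport module through the ldc_cnex_t ops vector (ldc_register()),
 * and the LDC module invokes them on a channel's behalf roughly as shown
 * below. The names 'id', 'devclass', 'rx_hdlr', 'arg1' and 'arg2' are
 * hypothetical placeholders used only for this sketch.
 *
 *	(void) cinfo.reg_chan(dip, id, devclass);
 *	(void) cinfo.add_intr(dip, id, CNEX_RX_INTR, rx_hdlr, arg1, arg2);
 *	...
 *	(void) cinfo.rem_intr(dip, id, CNEX_RX_INTR);
 *	(void) cinfo.unreg_chan(dip, id);
 */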