/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Logical domain channel devices are devices implemented entirely
 * in software; cnex is the nexus for channel-devices. They use
 * the HV channel interfaces via the LDC transport module to send
 * and receive data and to register callbacks.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/devops.h>
#include <sys/instance.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/systm.h>
#include <sys/mkdev.h>
#include <sys/machsystm.h>
#include <sys/intreg.h>
#include <sys/intr.h>
#include <sys/ddi_intr_impl.h>
#include <sys/ivintr.h>
#include <sys/hypervisor_api.h>
#include <sys/ldc.h>
#include <sys/cnex.h>
#include <sys/mach_descrip.h>
#include <sys/hsvc.h>
#include <sys/sdt.h>

/*
 * Internal functions/information
 */
static struct cnex_pil_map cnex_class_to_pil[] = {
	{LDC_DEV_GENERIC,	PIL_3},
	{LDC_DEV_BLK,		PIL_4},
	{LDC_DEV_BLK_SVC,	PIL_3},
	{LDC_DEV_NT,		PIL_6},
	{LDC_DEV_NT_SVC,	PIL_4},
	{LDC_DEV_SERIAL,	PIL_6}
};
#define	CNEX_MAX_DEVS	(sizeof (cnex_class_to_pil) / \
			    sizeof (cnex_class_to_pil[0]))

#define	SUN4V_REG_SPEC2CFG_HDL(x)	((x >> 32) & ~(0xfull << 28))

static clock_t cnex_wait_usecs = 1000; /* wait time in usecs */
static int cnex_wait_retries = 3;
static void *cnex_state;

static void cnex_intr_redist(void *arg);
static uint_t cnex_intr_wrapper(caddr_t arg);
static dev_info_t *cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde);

/*
 * Debug info
 */
#ifdef DEBUG

/*
 * Print debug messages
 *
 * set cnexdbg to 0xf for enabling all msgs
 * 0x8 - Errors
 * 0x4 - Warnings
 * 0x2 - All debug messages
 * 0x1 - Minimal debug messages
 */

int cnexdbg = 0x8;

static void
cnexdebug(const char *fmt, ...)
{
	char buf[512];
	va_list ap;

	va_start(ap, fmt);
	(void) vsprintf(buf, fmt, ap);
	va_end(ap);

	cmn_err(CE_CONT, "%s\n", buf);
}

#define	D1		\
if (cnexdbg & 0x01)	\
	cnexdebug

#define	D2		\
if (cnexdbg & 0x02)	\
	cnexdebug

#define	DWARN		\
if (cnexdbg & 0x04)	\
	cnexdebug

#define	DERR		\
if (cnexdbg & 0x08)	\
	cnexdebug

#else

#define	D1
#define	D2
#define	DWARN
#define	DERR

#endif

/*
 * Config information
 */
static int cnex_attach(dev_info_t *, ddi_attach_cmd_t);
static int cnex_detach(dev_info_t *, ddi_detach_cmd_t);
static int cnex_open(dev_t *, int, int, cred_t *);
static int cnex_close(dev_t, int, int, cred_t *);
static int cnex_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int cnex_ctl(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *,
    void *);

static struct bus_ops cnex_bus_ops = {
	BUSO_REV,
	nullbusmap,		/* bus_map */
	NULL,			/* bus_get_intrspec */
	NULL,			/* bus_add_intrspec */
	NULL,			/* bus_remove_intrspec */
	i_ddi_map_fault,	/* bus_map_fault */
	ddi_no_dma_map,		/* bus_dma_map */
	ddi_no_dma_allochdl,	/* bus_dma_allochdl */
	NULL,			/* bus_dma_freehdl */
	NULL,			/* bus_dma_bindhdl */
	NULL,			/* bus_dma_unbindhdl */
	NULL,			/* bus_dma_flush */
	NULL,			/* bus_dma_win */
	NULL,			/* bus_dma_ctl */
	cnex_ctl,		/* bus_ctl */
	ddi_bus_prop_op,	/* bus_prop_op */
	0,			/* bus_get_eventcookie */
	0,			/* bus_add_eventcall */
	0,			/* bus_remove_eventcall */
	0,			/* bus_post_event */
	NULL,			/* bus_intr_ctl */
	NULL,			/* bus_config */
	NULL,			/* bus_unconfig */
	NULL,			/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	NULL			/* bus_intr_op */
};

static struct cb_ops cnex_cb_ops = {
	cnex_open,			/* open */
	cnex_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	cnex_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_MP | D_NEW | D_HOTPLUG	/* Driver compatibility flag */
};

static struct dev_ops cnex_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	ddi_getinfo_1to1,	/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	cnex_attach,		/* attach */
	cnex_detach,		/* detach */
	nodev,			/* reset */
	&cnex_cb_ops,		/* driver operations */
	&cnex_bus_ops,		/* bus operations */
	nulldev			/* power */
};

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,
	"sun4v channel-devices nexus 1.11",
	&cnex_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

int
_init(void)
{
	int err;
	uint64_t majornum;
	uint64_t minornum;

	/*
	 * Check HV intr group api versioning.
	 * Note that cnex assumes interrupt cookies are
	 * in version 1.0 of the intr group api.
	 */
	if ((err = hsvc_version(HSVC_GROUP_INTR, &majornum, &minornum)) != 0) {
		cmn_err(CE_WARN, "cnex: failed to get intr api "
		    "group versioning errno=%d", err);
		return (err);
	} else if ((majornum != 1) && (majornum != 2)) {
		cmn_err(CE_WARN, "cnex: unsupported intr api group: "
		    "maj:0x%lx, min:0x%lx", majornum, minornum);
		return (ENOTSUP);
	}

	if ((err = ddi_soft_state_init(&cnex_state,
	    sizeof (cnex_soft_state_t), 0)) != 0) {
		return (err);
	}
	if ((err = mod_install(&modlinkage)) != 0) {
		ddi_soft_state_fini(&cnex_state);
		return (err);
	}
	return (0);
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) != 0)
		return (err);
	ddi_soft_state_fini(&cnex_state);
	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Callback function invoked by the interrupt redistribution
 * framework. This will redirect interrupts at CPUs that are
 * currently available in the system.
 */
static void
cnex_intr_redist(void *arg)
{
	cnex_ldc_t		*cldcp;
	cnex_soft_state_t	*cnex_ssp = arg;
	int			intr_state;
	uint64_t		cpuid;
	int			rv, retries = 0;

	ASSERT(cnex_ssp != NULL);
	mutex_enter(&cnex_ssp->clist_lock);

	cldcp = cnex_ssp->clist;
	while (cldcp != NULL) {

		mutex_enter(&cldcp->lock);

		if (cldcp->tx.hdlr) {
			/*
			 * Don't do anything for disabled interrupts.
			 */
			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, &intr_state);
			if (rv) {
				DWARN("cnex_intr_redist: tx ino=0x%llx, "
				    "can't get valid\n", cldcp->tx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}
			if (intr_state == HV_INTR_NOTVALID) {
				mutex_exit(&cldcp->lock);
				cldcp = cldcp->next;
				continue;
			}

			cpuid = intr_dist_cpuid();

			/* disable interrupts */
			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, HV_INTR_NOTVALID);
			if (rv) {
				DWARN("cnex_intr_redist: tx ino=0x%llx, "
				    "can't set valid\n", cldcp->tx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}

			/*
			 * Make a best effort to wait for pending interrupts
			 * to finish. There is not much we can do if we timeout.
			 */
			retries = 0;

			do {
				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
				    cldcp->tx.ino, &intr_state);
				if (rv) {
					DWARN("cnex_intr_redist: tx ino=0x%llx,"
					    "can't get state\n", cldcp->tx.ino);
					mutex_exit(&cldcp->lock);
					mutex_exit(&cnex_ssp->clist_lock);
					return;
				}

				if (intr_state != HV_INTR_DELIVERED_STATE)
					break;

				drv_usecwait(cnex_wait_usecs);

			} while (!panicstr && ++retries <= cnex_wait_retries);

			cldcp->tx.cpuid = cpuid;
			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
			    cldcp->tx.ino, cpuid);
			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->tx.ino, HV_INTR_VALID);
		}

		if (cldcp->rx.hdlr) {
			/*
			 * Don't do anything for disabled interrupts.
			 */
			rv = hvldc_intr_getvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, &intr_state);
			if (rv) {
				DWARN("cnex_intr_redist: rx ino=0x%llx, "
				    "can't get valid\n", cldcp->rx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}
			if (intr_state == HV_INTR_NOTVALID) {
				mutex_exit(&cldcp->lock);
				cldcp = cldcp->next;
				continue;
			}

			cpuid = intr_dist_cpuid();

			/* disable interrupts */
			rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, HV_INTR_NOTVALID);
			if (rv) {
				DWARN("cnex_intr_redist: rx ino=0x%llx, "
				    "can't set valid\n", cldcp->rx.ino);
				mutex_exit(&cldcp->lock);
				mutex_exit(&cnex_ssp->clist_lock);
				return;
			}

			/*
			 * Make a best effort to wait for pending interrupts
			 * to finish. There is not much we can do if we timeout.
			 */
			retries = 0;

			do {
				rv = hvldc_intr_getstate(cnex_ssp->cfghdl,
				    cldcp->rx.ino, &intr_state);
				if (rv) {
					DWARN("cnex_intr_redist: rx ino=0x%llx,"
					    "can't get state\n", cldcp->rx.ino);
					mutex_exit(&cldcp->lock);
					mutex_exit(&cnex_ssp->clist_lock);
					return;
				}

				if (intr_state != HV_INTR_DELIVERED_STATE)
					break;

				drv_usecwait(cnex_wait_usecs);

			} while (!panicstr && ++retries <= cnex_wait_retries);

			cldcp->rx.cpuid = cpuid;
			(void) hvldc_intr_settarget(cnex_ssp->cfghdl,
			    cldcp->rx.ino, cpuid);
			(void) hvldc_intr_setvalid(cnex_ssp->cfghdl,
			    cldcp->rx.ino, HV_INTR_VALID);
		}

		mutex_exit(&cldcp->lock);

		/* next channel */
		cldcp = cldcp->next;
	}

	mutex_exit(&cnex_ssp->clist_lock);
}

/*
 * Exported interface to register an LDC endpoint with
 * the channel nexus
 */
static int
cnex_reg_chan(dev_info_t *dip, uint64_t id, ldc_dev_t devclass)
{
	int		idx;
	cnex_ldc_t	*cldcp;
	int		listsz, num_nodes, num_channels;
	md_t		*mdp = NULL;
	mde_cookie_t	rootnode, *listp = NULL;
	uint64_t	tmp_id;
	uint64_t	rxino = (uint64_t)-1;
	uint64_t	txino = (uint64_t)-1;
	cnex_soft_state_t *cnex_ssp;
	int		status, instance;
	dev_info_t	*chan_dip = NULL;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* Check to see if channel is already registered */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id) {
			DWARN("cnex_reg_chan: channel 0x%llx exists\n", id);
			mutex_exit(&cnex_ssp->clist_lock);
			return (EINVAL);
		}
		cldcp = cldcp->next;
	}

	/* Get the Tx/Rx inos from the MD */
	if ((mdp = md_get_handle()) == NULL) {
		DWARN("cnex_reg_chan: cannot init MD\n");
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}
	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);

	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	rootnode = md_root_node(mdp);

	/* search for all channel-endpoint nodes */
	num_channels = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "channel-endpoint"),
	    md_find_name(mdp, "fwd"), listp);
	if (num_channels <= 0) {
		DWARN("cnex_reg_chan: invalid channel id\n");
		kmem_free(listp, listsz);
		(void) md_fini_handle(mdp);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	for (idx = 0; idx < num_channels; idx++) {

		/* Get the channel ID */
		status = md_get_prop_val(mdp, listp[idx], "id", &tmp_id);
		if (status) {
			DWARN("cnex_reg_chan: cannot read LDC ID\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		if (tmp_id != id)
			continue;

		/* Get the Tx and Rx ino */
		status = md_get_prop_val(mdp, listp[idx], "tx-ino", &txino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Tx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		status = md_get_prop_val(mdp, listp[idx], "rx-ino", &rxino);
		if (status) {
			DWARN("cnex_reg_chan: cannot read Rx ino\n");
			kmem_free(listp, listsz);
			(void) md_fini_handle(mdp);
			mutex_exit(&cnex_ssp->clist_lock);
			return (ENXIO);
		}
		chan_dip = cnex_find_chan_dip(dip, id, mdp, listp[idx]);
		ASSERT(chan_dip != NULL);
	}
	kmem_free(listp, listsz);
	(void) md_fini_handle(mdp);

	/*
	 * check to see if we looped through the list of channel IDs without
	 * matching one (i.e. an 'ino' has not been initialised).
	 */
	if ((rxino == -1) || (txino == -1)) {
		DERR("cnex_reg_chan: no ID matching '%llx' in MD\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENOENT);
	}

	/* Allocate a new channel structure */
	cldcp = kmem_zalloc(sizeof (*cldcp), KM_SLEEP);

	/* Initialize the channel */
	mutex_init(&cldcp->lock, NULL, MUTEX_DRIVER, NULL);

	cldcp->id = id;
	cldcp->tx.ino = txino;
	cldcp->rx.ino = rxino;
	cldcp->devclass = devclass;
	cldcp->dip = chan_dip;

	/* add channel to nexus channel list */
	cldcp->next = cnex_ssp->clist;
	cnex_ssp->clist = cldcp;

	mutex_exit(&cnex_ssp->clist_lock);

	return (0);
}

/*
 * Add Tx/Rx interrupt handler for the channel
 */
static int
cnex_add_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype,
    uint_t (*hdlr)(), caddr_t arg1, caddr_t arg2)
{
	int		rv, idx, pil;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_add_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get channel lock */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_add_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* check if a handler is already added */
	if (iinfo->hdlr != 0) {
		DWARN("cnex_add_intr: interrupt handler exists\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* save interrupt handler info */
	iinfo->hdlr = hdlr;
	iinfo->arg1 = arg1;
	iinfo->arg2 = arg2;

	/* save data for DTrace probes used by intrstat(1m) */
	iinfo->dip = cldcp->dip;
	iinfo->id = cldcp->id;

	iinfo->icookie = MINVINTR_COOKIE + iinfo->ino;

	/*
	 * Verify that the ino does not generate a cookie which
	 * is outside the (MINVINTR_COOKIE, MAXIVNUM) range of the
	 * system interrupt table.
	 */
	if (iinfo->icookie >= MAXIVNUM || iinfo->icookie < MINVINTR_COOKIE) {
		DWARN("cnex_add_intr: invalid cookie %x ino %x\n",
		    iinfo->icookie, iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_add_intr: add hdlr, cfghdl=0x%llx, ino=0x%llx, "
	    "cookie=0x%llx\n", cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
			pil = cnex_class_to_pil[idx].pil;
			break;
		}
	}

	/* add interrupt to solaris ivec table */
	if (add_ivintr(iinfo->icookie, pil, (intrfunc)cnex_intr_wrapper,
	    (caddr_t)iinfo, NULL, NULL) != 0) {
		DWARN("cnex_add_intr: add_ivintr fail cookie %x ino %x\n",
		    iinfo->icookie, iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	/* set the cookie in the HV */
	rv = hvldc_intr_setcookie(cnex_ssp->cfghdl, iinfo->ino, iinfo->icookie);

	/* pick next CPU in the domain for this channel */
	iinfo->cpuid = intr_dist_cpuid();

	/* set the target CPU and then enable interrupts */
	rv = hvldc_intr_settarget(cnex_ssp->cfghdl, iinfo->ino, iinfo->cpuid);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set target cpu\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set state\n",
		    iinfo->ino);
		goto hv_error;
	}
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl, iinfo->ino, HV_INTR_VALID);
	if (rv) {
		DWARN("cnex_add_intr: ino=0x%llx, cannot set valid\n",
		    iinfo->ino);
		goto hv_error;
	}

	mutex_exit(&cldcp->lock);
	return (0);

hv_error:
	(void) rem_ivintr(iinfo->icookie, pil);
	mutex_exit(&cldcp->lock);
	return (ENXIO);
}


/*
 * Exported interface to unregister an LDC endpoint with
 * the channel nexus
 */
static int
cnex_unreg_chan(dev_info_t *dip, uint64_t id)
{
	cnex_ldc_t	*cldcp, *prev_cldcp;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* find and remove channel from list */
	mutex_enter(&cnex_ssp->clist_lock);
	prev_cldcp = NULL;
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		prev_cldcp = cldcp;
		cldcp = cldcp->next;
	}

	if (cldcp == 0) {
		DWARN("cnex_unreg_chan: invalid channel 0x%llx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}

	if (cldcp->tx.hdlr || cldcp->rx.hdlr) {
		DWARN("cnex_unreg_chan: handlers still exist: chan %lx\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (ENXIO);
	}

	if (prev_cldcp)
		prev_cldcp->next = cldcp->next;
	else
		cnex_ssp->clist = cldcp->next;

	mutex_exit(&cnex_ssp->clist_lock);

	/* destroy mutex */
	mutex_destroy(&cldcp->lock);

	/* free channel */
	kmem_free(cldcp, sizeof (*cldcp));

	return (0);
}

/*
 * Remove Tx/Rx interrupt handler for the channel
 */
static int
cnex_rem_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int		rv, idx, pil;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance, istate;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_rem_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	/* get rid of the channel intr handler */
	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_rem_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: interrupt ino=0x%x\n", iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_rem_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("cnex_rem_intr: set intr to invalid ino=0x%x\n", iinfo->ino);
	rv = hvldc_intr_setvalid(cnex_ssp->cfghdl,
	    iinfo->ino, HV_INTR_NOTVALID);
	if (rv) {
		DWARN("cnex_rem_intr: cannot set valid ino=%x\n", iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/*
	 * Check if there are pending interrupts. If interrupts are
	 * pending return EAGAIN.
	 */
	rv = hvldc_intr_getstate(cnex_ssp->cfghdl, iinfo->ino, &istate);
	if (rv) {
		DWARN("cnex_rem_intr: ino=0x%llx, cannot get state\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	/* if interrupts are still pending print warning */
	if (istate != HV_INTR_IDLE_STATE) {
		DWARN("cnex_rem_intr: cannot remove intr busy ino=%x\n",
		    iinfo->ino);
		mutex_exit(&cldcp->lock);
		return (EAGAIN);
	}

	/* Pick a PIL on the basis of the channel's devclass */
	for (idx = 0, pil = PIL_3; idx < CNEX_MAX_DEVS; idx++) {
		if (cldcp->devclass == cnex_class_to_pil[idx].devclass) {
			pil = cnex_class_to_pil[idx].pil;
			break;
		}
	}

	/* remove interrupt */
	(void) rem_ivintr(iinfo->icookie, pil);

	/* clear interrupt info */
	bzero(iinfo, sizeof (*iinfo));

	mutex_exit(&cldcp->lock);

	return (0);
}


/*
 * Clear pending Tx/Rx interrupt
 */
static int
cnex_clr_intr(dev_info_t *dip, uint64_t id, cnex_intrtype_t itype)
{
	int		rv;
	cnex_ldc_t	*cldcp;
	cnex_intr_t	*iinfo;
	cnex_soft_state_t *cnex_ssp;
	int		instance;

	/* Get device instance and structure */
	instance = ddi_get_instance(dip);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* get channel info */
	mutex_enter(&cnex_ssp->clist_lock);
	cldcp = cnex_ssp->clist;
	while (cldcp) {
		if (cldcp->id == id)
			break;
		cldcp = cldcp->next;
	}
	if (cldcp == NULL) {
		DWARN("cnex_clr_intr: channel 0x%llx does not exist\n", id);
		mutex_exit(&cnex_ssp->clist_lock);
		return (EINVAL);
	}
	mutex_exit(&cnex_ssp->clist_lock);

	mutex_enter(&cldcp->lock);

	/* get interrupt type */
	if (itype == CNEX_TX_INTR) {
		iinfo = &(cldcp->tx);
	} else if (itype == CNEX_RX_INTR) {
		iinfo = &(cldcp->rx);
	} else {
		DWARN("cnex_clr_intr: invalid interrupt type\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	D1("%s: interrupt ino=0x%x\n", __func__, iinfo->ino);

	/* check if a handler is already added */
	if (iinfo->hdlr == 0) {
		DWARN("cnex_clr_intr: interrupt handler does not exist\n");
		mutex_exit(&cldcp->lock);
		return (EINVAL);
	}

	rv = hvldc_intr_setstate(cnex_ssp->cfghdl, iinfo->ino,
	    HV_INTR_IDLE_STATE);
	if (rv) {
		DWARN("cnex_clr_intr: cannot clear interrupt state\n");
		mutex_exit(&cldcp->lock);
		return (ENXIO);
	}

	mutex_exit(&cldcp->lock);

	return (0);
}

/*
 * Channel nexus interrupt handler wrapper
 */
static uint_t
cnex_intr_wrapper(caddr_t arg)
{
	int	res;
	uint_t	(*handler)();
	caddr_t	handler_arg1;
	caddr_t	handler_arg2;
	cnex_intr_t *iinfo = (cnex_intr_t *)arg;

	ASSERT(iinfo != NULL);

	handler = iinfo->hdlr;
	handler_arg1 = iinfo->arg1;
	handler_arg2 = iinfo->arg2;

	/*
	 * The 'interrupt__start' and 'interrupt__complete' probes
	 * are provided to support the 'intrstat' command. These probes
	 * monitor interrupts on a per-device basis only. To also allow
	 * monitoring activity on a per-channel basis, two additional
	 * probes ('channelintr__start', 'channelintr__complete')
	 * are provided here.
	 */
	DTRACE_PROBE4(channelintr__start, uint64_t, iinfo->id,
	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);

	DTRACE_PROBE4(interrupt__start, dev_info_t, iinfo->dip,
	    void *, handler, caddr_t, handler_arg1, caddr_t, handler_arg2);

	D1("cnex_intr_wrapper:ino=0x%llx invoke client handler\n", iinfo->ino);
	res = (*handler)(handler_arg1, handler_arg2);

	DTRACE_PROBE4(interrupt__complete, dev_info_t, iinfo->dip,
	    void *, handler, caddr_t, handler_arg1, int, res);

	DTRACE_PROBE4(channelintr__complete, uint64_t, iinfo->id,
	    cnex_intr_t *, iinfo, void *, handler, caddr_t, handler_arg1);

	return (res);
}

/*ARGSUSED*/
static int
cnex_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int		rv, instance, reglen;
	cnex_regspec_t	*reg_p;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	/*
	 * Get the instance specific soft state structure.
	 * Save the devi for this instance in the soft_state data.
	 */
	instance = ddi_get_instance(devi);
	if (ddi_soft_state_zalloc(cnex_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	cnex_ssp->devi = devi;
	cnex_ssp->clist = NULL;

	if (ddi_getlongprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "reg", (caddr_t)&reg_p, &reglen) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* get the sun4v config handle for this device */
	cnex_ssp->cfghdl = SUN4V_REG_SPEC2CFG_HDL(reg_p->physaddr);
	kmem_free(reg_p, reglen);

	D1("cnex_attach: cfghdl=0x%llx\n", cnex_ssp->cfghdl);

	/* init channel list mutex */
	mutex_init(&cnex_ssp->clist_lock, NULL, MUTEX_DRIVER, NULL);

	/* Register with LDC module */
	cinfo.dip = devi;
	cinfo.reg_chan = cnex_reg_chan;
	cinfo.unreg_chan = cnex_unreg_chan;
	cinfo.add_intr = cnex_add_intr;
	cinfo.rem_intr = cnex_rem_intr;
	cinfo.clr_intr = cnex_clr_intr;

	/*
	 * LDC register will fail if a nexus instance has already
	 * registered with the LDC framework
	 */
	rv = ldc_register(&cinfo);
	if (rv) {
		DWARN("cnex_attach: unable to register with LDC\n");
		ddi_soft_state_free(cnex_state, instance);
		mutex_destroy(&cnex_ssp->clist_lock);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(devi, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		ddi_remove_minor_node(devi, NULL);
		ddi_soft_state_free(cnex_state, instance);
		mutex_destroy(&cnex_ssp->clist_lock);
		return (DDI_FAILURE);
	}

	/* Add interrupt redistribution callback. */
	intr_dist_add(cnex_intr_redist, cnex_ssp);

	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
cnex_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int		instance;
	ldc_cnex_t	cinfo;
	cnex_soft_state_t *cnex_ssp;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(devi);
	cnex_ssp = ddi_get_soft_state(cnex_state, instance);

	/* check if there are any channels still registered */
	if (cnex_ssp->clist) {
		cmn_err(CE_WARN, "?cnex_detach: channels registered %d\n",
		    ddi_get_instance(devi));
		return (DDI_FAILURE);
	}

	/* Unregister with LDC module */
	cinfo.dip = devi;
	(void) ldc_unregister(&cinfo);

	/* Remove interrupt redistribution callback. */
	intr_dist_rem(cnex_intr_redist, cnex_ssp);

	/* destroy mutex */
	mutex_destroy(&cnex_ssp->clist_lock);

	/* free soft state structure */
	ddi_soft_state_free(cnex_state, instance);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
cnex_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(*devp);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	int instance;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	instance = getminor(dev);
	if (ddi_get_soft_state(cnex_state, instance) == NULL)
		return (ENXIO);

	return (0);
}

/*ARGSUSED*/
static int
cnex_ioctl(dev_t dev,
    int cmd, intptr_t arg, int mode, cred_t *cred_p, int *rval_p)
{
	int instance;
	cnex_soft_state_t *cnex_ssp;

	instance = getminor(dev);
	if ((cnex_ssp = ddi_get_soft_state(cnex_state, instance)) == NULL)
		return (ENXIO);
	ASSERT(cnex_ssp->devi);
	return (ndi_devctl_ioctl(cnex_ssp->devi, cmd, arg, mode, 0));
}

static int
cnex_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	char		name[MAXNAMELEN];
	uint32_t	reglen;
	int		*cnex_regspec;

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == NULL)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?channel-device: %s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}

		(void) snprintf(name, sizeof (name), "%x", *cnex_regspec);
		ddi_set_name_addr(child, name);
		ddi_set_parent_data(child, NULL);
		ddi_prop_free(cnex_regspec);
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_UNINITCHILD:
	{
		dev_info_t *child = (dev_info_t *)arg;

		NDI_CONFIG_DEBUG((CE_NOTE,
		    "DDI_CTLOPS_UNINITCHILD(%s, instance=%d)",
		    ddi_driver_name(child), DEVI(child)->devi_instance));

		ddi_set_name_addr(child, NULL);

		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_DMAPMAPC:
	case DDI_CTLOPS_REPORTINT:
	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_SLAVEONLY:
	case DDI_CTLOPS_AFFINITY:
	case DDI_CTLOPS_POKE:
	case DDI_CTLOPS_PEEK:
		/*
		 * These ops correspond to functions that "shouldn't" be called
		 * by a channel-device driver. So we whine when we're called.
		 */
		cmn_err(CE_WARN, "%s%d: invalid op (%d) from %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip), ctlop,
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_FAILURE);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_BTOP:
	case DDI_CTLOPS_BTOPR:
	case DDI_CTLOPS_DETACH:
	case DDI_CTLOPS_DVMAPAGESIZE:
	case DDI_CTLOPS_IOMIN:
	case DDI_CTLOPS_POWER:
	case DDI_CTLOPS_PTOB:
	default:
		/*
		 * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
		 */
		return (ddi_ctlops(dip, rdip, ctlop, arg, result));
	}
}

/*
 * cnex_find_chan_dip -- Find the dip of the device that corresponds
 * to the specified channel. Below are the details on how the dip
 * is derived.
 *
 * - In the MD, the cfg-handle is expected to be unique for
 *   virtual-device nodes that have the same 'name' property value.
 *   This value is expected to be the same as that of the "reg" property
 *   of the corresponding OBP device node.
 *
 * - The value of the 'name' property of a virtual-device node
 *   in the MD is expected to be the same as that of the corresponding
 *   OBP device node.
 *
 * - Find the virtual-device node corresponding to a channel-endpoint
 *   by walking backwards. Then obtain the values for the 'name' and
 *   'cfg-handle' properties.
 *
 * - Walk all the children of the cnex, and find a matching dip which
 *   has the same 'name' and 'reg' property values.
 *
 * - Channels that have no corresponding device driver are
 *   treated as if they belong to the cnex driver, that is, the
 *   cnex dip is returned for them. In effect, cnex acts as an
 *   umbrella device driver. Note that this is for 'intrstat'
 *   statistics purposes only. As a result, 'intrstat' shows cnex
 *   as the device servicing the interrupts corresponding to these
 *   channels.
 *
 *   For now, only one such case is known, that is, the channels that
 *   are used by the "domain-services".
 */
static dev_info_t *
cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde)
{
	int listsz;
	int num_nodes;
	int num_devs;
	uint64_t cfghdl;
	char *md_name;
	mde_cookie_t *listp;
	dev_info_t *cdip = NULL;

	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "back"), listp);
	ASSERT(num_devs <= 1);
	if (num_devs <= 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "No virtual-device found\n", chan_id);
		goto fdip_exit;
	}
	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "name property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
	    "name property value = %s\n", chan_id, md_name);

	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
		    "cfg-handle property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
	    " property value = 0x%x\n", chan_id, cfghdl);

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		int *cnex_regspec;
		uint32_t reglen;
		char	*dev_name;

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name",
		    &dev_name) != DDI_PROP_SUCCESS) {
			DWARN("cnex_find_chan_dip: name property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (strcmp(md_name, dev_name) != 0) {
			ddi_prop_free(dev_name);
			continue;
		}
		ddi_prop_free(dev_name);
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			DWARN("cnex_find_chan_dip: reg property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (*cnex_regspec == cfghdl) {
			D1("cnex_find_chan_dip:channel(0x%llx): found "
			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
			    ddi_driver_name(cdip));
			break;
		}
		ddi_prop_free(cnex_regspec);
	}

fdip_exit:
	if (cdip == NULL) {
		/*
		 * If a virtual-device node exists but no dip found,
		 * then for now print a DEBUG error message only.
		 */
		if (num_devs > 0) {
			DERR("cnex_find_chan_dip:channel(0x%llx): "
			    "No device found\n", chan_id);
		}

		/* If no dip was found, return cnex device's dip. */
		cdip = dip;
	}

	kmem_free(listp, listsz);
	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
	    chan_id, cdip);
	return (cdip);
}

/* -------------------------------------------------------------------------- */
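
/*
 * Illustrative sketch only (not part of the driver build): shows the
 * arithmetic performed by SUN4V_REG_SPEC2CFG_HDL() above, i.e. taking
 * the upper 32 bits of the 64-bit "reg" physaddr and masking off bits
 * 28-31 of that value. The guard macro, helper name, and physaddr value
 * below are hypothetical and chosen only for demonstration; the snippet
 * can be compiled as a standalone user-level program.
 */
#ifdef CNEX_CFG_HDL_EXAMPLE	/* hypothetical guard; never defined here */
#include <stdio.h>
#include <stdint.h>

/* same shift/mask as SUN4V_REG_SPEC2CFG_HDL() in this file */
#define	EX_SPEC2CFG_HDL(x)	(((x) >> 32) & ~(0xfull << 28))

int
main(void)
{
	/* hypothetical reg physaddr for a channel-devices nexus node */
	uint64_t physaddr = 0x4000000100000000ULL;

	/* prints "cfghdl = 0x1" for the value above */
	(void) printf("cfghdl = 0x%llx\n",
	    (unsigned long long)EX_SPEC2CFG_HDL(physaddr));
	return (0);
}
#endif	/* CNEX_CFG_HDL_EXAMPLE */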