1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/types.h> 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/buf.h> 33 #include <sys/uio.h> 34 #include <sys/cred.h> 35 #include <sys/poll.h> 36 #include <sys/mman.h> 37 #include <sys/kmem.h> 38 #include <sys/model.h> 39 #include <sys/file.h> 40 #include <sys/proc.h> 41 #include <sys/open.h> 42 #include <sys/user.h> 43 #include <sys/t_lock.h> 44 #include <sys/vm.h> 45 #include <sys/stat.h> 46 #include <vm/hat.h> 47 #include <vm/seg.h> 48 #include <vm/as.h> 49 #include <sys/cmn_err.h> 50 #include <sys/debug.h> 51 #include <sys/avintr.h> 52 #include <sys/autoconf.h> 53 #include <sys/sunddi.h> 54 #include <sys/esunddi.h> 55 #include <sys/sunndi.h> 56 #include <sys/ddi.h> 57 #include <sys/kstat.h> 58 #include <sys/conf.h> 59 #include <sys/ddi_impldefs.h> /* include implementation structure defs */ 60 #include <sys/ndi_impldefs.h> 61 #include <sys/hwconf.h> 62 #include <sys/pathname.h> 63 #include <sys/modctl.h> 64 #include <sys/epm.h> 65 #include <sys/devctl.h> 66 #include <sys/callb.h> 67 #include <sys/bootconf.h> 68 #include <sys/dacf_impl.h> 69 #include <sys/nvpair.h> 70 #include <sys/sunmdi.h> 71 #include <sys/fs/dv_node.h> 72 73 #ifdef __sparc 74 #include <sys/archsystm.h> /* getpil/setpil */ 75 #include <sys/membar.h> /* membar_sync */ 76 #endif 77 78 /* 79 * ndi property handling 80 */ 81 int 82 ndi_prop_update_int(dev_t match_dev, dev_info_t *dip, 83 char *name, int data) 84 { 85 return (ddi_prop_update_common(match_dev, dip, 86 DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT | DDI_PROP_DONTSLEEP, 87 name, &data, 1, ddi_prop_fm_encode_ints)); 88 } 89 90 int 91 ndi_prop_update_int64(dev_t match_dev, dev_info_t *dip, 92 char *name, int64_t data) 93 { 94 return (ddi_prop_update_common(match_dev, dip, 95 DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT64 | DDI_PROP_DONTSLEEP, 96 name, &data, 1, ddi_prop_fm_encode_int64)); 97 } 98 99 int 100 ndi_prop_create_boolean(dev_t match_dev, dev_info_t *dip, 101 char *name) 102 { 103 return (ddi_prop_update_common(match_dev, dip, 104 DDI_PROP_HW_DEF | DDI_PROP_TYPE_ANY | DDI_PROP_DONTSLEEP, 105 name, NULL, 0, ddi_prop_fm_encode_bytes)); 106 } 107 108 int 109 ndi_prop_update_int_array(dev_t match_dev, dev_info_t *dip, 110 char *name, int *data, uint_t nelements) 111 { 112 return (ddi_prop_update_common(match_dev, dip, 113 DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT | DDI_PROP_DONTSLEEP, 114 name, data, nelements, ddi_prop_fm_encode_ints)); 115 } 116 117 int 118 ndi_prop_update_int64_array(dev_t match_dev, dev_info_t *dip, 119 char *name, int64_t *data, uint_t nelements) 120 { 121 return (ddi_prop_update_common(match_dev, dip, 122 DDI_PROP_HW_DEF | DDI_PROP_TYPE_INT64 | DDI_PROP_DONTSLEEP, 123 name, data, nelements, ddi_prop_fm_encode_int64)); 124 } 125 126 int 127 ndi_prop_update_string(dev_t match_dev, dev_info_t *dip, 128 char *name, char *data) 129 { 130 return (ddi_prop_update_common(match_dev, dip, 131 DDI_PROP_HW_DEF | DDI_PROP_TYPE_STRING | DDI_PROP_DONTSLEEP, 132 name, &data, 1, ddi_prop_fm_encode_string)); 133 } 134 135 int 136 ndi_prop_update_string_array(dev_t match_dev, dev_info_t *dip, 137 char *name, char **data, uint_t nelements) 138 { 139 return (ddi_prop_update_common(match_dev, dip, 140 DDI_PROP_HW_DEF | DDI_PROP_TYPE_STRING | DDI_PROP_DONTSLEEP, 141 name, data, nelements, 142 ddi_prop_fm_encode_strings)); 143 } 144 145 int 146 ndi_prop_update_byte_array(dev_t match_dev, dev_info_t *dip, 147 char *name, uchar_t *data, uint_t nelements) 148 { 149 if (nelements == 0) 150 return (DDI_PROP_INVAL_ARG); 151 152 return (ddi_prop_update_common(match_dev, dip, 153 DDI_PROP_HW_DEF | DDI_PROP_TYPE_BYTE | DDI_PROP_DONTSLEEP, 154 name, data, nelements, ddi_prop_fm_encode_bytes)); 155 } 156 157 int 158 ndi_prop_remove(dev_t dev, dev_info_t *dip, char *name) 159 { 160 return (ddi_prop_remove_common(dev, dip, name, DDI_PROP_HW_DEF)); 161 } 162 163 void 164 ndi_prop_remove_all(dev_info_t *dip) 165 { 166 ddi_prop_remove_all_common(dip, (int)DDI_PROP_HW_DEF); 167 } 168 169 /* 170 * Post an event notification to nexus driver responsible for handling 171 * the event. The responsible nexus is defined in the cookie passed in as 172 * the third parameter. 173 * The dip parameter is an artifact of an older implementation in which all 174 * requests to remove an eventcall would bubble up the tree. Today, this 175 * parameter is ignored. 176 * Input Parameters: 177 * dip - Ignored. 178 * rdip - device driver posting the event 179 * cookie - valid ddi_eventcookie_t, obtained by caller prior to 180 * invocation of this routine 181 * impl_data - used by framework 182 */ 183 /*ARGSUSED*/ 184 int 185 ndi_post_event(dev_info_t *dip, dev_info_t *rdip, 186 ddi_eventcookie_t cookie, void *impl_data) 187 { 188 dev_info_t *ddip; 189 190 ASSERT(cookie); 191 ddip = NDI_EVENT_DDIP(cookie); 192 193 /* 194 * perform sanity checks. These conditions should never be true. 195 */ 196 197 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops != NULL); 198 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->busops_rev >= BUSO_REV_6); 199 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->bus_post_event != NULL); 200 201 /* 202 * post the event to the responsible ancestor 203 */ 204 return ((*(DEVI(ddip)->devi_ops->devo_bus_ops->bus_post_event)) 205 (ddip, rdip, cookie, impl_data)); 206 } 207 208 /* 209 * Calls the bus nexus driver's implementation of the 210 * (*bus_remove_eventcall)() interface. 211 */ 212 int 213 ndi_busop_remove_eventcall(dev_info_t *ddip, ddi_callback_id_t id) 214 { 215 216 ASSERT(id); 217 /* check for a correct revno before calling up the device tree. */ 218 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops != NULL); 219 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->busops_rev >= BUSO_REV_6); 220 221 if (DEVI(ddip)->devi_ops->devo_bus_ops->bus_remove_eventcall == NULL) 222 return (DDI_FAILURE); 223 224 /* 225 * request responsible nexus to remove the eventcall 226 */ 227 return ((*(DEVI(ddip)->devi_ops->devo_bus_ops->bus_remove_eventcall)) 228 (ddip, id)); 229 } 230 231 /* 232 * Calls the bus nexus driver's implementation of the 233 * (*bus_add_eventcall)() interface. The dip parameter is an 234 * artifact of an older implementation in which all requests to 235 * add an eventcall would bubble up the tree. Today, this parameter is 236 * ignored. 237 */ 238 /*ARGSUSED*/ 239 int 240 ndi_busop_add_eventcall(dev_info_t *dip, dev_info_t *rdip, 241 ddi_eventcookie_t cookie, void (*callback)(), void *arg, 242 ddi_callback_id_t *cb_id) 243 { 244 dev_info_t *ddip = (dev_info_t *)NDI_EVENT_DDIP(cookie); 245 246 /* 247 * check for a correct revno before calling up the device tree. 248 */ 249 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops != NULL); 250 ASSERT(DEVI(ddip)->devi_ops->devo_bus_ops->busops_rev >= BUSO_REV_6); 251 252 if (DEVI(ddip)->devi_ops->devo_bus_ops->bus_add_eventcall == NULL) 253 return (DDI_FAILURE); 254 255 /* 256 * request responsible ancestor to add the eventcall 257 */ 258 return ((*(DEVI(ddip)->devi_ops->devo_bus_ops->bus_add_eventcall)) 259 (ddip, rdip, cookie, callback, arg, cb_id)); 260 } 261 262 /* 263 * Calls the bus nexus driver's implementation of the 264 * (*bus_get_eventcookie)() interface up the device tree hierarchy. 265 */ 266 int 267 ndi_busop_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name, 268 ddi_eventcookie_t *event_cookiep) 269 { 270 dev_info_t *pdip = (dev_info_t *)DEVI(dip)->devi_parent; 271 272 /* Can not be called from rootnex. */ 273 ASSERT(pdip); 274 275 /* 276 * check for a correct revno before calling up the device tree. 277 */ 278 ASSERT(DEVI(pdip)->devi_ops->devo_bus_ops != NULL); 279 280 if ((DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_6) || 281 (DEVI(pdip)->devi_ops->devo_bus_ops->bus_get_eventcookie == NULL)) { 282 #ifdef DEBUG 283 if ((DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev >= 284 BUSO_REV_3) && 285 (DEVI(pdip)->devi_ops->devo_bus_ops->bus_get_eventcookie)) { 286 cmn_err(CE_WARN, 287 "Warning: %s%d busops_rev=%d no longer supported" 288 " by the NDI event framework.\nBUSO_REV_6 or " 289 "greater must be used.", 290 DEVI(pdip)->devi_binding_name, 291 DEVI(pdip)->devi_instance, 292 DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev); 293 } 294 #endif /* DEBUG */ 295 296 return (ndi_busop_get_eventcookie(pdip, rdip, name, 297 event_cookiep)); 298 } 299 300 return ((*(DEVI(pdip)->devi_ops->devo_bus_ops->bus_get_eventcookie)) 301 (pdip, rdip, name, event_cookiep)); 302 } 303 304 /* 305 * Copy in the devctl IOCTL data and return a handle to 306 * the data. 307 */ 308 int 309 ndi_dc_allochdl(void *iocarg, struct devctl_iocdata **rdcp) 310 { 311 struct devctl_iocdata *dcp; 312 char *cpybuf; 313 314 ASSERT(rdcp != NULL); 315 316 dcp = kmem_zalloc(sizeof (*dcp), KM_SLEEP); 317 318 if (get_udatamodel() == DATAMODEL_NATIVE) { 319 if (copyin(iocarg, dcp, sizeof (*dcp)) != 0) { 320 kmem_free(dcp, sizeof (*dcp)); 321 return (NDI_FAULT); 322 } 323 } 324 #ifdef _SYSCALL32_IMPL 325 else { 326 struct devctl_iocdata32 dcp32; 327 328 if (copyin(iocarg, &dcp32, sizeof (dcp32)) != 0) { 329 kmem_free(dcp, sizeof (*dcp)); 330 return (NDI_FAULT); 331 } 332 dcp->cmd = (uint_t)dcp32.cmd; 333 dcp->flags = (uint_t)dcp32.flags; 334 dcp->cpyout_buf = (uint_t *)(uintptr_t)dcp32.cpyout_buf; 335 dcp->nvl_user = (nvlist_t *)(uintptr_t)dcp32.nvl_user; 336 dcp->nvl_usersz = (size_t)dcp32.nvl_usersz; 337 dcp->c_nodename = (char *)(uintptr_t)dcp32.c_nodename; 338 dcp->c_unitaddr = (char *)(uintptr_t)dcp32.c_unitaddr; 339 } 340 #endif 341 if (dcp->c_nodename != NULL) { 342 cpybuf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 343 if (copyinstr(dcp->c_nodename, cpybuf, MAXNAMELEN, 0) != 0) { 344 kmem_free(cpybuf, MAXNAMELEN); 345 kmem_free(dcp, sizeof (*dcp)); 346 return (NDI_FAULT); 347 } 348 cpybuf[MAXNAMELEN - 1] = '\0'; 349 dcp->c_nodename = cpybuf; 350 } 351 352 if (dcp->c_unitaddr != NULL) { 353 cpybuf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 354 if (copyinstr(dcp->c_unitaddr, cpybuf, MAXNAMELEN, 0) != 0) { 355 kmem_free(cpybuf, MAXNAMELEN); 356 if (dcp->c_nodename != NULL) 357 kmem_free(dcp->c_nodename, MAXNAMELEN); 358 kmem_free(dcp, sizeof (*dcp)); 359 return (NDI_FAULT); 360 } 361 cpybuf[MAXNAMELEN - 1] = '\0'; 362 dcp->c_unitaddr = cpybuf; 363 } 364 365 /* 366 * copyin and unpack a user defined nvlist if one was passed 367 */ 368 if (dcp->nvl_user != NULL) { 369 if (dcp->nvl_usersz == 0) { 370 if (dcp->c_nodename != NULL) 371 kmem_free(dcp->c_nodename, MAXNAMELEN); 372 if (dcp->c_unitaddr != NULL) 373 kmem_free(dcp->c_unitaddr, MAXNAMELEN); 374 kmem_free(dcp, sizeof (*dcp)); 375 return (NDI_FAILURE); 376 } 377 cpybuf = kmem_alloc(dcp->nvl_usersz, KM_SLEEP); 378 if (copyin(dcp->nvl_user, cpybuf, dcp->nvl_usersz) != 0) { 379 kmem_free(cpybuf, dcp->nvl_usersz); 380 if (dcp->c_nodename != NULL) 381 kmem_free(dcp->c_nodename, MAXNAMELEN); 382 if (dcp->c_unitaddr != NULL) 383 kmem_free(dcp->c_unitaddr, MAXNAMELEN); 384 kmem_free(dcp, sizeof (*dcp)); 385 return (NDI_FAULT); 386 } 387 388 if (nvlist_unpack(cpybuf, dcp->nvl_usersz, &dcp->nvl_user, 389 KM_SLEEP)) { 390 kmem_free(cpybuf, dcp->nvl_usersz); 391 if (dcp->c_nodename != NULL) 392 kmem_free(dcp->c_nodename, MAXNAMELEN); 393 if (dcp->c_unitaddr != NULL) 394 kmem_free(dcp->c_unitaddr, MAXNAMELEN); 395 kmem_free(dcp, sizeof (*dcp)); 396 return (NDI_FAULT); 397 } 398 /* 399 * free the buffer containing the packed nvlist 400 */ 401 kmem_free(cpybuf, dcp->nvl_usersz); 402 403 } 404 405 *rdcp = dcp; 406 return (NDI_SUCCESS); 407 } 408 409 /* 410 * free all space allocated to a handle. 411 */ 412 void 413 ndi_dc_freehdl(struct devctl_iocdata *dcp) 414 { 415 ASSERT(dcp != NULL); 416 417 if (dcp->c_nodename != NULL) 418 kmem_free(dcp->c_nodename, MAXNAMELEN); 419 420 if (dcp->c_unitaddr != NULL) 421 kmem_free(dcp->c_unitaddr, MAXNAMELEN); 422 423 if (dcp->nvl_user != NULL) 424 nvlist_free(dcp->nvl_user); 425 426 kmem_free(dcp, sizeof (*dcp)); 427 } 428 429 char * 430 ndi_dc_getname(struct devctl_iocdata *dcp) 431 { 432 ASSERT(dcp != NULL); 433 return (dcp->c_nodename); 434 435 } 436 437 char * 438 ndi_dc_getaddr(struct devctl_iocdata *dcp) 439 { 440 ASSERT(dcp != NULL); 441 return (dcp->c_unitaddr); 442 } 443 444 nvlist_t * 445 ndi_dc_get_ap_data(struct devctl_iocdata *dcp) 446 { 447 ASSERT(dcp != NULL); 448 449 return (dcp->nvl_user); 450 } 451 452 /* 453 * Transition the child named by "devname@devaddr" to the online state. 454 * For use by a driver's DEVCTL_DEVICE_ONLINE handler. 455 */ 456 int 457 ndi_devctl_device_online(dev_info_t *dip, struct devctl_iocdata *dcp, 458 uint_t flags) 459 { 460 int rval; 461 char *name; 462 dev_info_t *rdip; 463 464 if (ndi_dc_getname(dcp) == NULL || ndi_dc_getaddr(dcp) == NULL) 465 return (EINVAL); 466 467 name = kmem_alloc(MAXNAMELEN, KM_SLEEP); 468 (void) snprintf(name, MAXNAMELEN, "%s@%s", 469 ndi_dc_getname(dcp), ndi_dc_getaddr(dcp)); 470 471 if ((rval = ndi_devi_config_one(dip, name, &rdip, 472 flags | NDI_DEVI_ONLINE | NDI_CONFIG)) == NDI_SUCCESS) { 473 ndi_rele_devi(rdip); 474 475 /* 476 * Invalidate devfs cached directory contents. For the checks 477 * in the "if" condition see the comment in ndi_devi_online(). 478 */ 479 if (i_ddi_node_state(dip) == DS_READY && !DEVI_BUSY_OWNED(dip)) 480 (void) devfs_clean(dip, NULL, 0); 481 482 } else if (rval == NDI_BUSY) { 483 rval = EBUSY; 484 } else if (rval == NDI_FAILURE) { 485 rval = EIO; 486 } 487 488 NDI_DEBUG(flags, (CE_CONT, "%s%d: online: %s: %s\n", 489 ddi_driver_name(dip), ddi_get_instance(dip), name, 490 ((rval == NDI_SUCCESS) ? "ok" : "failed"))); 491 492 kmem_free(name, MAXNAMELEN); 493 494 return (rval); 495 } 496 497 /* 498 * Transition the child named by "devname@devaddr" to the offline state. 499 * For use by a driver's DEVCTL_DEVICE_OFFLINE handler. 500 */ 501 int 502 ndi_devctl_device_offline(dev_info_t *dip, struct devctl_iocdata *dcp, 503 uint_t flags) 504 { 505 int rval; 506 char *name; 507 508 if (ndi_dc_getname(dcp) == NULL || ndi_dc_getaddr(dcp) == NULL) 509 return (EINVAL); 510 511 name = kmem_alloc(MAXNAMELEN, KM_SLEEP); 512 (void) snprintf(name, MAXNAMELEN, "%s@%s", 513 ndi_dc_getname(dcp), ndi_dc_getaddr(dcp)); 514 515 rval = devfs_clean(dip, name, DV_CLEAN_FORCE); 516 if (rval) { 517 rval = EBUSY; 518 } else { 519 rval = ndi_devi_unconfig_one(dip, name, NULL, 520 flags | NDI_DEVI_OFFLINE); 521 522 if (rval == NDI_BUSY) { 523 rval = EBUSY; 524 } else if (rval == NDI_FAILURE) { 525 rval = EIO; 526 } 527 } 528 529 NDI_DEBUG(flags, (CE_CONT, "%s%d: offline: %s: %s\n", 530 ddi_driver_name(dip), ddi_get_instance(dip), name, 531 (rval == NDI_SUCCESS) ? "ok" : "failed")); 532 533 kmem_free(name, MAXNAMELEN); 534 535 return (rval); 536 } 537 538 /* 539 * Remove the child named by "devname@devaddr". 540 * For use by a driver's DEVCTL_DEVICE_REMOVE handler. 541 */ 542 int 543 ndi_devctl_device_remove(dev_info_t *dip, struct devctl_iocdata *dcp, 544 uint_t flags) 545 { 546 int rval; 547 char *name; 548 549 if (ndi_dc_getname(dcp) == NULL || ndi_dc_getaddr(dcp) == NULL) 550 return (EINVAL); 551 552 name = kmem_alloc(MAXNAMELEN, KM_SLEEP); 553 (void) snprintf(name, MAXNAMELEN, "%s@%s", 554 ndi_dc_getname(dcp), ndi_dc_getaddr(dcp)); 555 556 (void) devfs_clean(dip, name, DV_CLEAN_FORCE); 557 558 rval = ndi_devi_unconfig_one(dip, name, NULL, flags | NDI_DEVI_REMOVE); 559 560 if (rval == NDI_BUSY) { 561 rval = EBUSY; 562 } else if (rval == NDI_FAILURE) { 563 rval = EIO; 564 } 565 566 NDI_DEBUG(flags, (CE_CONT, "%s%d: remove: %s: %s\n", 567 ddi_driver_name(dip), ddi_get_instance(dip), name, 568 (rval == NDI_SUCCESS) ? "ok" : "failed")); 569 570 kmem_free(name, MAXNAMELEN); 571 572 return (rval); 573 } 574 575 /* 576 * Return devctl state of the child named by "name@addr". 577 * For use by a driver's DEVCTL_DEVICE_GETSTATE handler. 578 */ 579 int 580 ndi_devctl_device_getstate(dev_info_t *parent, struct devctl_iocdata *dcp, 581 uint_t *state) 582 { 583 dev_info_t *dip; 584 char *name, *addr; 585 char *devname; 586 int devnamelen; 587 int circ; 588 589 if (parent == NULL || 590 ((name = ndi_dc_getname(dcp)) == NULL) || 591 ((addr = ndi_dc_getaddr(dcp)) == NULL)) 592 return (NDI_FAILURE); 593 594 devnamelen = strlen(name) + strlen(addr) + 2; 595 devname = kmem_alloc(devnamelen, KM_SLEEP); 596 if (strlen(addr) > 0) { 597 (void) snprintf(devname, devnamelen, "%s@%s", name, addr); 598 } else { 599 (void) snprintf(devname, devnamelen, "%s", name); 600 } 601 602 ndi_devi_enter(parent, &circ); 603 604 dip = ndi_devi_findchild(parent, devname); 605 kmem_free(devname, devnamelen); 606 607 if (dip == NULL) { 608 ndi_devi_exit(parent, circ); 609 return (NDI_FAILURE); 610 } 611 612 mutex_enter(&(DEVI(dip)->devi_lock)); 613 if (DEVI_IS_DEVICE_OFFLINE(dip)) { 614 *state = DEVICE_OFFLINE; 615 } else if (DEVI_IS_DEVICE_DOWN(dip)) { 616 *state = DEVICE_DOWN; 617 } else { 618 *state = DEVICE_ONLINE; 619 if (devi_stillreferenced(dip) == DEVI_REFERENCED) 620 *state |= DEVICE_BUSY; 621 } 622 623 mutex_exit(&(DEVI(dip)->devi_lock)); 624 ndi_devi_exit(parent, circ); 625 626 return (NDI_SUCCESS); 627 } 628 629 /* 630 * return the current state of the device "dip" 631 * 632 * recommend using ndi_devctl_ioctl() or 633 * ndi_devctl_device_getstate() instead 634 */ 635 int 636 ndi_dc_return_dev_state(dev_info_t *dip, struct devctl_iocdata *dcp) 637 { 638 dev_info_t *pdip; 639 uint_t devstate = 0; 640 int circ; 641 642 if ((dip == NULL) || (dcp == NULL)) 643 return (NDI_FAILURE); 644 645 pdip = ddi_get_parent(dip); 646 647 ndi_devi_enter(pdip, &circ); 648 mutex_enter(&(DEVI(dip)->devi_lock)); 649 if (DEVI_IS_DEVICE_OFFLINE(dip)) { 650 devstate = DEVICE_OFFLINE; 651 } else if (DEVI_IS_DEVICE_DOWN(dip)) { 652 devstate = DEVICE_DOWN; 653 } else { 654 devstate = DEVICE_ONLINE; 655 if (devi_stillreferenced(dip) == DEVI_REFERENCED) 656 devstate |= DEVICE_BUSY; 657 } 658 659 mutex_exit(&(DEVI(dip)->devi_lock)); 660 ndi_devi_exit(pdip, circ); 661 662 if (copyout(&devstate, dcp->cpyout_buf, sizeof (uint_t)) != 0) 663 return (NDI_FAULT); 664 665 return (NDI_SUCCESS); 666 } 667 668 /* 669 * Return device's bus state 670 * For use by a driver's DEVCTL_BUS_GETSTATE handler. 671 */ 672 int 673 ndi_devctl_bus_getstate(dev_info_t *dip, struct devctl_iocdata *dcp, 674 uint_t *state) 675 { 676 if ((dip == NULL) || (dcp == NULL)) 677 return (NDI_FAILURE); 678 679 return (ndi_get_bus_state(dip, state)); 680 } 681 682 /* 683 * Generic devctl ioctl handler 684 */ 685 int 686 ndi_devctl_ioctl(dev_info_t *dip, int cmd, intptr_t arg, int mode, uint_t flags) 687 { 688 _NOTE(ARGUNUSED(mode)) 689 struct devctl_iocdata *dcp; 690 uint_t state; 691 int rval = ENOTTY; 692 693 /* 694 * read devctl ioctl data 695 */ 696 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 697 return (EFAULT); 698 699 switch (cmd) { 700 701 case DEVCTL_BUS_GETSTATE: 702 rval = ndi_devctl_bus_getstate(dip, dcp, &state); 703 if (rval == NDI_SUCCESS) { 704 if (copyout(&state, dcp->cpyout_buf, 705 sizeof (uint_t)) != 0) 706 rval = NDI_FAULT; 707 } 708 break; 709 710 case DEVCTL_DEVICE_ONLINE: 711 rval = ndi_devctl_device_online(dip, dcp, flags); 712 break; 713 714 case DEVCTL_DEVICE_OFFLINE: 715 rval = ndi_devctl_device_offline(dip, dcp, flags); 716 break; 717 718 case DEVCTL_DEVICE_GETSTATE: 719 rval = ndi_devctl_device_getstate(dip, dcp, &state); 720 if (rval == NDI_SUCCESS) { 721 if (copyout(&state, dcp->cpyout_buf, 722 sizeof (uint_t)) != 0) 723 rval = NDI_FAULT; 724 } 725 break; 726 727 case DEVCTL_DEVICE_REMOVE: 728 rval = ndi_devctl_device_remove(dip, dcp, flags); 729 break; 730 731 case DEVCTL_BUS_DEV_CREATE: 732 rval = ndi_dc_devi_create(dcp, dip, 0, NULL); 733 break; 734 735 /* 736 * ioctls for which a generic implementation makes no sense 737 */ 738 case DEVCTL_BUS_RESET: 739 case DEVCTL_BUS_RESETALL: 740 case DEVCTL_DEVICE_RESET: 741 case DEVCTL_AP_CONNECT: 742 case DEVCTL_AP_DISCONNECT: 743 case DEVCTL_AP_INSERT: 744 case DEVCTL_AP_REMOVE: 745 case DEVCTL_AP_CONFIGURE: 746 case DEVCTL_AP_UNCONFIGURE: 747 case DEVCTL_AP_GETSTATE: 748 case DEVCTL_AP_CONTROL: 749 case DEVCTL_BUS_QUIESCE: 750 case DEVCTL_BUS_UNQUIESCE: 751 rval = ENOTSUP; 752 break; 753 } 754 755 ndi_dc_freehdl(dcp); 756 return (rval); 757 } 758 759 /* 760 * Copyout the state of the Attachment Point "ap" to the requesting 761 * user process. 762 */ 763 int 764 ndi_dc_return_ap_state(devctl_ap_state_t *ap, struct devctl_iocdata *dcp) 765 { 766 if ((ap == NULL) || (dcp == NULL)) 767 return (NDI_FAILURE); 768 769 770 if (get_udatamodel() == DATAMODEL_NATIVE) { 771 if (copyout(ap, dcp->cpyout_buf, 772 sizeof (devctl_ap_state_t)) != 0) 773 return (NDI_FAULT); 774 } 775 #ifdef _SYSCALL32_IMPL 776 else { 777 struct devctl_ap_state32 ap_state32; 778 779 ap_state32.ap_rstate = ap->ap_rstate; 780 ap_state32.ap_ostate = ap->ap_ostate; 781 ap_state32.ap_condition = ap->ap_condition; 782 ap_state32.ap_error_code = ap->ap_error_code; 783 ap_state32.ap_in_transition = ap->ap_in_transition; 784 ap_state32.ap_last_change = (time32_t)ap->ap_last_change; 785 if (copyout(&ap_state32, dcp->cpyout_buf, 786 sizeof (devctl_ap_state32_t)) != 0) 787 return (NDI_FAULT); 788 } 789 #endif 790 791 return (NDI_SUCCESS); 792 } 793 794 /* 795 * Copyout the bus state of the bus nexus device "dip" to the requesting 796 * user process. 797 */ 798 int 799 ndi_dc_return_bus_state(dev_info_t *dip, struct devctl_iocdata *dcp) 800 { 801 uint_t devstate = 0; 802 803 if ((dip == NULL) || (dcp == NULL)) 804 return (NDI_FAILURE); 805 806 if (ndi_get_bus_state(dip, &devstate) != NDI_SUCCESS) 807 return (NDI_FAILURE); 808 809 if (copyout(&devstate, dcp->cpyout_buf, sizeof (uint_t)) != 0) 810 return (NDI_FAULT); 811 812 return (NDI_SUCCESS); 813 } 814 815 static int 816 i_dc_devi_create(struct devctl_iocdata *, dev_info_t *, dev_info_t **); 817 818 /* 819 * create a child device node given the property definitions 820 * supplied by the userland process 821 */ 822 int 823 ndi_dc_devi_create(struct devctl_iocdata *dcp, dev_info_t *pdip, int flags, 824 dev_info_t **rdip) 825 { 826 dev_info_t *cdip; 827 int rv, circular = 0; 828 char devnm[MAXNAMELEN]; 829 int nmlen; 830 831 /* 832 * The child device may have been pre-constructed by an earlier 833 * call to this function with the flag DEVCTL_CONSTRUCT set. 834 */ 835 836 if ((cdip = (rdip != NULL) ? *rdip : NULL) == NULL) 837 if ((rv = i_dc_devi_create(dcp, pdip, &cdip)) != 0) 838 return (rv); 839 840 ASSERT(cdip != NULL); 841 842 /* 843 * Return the device node partially constructed if the 844 * DEVCTL_CONSTRUCT flag is set. 845 */ 846 if (flags & DEVCTL_CONSTRUCT) { 847 if (rdip == NULL) { 848 (void) ndi_devi_free(cdip); 849 return (EINVAL); 850 } 851 *rdip = cdip; 852 return (0); 853 } 854 855 /* 856 * Bring the node up to a named but OFFLINE state. The calling 857 * application will need to manage the node from here on. 858 */ 859 if (dcp->flags & DEVCTL_OFFLINE) { 860 /* 861 * hand set the OFFLINE flag to prevent any asynchronous 862 * autoconfiguration operations from attaching this node. 863 */ 864 mutex_enter(&(DEVI(cdip)->devi_lock)); 865 DEVI_SET_DEVICE_OFFLINE(cdip); 866 mutex_exit(&(DEVI(cdip)->devi_lock)); 867 868 rv = ndi_devi_bind_driver(cdip, flags); 869 if (rv != NDI_SUCCESS) { 870 (void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE); 871 return (ENXIO); 872 } 873 874 /* 875 * remove the dev_info node if it failed to bind to a 876 * driver above. 877 */ 878 if (i_ddi_node_state(cdip) < DS_BOUND) { 879 (void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE); 880 return (ENXIO); 881 } 882 883 /* 884 * add the node to the per-driver list and INITCHILD it 885 * to give it a name. 886 */ 887 ndi_devi_enter(pdip, &circular); 888 if ((rv = ddi_initchild(pdip, cdip)) != DDI_SUCCESS) { 889 (void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE); 890 ndi_devi_exit(pdip, circular); 891 return (EINVAL); 892 } 893 ndi_devi_exit(pdip, circular); 894 895 } else { 896 /* 897 * Attempt to bring the device ONLINE. If the request to 898 * fails, remove the dev_info node. 899 */ 900 if (ndi_devi_online(cdip, NDI_ONLINE_ATTACH) != NDI_SUCCESS) { 901 (void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE); 902 return (ENXIO); 903 } 904 905 /* 906 * if the node was successfully added but there was 907 * no driver available for the device, remove the node 908 */ 909 if (i_ddi_node_state(cdip) < DS_BOUND) { 910 (void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE); 911 return (ENODEV); 912 } 913 } 914 915 /* 916 * return a handle to the child device 917 * copy out the name of the newly attached child device if 918 * the IOCTL request has provided a copyout buffer. 919 */ 920 if (rdip != NULL) 921 *rdip = cdip; 922 923 if (dcp->cpyout_buf == NULL) 924 return (0); 925 926 ASSERT(ddi_node_name(cdip) != NULL); 927 ASSERT(ddi_get_name_addr(cdip) != NULL); 928 929 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s", 930 ddi_node_name(cdip), ddi_get_name_addr(cdip)); 931 932 if (copyout(&devnm, dcp->cpyout_buf, nmlen) != 0) { 933 (void) ndi_devi_offline(cdip, NDI_DEVI_REMOVE); 934 return (EFAULT); 935 } 936 return (0); 937 } 938 939 static int 940 i_dc_devi_create(struct devctl_iocdata *dcp, dev_info_t *pdip, 941 dev_info_t **rdip) 942 { 943 944 dev_info_t *cdip; 945 char *cname = NULL; 946 nvlist_t *nvlp = dcp->nvl_user; 947 nvpair_t *npp; 948 char *np; 949 int rv = 0; 950 951 ASSERT(rdip != NULL && *rdip == NULL); 952 953 if ((nvlp == NULL) || 954 (nvlist_lookup_string(nvlp, DC_DEVI_NODENAME, &cname) != 0)) 955 return (EINVAL); 956 957 /* 958 * construct a new dev_info node with a user-provided nodename 959 */ 960 ndi_devi_alloc_sleep(pdip, cname, (dnode_t)DEVI_SID_NODEID, &cdip); 961 962 /* 963 * create hardware properties for each member in the property 964 * list. 965 */ 966 for (npp = nvlist_next_nvpair(nvlp, NULL); (npp != NULL && !rv); 967 npp = nvlist_next_nvpair(nvlp, npp)) { 968 969 np = nvpair_name(npp); 970 971 /* 972 * skip the nodename property 973 */ 974 if (strcmp(np, DC_DEVI_NODENAME) == 0) 975 continue; 976 977 switch (nvpair_type(npp)) { 978 979 case DATA_TYPE_INT32: { 980 int32_t prop_val; 981 982 if ((rv = nvpair_value_int32(npp, &prop_val)) != 0) 983 break; 984 985 (void) ndi_prop_update_int(DDI_DEV_T_NONE, cdip, np, 986 (int)prop_val); 987 break; 988 } 989 990 case DATA_TYPE_STRING: { 991 char *prop_val; 992 993 if ((rv = nvpair_value_string(npp, &prop_val)) != 0) 994 break; 995 996 (void) ndi_prop_update_string(DDI_DEV_T_NONE, cdip, 997 np, prop_val); 998 break; 999 } 1000 1001 case DATA_TYPE_BYTE_ARRAY: { 1002 uchar_t *val; 1003 uint_t nelms; 1004 1005 if ((rv = nvpair_value_byte_array(npp, &val, 1006 &nelms)) != 0) 1007 break; 1008 1009 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, 1010 cdip, np, (uchar_t *)val, nelms); 1011 break; 1012 } 1013 1014 case DATA_TYPE_INT32_ARRAY: { 1015 int32_t *val; 1016 uint_t nelms; 1017 1018 if ((rv = nvpair_value_int32_array(npp, &val, 1019 &nelms)) != 0) 1020 break; 1021 1022 (void) ndi_prop_update_int_array(DDI_DEV_T_NONE, 1023 cdip, np, val, nelms); 1024 break; 1025 } 1026 1027 case DATA_TYPE_STRING_ARRAY: { 1028 char **val; 1029 uint_t nelms; 1030 1031 if ((rv = nvpair_value_string_array(npp, &val, 1032 &nelms)) != 0) 1033 break; 1034 1035 (void) ndi_prop_update_string_array(DDI_DEV_T_NONE, 1036 cdip, np, val, nelms); 1037 break; 1038 } 1039 1040 /* 1041 * unsupported property data type 1042 */ 1043 default: 1044 rv = EINVAL; 1045 } 1046 } 1047 1048 /* 1049 * something above failed 1050 * destroy the partially child device and abort the request 1051 */ 1052 if (rv != 0) { 1053 (void) ndi_devi_free(cdip); 1054 return (rv); 1055 } 1056 1057 *rdip = cdip; 1058 return (0); 1059 } 1060 1061 /* 1062 * return current soft bus state of bus nexus "dip" 1063 */ 1064 int 1065 ndi_get_bus_state(dev_info_t *dip, uint_t *rstate) 1066 { 1067 if (dip == NULL || rstate == NULL) 1068 return (NDI_FAILURE); 1069 1070 if (DEVI(dip)->devi_ops->devo_bus_ops == NULL) 1071 return (NDI_FAILURE); 1072 1073 mutex_enter(&(DEVI(dip)->devi_lock)); 1074 if (DEVI_IS_BUS_QUIESCED(dip)) 1075 *rstate = BUS_QUIESCED; 1076 else if (DEVI_IS_BUS_DOWN(dip)) 1077 *rstate = BUS_SHUTDOWN; 1078 else 1079 *rstate = BUS_ACTIVE; 1080 mutex_exit(&(DEVI(dip)->devi_lock)); 1081 return (NDI_SUCCESS); 1082 } 1083 1084 /* 1085 * Set the soft state of bus nexus "dip" 1086 */ 1087 int 1088 ndi_set_bus_state(dev_info_t *dip, uint_t state) 1089 { 1090 int rv = NDI_SUCCESS; 1091 1092 if (dip == NULL) 1093 return (NDI_FAILURE); 1094 1095 mutex_enter(&(DEVI(dip)->devi_lock)); 1096 1097 switch (state) { 1098 case BUS_QUIESCED: 1099 DEVI_SET_BUS_QUIESCE(dip); 1100 break; 1101 1102 case BUS_ACTIVE: 1103 DEVI_SET_BUS_ACTIVE(dip); 1104 DEVI_SET_BUS_UP(dip); 1105 break; 1106 1107 case BUS_SHUTDOWN: 1108 DEVI_SET_BUS_DOWN(dip); 1109 break; 1110 1111 default: 1112 rv = NDI_FAILURE; 1113 } 1114 1115 mutex_exit(&(DEVI(dip)->devi_lock)); 1116 return (rv); 1117 } 1118 1119 /* 1120 * These dummy functions are obsolete and may be removed. 1121 * Retained for existing driver compatibility only. 1122 * Drivers should be fixed not to use these functions. 1123 * Don't write new code using these obsolete interfaces. 1124 */ 1125 /*ARGSUSED*/ 1126 void 1127 i_ndi_block_device_tree_changes(uint_t *lkcnt) /* obsolete */ 1128 { 1129 /* obsolete dummy function */ 1130 } 1131 1132 /*ARGSUSED*/ 1133 void 1134 i_ndi_allow_device_tree_changes(uint_t lkcnt) /* obsolete */ 1135 { 1136 /* obsolete dummy function */ 1137 } 1138 1139 /* 1140 * Single thread entry into per-driver list 1141 */ 1142 /*ARGSUSED*/ 1143 void 1144 e_ddi_enter_driver_list(struct devnames *dnp, int *listcnt) /* obsolete */ 1145 { 1146 /* obsolete dummy function */ 1147 } 1148 1149 /* 1150 * release the per-driver list 1151 */ 1152 /*ARGSUSED*/ 1153 void 1154 e_ddi_exit_driver_list(struct devnames *dnp, int listcnt) /* obsolete */ 1155 { 1156 /* obsolete dummy function */ 1157 } 1158 1159 /* 1160 * Attempt to enter driver list 1161 */ 1162 /*ARGSUSED*/ 1163 int 1164 e_ddi_tryenter_driver_list(struct devnames *dnp, int *listcnt) /* obsolete */ 1165 { 1166 return (1); /* obsolete dummy function */ 1167 } 1168 1169 /* 1170 * ndi event handling support functions: 1171 * The NDI event support model is as follows: 1172 * 1173 * The nexus driver defines a set of events using some static structures (so 1174 * these structures can be shared by all instances of the nexus driver). 1175 * The nexus driver allocates an event handle and binds the event set 1176 * to this handle. The nexus driver's event busop functions can just 1177 * call the appropriate NDI event support function using this handle 1178 * as the first argument. 1179 * 1180 * The reasoning for tying events to the device tree is that the entity 1181 * generating the callback will typically be one of the device driver's 1182 * ancestors in the tree. 1183 */ 1184 static int ndi_event_debug = 0; 1185 1186 #ifdef DEBUG 1187 #define NDI_EVENT_DEBUG ndi_event_debug 1188 #endif /* DEBUG */ 1189 1190 /* 1191 * allocate a new ndi event handle 1192 */ 1193 int 1194 ndi_event_alloc_hdl(dev_info_t *dip, ddi_iblock_cookie_t cookie, 1195 ndi_event_hdl_t *handle, uint_t flag) 1196 { 1197 struct ndi_event_hdl *ndi_event_hdl; 1198 1199 ndi_event_hdl = kmem_zalloc(sizeof (struct ndi_event_hdl), 1200 ((flag & NDI_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP)); 1201 1202 if (!ndi_event_hdl) { 1203 return (NDI_FAILURE); 1204 } 1205 1206 ndi_event_hdl->ndi_evthdl_dip = dip; 1207 ndi_event_hdl->ndi_evthdl_iblock_cookie = cookie; 1208 mutex_init(&ndi_event_hdl->ndi_evthdl_mutex, NULL, 1209 MUTEX_DRIVER, (void *)cookie); 1210 1211 mutex_init(&ndi_event_hdl->ndi_evthdl_cb_mutex, NULL, 1212 MUTEX_DRIVER, (void *)cookie); 1213 1214 *handle = (ndi_event_hdl_t)ndi_event_hdl; 1215 1216 return (NDI_SUCCESS); 1217 } 1218 1219 /* 1220 * free the ndi event handle 1221 */ 1222 int 1223 ndi_event_free_hdl(ndi_event_hdl_t handle) 1224 { 1225 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1226 ndi_event_cookie_t *cookie; 1227 ndi_event_cookie_t *free; 1228 1229 ASSERT(handle); 1230 1231 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1232 mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1233 1234 cookie = ndi_event_hdl->ndi_evthdl_cookie_list; 1235 1236 /* deallocate all defined cookies */ 1237 while (cookie != NULL) { 1238 ASSERT(cookie->callback_list == NULL); 1239 free = cookie; 1240 cookie = cookie->next_cookie; 1241 1242 kmem_free(free, sizeof (ndi_event_cookie_t)); 1243 } 1244 1245 1246 mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1247 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1248 1249 /* destroy mutexes */ 1250 mutex_destroy(&ndi_event_hdl->ndi_evthdl_mutex); 1251 mutex_destroy(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1252 1253 /* free event handle */ 1254 kmem_free(ndi_event_hdl, sizeof (struct ndi_event_hdl)); 1255 1256 return (NDI_SUCCESS); 1257 } 1258 1259 1260 /* 1261 * ndi_event_bind_set() adds a set of events to the NDI event 1262 * handle. 1263 * 1264 * Events generated by high level interrupts should not 1265 * be mixed in the same event set with events generated by 1266 * normal interrupts or kernel events. 1267 * 1268 * This function can be called multiple times to bind 1269 * additional sets to the event handle. 1270 * However, events generated by high level interrupts cannot 1271 * be bound to a handle that already has bound events generated 1272 * by normal interrupts or from kernel context and vice versa. 1273 */ 1274 int 1275 ndi_event_bind_set(ndi_event_hdl_t handle, 1276 ndi_event_set_t *ndi_events, 1277 uint_t flag) 1278 { 1279 struct ndi_event_hdl *ndi_event_hdl; 1280 ndi_event_cookie_t *next, *prev, *new_cookie; 1281 uint_t i, len; 1282 uint_t dup = 0; 1283 uint_t high_plevels, other_plevels; 1284 ndi_event_definition_t *ndi_event_defs; 1285 1286 int km_flag = ((flag & NDI_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP); 1287 1288 ASSERT(handle); 1289 ASSERT(ndi_events); 1290 1291 /* 1292 * binding must be performed during attach/detach 1293 */ 1294 if (!DEVI_IS_ATTACHING(handle->ndi_evthdl_dip) && 1295 !DEVI_IS_DETACHING(handle->ndi_evthdl_dip)) { 1296 cmn_err(CE_WARN, "ndi_event_bind_set must be called within " 1297 "attach or detach"); 1298 return (NDI_FAILURE); 1299 } 1300 1301 /* 1302 * if it is not the correct version or the event set is 1303 * empty, bail out 1304 */ 1305 if (ndi_events->ndi_events_version != NDI_EVENTS_REV1) 1306 return (NDI_FAILURE); 1307 1308 ndi_event_hdl = (struct ndi_event_hdl *)handle; 1309 ndi_event_defs = ndi_events->ndi_event_defs; 1310 high_plevels = other_plevels = 0; 1311 1312 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1313 1314 /* check for mixing events at high level with the other types */ 1315 for (i = 0; i < ndi_events->ndi_n_events; i++) { 1316 if (ndi_event_defs[i].ndi_event_plevel == EPL_HIGHLEVEL) { 1317 high_plevels++; 1318 } else { 1319 other_plevels++; 1320 } 1321 } 1322 1323 /* 1324 * bail out if high level events are mixed with other types in this 1325 * event set or the set is incompatible with the set in the handle 1326 */ 1327 if ((high_plevels && other_plevels) || 1328 (other_plevels && ndi_event_hdl->ndi_evthdl_high_plevels) || 1329 (high_plevels && ndi_event_hdl->ndi_evthdl_other_plevels)) { 1330 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1331 1332 return (NDI_FAILURE); 1333 } 1334 1335 /* 1336 * check for duplicate events in both the existing handle 1337 * and the event set, add events if not duplicates 1338 */ 1339 next = ndi_event_hdl->ndi_evthdl_cookie_list; 1340 for (i = 0; i < ndi_events->ndi_n_events; i++) { 1341 while (next != NULL) { 1342 len = strlen(NDI_EVENT_NAME(next)) + 1; 1343 if (strncmp(NDI_EVENT_NAME(next), 1344 ndi_event_defs[i].ndi_event_name, len) == 0) { 1345 dup = 1; 1346 break; 1347 } 1348 1349 prev = next; 1350 next = next->next_cookie; 1351 } 1352 1353 if (dup == 0) { 1354 new_cookie = kmem_zalloc(sizeof (ndi_event_cookie_t), 1355 km_flag); 1356 1357 if (!new_cookie) 1358 return (NDI_FAILURE); 1359 1360 if (ndi_event_hdl->ndi_evthdl_n_events == 0) { 1361 ndi_event_hdl->ndi_evthdl_cookie_list = 1362 new_cookie; 1363 } else { 1364 prev->next_cookie = new_cookie; 1365 } 1366 1367 ndi_event_hdl->ndi_evthdl_n_events++; 1368 1369 /* 1370 * set up new cookie 1371 */ 1372 new_cookie->definition = &ndi_event_defs[i]; 1373 new_cookie->ddip = ndi_event_hdl->ndi_evthdl_dip; 1374 1375 } else { 1376 /* 1377 * event not added, must correct plevel numbers 1378 */ 1379 if (ndi_event_defs[i].ndi_event_plevel == 1380 EPL_HIGHLEVEL) { 1381 high_plevels--; 1382 } else { 1383 other_plevels--; 1384 } 1385 } 1386 1387 dup = 0; 1388 next = ndi_event_hdl->ndi_evthdl_cookie_list; 1389 prev = NULL; 1390 1391 } 1392 1393 ndi_event_hdl->ndi_evthdl_high_plevels += high_plevels; 1394 ndi_event_hdl->ndi_evthdl_other_plevels += other_plevels; 1395 1396 ASSERT((ndi_event_hdl->ndi_evthdl_high_plevels == 0) || 1397 (ndi_event_hdl->ndi_evthdl_other_plevels == 0)); 1398 1399 #ifdef NDI_EVENT_DEBUG 1400 if (ndi_event_debug) { 1401 ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_bind_set"); 1402 } 1403 #endif /* NDI_EVENT_DEBUG */ 1404 1405 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1406 1407 return (NDI_SUCCESS); 1408 } 1409 1410 /* 1411 * ndi_event_unbind_set() unbinds a set of events, previously 1412 * bound using ndi_event_bind_set(), from the NDI event 1413 * handle. 1414 * 1415 * This routine will unbind all events in the event set. If an event, 1416 * specified in the event set, is not found in the handle, this 1417 * routine will proceed onto the next member of the set as if the event 1418 * was never specified. 1419 * 1420 * The event set may be a subset of the set of events that 1421 * was previously bound to the handle. For example, events 1422 * can be individually unbound. 1423 * 1424 * An event cannot be unbound if callbacks are still 1425 * registered against the event. 1426 */ 1427 /*ARGSUSED*/ 1428 int 1429 ndi_event_unbind_set(ndi_event_hdl_t handle, ndi_event_set_t *ndi_events, 1430 uint_t flag) 1431 { 1432 ndi_event_definition_t *ndi_event_defs; 1433 int len; 1434 uint_t i; 1435 int rval; 1436 ndi_event_cookie_t *cookie_list; 1437 ndi_event_cookie_t *prev = NULL; 1438 1439 ASSERT(ndi_events); 1440 ASSERT(handle); 1441 1442 /* 1443 * binding must be performed during attach/detac 1444 */ 1445 if (!DEVI_IS_ATTACHING(handle->ndi_evthdl_dip) && 1446 !DEVI_IS_DETACHING(handle->ndi_evthdl_dip)) { 1447 cmn_err(CE_WARN, "ndi_event_bind_set must be called within " 1448 "attach or detach"); 1449 return (NDI_FAILURE); 1450 } 1451 1452 /* bail out if ndi_event_set is outdated */ 1453 if (ndi_events->ndi_events_version != NDI_EVENTS_REV1) { 1454 return (NDI_FAILURE); 1455 } 1456 1457 ASSERT(ndi_events->ndi_event_defs); 1458 1459 ndi_event_defs = ndi_events->ndi_event_defs; 1460 1461 mutex_enter(&handle->ndi_evthdl_mutex); 1462 mutex_enter(&handle->ndi_evthdl_cb_mutex); 1463 1464 /* 1465 * Verify that all events in the event set are eligible 1466 * for unbinding(ie. there are no outstanding callbacks). 1467 * If any one of the events are ineligible, fail entire 1468 * operation. 1469 */ 1470 1471 for (i = 0; i < ndi_events->ndi_n_events; i++) { 1472 cookie_list = handle->ndi_evthdl_cookie_list; 1473 while (cookie_list != NULL) { 1474 len = strlen(NDI_EVENT_NAME(cookie_list)) + 1; 1475 if (strncmp(NDI_EVENT_NAME(cookie_list), 1476 ndi_event_defs[i].ndi_event_name, len) == 0) { 1477 1478 ASSERT(cookie_list->callback_list == NULL); 1479 if (cookie_list->callback_list) { 1480 rval = NDI_FAILURE; 1481 goto done; 1482 } 1483 break; 1484 } else { 1485 cookie_list = cookie_list->next_cookie; 1486 } 1487 } 1488 } 1489 1490 /* 1491 * remove all events found within the handle 1492 * If an event is not found, this function will proceed as if the event 1493 * was never specified. 1494 */ 1495 1496 for (i = 0; i < ndi_events->ndi_n_events; i++) { 1497 cookie_list = handle->ndi_evthdl_cookie_list; 1498 prev = NULL; 1499 while (cookie_list != NULL) { 1500 len = strlen(NDI_EVENT_NAME(cookie_list)) + 1; 1501 if (strncmp(NDI_EVENT_NAME(cookie_list), 1502 ndi_event_defs[i].ndi_event_name, len) == 0) { 1503 1504 /* 1505 * can not unbind an event definition with 1506 * outstanding callbacks 1507 */ 1508 if (cookie_list->callback_list) { 1509 rval = NDI_FAILURE; 1510 goto done; 1511 } 1512 1513 /* remove this cookie from the list */ 1514 if (prev != NULL) { 1515 prev->next_cookie = 1516 cookie_list->next_cookie; 1517 } else { 1518 handle->ndi_evthdl_cookie_list = 1519 cookie_list->next_cookie; 1520 } 1521 1522 /* adjust plevel counts */ 1523 if (NDI_EVENT_PLEVEL(cookie_list) == 1524 EPL_HIGHLEVEL) { 1525 handle->ndi_evthdl_high_plevels--; 1526 } else { 1527 handle->ndi_evthdl_other_plevels--; 1528 } 1529 1530 /* adjust cookie count */ 1531 handle->ndi_evthdl_n_events--; 1532 1533 /* free the cookie */ 1534 kmem_free(cookie_list, 1535 sizeof (ndi_event_cookie_t)); 1536 1537 cookie_list = handle->ndi_evthdl_cookie_list; 1538 break; 1539 1540 } else { 1541 prev = cookie_list; 1542 cookie_list = cookie_list->next_cookie; 1543 } 1544 1545 } 1546 1547 } 1548 1549 #ifdef NDI_EVENT_DEBUG 1550 if (ndi_event_debug) { 1551 ndi_event_dump_hdl(handle, "ndi_event_unbind_set"); 1552 } 1553 #endif /* NDI_EVENT_DEBUG */ 1554 1555 rval = NDI_SUCCESS; 1556 1557 done: 1558 mutex_exit(&handle->ndi_evthdl_cb_mutex); 1559 mutex_exit(&handle->ndi_evthdl_mutex); 1560 1561 return (rval); 1562 } 1563 1564 /* 1565 * ndi_event_retrieve_cookie(): 1566 * Return an event cookie for eventname if this nexus driver 1567 * has defined the named event. The event cookie returned 1568 * by this function is used to register callback handlers 1569 * for the event. 1570 * 1571 * ndi_event_retrieve_cookie() is intended to be used in the 1572 * nexus driver's bus_get_eventcookie busop routine. 1573 * 1574 * If the event is not defined by this bus nexus driver, and flag 1575 * does not include NDI_EVENT_NOPASS, then ndi_event_retrieve_cookie() 1576 * will pass the request up the device tree hierarchy by calling 1577 * ndi_busop_get_eventcookie(9N). 1578 * If the event is not defined by this bus nexus driver, and flag 1579 * does include NDI_EVENT_NOPASS, ndi_event_retrieve_cookie() 1580 * will return NDI_FAILURE. The caller may then determine what further 1581 * action to take, such as using a different handle, passing the 1582 * request up the device tree using ndi_busop_get_eventcookie(9N), 1583 * or returning the failure to the caller, thus blocking the 1584 * progress of the request up the tree. 1585 */ 1586 int 1587 ndi_event_retrieve_cookie(ndi_event_hdl_t handle, 1588 dev_info_t *rdip, 1589 char *eventname, 1590 ddi_eventcookie_t *cookiep, 1591 uint_t flag) 1592 { 1593 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1594 int len; 1595 ndi_event_cookie_t *cookie_list; 1596 1597 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1598 1599 cookie_list = ndi_event_hdl->ndi_evthdl_cookie_list; 1600 /* 1601 * search the cookie list for the event name and return 1602 * cookie if found. 1603 */ 1604 while (cookie_list != NULL) { 1605 1606 len = strlen(NDI_EVENT_NAME(cookie_list)) + 1; 1607 if (strncmp(NDI_EVENT_NAME(cookie_list), eventname, 1608 len) == 0) { 1609 *cookiep = (ddi_eventcookie_t)cookie_list; 1610 1611 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1612 return (NDI_SUCCESS); 1613 } 1614 1615 cookie_list = cookie_list->next_cookie; 1616 } 1617 1618 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1619 /* 1620 * event was not found, pass up or return failure 1621 */ 1622 if ((flag & NDI_EVENT_NOPASS) == 0) { 1623 return (ndi_busop_get_eventcookie( 1624 ndi_event_hdl->ndi_evthdl_dip, rdip, 1625 eventname, cookiep)); 1626 } else { 1627 return (NDI_FAILURE); 1628 } 1629 } 1630 1631 /* 1632 * check whether this nexus defined this event and look up attributes 1633 */ 1634 static int 1635 ndi_event_is_defined(ndi_event_hdl_t handle, 1636 ddi_eventcookie_t cookie, int *attributes) 1637 { 1638 1639 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1640 ndi_event_cookie_t *cookie_list; 1641 1642 ASSERT(mutex_owned(&handle->ndi_evthdl_mutex)); 1643 1644 cookie_list = ndi_event_hdl->ndi_evthdl_cookie_list; 1645 while (cookie_list != NULL) { 1646 if (cookie_list == NDI_EVENT(cookie)) { 1647 if (attributes) 1648 *attributes = 1649 NDI_EVENT_ATTRIBUTES(cookie_list); 1650 1651 return (NDI_SUCCESS); 1652 } 1653 1654 cookie_list = cookie_list->next_cookie; 1655 } 1656 1657 return (NDI_FAILURE); 1658 } 1659 1660 /* 1661 * ndi_event_add_callback(): adds an event callback registration 1662 * to the event cookie defining this event. 1663 * 1664 * Refer also to bus_add_eventcall(9n) and ndi_busop_add_eventcall(9n). 1665 * 1666 * ndi_event_add_callback(9n) is intended to be used in 1667 * the nexus driver's bus_add_eventcall(9n) busop function. 1668 * 1669 * If the event is not defined by this bus nexus driver, 1670 * ndi_event_add_callback() will return NDI_FAILURE. 1671 */ 1672 int 1673 ndi_event_add_callback(ndi_event_hdl_t handle, dev_info_t *child_dip, 1674 ddi_eventcookie_t cookie, 1675 void (*event_callback)(dev_info_t *, 1676 ddi_eventcookie_t, void *arg, void *impldata), 1677 void *arg, 1678 uint_t flag, 1679 ddi_callback_id_t *cb_id) 1680 { 1681 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1682 int km_flag = ((flag & NDI_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP); 1683 ndi_event_callbacks_t *cb; 1684 1685 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1686 1687 /* 1688 * if the event was not bound to this handle, return failure 1689 */ 1690 if (ndi_event_is_defined(handle, cookie, NULL) != NDI_SUCCESS) { 1691 1692 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1693 return (NDI_FAILURE); 1694 1695 } 1696 1697 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1698 1699 /* 1700 * allocate space for a callback structure 1701 */ 1702 cb = kmem_zalloc(sizeof (ndi_event_callbacks_t), km_flag); 1703 if (cb == NULL) { 1704 return (NDI_FAILURE); 1705 } 1706 1707 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1708 1709 /* initialize callback structure */ 1710 cb->ndi_evtcb_dip = child_dip; 1711 cb->ndi_evtcb_callback = event_callback; 1712 cb->ndi_evtcb_arg = arg; 1713 cb->ndi_evtcb_cookie = cookie; 1714 cb->devname = (char *)ddi_driver_name(child_dip); 1715 1716 *cb_id = (ddi_callback_id_t)cb; 1717 mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1718 1719 /* add this callback structure to the list */ 1720 if (NDI_EVENT(cookie)->callback_list) { 1721 cb->ndi_evtcb_next = NDI_EVENT(cookie)->callback_list; 1722 NDI_EVENT(cookie)->callback_list->ndi_evtcb_prev = cb; 1723 NDI_EVENT(cookie)->callback_list = cb; 1724 } else { 1725 NDI_EVENT(cookie)->callback_list = cb; 1726 } 1727 #ifdef NDI_EVENT_DEBUG 1728 if (ndi_event_debug) { 1729 ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_add_callback"); 1730 } 1731 #endif /* NDI_EVENT_DEBUG */ 1732 1733 mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1734 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1735 1736 return (NDI_SUCCESS); 1737 } 1738 1739 /* 1740 * ndi_event_remove_callback(): 1741 * 1742 * ndi_event_remove_callback() removes a callback that was 1743 * previously registered using ndi_event_add_callback(9N). 1744 * Refer also to bus_remove_eventcall(9n) and 1745 * ndi_busop_remove_eventcall(9n). 1746 * ndi_event_remove_callback(9n) is intended to be used in 1747 * the nexus driver's bus_remove_eventcall (9n) busop function. 1748 * If the event is not defined by this bus nexus driver, 1749 * ndi_event_remove_callback() will return NDI_FAILURE. 1750 */ 1751 static void do_ndi_event_remove_callback(struct ndi_event_hdl *ndi_event_hdl, 1752 ddi_callback_id_t cb_id); 1753 1754 int 1755 ndi_event_remove_callback(ndi_event_hdl_t handle, ddi_callback_id_t cb_id) 1756 { 1757 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1758 1759 ASSERT(cb_id); 1760 1761 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1762 mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1763 1764 do_ndi_event_remove_callback(ndi_event_hdl, cb_id); 1765 1766 mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1767 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1768 1769 return (NDI_SUCCESS); 1770 } 1771 1772 /*ARGSUSED*/ 1773 static void 1774 do_ndi_event_remove_callback(struct ndi_event_hdl *ndi_event_hdl, 1775 ddi_callback_id_t cb_id) 1776 { 1777 ndi_event_callbacks_t *cb = (ndi_event_callbacks_t *)cb_id; 1778 ASSERT(cb); 1779 1780 ASSERT(mutex_owned(&ndi_event_hdl->ndi_evthdl_mutex)); 1781 ASSERT(mutex_owned(&ndi_event_hdl->ndi_evthdl_cb_mutex)); 1782 1783 /* remove from callback linked list */ 1784 if (cb->ndi_evtcb_prev) { 1785 cb->ndi_evtcb_prev->ndi_evtcb_next = cb->ndi_evtcb_next; 1786 } 1787 1788 if (cb->ndi_evtcb_next) { 1789 cb->ndi_evtcb_next->ndi_evtcb_prev = cb->ndi_evtcb_prev; 1790 } 1791 1792 if (NDI_EVENT(cb->ndi_evtcb_cookie)->callback_list == cb) { 1793 NDI_EVENT(cb->ndi_evtcb_cookie)->callback_list = 1794 cb->ndi_evtcb_next; 1795 } 1796 1797 kmem_free(cb, sizeof (ndi_event_callbacks_t)); 1798 } 1799 1800 /* 1801 * ndi_event_run_callbacks() performs event callbacks for the event 1802 * specified by cookie, if this is among those bound to the 1803 * supplied handle. 1804 * If the event is among those bound to the handle, none, 1805 * some, or all of the handlers registered for the event 1806 * will be called, according to the delivery attributes of 1807 * the event. 1808 * If the event attributes include NDI_EVENT_POST_TO_ALL 1809 * (the default), all the handlers for the event will be 1810 * called in an unspecified order. 1811 * If the event attributes include NDI_EVENT_POST_TO_TGT, only 1812 * the handlers (if any) registered by the driver identified by 1813 * rdip will be called. 1814 * If the event identified by cookie is not bound to the handle, 1815 * NDI_FAILURE will be returned. 1816 */ 1817 int 1818 ndi_event_run_callbacks(ndi_event_hdl_t handle, dev_info_t *child_dip, 1819 ddi_eventcookie_t cookie, void *bus_impldata) 1820 { 1821 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1822 ndi_event_callbacks_t *next, *cb; 1823 int attributes; 1824 1825 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1826 1827 /* if this is not our event, fail */ 1828 if (ndi_event_is_defined(handle, cookie, &attributes) != 1829 NDI_SUCCESS) { 1830 1831 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1832 return (NDI_FAILURE); 1833 } 1834 1835 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1836 1837 #ifdef NDI_EVENT_DEBUG 1838 if (ndi_event_debug) { 1839 cmn_err(CE_CONT, "ndi_event_run_callbacks:\n\t" 1840 "producer dip=%p (%s%d): cookie = %p, name = %s\n", 1841 (void *)ndi_event_hdl->ndi_evthdl_dip, 1842 ddi_node_name(ndi_event_hdl->ndi_evthdl_dip), 1843 ddi_get_instance(ndi_event_hdl->ndi_evthdl_dip), 1844 (void *)cookie, 1845 ndi_event_cookie_to_name(handle, cookie)); 1846 } 1847 #endif /* #ifdef NDI_EVENT_DEBUG */ 1848 1849 1850 /* 1851 * The callback handlers may call conversion functions. The conversion 1852 * functions may hold the ndi_evthdl_mutex during execution. Thus, to 1853 * avoid a recursive mutex problem, only the ndi_evthdl_cb_mutex is 1854 * held. The ndi_evthdl_mutex is not held when running the callbacks. 1855 */ 1856 mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1857 1858 /* perform callbacks */ 1859 next = NDI_EVENT(cookie)->callback_list; 1860 while (next != NULL) { 1861 1862 cb = next; 1863 next = next->ndi_evtcb_next; 1864 1865 ASSERT(cb->ndi_evtcb_cookie == cookie); 1866 1867 if (attributes == NDI_EVENT_POST_TO_TGT && 1868 child_dip != cb->ndi_evtcb_dip) { 1869 continue; 1870 } 1871 1872 cb->ndi_evtcb_callback(cb->ndi_evtcb_dip, cb->ndi_evtcb_cookie, 1873 cb->ndi_evtcb_arg, bus_impldata); 1874 1875 #ifdef NDI_EVENT_DEBUG 1876 if (ndi_event_debug) { 1877 cmn_err(CE_CONT, 1878 "\t\tconsumer dip=%p (%s%d)\n", 1879 (void *)cb->ndi_evtcb_dip, 1880 ddi_node_name(cb->ndi_evtcb_dip), 1881 ddi_get_instance(cb->ndi_evtcb_dip)); 1882 } 1883 #endif 1884 1885 } 1886 1887 mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1888 1889 #ifdef NDI_EVENT_DEBUG 1890 if (ndi_event_debug) { 1891 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1892 ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_run_callbacks"); 1893 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1894 } 1895 #endif /* NDI_EVENT_DEBUG */ 1896 1897 return (NDI_SUCCESS); 1898 } 1899 1900 1901 /* 1902 * perform one callback for a specified cookie and just one target 1903 */ 1904 int 1905 ndi_event_do_callback(ndi_event_hdl_t handle, dev_info_t *child_dip, 1906 ddi_eventcookie_t cookie, void *bus_impldata) 1907 { 1908 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1909 ndi_event_callbacks_t *next, *cb; 1910 int attributes; 1911 1912 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1913 1914 /* if this is not our event, fail */ 1915 if (ndi_event_is_defined(handle, cookie, &attributes) != 1916 NDI_SUCCESS) { 1917 1918 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1919 1920 return (NDI_FAILURE); 1921 } 1922 1923 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1924 1925 #ifdef NDI_EVENT_DEBUG 1926 if (ndi_event_debug) { 1927 cmn_err(CE_CONT, "ndi_event_run_callbacks:\n\t" 1928 "producer dip=%p (%s%d): cookie = %p, name = %s\n", 1929 (void *)ndi_event_hdl->ndi_evthdl_dip, 1930 ddi_node_name(ndi_event_hdl->ndi_evthdl_dip), 1931 ddi_get_instance(ndi_event_hdl->ndi_evthdl_dip), 1932 (void *)cookie, 1933 ndi_event_cookie_to_name(handle, cookie)); 1934 } 1935 #endif 1936 1937 1938 /* 1939 * we only grab the cb mutex because the callback handlers 1940 * may call the conversion functions which would cause a recursive 1941 * mutex problem 1942 */ 1943 mutex_enter(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1944 1945 /* perform callbacks */ 1946 for (next = NDI_EVENT(cookie)->callback_list; next != NULL; ) { 1947 cb = next; 1948 next = next->ndi_evtcb_next; 1949 1950 if (cb->ndi_evtcb_dip == child_dip) { 1951 cb->ndi_evtcb_callback(cb->ndi_evtcb_dip, 1952 cb->ndi_evtcb_cookie, cb->ndi_evtcb_arg, 1953 bus_impldata); 1954 1955 #ifdef NDI_EVENT_DEBUG 1956 if (ndi_event_debug) { 1957 cmn_err(CE_CONT, 1958 "\t\tconsumer dip=%p (%s%d)\n", 1959 (void *)cb->ndi_evtcb_dip, 1960 ddi_node_name(cb->ndi_evtcb_dip), 1961 ddi_get_instance(cb->ndi_evtcb_dip)); 1962 } 1963 #endif 1964 break; 1965 } 1966 } 1967 1968 mutex_exit(&ndi_event_hdl->ndi_evthdl_cb_mutex); 1969 1970 #ifdef NDI_EVENT_DEBUG 1971 if (ndi_event_debug) { 1972 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1973 ndi_event_dump_hdl(ndi_event_hdl, "ndi_event_run_callbacks"); 1974 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1975 } 1976 #endif /* NDI_EVENT_DEBUG */ 1977 1978 return (NDI_SUCCESS); 1979 } 1980 1981 1982 /* 1983 * ndi_event_tag_to_cookie: utility function to find an event cookie 1984 * given an event tag 1985 */ 1986 ddi_eventcookie_t 1987 ndi_event_tag_to_cookie(ndi_event_hdl_t handle, int event_tag) 1988 { 1989 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 1990 ndi_event_cookie_t *list; 1991 1992 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 1993 1994 list = ndi_event_hdl->ndi_evthdl_cookie_list; 1995 while (list != NULL) { 1996 if (NDI_EVENT_TAG(list) == event_tag) { 1997 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 1998 return ((ddi_eventcookie_t)list); 1999 } 2000 2001 list = list->next_cookie; 2002 } 2003 2004 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2005 return (NULL); 2006 } 2007 2008 /* 2009 * ndi_event_cookie_to_tag: utility function to find a event tag 2010 * given an event_cookie 2011 */ 2012 int 2013 ndi_event_cookie_to_tag(ndi_event_hdl_t handle, ddi_eventcookie_t cookie) 2014 { 2015 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 2016 ndi_event_cookie_t *list; 2017 2018 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 2019 2020 list = ndi_event_hdl->ndi_evthdl_cookie_list; 2021 2022 while (list != NULL) { 2023 if ((ddi_eventcookie_t)list == cookie) { 2024 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2025 return (NDI_EVENT_TAG(list)); 2026 } 2027 2028 list = list->next_cookie; 2029 } 2030 2031 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2032 return (NDI_FAILURE); 2033 2034 } 2035 2036 /* 2037 * ndi_event_cookie_to_name: utility function to find an event name 2038 * given an event_cookie 2039 */ 2040 char * 2041 ndi_event_cookie_to_name(ndi_event_hdl_t handle, ddi_eventcookie_t cookie) 2042 { 2043 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 2044 ndi_event_cookie_t *list; 2045 2046 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 2047 2048 list = ndi_event_hdl->ndi_evthdl_cookie_list; 2049 2050 while (list != NULL) { 2051 if (list == NDI_EVENT(cookie)) { 2052 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2053 return (NDI_EVENT_NAME(list)); 2054 } 2055 2056 list = list->next_cookie; 2057 } 2058 2059 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2060 return (NULL); 2061 } 2062 2063 /* 2064 * ndi_event_tag_to_name: utility function to find an event name 2065 * given an event tag 2066 */ 2067 char * 2068 ndi_event_tag_to_name(ndi_event_hdl_t handle, int event_tag) 2069 { 2070 struct ndi_event_hdl *ndi_event_hdl = (struct ndi_event_hdl *)handle; 2071 ndi_event_cookie_t *list; 2072 2073 mutex_enter(&ndi_event_hdl->ndi_evthdl_mutex); 2074 2075 list = ndi_event_hdl->ndi_evthdl_cookie_list; 2076 2077 while (list) { 2078 if (NDI_EVENT_TAG(list) == event_tag) { 2079 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2080 return (NDI_EVENT_NAME(list)); 2081 } 2082 2083 list = list->next_cookie; 2084 } 2085 2086 mutex_exit(&ndi_event_hdl->ndi_evthdl_mutex); 2087 2088 return (NULL); 2089 } 2090 2091 #ifdef NDI_EVENT_DEBUG 2092 void 2093 ndi_event_dump_hdl(struct ndi_event_hdl *hdl, char *location) 2094 { 2095 2096 2097 ndi_event_callbacks_t *next; 2098 ndi_event_cookie_t *list; 2099 2100 ASSERT(mutex_owned(&hdl->ndi_evthdl_mutex)); 2101 list = hdl->ndi_evthdl_cookie_list; 2102 2103 cmn_err(CE_CONT, "%s: event handle (%p): dip = %p (%s%d)\n", 2104 location, (void *)hdl, 2105 (void *)hdl->ndi_evthdl_dip, 2106 ddi_node_name(hdl->ndi_evthdl_dip), 2107 ddi_get_instance(hdl->ndi_evthdl_dip)); 2108 cmn_err(CE_CONT, "\thigh=%d other=%d n=%d\n", 2109 hdl->ndi_evthdl_high_plevels, 2110 hdl->ndi_evthdl_other_plevels, 2111 hdl->ndi_evthdl_n_events); 2112 2113 2114 cmn_err(CE_CONT, "\tevent cookies:\n"); 2115 while (list) { 2116 cmn_err(CE_CONT, 2117 "\t\ttag=%d name=%s p=%d a=%x dd=%p\n", 2118 NDI_EVENT_TAG(list), 2119 NDI_EVENT_NAME(list), 2120 NDI_EVENT_PLEVEL(list), 2121 NDI_EVENT_ATTRIBUTES(list), 2122 (void *)NDI_EVENT_DDIP(list)); 2123 cmn_err(CE_CONT, "\t\tcallbacks:\n"); 2124 for (next = list->callback_list; next != NULL; 2125 next = next->ndi_evtcb_next) { 2126 cmn_err(CE_CONT, 2127 "\t\t dip=%p (%s%d) cookie=%p arg=%p\n", 2128 (void*)next->ndi_evtcb_dip, 2129 ddi_driver_name(next->ndi_evtcb_dip), 2130 ddi_get_instance(next->ndi_evtcb_dip), 2131 (void *)next->ndi_evtcb_cookie, 2132 next->ndi_evtcb_arg); 2133 } 2134 2135 list = list->next_cookie; 2136 } 2137 2138 cmn_err(CE_CONT, "\n"); 2139 } 2140 #endif 2141 2142 int 2143 ndi_dev_is_prom_node(dev_info_t *dip) 2144 { 2145 return (DEVI(dip)->devi_node_class == DDI_NC_PROM); 2146 } 2147 2148 int 2149 ndi_dev_is_pseudo_node(dev_info_t *dip) 2150 { 2151 /* 2152 * NOTE: this does NOT mean the pseudo branch of the device tree, 2153 * it means the node was created by software (DEVI_SID_NODEID | 2154 * DEVI_PSEUDO_NODEID) instead of being generated from a PROM node. 2155 */ 2156 return (DEVI(dip)->devi_node_class == DDI_NC_PSEUDO); 2157 } 2158 2159 int 2160 ndi_dev_is_persistent_node(dev_info_t *dip) 2161 { 2162 return ((DEVI(dip)->devi_node_attributes & DDI_PERSISTENT) != 0); 2163 } 2164 2165 int 2166 i_ndi_dev_is_auto_assigned_node(dev_info_t *dip) 2167 { 2168 return ((DEVI(dip)->devi_node_attributes & 2169 DDI_AUTO_ASSIGNED_NODEID) != 0); 2170 } 2171 2172 void 2173 i_ndi_set_node_class(dev_info_t *dip, ddi_node_class_t c) 2174 { 2175 DEVI(dip)->devi_node_class = c; 2176 } 2177 2178 ddi_node_class_t 2179 i_ndi_get_node_class(dev_info_t *dip) 2180 { 2181 return (DEVI(dip)->devi_node_class); 2182 } 2183 2184 void 2185 i_ndi_set_node_attributes(dev_info_t *dip, int p) 2186 { 2187 DEVI(dip)->devi_node_attributes = p; 2188 } 2189 2190 int 2191 i_ndi_get_node_attributes(dev_info_t *dip) 2192 { 2193 return (DEVI(dip)->devi_node_attributes); 2194 } 2195 2196 void 2197 i_ndi_set_nodeid(dev_info_t *dip, int n) 2198 { 2199 DEVI(dip)->devi_nodeid = n; 2200 } 2201 2202 void 2203 ndi_set_acc_fault(ddi_acc_handle_t ah) 2204 { 2205 i_ddi_acc_set_fault(ah); 2206 } 2207 2208 void 2209 ndi_clr_acc_fault(ddi_acc_handle_t ah) 2210 { 2211 i_ddi_acc_clr_fault(ah); 2212 } 2213 2214 void 2215 ndi_set_dma_fault(ddi_dma_handle_t dh) 2216 { 2217 i_ddi_dma_set_fault(dh); 2218 } 2219 2220 void 2221 ndi_clr_dma_fault(ddi_dma_handle_t dh) 2222 { 2223 i_ddi_dma_clr_fault(dh); 2224 } 2225 2226 /* 2227 * The default fault-handler, called when the event posted by 2228 * ddi_dev_report_fault() reaches rootnex. 2229 */ 2230 static void 2231 i_ddi_fault_handler(dev_info_t *dip, struct ddi_fault_event_data *fedp) 2232 { 2233 ASSERT(fedp); 2234 2235 mutex_enter(&(DEVI(dip)->devi_lock)); 2236 if (!DEVI_IS_DEVICE_OFFLINE(dip)) { 2237 switch (fedp->f_impact) { 2238 case DDI_SERVICE_LOST: 2239 DEVI_SET_DEVICE_DOWN(dip); 2240 break; 2241 2242 case DDI_SERVICE_DEGRADED: 2243 DEVI_SET_DEVICE_DEGRADED(dip); 2244 break; 2245 2246 case DDI_SERVICE_UNAFFECTED: 2247 default: 2248 break; 2249 2250 case DDI_SERVICE_RESTORED: 2251 DEVI_SET_DEVICE_UP(dip); 2252 break; 2253 } 2254 } 2255 mutex_exit(&(DEVI(dip)->devi_lock)); 2256 } 2257 2258 /* 2259 * The default fault-logger, called when the event posted by 2260 * ddi_dev_report_fault() reaches rootnex. 2261 */ 2262 /*ARGSUSED*/ 2263 static void 2264 i_ddi_fault_logger(dev_info_t *rdip, struct ddi_fault_event_data *fedp) 2265 { 2266 ddi_devstate_t newstate; 2267 const char *action; 2268 const char *servstate; 2269 const char *location; 2270 int bad; 2271 int changed; 2272 int level; 2273 int still; 2274 2275 ASSERT(fedp); 2276 2277 bad = 0; 2278 switch (fedp->f_location) { 2279 case DDI_DATAPATH_FAULT: 2280 location = "in datapath to"; 2281 break; 2282 case DDI_DEVICE_FAULT: 2283 location = "in"; 2284 break; 2285 case DDI_EXTERNAL_FAULT: 2286 location = "external to"; 2287 break; 2288 default: 2289 location = "somewhere near"; 2290 bad = 1; 2291 break; 2292 } 2293 2294 newstate = ddi_get_devstate(fedp->f_dip); 2295 switch (newstate) { 2296 case DDI_DEVSTATE_OFFLINE: 2297 servstate = "unavailable"; 2298 break; 2299 case DDI_DEVSTATE_DOWN: 2300 servstate = "unavailable"; 2301 break; 2302 case DDI_DEVSTATE_QUIESCED: 2303 servstate = "suspended"; 2304 break; 2305 case DDI_DEVSTATE_DEGRADED: 2306 servstate = "degraded"; 2307 break; 2308 default: 2309 servstate = "available"; 2310 break; 2311 } 2312 2313 changed = (newstate != fedp->f_oldstate); 2314 level = (newstate < fedp->f_oldstate) ? CE_WARN : CE_NOTE; 2315 switch (fedp->f_impact) { 2316 case DDI_SERVICE_LOST: 2317 case DDI_SERVICE_DEGRADED: 2318 case DDI_SERVICE_UNAFFECTED: 2319 /* fault detected; service [still] <servstate> */ 2320 action = "fault detected"; 2321 still = !changed; 2322 break; 2323 2324 case DDI_SERVICE_RESTORED: 2325 if (newstate != DDI_DEVSTATE_UP) { 2326 /* fault cleared; service still <servstate> */ 2327 action = "fault cleared"; 2328 still = 1; 2329 } else if (changed) { 2330 /* fault cleared; service <servstate> */ 2331 action = "fault cleared"; 2332 still = 0; 2333 } else { 2334 /* no fault; service <servstate> */ 2335 action = "no fault"; 2336 still = 0; 2337 } 2338 break; 2339 2340 default: 2341 bad = 1; 2342 break; 2343 } 2344 2345 cmn_err(level, "!%s%d: %s %s device; service %s%s"+(bad|changed), 2346 ddi_driver_name(fedp->f_dip), 2347 ddi_get_instance(fedp->f_dip), 2348 bad ? "invalid report of fault" : action, 2349 location, still ? "still " : "", servstate); 2350 2351 cmn_err(level, "!%s%d: %s"+(bad|changed), 2352 ddi_driver_name(fedp->f_dip), 2353 ddi_get_instance(fedp->f_dip), 2354 fedp->f_message); 2355 } 2356 2357 /* 2358 * Platform-settable pointers to fault handler and logger functions. 2359 * These are called by the default rootnex event-posting code when 2360 * a fault event reaches rootnex. 2361 */ 2362 void (*plat_fault_handler)(dev_info_t *, struct ddi_fault_event_data *) = 2363 i_ddi_fault_handler; 2364 void (*plat_fault_logger)(dev_info_t *, struct ddi_fault_event_data *) = 2365 i_ddi_fault_logger; 2366 2367 /* 2368 * Rootnex event definitions ... 2369 */ 2370 enum rootnex_event_tags { 2371 ROOTNEX_FAULT_EVENT 2372 }; 2373 static ndi_event_hdl_t rootnex_event_hdl; 2374 static ndi_event_definition_t rootnex_event_set[] = { 2375 { 2376 ROOTNEX_FAULT_EVENT, 2377 DDI_DEVI_FAULT_EVENT, 2378 EPL_INTERRUPT, 2379 NDI_EVENT_POST_TO_ALL 2380 } 2381 }; 2382 static ndi_event_set_t rootnex_events = { 2383 NDI_EVENTS_REV1, 2384 sizeof (rootnex_event_set) / sizeof (rootnex_event_set[0]), 2385 rootnex_event_set 2386 }; 2387 2388 /* 2389 * Initialize rootnex event handle 2390 */ 2391 void 2392 i_ddi_rootnex_init_events(dev_info_t *dip) 2393 { 2394 if (ndi_event_alloc_hdl(dip, (ddi_iblock_cookie_t)(LOCK_LEVEL-1), 2395 &rootnex_event_hdl, NDI_SLEEP) == NDI_SUCCESS) { 2396 if (ndi_event_bind_set(rootnex_event_hdl, 2397 &rootnex_events, NDI_SLEEP) != NDI_SUCCESS) { 2398 (void) ndi_event_free_hdl(rootnex_event_hdl); 2399 rootnex_event_hdl = NULL; 2400 } 2401 } 2402 } 2403 2404 /* 2405 * Event-handling functions for rootnex 2406 * These provide the standard implementation of fault handling 2407 */ 2408 /*ARGSUSED*/ 2409 int 2410 i_ddi_rootnex_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, 2411 char *eventname, ddi_eventcookie_t *cookiep) 2412 { 2413 if (rootnex_event_hdl == NULL) 2414 return (NDI_FAILURE); 2415 return (ndi_event_retrieve_cookie(rootnex_event_hdl, rdip, eventname, 2416 cookiep, NDI_EVENT_NOPASS)); 2417 } 2418 2419 /*ARGSUSED*/ 2420 int 2421 i_ddi_rootnex_add_eventcall(dev_info_t *dip, dev_info_t *rdip, 2422 ddi_eventcookie_t eventid, void (*handler)(dev_info_t *dip, 2423 ddi_eventcookie_t event, void *arg, void *impl_data), void *arg, 2424 ddi_callback_id_t *cb_id) 2425 { 2426 if (rootnex_event_hdl == NULL) 2427 return (NDI_FAILURE); 2428 return (ndi_event_add_callback(rootnex_event_hdl, rdip, 2429 eventid, handler, arg, NDI_SLEEP, cb_id)); 2430 } 2431 2432 /*ARGSUSED*/ 2433 int 2434 i_ddi_rootnex_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id) 2435 { 2436 if (rootnex_event_hdl == NULL) 2437 return (NDI_FAILURE); 2438 2439 return (ndi_event_remove_callback(rootnex_event_hdl, cb_id)); 2440 } 2441 2442 /*ARGSUSED*/ 2443 int 2444 i_ddi_rootnex_post_event(dev_info_t *dip, dev_info_t *rdip, 2445 ddi_eventcookie_t eventid, void *impl_data) 2446 { 2447 int tag; 2448 2449 if (rootnex_event_hdl == NULL) 2450 return (NDI_FAILURE); 2451 2452 tag = ndi_event_cookie_to_tag(rootnex_event_hdl, eventid); 2453 if (tag == ROOTNEX_FAULT_EVENT) { 2454 (*plat_fault_handler)(rdip, impl_data); 2455 (*plat_fault_logger)(rdip, impl_data); 2456 } 2457 return (ndi_event_run_callbacks(rootnex_event_hdl, rdip, 2458 eventid, impl_data)); 2459 } 2460