/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/note.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/modctl.h>
#include <sys/dacf.h>
#include <sys/promif.h>
#include <sys/cpuvar.h>
#include <sys/pathname.h>
#include <sys/taskq.h>
#include <sys/sysevent.h>
#include <sys/sunmdi.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/fs/snode.h>
#include <sys/fs/dv_node.h>

#ifdef DEBUG
int ddidebug = DDI_AUDIT;
#else
int ddidebug = 0;
#endif

#define	MT_CONFIG_OP	0
#define	MT_UNCONFIG_OP	1

/* Multi-threaded configuration */
struct mt_config_handle {
	kmutex_t mtc_lock;
	kcondvar_t mtc_cv;
	int mtc_thr_count;
	dev_info_t *mtc_pdip;	/* parent dip for mt_config_children */
	dev_info_t **mtc_fdip;	/* "a" dip where unconfigure failed */
	major_t mtc_parmajor;	/* parent major for mt_config_driver */
	major_t mtc_major;
	int mtc_flags;
	int mtc_op;		/* config or unconfig */
	int mtc_error;		/* operation error */
	struct brevq_node **mtc_brevqp;	/* outstanding branch events queue */
#ifdef DEBUG
	int total_time;
	timestruc_t start_time;
#endif	/* DEBUG */
};

struct devi_nodeid {
	dnode_t nodeid;
	dev_info_t *dip;
	struct devi_nodeid *next;
};

struct devi_nodeid_list {
	kmutex_t dno_lock;		/* Protects other fields */
	struct devi_nodeid *dno_head;	/* list of devi nodeid elements */
	struct devi_nodeid *dno_free;	/* Free list */
	uint_t dno_list_length;		/* number of dips in list */
};

/* used to keep track of branch remove events to be generated */
struct brevq_node {
	char *deviname;
	struct brevq_node *sibling;
	struct brevq_node *child;
};

static struct devi_nodeid_list devi_nodeid_list;
static struct devi_nodeid_list *devimap = &devi_nodeid_list;

/*
 * Well known nodes which are attached first at boot time.
 */
dev_info_t *top_devinfo;		/* root of device tree */
dev_info_t *options_dip;
dev_info_t *pseudo_dip;
dev_info_t *clone_dip;
dev_info_t *scsi_vhci_dip;		/* MPXIO dip */
major_t clone_major;

/* block all future dev_info state changes */
static hrtime_t volatile devinfo_freeze = 0;

/* number of dev_info attaches/detaches currently in progress */
static ulong_t devinfo_attach_detach = 0;

extern kmutex_t global_vhci_lock;

/*
 * The devinfo snapshot cache and related variables.
 * The only field in the di_cache structure that needs initialization
 * is the mutex (cache_lock). However, since this is an adaptive mutex
 * (MUTEX_DEFAULT) - it is automatically initialized by being allocated
 * in zeroed memory (static storage class). Therefore no explicit
 * initialization of the di_cache structure is needed.
 */
struct di_cache	di_cache = {1};
int di_cache_debug = 0;

/* For ddvis, which needs pseudo children under PCI */
int pci_allow_pseudo_children = 0;

/*
 * The following switch is for service people, in case a
 * 3rd party driver depends on identify(9e) being called.
 */
int identify_9e = 0;

int mtc_off;					/* turn off mt config */

static kmem_cache_t *ddi_node_cache;		/* devinfo node cache */
static devinfo_log_header_t *devinfo_audit_log;	/* devinfo log */
static int devinfo_log_size;			/* size in pages */

static int lookup_compatible(dev_info_t *, uint_t);
static char *encode_composite_string(char **, uint_t, size_t *, uint_t);
static void link_to_driver_list(dev_info_t *);
static void unlink_from_driver_list(dev_info_t *);
static void add_to_dn_list(struct devnames *, dev_info_t *);
static void remove_from_dn_list(struct devnames *, dev_info_t *);
static dev_info_t *find_child_by_callback(dev_info_t *, char *, char *,
    int (*)(dev_info_t *, char *, int));
static dev_info_t *find_duplicate_child(dev_info_t *, dev_info_t *);
static void add_global_props(dev_info_t *);
static void remove_global_props(dev_info_t *);
static int uninit_node(dev_info_t *);
static void da_log_init(void);
static void da_log_enter(dev_info_t *);
static int walk_devs(dev_info_t *, int (*f)(dev_info_t *, void *), void *, int);
static int reset_nexus_flags(dev_info_t *, void *);
static void ddi_optimize_dtree(dev_info_t *);
static int is_leaf_node(dev_info_t *);
static struct mt_config_handle *mt_config_init(dev_info_t *, dev_info_t **,
    int, major_t, int, struct brevq_node **);
static void mt_config_children(struct mt_config_handle *);
static void mt_config_driver(struct mt_config_handle *);
static int mt_config_fini(struct mt_config_handle *);
static int devi_unconfig_common(dev_info_t *, dev_info_t **, int, major_t,
    struct brevq_node **);
static int
ndi_devi_config_obp_args(dev_info_t *parent, char *devnm,
    dev_info_t **childp, int flags);

/*
 * dev_info cache and node management
 */

/* initialize dev_info node cache */
void
i_ddi_node_cache_init()
{
	ASSERT(ddi_node_cache == NULL);
	ddi_node_cache = kmem_cache_create("dev_info_node_cache",
	    sizeof (struct dev_info), 0, NULL, NULL, NULL, NULL, NULL, 0);

	if (ddidebug & DDI_AUDIT)
		da_log_init();
}

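/*
 * The node cache above is created without constructor or destructor
 * callbacks, so i_ddi_alloc_node() below bzero()s each node it allocates
 * before filling it in.
 */
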
/*
 * Allocate a dev_info node, callable from interrupt context with KM_NOSLEEP.
 * The allocated node has a reference count of 0.
 */
dev_info_t *
i_ddi_alloc_node(dev_info_t *pdip, char *node_name, dnode_t nodeid,
    int instance, ddi_prop_t *sys_prop, int flag)
{
	struct dev_info *devi;
	struct devi_nodeid *elem;
	static char failed[] = "i_ddi_alloc_node: out of memory";

	ASSERT(node_name != NULL);

	if ((devi = kmem_cache_alloc(ddi_node_cache, flag)) == NULL) {
		cmn_err(CE_NOTE, failed);
		return (NULL);
	}

	bzero(devi, sizeof (struct dev_info));

	if (devinfo_audit_log) {
		devi->devi_audit = kmem_zalloc(sizeof (devinfo_audit_t), flag);
		if (devi->devi_audit == NULL)
			goto fail;
	}

	if ((devi->devi_node_name = i_ddi_strdup(node_name, flag)) == NULL)
		goto fail;
	/* default binding name is node name */
	devi->devi_binding_name = devi->devi_node_name;
	devi->devi_major = (major_t)-1;		/* unbound by default */

	/*
	 * Make a copy of system properties
	 */
	if (sys_prop &&
	    (devi->devi_sys_prop_ptr = i_ddi_prop_list_dup(sys_prop, flag))
	    == NULL)
		goto fail;

	/*
	 * Assign devi_nodeid, devi_node_class, devi_node_attributes
	 * according to the following algorithm:
	 *
	 *	nodeid arg		node class	node attributes
	 *
	 *	DEVI_PSEUDO_NODEID	DDI_NC_PSEUDO	A
	 *	DEVI_SID_NODEID		DDI_NC_PSEUDO	A,P
	 *	other			DDI_NC_PROM	P
	 *
	 * Where A = DDI_AUTO_ASSIGNED_NODEID (auto-assign a nodeid)
	 * and	 P = DDI_PERSISTENT
	 *
	 * auto-assigned nodeids are also auto-freed.
	 */
	switch (nodeid) {
	case DEVI_SID_NODEID:
		devi->devi_node_attributes = DDI_PERSISTENT;
		if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL)
			goto fail;
		/*FALLTHROUGH*/
	case DEVI_PSEUDO_NODEID:
		devi->devi_node_attributes |= DDI_AUTO_ASSIGNED_NODEID;
		devi->devi_node_class = DDI_NC_PSEUDO;
		if (impl_ddi_alloc_nodeid(&devi->devi_nodeid)) {
			panic("i_ddi_alloc_node: out of nodeids");
			/*NOTREACHED*/
		}
		break;
	default:
		if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL)
			goto fail;
		/*
		 * the nodetype is 'prom', try to 'take' the nodeid now.
		 * This requires memory allocation, so check for failure.
		 */
		if (impl_ddi_take_nodeid(nodeid, flag) != 0) {
			kmem_free(elem, sizeof (*elem));
			goto fail;
		}

		devi->devi_nodeid = nodeid;
		devi->devi_node_class = DDI_NC_PROM;
		devi->devi_node_attributes = DDI_PERSISTENT;

	}

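	/*
	 * For persistent nodes, the devi_nodeid element allocated above is
	 * parked on devimap->dno_free below. i_ddi_add_devimap() later moves
	 * it onto the active list and i_ddi_free_node() hands one element
	 * back to the allocator, so neither routine needs to allocate
	 * memory while dno_lock is held.
	 */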
	if (ndi_dev_is_persistent_node((dev_info_t *)devi)) {
		mutex_enter(&devimap->dno_lock);
		elem->next = devimap->dno_free;
		devimap->dno_free = elem;
		mutex_exit(&devimap->dno_lock);
	}

	/*
	 * Instance is normally initialized to -1. In a few special
	 * cases, the caller may specify an instance (e.g. CPU nodes).
	 */
	devi->devi_instance = instance;

	/*
	 * set parent and bus_ctl parent
	 */
	devi->devi_parent = DEVI(pdip);
	devi->devi_bus_ctl = DEVI(pdip);

	NDI_CONFIG_DEBUG((CE_CONT,
	    "i_ddi_alloc_node: name=%s id=%d\n", node_name, devi->devi_nodeid));

	cv_init(&(devi->devi_cv), NULL, CV_DEFAULT, NULL);
	mutex_init(&(devi->devi_lock), NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&(devi->devi_pm_lock), NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&(devi->devi_pm_busy_lock), NULL, MUTEX_DEFAULT, NULL);

	i_ddi_set_node_state((dev_info_t *)devi, DS_PROTO);
	da_log_enter((dev_info_t *)devi);
	return ((dev_info_t *)devi);

fail:
	if (devi->devi_sys_prop_ptr)
		i_ddi_prop_list_delete(devi->devi_sys_prop_ptr);
	if (devi->devi_node_name)
		kmem_free(devi->devi_node_name, strlen(node_name) + 1);
	if (devi->devi_audit)
		kmem_free(devi->devi_audit, sizeof (devinfo_audit_t));
	kmem_cache_free(ddi_node_cache, devi);
	cmn_err(CE_NOTE, failed);
	return (NULL);
}

/*
 * free a dev_info structure.
 * NB. Not callable from interrupt since impl_ddi_free_nodeid may block.
 */
void
i_ddi_free_node(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct devi_nodeid *elem;

	ASSERT(devi->devi_ref == 0);
	ASSERT(devi->devi_addr == NULL);
	ASSERT(devi->devi_node_state == DS_PROTO);
	ASSERT(devi->devi_child == NULL);

	if (devi->devi_intr_p)
		i_ddi_intr_devi_fini((dev_info_t *)devi);

	/* free devi_addr_buf allocated by ddi_set_name_addr() */
	if (devi->devi_addr_buf)
		kmem_free(devi->devi_addr_buf, 2 * MAXNAMELEN);

	if (i_ndi_dev_is_auto_assigned_node(dip))
		impl_ddi_free_nodeid(DEVI(dip)->devi_nodeid);

	if (ndi_dev_is_persistent_node(dip)) {
		mutex_enter(&devimap->dno_lock);
		ASSERT(devimap->dno_free);
		elem = devimap->dno_free;
		devimap->dno_free = elem->next;
		mutex_exit(&devimap->dno_lock);
		kmem_free(elem, sizeof (*elem));
	}

	if (DEVI(dip)->devi_compat_names)
		kmem_free(DEVI(dip)->devi_compat_names,
		    DEVI(dip)->devi_compat_length);

	ddi_prop_remove_all(dip);	/* remove driver properties */
	if (devi->devi_sys_prop_ptr)
		i_ddi_prop_list_delete(devi->devi_sys_prop_ptr);
	if (devi->devi_hw_prop_ptr)
		i_ddi_prop_list_delete(devi->devi_hw_prop_ptr);

	i_ddi_set_node_state(dip, DS_INVAL);
	da_log_enter(dip);
	if (devi->devi_audit) {
		kmem_free(devi->devi_audit, sizeof (devinfo_audit_t));
	}
	kmem_free(devi->devi_node_name, strlen(devi->devi_node_name) + 1);
	if (devi->devi_device_class)
		kmem_free(devi->devi_device_class,
		    strlen(devi->devi_device_class) + 1);
	cv_destroy(&(devi->devi_cv));
	mutex_destroy(&(devi->devi_lock));
	mutex_destroy(&(devi->devi_pm_lock));
	mutex_destroy(&(devi->devi_pm_busy_lock));

	kmem_cache_free(ddi_node_cache, devi);
}


/*
 * Node state transitions
 */

/*
 * Change the node name
 */
int
ndi_devi_set_nodename(dev_info_t *dip, char *name, int flags)
{
	_NOTE(ARGUNUSED(flags))
	char *nname, *oname;

	ASSERT(dip && name);

	oname = DEVI(dip)->devi_node_name;
	if (strcmp(oname, name) == 0)
		return (DDI_SUCCESS);

	/*
	 * pcicfg_fix_ethernet requires a name change after node
	 * is linked into the tree. When pcicfg is fixed, we
	 * should only allow name change in DS_PROTO state.
	 */
	if (i_ddi_node_state(dip) >= DS_BOUND) {
		/*
		 * Don't allow name change once node is bound
		 */
		cmn_err(CE_NOTE,
		    "ndi_devi_set_nodename: node already bound dip = %p,"
		    " %s -> %s", (void *)dip, ddi_node_name(dip), name);
		return (NDI_FAILURE);
	}

	nname = i_ddi_strdup(name, KM_SLEEP);
	DEVI(dip)->devi_node_name = nname;
	i_ddi_set_binding_name(dip, nname);
	kmem_free(oname, strlen(oname) + 1);

	da_log_enter(dip);
	return (NDI_SUCCESS);
}

void
i_ddi_add_devimap(dev_info_t *dip)
{
	struct devi_nodeid *elem;

	ASSERT(dip);

	if (!ndi_dev_is_persistent_node(dip))
		return;

	ASSERT(ddi_get_parent(dip) == NULL || (DEVI_VHCI_NODE(dip)) ||
	    DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	mutex_enter(&devimap->dno_lock);

	ASSERT(devimap->dno_free);

	elem = devimap->dno_free;
	devimap->dno_free = elem->next;

	elem->nodeid = ddi_get_nodeid(dip);
	elem->dip = dip;
	elem->next = devimap->dno_head;
	devimap->dno_head = elem;

	devimap->dno_list_length++;

	mutex_exit(&devimap->dno_lock);
}

static int
i_ddi_remove_devimap(dev_info_t *dip)
{
	struct devi_nodeid *prev, *elem;
	static const char *fcn = "i_ddi_remove_devimap";

	ASSERT(dip);

	if (!ndi_dev_is_persistent_node(dip))
		return (DDI_SUCCESS);

	mutex_enter(&devimap->dno_lock);

	/*
	 * The following check is done with dno_lock held
	 * to prevent race between dip removal and
	 * e_ddi_prom_node_to_dip()
	 */
	if (e_ddi_devi_holdcnt(dip)) {
		mutex_exit(&devimap->dno_lock);
		return (DDI_FAILURE);
	}

	ASSERT(devimap->dno_head);
	ASSERT(devimap->dno_list_length > 0);

	prev = NULL;
	for (elem = devimap->dno_head; elem; elem = elem->next) {
		if (elem->dip == dip) {
			ASSERT(elem->nodeid == ddi_get_nodeid(dip));
			break;
		}
		prev = elem;
	}

	if (elem && prev)
		prev->next = elem->next;
	else if (elem)
		devimap->dno_head = elem->next;
	else
		panic("%s: devinfo node(%p) not found",
		    fcn, (void *)dip);

	devimap->dno_list_length--;

	elem->nodeid = 0;
	elem->dip = NULL;

	elem->next = devimap->dno_free;
	devimap->dno_free = elem;

	mutex_exit(&devimap->dno_lock);

	return (DDI_SUCCESS);
}

/*
 * Link this node into the devinfo tree and add to orphan list
 * Not callable from interrupt context
 */
static void
link_node(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct dev_info *parent = devi->devi_parent;
	dev_info_t **dipp;

	ASSERT(parent);	/* never called for root node */

	NDI_CONFIG_DEBUG((CE_CONT, "link_node: parent = %s child = %s\n",
	    parent->devi_node_name, devi->devi_node_name));

	/*
	 * Hold the global_vhci_lock before linking any direct
	 * children of the rootnex driver. This special lock protects
	 * linking and unlinking of rootnex direct children.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_enter(&global_vhci_lock);

	/*
	 * attach the node to end of the list unless the node is already there
	 */
	dipp = (dev_info_t **)(&DEVI(parent)->devi_child);
	while (*dipp && (*dipp != dip)) {
		dipp = (dev_info_t **)(&DEVI(*dipp)->devi_sibling);
	}
	ASSERT(*dipp == NULL);	/* node is not linked */

	/*
	 * Now that we are in the tree, update the devi-nodeid map.
	 */
	i_ddi_add_devimap(dip);

	/*
	 * This is a temporary workaround for Bug 4618861.
	 * We keep the scsi_vhci nexus node on the left side of the devinfo
	 * tree (under the root nexus driver), so that virtual nodes under
	 * scsi_vhci will be SUSPENDed first and RESUMEd last. This ensures
	 * that the pHCI nodes are active during times when their clients
	 * may be depending on them. This workaround embodies the knowledge
	 * that system PM and CPR both traverse the tree left-to-right during
	 * SUSPEND and right-to-left during RESUME.
	 */
	if (strcmp(devi->devi_name, "scsi_vhci") == 0) {
		/* Add scsi_vhci to beginning of list */
		ASSERT((dev_info_t *)parent == top_devinfo);
		/* scsi_vhci under rootnex */
		devi->devi_sibling = parent->devi_child;
		parent->devi_child = devi;
	} else {
		/* Add to end of list */
		*dipp = dip;
		DEVI(dip)->devi_sibling = NULL;
	}

	/*
	 * Release the global_vhci_lock after linking any direct
	 * children of the rootnex driver.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_exit(&global_vhci_lock);

	/* persistent nodes go on orphan list */
	if (ndi_dev_is_persistent_node(dip))
		add_to_dn_list(&orphanlist, dip);
}

/*
 * Unlink this node from the devinfo tree
 */
static int
unlink_node(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct dev_info *parent = devi->devi_parent;
	dev_info_t **dipp;

	ASSERT(parent != NULL);
	ASSERT(devi->devi_node_state == DS_LINKED);

	NDI_CONFIG_DEBUG((CE_CONT, "unlink_node: name = %s\n",
	    ddi_node_name(dip)));

	/* check references */
	if (devi->devi_ref || i_ddi_remove_devimap(dip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Hold the global_vhci_lock before unlinking any direct
	 * children of the rootnex driver.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_enter(&global_vhci_lock);

	dipp = (dev_info_t **)(&DEVI(parent)->devi_child);
	while (*dipp && (*dipp != dip)) {
		dipp = (dev_info_t **)(&DEVI(*dipp)->devi_sibling);
	}
	if (*dipp) {
		*dipp = (dev_info_t *)(devi->devi_sibling);
		devi->devi_sibling = NULL;
	} else {
		NDI_CONFIG_DEBUG((CE_NOTE, "unlink_node: %s not linked",
		    devi->devi_node_name));
	}

	/*
	 * Release the global_vhci_lock after unlinking any direct
	 * children of the rootnex driver.
	 */
	if ((dev_info_t *)parent == ddi_root_node())
		mutex_exit(&global_vhci_lock);

	/* Remove node from orphan list */
	if (ndi_dev_is_persistent_node(dip)) {
		remove_from_dn_list(&orphanlist, dip);
	}

	return (DDI_SUCCESS);
}

/*
 * Bind this devinfo node to a driver. If compat is NON-NULL, try that first.
 * Else, use the node-name.
 *
 * NOTE: IEEE1275 specifies that nodename should be tried before compatible.
 * The Solaris implementation binds nodename after compatible.
 *
 * If we find a binding,
 * - set the binding name to the string,
 * - set major number to driver major
 *
 * If we don't find a binding,
 * - return failure
 */
static int
bind_node(dev_info_t *dip)
{
	char *p = NULL;
	major_t major = (major_t)-1;
	struct dev_info *devi = DEVI(dip);
	dev_info_t *parent = ddi_get_parent(dip);

	ASSERT(devi->devi_node_state == DS_LINKED);

	NDI_CONFIG_DEBUG((CE_CONT, "bind_node: 0x%p(name = %s)\n",
	    (void *)dip, ddi_node_name(dip)));

	mutex_enter(&DEVI(dip)->devi_lock);
	if (DEVI(dip)->devi_flags & DEVI_NO_BIND) {
		mutex_exit(&DEVI(dip)->devi_lock);
		return (DDI_FAILURE);
	}
	mutex_exit(&DEVI(dip)->devi_lock);

	/* find the driver with most specific binding using compatible */
	major = ddi_compatible_driver_major(dip, &p);
	if (major == (major_t)-1)
		return (DDI_FAILURE);

	devi->devi_major = major;
	if (p != NULL) {
		i_ddi_set_binding_name(dip, p);
		NDI_CONFIG_DEBUG((CE_CONT, "bind_node: %s bound to %s\n",
		    devi->devi_node_name, p));
	}

	/* Link node to per-driver list */
	link_to_driver_list(dip);

	/*
	 * reset parent flag so that nexus will merge .conf props
	 */
	if (ndi_dev_is_persistent_node(dip)) {
		mutex_enter(&DEVI(parent)->devi_lock);
		DEVI(parent)->devi_flags &=
		    ~(DEVI_ATTACHED_CHILDREN|DEVI_MADE_CHILDREN);
		mutex_exit(&DEVI(parent)->devi_lock);
	}
	return (DDI_SUCCESS);
}

/*
 * Unbind this devinfo node
 * Called before the node is destroyed or driver is removed from system
 */
static int
unbind_node(dev_info_t *dip)
{
	ASSERT(DEVI(dip)->devi_node_state == DS_BOUND);
	ASSERT(DEVI(dip)->devi_major != (major_t)-1);

	/* check references */
	if (DEVI(dip)->devi_ref)
		return (DDI_FAILURE);

	NDI_CONFIG_DEBUG((CE_CONT, "unbind_node: 0x%p(name = %s)\n",
	    (void *)dip, ddi_node_name(dip)));

	unlink_from_driver_list(dip);
	DEVI(dip)->devi_major = (major_t)-1;
	return (DDI_SUCCESS);
}

/*
 * Initialize a node: calls the parent nexus' bus_ctl ops to do the operation.
 * Must hold parent and per-driver list while calling this function.
 * A successful init_node() returns with an active ndi_hold_devi() hold on
 * the parent.
 */
static int
init_node(dev_info_t *dip)
{
	int error;
	dev_info_t *pdip = ddi_get_parent(dip);
	int (*f)(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *, void *);
	char *path;

	ASSERT(i_ddi_node_state(dip) == DS_BOUND);

	/* should be DS_READY except for pcmcia ... */
	ASSERT(i_ddi_node_state(pdip) >= DS_PROBED);

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	NDI_CONFIG_DEBUG((CE_CONT, "init_node: entry: path %s 0x%p\n",
	    path, (void *)dip));

	/*
	 * The parent must have a bus_ctl operation.
	 */
	if ((DEVI(pdip)->devi_ops->devo_bus_ops == NULL) ||
	    (f = DEVI(pdip)->devi_ops->devo_bus_ops->bus_ctl) == NULL) {
		error = DDI_FAILURE;
		goto out;
	}

	add_global_props(dip);

	/*
	 * Invoke the parent's bus_ctl operation with the DDI_CTLOPS_INITCHILD
	 * command to transform the child to canonical form 1. If there
	 * is an error, ddi_remove_child should be called, to clean up.
	 */
	error = (*f)(pdip, pdip, DDI_CTLOPS_INITCHILD, dip, NULL);
	if (error != DDI_SUCCESS) {
		NDI_CONFIG_DEBUG((CE_CONT, "init_node: %s 0x%p failed\n",
		    path, (void *)dip));
		remove_global_props(dip);
		/* in case nexus driver didn't clear this field */
		ddi_set_name_addr(dip, NULL);
		error = DDI_FAILURE;
		goto out;
	}

	ndi_hold_devi(pdip);

	/* check for duplicate nodes */
	if (find_duplicate_child(pdip, dip) != NULL) {
		/* recompute path after initchild for @addr information */
		(void) ddi_pathname(dip, path);

		/*
		 * uninit_node() the duplicate - a successful uninit_node()
		 * does an ndi_rele_devi
		 */
		if ((error = uninit_node(dip)) != DDI_SUCCESS) {
			ndi_rele_devi(pdip);
			cmn_err(CE_WARN, "init_node: uninit of duplicate "
			    "node %s failed", path);
		}
		NDI_CONFIG_DEBUG((CE_CONT, "init_node: duplicate uninit "
		    "%s 0x%p%s\n", path, (void *)dip,
		    (error == DDI_SUCCESS) ? "" : " failed"));
		error = DDI_FAILURE;
		goto out;
	}

	/*
	 * Apply multi-parent/deep-nexus optimization to the new node
	 */
	DEVI(dip)->devi_instance = e_ddi_assign_instance(dip);
	ddi_optimize_dtree(dip);
	error = DDI_SUCCESS;

out:	kmem_free(path, MAXPATHLEN);
	return (error);
}

/*
 * Uninitialize node
 * The per-driver list must be held busy during the call.
 * A successful uninit_node() releases the init_node() hold on
 * the parent by calling ndi_rele_devi().
 */
static int
uninit_node(dev_info_t *dip)
{
	int node_state_entry;
	dev_info_t *pdip;
	struct dev_ops *ops;
	int (*f)();
	int error;
	char *addr;

	/*
	 * Don't check for references here or else a ref-counted
	 * dip cannot be downgraded by the framework.
	 */
	node_state_entry = i_ddi_node_state(dip);
	ASSERT((node_state_entry == DS_BOUND) ||
	    (node_state_entry == DS_INITIALIZED));
	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	NDI_CONFIG_DEBUG((CE_CONT, "uninit_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	if (((ops = ddi_get_driver(pdip)) == NULL) ||
	    (ops->devo_bus_ops == NULL) ||
	    ((f = ops->devo_bus_ops->bus_ctl) == NULL)) {
		return (DDI_FAILURE);
	}

	/*
	 * save the @addr prior to DDI_CTLOPS_UNINITCHILD for use in
	 * freeing the instance if it succeeds.
	 */
	if (node_state_entry == DS_INITIALIZED) {
		addr = ddi_get_name_addr(dip);
		if (addr)
			addr = i_ddi_strdup(addr, KM_SLEEP);
	} else {
		addr = NULL;
	}

	error = (*f)(pdip, pdip, DDI_CTLOPS_UNINITCHILD, dip, (void *)NULL);
	if (error == DDI_SUCCESS) {
		/* if uninitchild forgot to set devi_addr to NULL do it now */
		ddi_set_name_addr(dip, NULL);

		/*
		 * Free instance number. This is a no-op if instance has
		 * been kept by probe_node(). Avoid free when we are called
		 * from init_node (DS_BOUND) because the instance has not yet
		 * been assigned.
		 */
		if (node_state_entry == DS_INITIALIZED) {
			e_ddi_free_instance(dip, addr);
			DEVI(dip)->devi_instance = -1;
		}

		/* release the init_node hold */
		ndi_rele_devi(pdip);

		remove_global_props(dip);
		e_ddi_prop_remove_all(dip);
	} else {
		NDI_CONFIG_DEBUG((CE_CONT, "uninit_node failed: 0x%p(%s%d)\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
	}

	if (addr)
		kmem_free(addr, strlen(addr) + 1);
	return (error);
}

/*
 * Invoke driver's probe entry point to probe for existence of hardware.
 * Keep instance permanent for successful probe and leaf nodes.
 *
 * Per-driver list must be held busy while calling this function.
 */
static int
probe_node(dev_info_t *dip)
{
	int rv;

	ASSERT(i_ddi_node_state(dip) == DS_INITIALIZED);

	NDI_CONFIG_DEBUG((CE_CONT, "probe_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	/* temporarily hold the driver while we probe */
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL) {
		NDI_CONFIG_DEBUG((CE_CONT,
		    "probe_node: 0x%p(%s%d) cannot load driver\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
		return (DDI_FAILURE);
	}

	if (identify_9e != 0)
		(void) devi_identify(dip);

	rv = devi_probe(dip);

	/* release the driver now that probe is complete */
	ndi_rele_driver(dip);
	DEVI(dip)->devi_ops = NULL;

	switch (rv) {
	case DDI_PROBE_SUCCESS:			/* found */
	case DDI_PROBE_DONTCARE:		/* ddi_dev_is_sid */
		e_ddi_keep_instance(dip);	/* persist instance */
		rv = DDI_SUCCESS;
		break;

	case DDI_PROBE_PARTIAL:			/* maybe later */
	case DDI_PROBE_FAILURE:			/* not found */
		NDI_CONFIG_DEBUG((CE_CONT,
		    "probe_node: 0x%p(%s%d) no hardware found%s\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip),
		    (rv == DDI_PROBE_PARTIAL) ? " yet" : ""));
		rv = DDI_FAILURE;
		break;

	default:
#ifdef	DEBUG
		cmn_err(CE_WARN, "probe_node: %s%d: illegal probe(9E) value",
		    ddi_driver_name(dip), ddi_get_instance(dip));
#endif	/* DEBUG */
		rv = DDI_FAILURE;
		break;
	}
	return (rv);
}

/*
 * Unprobe a node. Simply reset the node state.
 * Per-driver list must be held busy while calling this function.
 */
static int
unprobe_node(dev_info_t *dip)
{
	ASSERT(i_ddi_node_state(dip) == DS_PROBED);

	/*
	 * Don't check for references here or else a ref-counted
	 * dip cannot be downgraded by the framework.
	 */

	NDI_CONFIG_DEBUG((CE_CONT, "unprobe_node: 0x%p(name = %s)\n",
	    (void *)dip, ddi_node_name(dip)));
	return (DDI_SUCCESS);
}

/*
 * Attach devinfo node.
 * Per-driver list must be held busy.
 */
static int
attach_node(dev_info_t *dip)
{
	int rv;

	ASSERT(i_ddi_node_state(dip) == DS_PROBED);

	NDI_CONFIG_DEBUG((CE_CONT, "attach_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	/*
	 * Tell the mpxio framework that the node is about to come online.
	 */
	if ((rv = mdi_devi_online(dip, 0)) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* no recursive attachment */
	ASSERT(DEVI(dip)->devi_ops == NULL);

	/*
	 * Hold the driver the node is bound to.
	 */
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL) {
		/*
		 * We were able to load driver for probing, so we should
		 * not get here unless something really bad happened.
		 */
		cmn_err(CE_WARN, "attach_node: no driver for major %d",
		    DEVI(dip)->devi_major);
		return (DDI_FAILURE);
	}

	if (NEXUS_DRV(DEVI(dip)->devi_ops))
		DEVI(dip)->devi_taskq = ddi_taskq_create(dip,
		    "nexus_enum_tq", 1,
		    TASKQ_DEFAULTPRI, 0);

	DEVI_SET_ATTACHING(dip);
	DEVI_SET_NEED_RESET(dip);
	rv = devi_attach(dip, DDI_ATTACH);
	if (rv != DDI_SUCCESS)
		DEVI_CLR_NEED_RESET(dip);
	DEVI_CLR_ATTACHING(dip);

	if (rv != DDI_SUCCESS) {
		/* ensure that devids are unregistered */
		mutex_enter(&DEVI(dip)->devi_lock);
		if (DEVI(dip)->devi_flags & DEVI_REGISTERED_DEVID) {
			DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
			mutex_exit(&DEVI(dip)->devi_lock);

			e_devid_cache_unregister(dip);
		} else
			mutex_exit(&DEVI(dip)->devi_lock);

		/*
		 * Cleanup dacf reservations
		 */
		mutex_enter(&dacf_lock);
		dacf_clr_rsrvs(dip, DACF_OPID_POSTATTACH);
		dacf_clr_rsrvs(dip, DACF_OPID_PREDETACH);
		mutex_exit(&dacf_lock);
		if (DEVI(dip)->devi_taskq)
			ddi_taskq_destroy(DEVI(dip)->devi_taskq);
		ddi_remove_minor_node(dip, NULL);

		/* release the driver if attach failed */
		ndi_rele_driver(dip);
		DEVI(dip)->devi_ops = NULL;
		NDI_CONFIG_DEBUG((CE_CONT, "attach_node: 0x%p(%s%d) failed\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
		return (DDI_FAILURE);
	}

	/* successful attach, return with driver held */
	return (DDI_SUCCESS);
}

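/*
 * attach_node() and detach_node() are symmetric with respect to the driver
 * hold: a successful attach_node() returns with devi_ops held via
 * ndi_hold_driver(), and a successful detach_node() drops that hold with
 * ndi_rele_driver() and clears devi_ops.
 */
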
/*
 * Detach devinfo node.
 * Per-driver list must be held busy.
 */
static int
detach_node(dev_info_t *dip, uint_t flag)
{
	struct devnames *dnp;
	int rv;

	ASSERT(i_ddi_node_state(dip) == DS_ATTACHED);

	/* check references */
	if (DEVI(dip)->devi_ref)
		return (DDI_FAILURE);

	NDI_CONFIG_DEBUG((CE_CONT, "detach_node: 0x%p(%s%d)\n",
	    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));

	/* Offline the device node with the mpxio framework. */
	if (mdi_devi_offline(dip, flag) != NDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* drain the taskq */
	if (DEVI(dip)->devi_taskq)
		ddi_taskq_wait(DEVI(dip)->devi_taskq);

	rv = devi_detach(dip, DDI_DETACH);
	if (rv == DDI_SUCCESS)
		DEVI_CLR_NEED_RESET(dip);

	if (rv != DDI_SUCCESS) {
		NDI_CONFIG_DEBUG((CE_CONT,
		    "detach_node: 0x%p(%s%d) failed\n",
		    (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip)));
		return (DDI_FAILURE);
	}

	/* destroy the taskq */
	if (DEVI(dip)->devi_taskq) {
		ddi_taskq_destroy(DEVI(dip)->devi_taskq);
		DEVI(dip)->devi_taskq = NULL;
	}

	/* Cleanup dacf reservations */
	mutex_enter(&dacf_lock);
	dacf_clr_rsrvs(dip, DACF_OPID_POSTATTACH);
	dacf_clr_rsrvs(dip, DACF_OPID_PREDETACH);
	mutex_exit(&dacf_lock);

	/* Remove properties and minor nodes in case the driver forgot */
	ddi_remove_minor_node(dip, NULL);
	ddi_prop_remove_all(dip);

	/* a detached node can't have attached or .conf children */
	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_flags &= ~(DEVI_MADE_CHILDREN|DEVI_ATTACHED_CHILDREN);

	/* ensure that devids registered during attach are unregistered */
	if (DEVI(dip)->devi_flags & DEVI_REGISTERED_DEVID) {
		DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID;
		mutex_exit(&DEVI(dip)->devi_lock);

		e_devid_cache_unregister(dip);
	} else
		mutex_exit(&DEVI(dip)->devi_lock);

	/*
	 * If the instance has successfully detached in detach_driver()
	 * context, clear DN_DRIVER_HELD for correct
	 * ddi_hold_installed_driver() behavior. Consumers like
	 * qassociate() depend on this (via clnopen()).
	 */
	if (flag & NDI_DETACH_DRIVER) {
		dnp = &(devnamesp[DEVI(dip)->devi_major]);
		LOCK_DEV_OPS(&dnp->dn_lock);
		dnp->dn_flags &= ~DN_DRIVER_HELD;
		UNLOCK_DEV_OPS(&dnp->dn_lock);
	}

	/* successful detach, release the driver */
	ndi_rele_driver(dip);
	DEVI(dip)->devi_ops = NULL;
	return (DDI_SUCCESS);
}

/*
 * Run dacf post_attach routines
 */
static int
postattach_node(dev_info_t *dip)
{
	int rval;

	/*
	 * For hotplug buses like USB, it's possible that devices
	 * are removed but the dip is still around. We don't want to
	 * run dacf routines as part of detach failure recovery.
	 *
	 * Pretend success until we figure out how to prevent
	 * access to such devinfo nodes.
	 */
	if (DEVI_IS_DEVICE_REMOVED(dip))
		return (DDI_SUCCESS);

	/*
	 * if dacf_postattach failed, report it to the framework
	 * so that it can be retried later at the open time.
	 */
	mutex_enter(&dacf_lock);
	rval = dacfc_postattach(dip);
	mutex_exit(&dacf_lock);

	/*
	 * Plumbing during postattach may fail because the
	 * underlying device is not ready. This will fail ndi_devi_config()
	 * in dv_filldir() and a warning message is issued. The message
	 * from here will explain what happened.
	 */
	if (rval != DACF_SUCCESS) {
		cmn_err(CE_WARN, "Postattach failed for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Run dacf pre-detach routines
 */
static int
predetach_node(dev_info_t *dip, uint_t flag)
{
	int ret;

	/*
	 * Don't auto-detach if DDI_FORCEATTACH or DDI_NO_AUTODETACH
	 * properties are set.
	 */
	if (flag & NDI_AUTODETACH) {
		struct devnames *dnp;
		int pflag = DDI_PROP_NOTPROM | DDI_PROP_DONTPASS;

		if ((ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    pflag, DDI_FORCEATTACH, 0) == 1) ||
		    (ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    pflag, DDI_NO_AUTODETACH, 0) == 1))
			return (DDI_FAILURE);

		/* check for driver global version of DDI_NO_AUTODETACH */
		dnp = &devnamesp[DEVI(dip)->devi_major];
		LOCK_DEV_OPS(&dnp->dn_lock);
		if (dnp->dn_flags & DN_NO_AUTODETACH) {
			UNLOCK_DEV_OPS(&dnp->dn_lock);
			return (DDI_FAILURE);
		}
		UNLOCK_DEV_OPS(&dnp->dn_lock);
	}

	mutex_enter(&dacf_lock);
	ret = dacfc_predetach(dip);
	mutex_exit(&dacf_lock);

	return (ret);
}

/*
 * Wrappers for making multiple state transitions
 */

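/*
 * Summary of the node state ladder driven by i_ndi_config_node() and
 * i_ndi_unconfig_node() below:
 *
 *	DS_PROTO -> DS_LINKED		link_node()	/ unlink_node()
 *	DS_LINKED -> DS_BOUND		bind_node()	/ unbind_node()
 *	DS_BOUND -> DS_INITIALIZED	init_node()	/ uninit_node()
 *	DS_INITIALIZED -> DS_PROBED	probe_node()	/ unprobe_node()
 *	DS_PROBED -> DS_ATTACHED	attach_node()	/ detach_node()
 *	DS_ATTACHED -> DS_READY		postattach_node() / predetach_node()
 */
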
/*
 * i_ndi_config_node: upgrade dev_info node into a specified state.
 * It is a bit tricky because the locking protocol changes before and
 * after a node is bound to a driver. All locks are held external to
 * this function.
 */
int
i_ndi_config_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
{
	_NOTE(ARGUNUSED(flag))
	int rv = DDI_SUCCESS;

	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	while ((i_ddi_node_state(dip) < state) && (rv == DDI_SUCCESS)) {

		/* don't allow any more changes to the device tree */
		if (devinfo_freeze) {
			rv = DDI_FAILURE;
			break;
		}

		switch (i_ddi_node_state(dip)) {
		case DS_PROTO:
			/*
			 * only caller can reference this node, no external
			 * locking needed.
			 */
			link_node(dip);
			i_ddi_set_node_state(dip, DS_LINKED);
			break;
		case DS_LINKED:
			/*
			 * Three code paths may attempt to bind a node:
			 * - boot code
			 * - add_drv
			 * - hotplug thread
			 * Boot code is single-threaded, add_drv synchronizes
			 * on a userland lock, and hotplug synchronizes on
			 * hotplug_lk. There could be a race between add_drv
			 * and the hotplug thread. We'll live with this until
			 * the conversion to top-down loading.
			 */
			if ((rv = bind_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_BOUND);
			break;
		case DS_BOUND:
			/*
			 * The following transition synchronizes on the
			 * per-driver busy changing flag, since we already
			 * have a driver.
			 */
			if ((rv = init_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_INITIALIZED);
			break;
		case DS_INITIALIZED:
			if ((rv = probe_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_PROBED);
			break;
		case DS_PROBED:
			atomic_add_long(&devinfo_attach_detach, 1);
			if ((rv = attach_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_ATTACHED);
			atomic_add_long(&devinfo_attach_detach, -1);
			break;
		case DS_ATTACHED:
			if ((rv = postattach_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_READY);
			break;
		case DS_READY:
			break;
		default:
			/* should never reach here */
			ASSERT("unknown devinfo state");
		}
	}

	if (ddidebug & DDI_AUDIT)
		da_log_enter(dip);
	return (rv);
}

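/*
 * i_ndi_unconfig_node() below walks the same ladder in the opposite
 * direction. Note that the DS_READY -> DS_ATTACHED step only runs the dacf
 * pre-detach routines (and, for auto-detach requests, honors
 * DDI_FORCEATTACH/DDI_NO_AUTODETACH); the actual detach(9E) happens in the
 * DS_ATTACHED -> DS_PROBED step via detach_node().
 */
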
/*
 * i_ndi_unconfig_node: downgrade dev_info node into a specified state.
 */
int
i_ndi_unconfig_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag)
{
	int rv = DDI_SUCCESS;

	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	while ((i_ddi_node_state(dip) > state) && (rv == DDI_SUCCESS)) {

		/* don't allow any more changes to the device tree */
		if (devinfo_freeze) {
			rv = DDI_FAILURE;
			break;
		}

		switch (i_ddi_node_state(dip)) {
		case DS_PROTO:
			break;
		case DS_LINKED:
			/*
			 * Persistent nodes are only removed by hotplug code;
			 * .conf nodes synchronize on the per-driver list.
			 */
			if ((rv = unlink_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_PROTO);
			break;
		case DS_BOUND:
			/*
			 * The following transition synchronizes on the
			 * per-driver busy changing flag, since we already
			 * have a driver.
			 */
			if ((rv = unbind_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_LINKED);
			break;
		case DS_INITIALIZED:
			if ((rv = uninit_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_BOUND);
			break;
		case DS_PROBED:
			if ((rv = unprobe_node(dip)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_INITIALIZED);
			break;
		case DS_ATTACHED:
			atomic_add_long(&devinfo_attach_detach, 1);
			DEVI_SET_DETACHING(dip);
			membar_enter();	/* ensure visibility for hold_devi */

			if ((rv = detach_node(dip, flag)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_PROBED);
			DEVI_CLR_DETACHING(dip);
			atomic_add_long(&devinfo_attach_detach, -1);
			break;
		case DS_READY:
			if ((rv = predetach_node(dip, flag)) == DDI_SUCCESS)
				i_ddi_set_node_state(dip, DS_ATTACHED);
			break;
		default:
			ASSERT("unknown devinfo state");
		}
	}
	da_log_enter(dip);
	return (rv);
}

/*
 * ddi_initchild: transform node to DS_INITIALIZED state
 */
int
ddi_initchild(dev_info_t *parent, dev_info_t *proto)
{
	int ret, circ;

	ndi_devi_enter(parent, &circ);
	ret = i_ndi_config_node(proto, DS_INITIALIZED, 0);
	ndi_devi_exit(parent, circ);

	return (ret);
}

/*
 * ddi_uninitchild: transform node down to DS_BOUND state
 */
int
ddi_uninitchild(dev_info_t *dip)
{
	int ret, circ;
	dev_info_t *parent = ddi_get_parent(dip);
	ASSERT(parent);

	ndi_devi_enter(parent, &circ);
	ret = i_ndi_unconfig_node(dip, DS_BOUND, 0);
	ndi_devi_exit(parent, circ);

	return (ret);
}

/*
 * i_ddi_attachchild: transform node to DS_READY state
 */
static int
i_ddi_attachchild(dev_info_t *dip)
{
	int ret, circ;
	dev_info_t *parent = ddi_get_parent(dip);
	ASSERT(parent);

	if ((i_ddi_node_state(dip) < DS_BOUND) || DEVI_IS_DEVICE_OFFLINE(dip))
		return (DDI_FAILURE);

	ndi_devi_enter(parent, &circ);
	ret = i_ndi_config_node(dip, DS_READY, 0);
	if (ret == NDI_SUCCESS) {
		ret = DDI_SUCCESS;
	} else {
		/*
		 * Take it down to DS_INITIALIZED so pm_pre_probe is run
		 * on the next attach
		 */
		(void) i_ndi_unconfig_node(dip, DS_INITIALIZED, 0);
		ret = DDI_FAILURE;
	}
	ndi_devi_exit(parent, circ);

	return (ret);
}

/*
 * i_ddi_detachchild: transform node down to DS_PROBED state
 * If it fails, put it back to DS_READY state.
 * NOTE: A node that fails detach may be at DS_ATTACHED instead
 * of DS_READY for a small amount of time.
 */
static int
i_ddi_detachchild(dev_info_t *dip, uint_t flags)
{
	int ret, circ;
	dev_info_t *parent = ddi_get_parent(dip);
	ASSERT(parent);

	ndi_devi_enter(parent, &circ);
	ret = i_ndi_unconfig_node(dip, DS_PROBED, flags);
	if (ret != DDI_SUCCESS)
		(void) i_ndi_config_node(dip, DS_READY, 0);
	else
		/* allow pm_pre_probe to reestablish pm state */
		(void) i_ndi_unconfig_node(dip, DS_INITIALIZED, 0);
	ndi_devi_exit(parent, circ);

	return (ret);
}

/*
 * Add a child and bind to driver
 */
dev_info_t *
ddi_add_child(dev_info_t *pdip, char *name, uint_t nodeid, uint_t unit)
{
	int circ;
	dev_info_t *dip;

	/* allocate a new node */
	dip = i_ddi_alloc_node(pdip, name, nodeid, (int)unit, NULL, KM_SLEEP);

	ndi_devi_enter(pdip, &circ);
	(void) i_ndi_config_node(dip, DS_BOUND, 0);
	ndi_devi_exit(pdip, circ);
	return (dip);
}

/*
 * ddi_remove_child: remove the dip. The parent must be attached and held
 */
int
ddi_remove_child(dev_info_t *dip, int dummy)
{
	_NOTE(ARGUNUSED(dummy))
	int circ, ret;
	dev_info_t *parent = ddi_get_parent(dip);
	ASSERT(parent);

	ndi_devi_enter(parent, &circ);

	/*
	 * If we still have children, for example SID nodes marked
	 * as persistent but not attached, attempt to remove them.
	 */
	if (DEVI(dip)->devi_child) {
		ret = ndi_devi_unconfig(dip, NDI_DEVI_REMOVE);
		if (ret != NDI_SUCCESS) {
			ndi_devi_exit(parent, circ);
			return (DDI_FAILURE);
		}
		ASSERT(DEVI(dip)->devi_child == NULL);
	}

	ret = i_ndi_unconfig_node(dip, DS_PROTO, 0);
	ndi_devi_exit(parent, circ);

	if (ret != DDI_SUCCESS)
		return (ret);

	ASSERT(i_ddi_node_state(dip) == DS_PROTO);
	i_ddi_free_node(dip);
	return (DDI_SUCCESS);
}

/*
 * NDI wrappers for ref counting, node allocation, and transitions
 */

/*
 * Hold/release the devinfo node itself.
 * Caller is assumed to prevent the devi from detaching during this call
 */
void
ndi_hold_devi(dev_info_t *dip)
{
	mutex_enter(&DEVI(dip)->devi_lock);
	ASSERT(DEVI(dip)->devi_ref >= 0);
	DEVI(dip)->devi_ref++;
	membar_enter();			/* make sure stores are flushed */
	mutex_exit(&DEVI(dip)->devi_lock);
}

void
ndi_rele_devi(dev_info_t *dip)
{
	ASSERT(DEVI(dip)->devi_ref > 0);

	mutex_enter(&DEVI(dip)->devi_lock);
	DEVI(dip)->devi_ref--;
	membar_enter();			/* make sure stores are flushed */
	mutex_exit(&DEVI(dip)->devi_lock);
}

int
e_ddi_devi_holdcnt(dev_info_t *dip)
{
	return (DEVI(dip)->devi_ref);
}

/*
 * Hold/release the driver the devinfo node is bound to.
 */
struct dev_ops *
ndi_hold_driver(dev_info_t *dip)
{
	if (i_ddi_node_state(dip) < DS_BOUND)
		return (NULL);

	ASSERT(DEVI(dip)->devi_major != -1);
	return (mod_hold_dev_by_major(DEVI(dip)->devi_major));
}

void
ndi_rele_driver(dev_info_t *dip)
{
	ASSERT(i_ddi_node_state(dip) >= DS_BOUND);
	mod_rele_dev_by_major(DEVI(dip)->devi_major);
}

/*
 * Single thread entry into devinfo node for modifying its children.
 * To verify in ASSERTS use DEVI_BUSY_OWNED macro.
 */
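/*
 * Typical caller pattern (illustrative only):
 *
 *	int circ;
 *
 *	ndi_devi_enter(pdip, &circ);
 *	... examine or modify children of pdip ...
 *	ndi_devi_exit(pdip, circ);
 *
 * Re-entry by the owning thread is allowed; the circular count returned
 * through 'circ' must be passed back to the matching ndi_devi_exit().
 */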
void
ndi_devi_enter(dev_info_t *dip, int *circular)
{
	struct dev_info *devi = DEVI(dip);
	ASSERT(dip != NULL);

	mutex_enter(&devi->devi_lock);
	if (devi->devi_busy_thread == curthread) {
		devi->devi_circular++;
	} else {
		while (DEVI_BUSY_CHANGING(devi) && !panicstr)
			cv_wait(&(devi->devi_cv), &(devi->devi_lock));
		if (panicstr) {
			mutex_exit(&devi->devi_lock);
			return;
		}
		devi->devi_flags |= DEVI_BUSY;
		devi->devi_busy_thread = curthread;
	}
	*circular = devi->devi_circular;
	mutex_exit(&devi->devi_lock);
}

/*
 * Release ndi_devi_enter or successful ndi_devi_tryenter.
 */
void
ndi_devi_exit(dev_info_t *dip, int circular)
{
	struct dev_info *devi = DEVI(dip);
	ASSERT(dip != NULL);

	if (panicstr)
		return;

	mutex_enter(&(devi->devi_lock));
	if (circular != 0) {
		devi->devi_circular--;
	} else {
		devi->devi_flags &= ~DEVI_BUSY;
		ASSERT(devi->devi_busy_thread == curthread);
		devi->devi_busy_thread = NULL;
		cv_broadcast(&(devi->devi_cv));
	}
	mutex_exit(&(devi->devi_lock));
}

/*
 * Attempt to single thread entry into devinfo node for modifying its children.
 */
int
ndi_devi_tryenter(dev_info_t *dip, int *circular)
{
	int rval = 1;		/* assume we enter */
	struct dev_info *devi = DEVI(dip);
	ASSERT(dip != NULL);

	mutex_enter(&devi->devi_lock);
	if (devi->devi_busy_thread == (void *)curthread) {
		devi->devi_circular++;
	} else {
		if (!DEVI_BUSY_CHANGING(devi)) {
			devi->devi_flags |= DEVI_BUSY;
			devi->devi_busy_thread = (void *)curthread;
		} else {
			rval = 0;	/* devi is busy */
		}
	}
	*circular = devi->devi_circular;
	mutex_exit(&devi->devi_lock);
	return (rval);
}

/*
 * Allocate and initialize a new dev_info structure.
 *
 * This routine may be called at interrupt time by a nexus in
 * response to a hotplug event, therefore memory allocations are
 * not allowed to sleep.
 */
int
ndi_devi_alloc(dev_info_t *parent, char *node_name, dnode_t nodeid,
    dev_info_t **ret_dip)
{
	ASSERT(node_name != NULL);
	ASSERT(ret_dip != NULL);

	*ret_dip = i_ddi_alloc_node(parent, node_name, nodeid, -1, NULL,
	    KM_NOSLEEP);
	if (*ret_dip == NULL) {
		return (NDI_NOMEM);
	}

	return (NDI_SUCCESS);
}

/*
 * Allocate and initialize a new dev_info structure.
 * This routine may sleep and should not be called at interrupt time.
 */
void
ndi_devi_alloc_sleep(dev_info_t *parent, char *node_name, dnode_t nodeid,
    dev_info_t **ret_dip)
{
	ASSERT(node_name != NULL);
	ASSERT(ret_dip != NULL);

	*ret_dip = i_ddi_alloc_node(parent, node_name, nodeid, -1, NULL,
	    KM_SLEEP);
	ASSERT(*ret_dip);
}

/*
 * Remove an initialized (but not yet attached) dev_info
 * node from its parent.
 */
int
ndi_devi_free(dev_info_t *dip)
{
	ASSERT(dip != NULL);

	if (i_ddi_node_state(dip) >= DS_INITIALIZED)
		return (DDI_FAILURE);

	NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_free: %s%d (%p)\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip));

	(void) ddi_remove_child(dip, 0);

	return (NDI_SUCCESS);
}

/*
 * ndi_devi_bind_driver() binds a driver to a given device. If it fails
 * to bind the driver, it returns an appropriate error. Some drivers
 * may want to know if they actually failed to bind.
 */
int
ndi_devi_bind_driver(dev_info_t *dip, uint_t flags)
{
	int ret = NDI_FAILURE;
	int circ;
	dev_info_t *pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	NDI_CONFIG_DEBUG((CE_CONT,
	    "ndi_devi_bind_driver: %s%d (%p) flags: %x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags));

	ndi_devi_enter(pdip, &circ);
	if (i_ndi_config_node(dip, DS_BOUND, flags) == DDI_SUCCESS)
		ret = NDI_SUCCESS;
	ndi_devi_exit(pdip, circ);

	return (ret);
}

/*
 * ndi_devi_unbind_driver: unbind the dip
 */
static int
ndi_devi_unbind_driver(dev_info_t *dip)
{
	ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip)));

	return (i_ndi_unconfig_node(dip, DS_LINKED, 0));
}

/*
 * Misc. help routines called by framework only
 */

/*
 * Get the state of node
 */
ddi_node_state_t
i_ddi_node_state(dev_info_t *dip)
{
	return (DEVI(dip)->devi_node_state);
}

/*
 * Set the state of node
 */
void
i_ddi_set_node_state(dev_info_t *dip, ddi_node_state_t state)
{
	DEVI(dip)->devi_node_state = state;
	membar_enter();			/* make sure stores are flushed */
}

/*
 * Common function for finding a node in a sibling list given name and addr.
 *
 * By default, name is matched with devi_node_name. The following
 * alternative match strategies are supported:
 *
 * FIND_NAME_BY_DRIVER: A match on driver name bound to node is conducted.
 *	This support is used for support of OBP generic names and
 *	for the conversion from driver names to generic names. When
 *	more consistency in the generic name environment is achieved
 *	(and not needed for upgrade) this support can be removed.
 *
 * If a child is not named (dev_addr == NULL), there are three
 * possible actions:
 *
 *	(1) skip it
 *	(2) FIND_ADDR_BY_INIT: bring child to DS_INITIALIZED state
 *	(3) FIND_ADDR_BY_CALLBACK: use a caller-supplied callback function
 */
#define	FIND_NAME_BY_DRIVER	0x01
#define	FIND_ADDR_BY_INIT	0x10
#define	FIND_ADDR_BY_CALLBACK	0x20

static dev_info_t *
find_sibling(dev_info_t *head, char *cname, char *caddr, uint_t flag,
    int (*callback)(dev_info_t *, char *, int))
{
	dev_info_t *dip;
	char *addr, *buf;
	major_t major;

	/* only one way to name a node */
	ASSERT(((flag & FIND_ADDR_BY_INIT) == 0) ||
	    ((flag & FIND_ADDR_BY_CALLBACK) == 0));

	if (flag & FIND_NAME_BY_DRIVER) {
		major = ddi_name_to_major(cname);
		if (major == (major_t)-1)
			return (NULL);
	}

	/* preallocate buffer for naming the node via callback */
	if (flag & FIND_ADDR_BY_CALLBACK)
		buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);

	/*
	 * Walk the child list to find a match
	 */

	for (dip = head; dip; dip = ddi_get_next_sibling(dip)) {
		if (flag & FIND_NAME_BY_DRIVER) {
			/* match driver major */
			if (DEVI(dip)->devi_major != major)
				continue;
		} else {
			/* match node name */
			if (strcmp(cname, DEVI(dip)->devi_node_name) != 0)
				continue;
		}

		if ((addr = DEVI(dip)->devi_addr) == NULL) {
			/* name the child based on the flag */
			if (flag & FIND_ADDR_BY_INIT) {
				if (ddi_initchild(ddi_get_parent(dip), dip)
				    != DDI_SUCCESS)
					continue;
				addr = DEVI(dip)->devi_addr;
			} else if (flag & FIND_ADDR_BY_CALLBACK) {
				if ((callback == NULL) || (callback(
				    dip, buf, MAXNAMELEN) != DDI_SUCCESS))
					continue;
				addr = buf;
			} else {
				continue;	/* skip */
			}
		}

		/* match addr */
		ASSERT(addr != NULL);
		if (strcmp(caddr, addr) == 0)
			break;	/* node found */

	}
	if (flag & FIND_ADDR_BY_CALLBACK)
		kmem_free(buf, MAXNAMELEN);
	return (dip);
}

/*
 * Find child of pdip with name: cname@caddr
 * Called by init_node() to look for duplicate nodes
 */
static dev_info_t *
find_duplicate_child(dev_info_t *pdip, dev_info_t *dip)
{
	dev_info_t *dup;
	char *cname = DEVI(dip)->devi_node_name;
	char *caddr = DEVI(dip)->devi_addr;

	/* search nodes before dip */
	dup = find_sibling(ddi_get_child(pdip), cname, caddr, 0, NULL);
	if (dup != dip)
		return (dup);

	/*
	 * search nodes after dip; normally this is not needed,
	 */
	return (find_sibling(ddi_get_next_sibling(dip), cname, caddr,
	    0, NULL));
}

/*
 * Find a child of a given name and address, using a callback to name
 * unnamed children. cname is the binding name.
 */
static dev_info_t *
find_child_by_callback(dev_info_t *pdip, char *cname, char *caddr,
    int (*name_node)(dev_info_t *, char *, int))
{
	return (find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NAME_BY_DRIVER|FIND_ADDR_BY_CALLBACK, name_node));
}

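/*
 * Flag usage by the find_sibling() wrappers in this file:
 *
 *	find_duplicate_child()		0 (node name, skip unnamed children)
 *	find_child_by_callback()	FIND_NAME_BY_DRIVER|FIND_ADDR_BY_CALLBACK
 *	find_child_by_name()		0, then FIND_ADDR_BY_INIT
 *	find_child_by_driver()		FIND_NAME_BY_DRIVER, then
 *					FIND_NAME_BY_DRIVER|FIND_ADDR_BY_INIT
 */
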
/*
 * Find a child of a given name and address, invoking initchild to name
 * unnamed children. cname is the node name.
 */
static dev_info_t *
find_child_by_name(dev_info_t *pdip, char *cname, char *caddr)
{
	dev_info_t *dip;

	/* attempt search without changing state of preceding siblings */
	dip = find_sibling(ddi_get_child(pdip), cname, caddr, 0, NULL);
	if (dip)
		return (dip);

	return (find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_ADDR_BY_INIT, NULL));
}

/*
 * Find a child of a given name and address, invoking initchild to name
 * unnamed children. cname is the driver (binding) name.
 */
static dev_info_t *
find_child_by_driver(dev_info_t *pdip, char *cname, char *caddr)
{
	dev_info_t *dip;

	/* attempt search without changing state of preceding siblings */
	dip = find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NAME_BY_DRIVER, NULL);
	if (dip)
		return (dip);

	return (find_sibling(ddi_get_child(pdip), cname, caddr,
	    FIND_NAME_BY_DRIVER|FIND_ADDR_BY_INIT, NULL));
}

/*
 * Deleting a property list. Take care, since some property structures
 * may not be fully built.
 */
void
i_ddi_prop_list_delete(ddi_prop_t *prop)
{
	while (prop) {
		ddi_prop_t *next = prop->prop_next;
		if (prop->prop_name)
			kmem_free(prop->prop_name, strlen(prop->prop_name) + 1);
		if ((prop->prop_len != 0) && prop->prop_val)
			kmem_free(prop->prop_val, prop->prop_len);
		kmem_free(prop, sizeof (struct ddi_prop));
		prop = next;
	}
}

/*
 * Duplicate property list
 */
ddi_prop_t *
i_ddi_prop_list_dup(ddi_prop_t *prop, uint_t flag)
{
	ddi_prop_t *result, *prev, *copy;

	if (prop == NULL)
		return (NULL);

	result = prev = NULL;
	for (; prop != NULL; prop = prop->prop_next) {
		ASSERT(prop->prop_name != NULL);
		copy = kmem_zalloc(sizeof (struct ddi_prop), flag);
		if (copy == NULL)
			goto fail;

		copy->prop_dev = prop->prop_dev;
		copy->prop_flags = prop->prop_flags;
		copy->prop_name = i_ddi_strdup(prop->prop_name, flag);
		if (copy->prop_name == NULL)
			goto fail;

		if ((copy->prop_len = prop->prop_len) != 0) {
			copy->prop_val = kmem_zalloc(prop->prop_len, flag);
			if (copy->prop_val == NULL)
				goto fail;

			bcopy(prop->prop_val, copy->prop_val, prop->prop_len);
		}

		if (prev == NULL)
			result = prev = copy;
		else
			prev->prop_next = copy;
		prev = copy;
	}
	return (result);

fail:
	i_ddi_prop_list_delete(result);
	return (NULL);
}

/*
 * Create a reference property list, currently used only for
 * driver global properties. Created with ref count of 1.
 */
ddi_prop_list_t *
i_ddi_prop_list_create(ddi_prop_t *props)
{
	ddi_prop_list_t *list = kmem_alloc(sizeof (*list), KM_SLEEP);
	list->prop_list = props;
	list->prop_ref = 1;
	return (list);
}

/*
 * Increment/decrement reference count. The reference is
 * protected by dn_lock. The only interfaces modifying
 * dn_global_prop_ptr are impl_make_parlist() and impl_free_parlist().
2005 */ 2006 void 2007 i_ddi_prop_list_hold(ddi_prop_list_t *prop_list, struct devnames *dnp) 2008 { 2009 ASSERT(prop_list->prop_ref >= 0); 2010 ASSERT(mutex_owned(&dnp->dn_lock)); 2011 prop_list->prop_ref++; 2012 } 2013 2014 void 2015 i_ddi_prop_list_rele(ddi_prop_list_t *prop_list, struct devnames *dnp) 2016 { 2017 ASSERT(prop_list->prop_ref > 0); 2018 ASSERT(mutex_owned(&dnp->dn_lock)); 2019 prop_list->prop_ref--; 2020 2021 if (prop_list->prop_ref == 0) { 2022 i_ddi_prop_list_delete(prop_list->prop_list); 2023 kmem_free(prop_list, sizeof (*prop_list)); 2024 } 2025 } 2026 2027 /* 2028 * Free table of classes by drivers 2029 */ 2030 void 2031 i_ddi_free_exported_classes(char **classes, int n) 2032 { 2033 if ((n == 0) || (classes == NULL)) 2034 return; 2035 2036 kmem_free(classes, n * sizeof (char *)); 2037 } 2038 2039 /* 2040 * Get all classes exported by dip 2041 */ 2042 int 2043 i_ddi_get_exported_classes(dev_info_t *dip, char ***classes) 2044 { 2045 extern void lock_hw_class_list(); 2046 extern void unlock_hw_class_list(); 2047 extern int get_class(const char *, char **); 2048 2049 static char *rootclass = "root"; 2050 int n = 0, nclass = 0; 2051 char **buf; 2052 2053 ASSERT(i_ddi_node_state(dip) >= DS_BOUND); 2054 2055 if (dip == ddi_root_node()) /* rootnode exports class "root" */ 2056 nclass = 1; 2057 lock_hw_class_list(); 2058 nclass += get_class(ddi_driver_name(dip), NULL); 2059 if (nclass == 0) { 2060 unlock_hw_class_list(); 2061 return (0); /* no class exported */ 2062 } 2063 2064 *classes = buf = kmem_alloc(nclass * sizeof (char *), KM_SLEEP); 2065 if (dip == ddi_root_node()) { 2066 *buf++ = rootclass; 2067 n = 1; 2068 } 2069 n += get_class(ddi_driver_name(dip), buf); 2070 unlock_hw_class_list(); 2071 2072 ASSERT(n == nclass); /* make sure buf wasn't overrun */ 2073 return (nclass); 2074 } 2075 2076 /* 2077 * Helper functions, returns NULL if no memory. 2078 */ 2079 char * 2080 i_ddi_strdup(char *str, uint_t flag) 2081 { 2082 char *copy; 2083 2084 if (str == NULL) 2085 return (NULL); 2086 2087 copy = kmem_alloc(strlen(str) + 1, flag); 2088 if (copy == NULL) 2089 return (NULL); 2090 2091 (void) strcpy(copy, str); 2092 return (copy); 2093 } 2094 2095 /* 2096 * Load driver.conf file for major. Load all if major == -1. 2097 * 2098 * This is called 2099 * - early in boot after devnames array is initialized 2100 * - from vfs code when certain file systems are mounted 2101 * - from add_drv when a new driver is added 2102 */ 2103 int 2104 i_ddi_load_drvconf(major_t major) 2105 { 2106 extern int modrootloaded; 2107 2108 major_t low, high, m; 2109 2110 if (major == (major_t)-1) { 2111 low = 0; 2112 high = devcnt - 1; 2113 } else { 2114 if (major >= devcnt) 2115 return (EINVAL); 2116 low = high = major; 2117 } 2118 2119 for (m = low; m <= high; m++) { 2120 struct devnames *dnp = &devnamesp[m]; 2121 LOCK_DEV_OPS(&dnp->dn_lock); 2122 dnp->dn_flags &= ~DN_DRIVER_HELD; 2123 (void) impl_make_parlist(m); 2124 UNLOCK_DEV_OPS(&dnp->dn_lock); 2125 } 2126 2127 if (modrootloaded) { 2128 ddi_walk_devs(ddi_root_node(), reset_nexus_flags, 2129 (void *)(uintptr_t)major); 2130 } 2131 2132 /* build dn_list from old entries in path_to_inst */ 2133 e_ddi_unorphan_instance_nos(); 2134 return (0); 2135 } 2136 2137 /* 2138 * Unload a specific driver.conf. 
2139 * Don't support unload all because it doesn't make any sense 2140 */ 2141 int 2142 i_ddi_unload_drvconf(major_t major) 2143 { 2144 int error; 2145 struct devnames *dnp; 2146 2147 if (major >= devcnt) 2148 return (EINVAL); 2149 2150 /* 2151 * Take the per-driver lock while unloading driver.conf 2152 */ 2153 dnp = &devnamesp[major]; 2154 LOCK_DEV_OPS(&dnp->dn_lock); 2155 error = impl_free_parlist(major); 2156 UNLOCK_DEV_OPS(&dnp->dn_lock); 2157 return (error); 2158 } 2159 2160 /* 2161 * Merge a .conf node. This is called by nexus drivers to augment 2162 * hw node with properties specified in driver.conf file. This function 2163 * takes a callback routine to name nexus children. 2164 * The parent node must be held busy. 2165 * 2166 * It returns DDI_SUCCESS if the node is merged and DDI_FAILURE otherwise. 2167 */ 2168 int 2169 ndi_merge_node(dev_info_t *dip, int (*name_node)(dev_info_t *, char *, int)) 2170 { 2171 dev_info_t *hwdip; 2172 2173 ASSERT(ndi_dev_is_persistent_node(dip) == 0); 2174 ASSERT(ddi_get_name_addr(dip) != NULL); 2175 2176 hwdip = find_child_by_callback(ddi_get_parent(dip), 2177 ddi_binding_name(dip), ddi_get_name_addr(dip), name_node); 2178 2179 /* 2180 * Look for the hardware node that is the target of the merge; 2181 * return failure if not found. 2182 */ 2183 if ((hwdip == NULL) || (hwdip == dip)) { 2184 char *buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2185 NDI_CONFIG_DEBUG((CE_WARN, "No HW node to merge conf node %s", 2186 ddi_deviname(dip, buf))); 2187 kmem_free(buf, MAXNAMELEN); 2188 return (DDI_FAILURE); 2189 } 2190 2191 /* 2192 * Make sure the hardware node is uninitialized and has no property. 2193 * This may not be the case if new .conf files are load after some 2194 * hardware nodes have already been initialized and attached. 2195 * 2196 * N.B. We return success here because the node was *intended* 2197 * to be a merge node because there is a hw node with the name. 2198 */ 2199 mutex_enter(&DEVI(hwdip)->devi_lock); 2200 if (ndi_dev_is_persistent_node(hwdip) == 0) { 2201 char *buf; 2202 mutex_exit(&DEVI(hwdip)->devi_lock); 2203 2204 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2205 NDI_CONFIG_DEBUG((CE_NOTE, "Duplicate .conf node %s", 2206 ddi_deviname(dip, buf))); 2207 kmem_free(buf, MAXNAMELEN); 2208 return (DDI_SUCCESS); 2209 } 2210 2211 /* 2212 * If it is possible that the hardware has already been touched 2213 * then don't merge. 2214 */ 2215 if (i_ddi_node_state(hwdip) >= DS_INITIALIZED || 2216 (DEVI(hwdip)->devi_sys_prop_ptr != NULL) || 2217 (DEVI(hwdip)->devi_drv_prop_ptr != NULL)) { 2218 char *buf; 2219 mutex_exit(&DEVI(hwdip)->devi_lock); 2220 2221 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2222 NDI_CONFIG_DEBUG((CE_NOTE, 2223 "!Cannot merge .conf node %s with hw node %p " 2224 "-- not in proper state", 2225 ddi_deviname(dip, buf), (void *)hwdip)); 2226 kmem_free(buf, MAXNAMELEN); 2227 return (DDI_SUCCESS); 2228 } 2229 2230 mutex_enter(&DEVI(dip)->devi_lock); 2231 DEVI(hwdip)->devi_sys_prop_ptr = DEVI(dip)->devi_sys_prop_ptr; 2232 DEVI(hwdip)->devi_drv_prop_ptr = DEVI(dip)->devi_drv_prop_ptr; 2233 DEVI(dip)->devi_sys_prop_ptr = NULL; 2234 DEVI(dip)->devi_drv_prop_ptr = NULL; 2235 mutex_exit(&DEVI(dip)->devi_lock); 2236 mutex_exit(&DEVI(hwdip)->devi_lock); 2237 2238 return (DDI_SUCCESS); 2239 } 2240 2241 /* 2242 * Merge a "wildcard" .conf node. This is called by nexus drivers to 2243 * augment a set of hw node with properties specified in driver.conf file. 2244 * The parent node must be held busy. 
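 *
 * In effect: every sibling hw node bound to the same major as the
 * wildcard .conf node that is still below DS_INITIALIZED and carries
 * no properties gets a private copy of the wildcard node's system and
 * driver property lists via i_ddi_prop_list_dup(..., KM_SLEEP); nodes
 * already initialized or already carrying properties are skipped (see
 * the loop below).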
2245 * 2246 * There is no failure mode, since the nexus may or may not have child 2247 * node bound the driver specified by the wildcard node. 2248 */ 2249 void 2250 ndi_merge_wildcard_node(dev_info_t *dip) 2251 { 2252 dev_info_t *hwdip; 2253 dev_info_t *pdip = ddi_get_parent(dip); 2254 major_t major = ddi_driver_major(dip); 2255 2256 /* never attempt to merge a hw node */ 2257 ASSERT(ndi_dev_is_persistent_node(dip) == 0); 2258 /* must be bound to a driver major number */ 2259 ASSERT(major != (major_t)-1); 2260 2261 /* 2262 * Walk the child list to find all nodes bound to major 2263 * and copy properties. 2264 */ 2265 mutex_enter(&DEVI(dip)->devi_lock); 2266 for (hwdip = ddi_get_child(pdip); hwdip; 2267 hwdip = ddi_get_next_sibling(hwdip)) { 2268 /* 2269 * Skip nodes not bound to same driver 2270 */ 2271 if (ddi_driver_major(hwdip) != major) 2272 continue; 2273 2274 /* 2275 * Skip .conf nodes 2276 */ 2277 if (ndi_dev_is_persistent_node(hwdip) == 0) 2278 continue; 2279 2280 /* 2281 * Make sure the node is uninitialized and has no property. 2282 */ 2283 mutex_enter(&DEVI(hwdip)->devi_lock); 2284 if (i_ddi_node_state(hwdip) >= DS_INITIALIZED || 2285 (DEVI(hwdip)->devi_sys_prop_ptr != NULL) || 2286 (DEVI(hwdip)->devi_drv_prop_ptr != NULL)) { 2287 mutex_exit(&DEVI(hwdip)->devi_lock); 2288 NDI_CONFIG_DEBUG((CE_NOTE, "HW node %p state not " 2289 "suitable for merging wildcard conf node %s", 2290 (void *)hwdip, ddi_node_name(dip))); 2291 continue; 2292 } 2293 2294 DEVI(hwdip)->devi_sys_prop_ptr = 2295 i_ddi_prop_list_dup(DEVI(dip)->devi_sys_prop_ptr, KM_SLEEP); 2296 DEVI(hwdip)->devi_drv_prop_ptr = 2297 i_ddi_prop_list_dup(DEVI(dip)->devi_drv_prop_ptr, KM_SLEEP); 2298 mutex_exit(&DEVI(hwdip)->devi_lock); 2299 } 2300 mutex_exit(&DEVI(dip)->devi_lock); 2301 } 2302 2303 /* 2304 * Return the major number based on the compatible property. This interface 2305 * may be used in situations where we are trying to detect if a better driver 2306 * now exists for a device, so it must use the 'compatible' property. If 2307 * a non-NULL formp is specified and the binding was based on compatible then 2308 * return the pointer to the form used in *formp. 2309 */ 2310 major_t 2311 ddi_compatible_driver_major(dev_info_t *dip, char **formp) 2312 { 2313 struct dev_info *devi = DEVI(dip); 2314 void *compat; 2315 size_t len; 2316 char *p = NULL; 2317 major_t major = (major_t)-1; 2318 2319 if (formp) 2320 *formp = NULL; 2321 2322 /* look up compatible property */ 2323 (void) lookup_compatible(dip, KM_SLEEP); 2324 compat = (void *)(devi->devi_compat_names); 2325 len = devi->devi_compat_length; 2326 2327 /* find the highest precedence compatible form with a driver binding */ 2328 while ((p = prom_decode_composite_string(compat, len, p)) != NULL) { 2329 major = ddi_name_to_major(p); 2330 if ((major != (major_t)-1) && 2331 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED)) { 2332 if (formp) 2333 *formp = p; 2334 return (major); 2335 } 2336 } 2337 2338 /* 2339 * none of the compatible forms have a driver binding, see if 2340 * the node name has a driver binding. 2341 */ 2342 major = ddi_name_to_major(ddi_node_name(dip)); 2343 if ((major != (major_t)-1) && 2344 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED)) 2345 return (major); 2346 2347 /* no driver */ 2348 return ((major_t)-1); 2349 } 2350 2351 /* 2352 * Static help functions 2353 */ 2354 2355 /* 2356 * lookup the "compatible" property and cache it's contents in the 2357 * device node. 
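 *
 * For example (values are illustrative), a node with
 *
 *	compatible = "pci108e,1000", "pciclass,020000";
 *
 * is cached as a single buffer holding the two NULL-terminated strings
 * back to back (see encode_composite_string() below), which
 * ddi_compatible_driver_major() then scans in precedence order for the
 * first form that has a driver binding.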
2358 */ 2359 static int 2360 lookup_compatible(dev_info_t *dip, uint_t flag) 2361 { 2362 int rv; 2363 int prop_flags; 2364 uint_t ncompatstrs; 2365 char **compatstrpp; 2366 char *di_compat_strp; 2367 size_t di_compat_strlen; 2368 2369 if (DEVI(dip)->devi_compat_names) { 2370 return (DDI_SUCCESS); 2371 } 2372 2373 prop_flags = DDI_PROP_TYPE_STRING | DDI_PROP_DONTPASS; 2374 2375 if (flag & KM_NOSLEEP) { 2376 prop_flags |= DDI_PROP_DONTSLEEP; 2377 } 2378 2379 if (ndi_dev_is_prom_node(dip) == 0) { 2380 prop_flags |= DDI_PROP_NOTPROM; 2381 } 2382 2383 rv = ddi_prop_lookup_common(DDI_DEV_T_ANY, dip, prop_flags, 2384 "compatible", &compatstrpp, &ncompatstrs, 2385 ddi_prop_fm_decode_strings); 2386 2387 if (rv == DDI_PROP_NOT_FOUND) { 2388 return (DDI_SUCCESS); 2389 } 2390 2391 if (rv != DDI_PROP_SUCCESS) { 2392 return (DDI_FAILURE); 2393 } 2394 2395 /* 2396 * encode the compatible property data in the dev_info node 2397 */ 2398 rv = DDI_SUCCESS; 2399 if (ncompatstrs != 0) { 2400 di_compat_strp = encode_composite_string(compatstrpp, 2401 ncompatstrs, &di_compat_strlen, flag); 2402 if (di_compat_strp != NULL) { 2403 DEVI(dip)->devi_compat_names = di_compat_strp; 2404 DEVI(dip)->devi_compat_length = di_compat_strlen; 2405 } else { 2406 rv = DDI_FAILURE; 2407 } 2408 } 2409 ddi_prop_free(compatstrpp); 2410 return (rv); 2411 } 2412 2413 /* 2414 * Create a composite string from a list of strings. 2415 * 2416 * A composite string consists of a single buffer containing one 2417 * or more NULL terminated strings. 2418 */ 2419 static char * 2420 encode_composite_string(char **strings, uint_t nstrings, size_t *retsz, 2421 uint_t flag) 2422 { 2423 uint_t index; 2424 char **strpp; 2425 uint_t slen; 2426 size_t cbuf_sz = 0; 2427 char *cbuf_p; 2428 char *cbuf_ip; 2429 2430 if (strings == NULL || nstrings == 0 || retsz == NULL) { 2431 return (NULL); 2432 } 2433 2434 for (index = 0, strpp = strings; index < nstrings; index++) 2435 cbuf_sz += strlen(*(strpp++)) + 1; 2436 2437 if ((cbuf_p = kmem_alloc(cbuf_sz, flag)) == NULL) { 2438 cmn_err(CE_NOTE, 2439 "?failed to allocate device node compatstr"); 2440 return (NULL); 2441 } 2442 2443 cbuf_ip = cbuf_p; 2444 for (index = 0, strpp = strings; index < nstrings; index++) { 2445 slen = strlen(*strpp); 2446 bcopy(*(strpp++), cbuf_ip, slen); 2447 cbuf_ip += slen; 2448 *(cbuf_ip++) = '\0'; 2449 } 2450 2451 *retsz = cbuf_sz; 2452 return (cbuf_p); 2453 } 2454 2455 static void 2456 link_to_driver_list(dev_info_t *dip) 2457 { 2458 major_t major = DEVI(dip)->devi_major; 2459 struct devnames *dnp; 2460 2461 ASSERT(major != (major_t)-1); 2462 2463 /* 2464 * Remove from orphan list 2465 */ 2466 if (ndi_dev_is_persistent_node(dip)) { 2467 dnp = &orphanlist; 2468 remove_from_dn_list(dnp, dip); 2469 } 2470 2471 /* 2472 * Add to per driver list 2473 */ 2474 dnp = &devnamesp[major]; 2475 add_to_dn_list(dnp, dip); 2476 } 2477 2478 static void 2479 unlink_from_driver_list(dev_info_t *dip) 2480 { 2481 major_t major = DEVI(dip)->devi_major; 2482 struct devnames *dnp; 2483 2484 ASSERT(major != (major_t)-1); 2485 2486 /* 2487 * Remove from per-driver list 2488 */ 2489 dnp = &devnamesp[major]; 2490 remove_from_dn_list(dnp, dip); 2491 2492 /* 2493 * Add to orphan list 2494 */ 2495 if (ndi_dev_is_persistent_node(dip)) { 2496 dnp = &orphanlist; 2497 add_to_dn_list(dnp, dip); 2498 } 2499 } 2500 2501 /* 2502 * scan the per-driver list looking for dev_info "dip" 2503 */ 2504 static dev_info_t * 2505 in_dn_list(struct devnames *dnp, dev_info_t *dip) 2506 { 2507 struct dev_info *idevi; 2508 2509 if 
((idevi = DEVI(dnp->dn_head)) == NULL) 2510 return (NULL); 2511 2512 while (idevi) { 2513 if (idevi == DEVI(dip)) 2514 return (dip); 2515 idevi = idevi->devi_next; 2516 } 2517 return (NULL); 2518 } 2519 2520 /* 2521 * insert devinfo node 'dip' into the per-driver instance list 2522 * headed by 'dnp' 2523 * 2524 * Nodes on the per-driver list are ordered: HW - SID - PSEUDO. The order is 2525 * required for merging of .conf file data to work properly. 2526 */ 2527 static void 2528 add_to_ordered_dn_list(struct devnames *dnp, dev_info_t *dip) 2529 { 2530 dev_info_t **dipp; 2531 2532 ASSERT(mutex_owned(&(dnp->dn_lock))); 2533 2534 dipp = &dnp->dn_head; 2535 if (ndi_dev_is_prom_node(dip)) { 2536 /* 2537 * Find the first non-prom node or end of list 2538 */ 2539 while (*dipp && (ndi_dev_is_prom_node(*dipp) != 0)) { 2540 dipp = (dev_info_t **)&DEVI(*dipp)->devi_next; 2541 } 2542 } else if (ndi_dev_is_persistent_node(dip)) { 2543 /* 2544 * Find the first non-persistent node 2545 */ 2546 while (*dipp && (ndi_dev_is_persistent_node(*dipp) != 0)) { 2547 dipp = (dev_info_t **)&DEVI(*dipp)->devi_next; 2548 } 2549 } else { 2550 /* 2551 * Find the end of the list 2552 */ 2553 while (*dipp) { 2554 dipp = (dev_info_t **)&DEVI(*dipp)->devi_next; 2555 } 2556 } 2557 2558 DEVI(dip)->devi_next = DEVI(*dipp); 2559 *dipp = dip; 2560 } 2561 2562 /* 2563 * add a list of device nodes to the device node list in the 2564 * devnames structure 2565 */ 2566 static void 2567 add_to_dn_list(struct devnames *dnp, dev_info_t *dip) 2568 { 2569 /* 2570 * Look to see if node already exists 2571 */ 2572 LOCK_DEV_OPS(&(dnp->dn_lock)); 2573 if (in_dn_list(dnp, dip)) { 2574 cmn_err(CE_NOTE, "add_to_dn_list: node %s already in list", 2575 DEVI(dip)->devi_node_name); 2576 } else { 2577 add_to_ordered_dn_list(dnp, dip); 2578 } 2579 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 2580 } 2581 2582 static void 2583 remove_from_dn_list(struct devnames *dnp, dev_info_t *dip) 2584 { 2585 dev_info_t **plist; 2586 2587 LOCK_DEV_OPS(&(dnp->dn_lock)); 2588 2589 plist = (dev_info_t **)&dnp->dn_head; 2590 while (*plist && (*plist != dip)) { 2591 plist = (dev_info_t **)&DEVI(*plist)->devi_next; 2592 } 2593 2594 if (*plist != NULL) { 2595 ASSERT(*plist == dip); 2596 *plist = (dev_info_t *)(DEVI(dip)->devi_next); 2597 DEVI(dip)->devi_next = NULL; 2598 } else { 2599 NDI_CONFIG_DEBUG((CE_NOTE, 2600 "remove_from_dn_list: node %s not found in list", 2601 DEVI(dip)->devi_node_name)); 2602 } 2603 2604 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 2605 } 2606 2607 /* 2608 * Add and remove reference driver global property list 2609 */ 2610 static void 2611 add_global_props(dev_info_t *dip) 2612 { 2613 struct devnames *dnp; 2614 ddi_prop_list_t *plist; 2615 2616 ASSERT(DEVI(dip)->devi_global_prop_list == NULL); 2617 ASSERT(DEVI(dip)->devi_major != (major_t)-1); 2618 2619 dnp = &devnamesp[DEVI(dip)->devi_major]; 2620 LOCK_DEV_OPS(&dnp->dn_lock); 2621 plist = dnp->dn_global_prop_ptr; 2622 if (plist == NULL) { 2623 UNLOCK_DEV_OPS(&dnp->dn_lock); 2624 return; 2625 } 2626 i_ddi_prop_list_hold(plist, dnp); 2627 UNLOCK_DEV_OPS(&dnp->dn_lock); 2628 2629 mutex_enter(&DEVI(dip)->devi_lock); 2630 DEVI(dip)->devi_global_prop_list = plist; 2631 mutex_exit(&DEVI(dip)->devi_lock); 2632 } 2633 2634 static void 2635 remove_global_props(dev_info_t *dip) 2636 { 2637 ddi_prop_list_t *proplist; 2638 2639 mutex_enter(&DEVI(dip)->devi_lock); 2640 proplist = DEVI(dip)->devi_global_prop_list; 2641 DEVI(dip)->devi_global_prop_list = NULL; 2642 mutex_exit(&DEVI(dip)->devi_lock); 2643 2644 if (proplist) { 2645 
major_t major; 2646 struct devnames *dnp; 2647 2648 major = ddi_driver_major(dip); 2649 ASSERT(major != (major_t)-1); 2650 dnp = &devnamesp[major]; 2651 LOCK_DEV_OPS(&dnp->dn_lock); 2652 i_ddi_prop_list_rele(proplist, dnp); 2653 UNLOCK_DEV_OPS(&dnp->dn_lock); 2654 } 2655 } 2656 2657 #ifdef DEBUG 2658 /* 2659 * Set this variable to '0' to disable the optimization, 2660 * and to 2 to print debug message. 2661 */ 2662 static int optimize_dtree = 1; 2663 2664 static void 2665 debug_dtree(dev_info_t *devi, struct dev_info *adevi, char *service) 2666 { 2667 char *adeviname, *buf; 2668 2669 /* 2670 * Don't print unless optimize dtree is set to 2+ 2671 */ 2672 if (optimize_dtree <= 1) 2673 return; 2674 2675 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2676 adeviname = ddi_deviname((dev_info_t *)adevi, buf); 2677 if (*adeviname == '\0') 2678 adeviname = "root"; 2679 2680 cmn_err(CE_CONT, "%s %s -> %s\n", 2681 ddi_deviname(devi, buf), service, adeviname); 2682 2683 kmem_free(buf, MAXNAMELEN); 2684 } 2685 #else /* DEBUG */ 2686 #define debug_dtree(a1, a2, a3) /* nothing */ 2687 #endif /* DEBUG */ 2688 2689 static void 2690 ddi_optimize_dtree(dev_info_t *devi) 2691 { 2692 struct dev_info *pdevi; 2693 struct bus_ops *b; 2694 2695 pdevi = DEVI(devi)->devi_parent; 2696 ASSERT(pdevi); 2697 2698 /* 2699 * Set the unoptimized values 2700 */ 2701 DEVI(devi)->devi_bus_map_fault = pdevi; 2702 DEVI(devi)->devi_bus_dma_map = pdevi; 2703 DEVI(devi)->devi_bus_dma_allochdl = pdevi; 2704 DEVI(devi)->devi_bus_dma_freehdl = pdevi; 2705 DEVI(devi)->devi_bus_dma_bindhdl = pdevi; 2706 DEVI(devi)->devi_bus_dma_bindfunc = 2707 pdevi->devi_ops->devo_bus_ops->bus_dma_bindhdl; 2708 DEVI(devi)->devi_bus_dma_unbindhdl = pdevi; 2709 DEVI(devi)->devi_bus_dma_unbindfunc = 2710 pdevi->devi_ops->devo_bus_ops->bus_dma_unbindhdl; 2711 DEVI(devi)->devi_bus_dma_flush = pdevi; 2712 DEVI(devi)->devi_bus_dma_win = pdevi; 2713 DEVI(devi)->devi_bus_dma_ctl = pdevi; 2714 DEVI(devi)->devi_bus_ctl = pdevi; 2715 2716 #ifdef DEBUG 2717 if (optimize_dtree == 0) 2718 return; 2719 #endif /* DEBUG */ 2720 2721 b = pdevi->devi_ops->devo_bus_ops; 2722 2723 if (i_ddi_map_fault == b->bus_map_fault) { 2724 DEVI(devi)->devi_bus_map_fault = pdevi->devi_bus_map_fault; 2725 debug_dtree(devi, DEVI(devi)->devi_bus_map_fault, 2726 "bus_map_fault"); 2727 } 2728 2729 if (ddi_dma_map == b->bus_dma_map) { 2730 DEVI(devi)->devi_bus_dma_map = pdevi->devi_bus_dma_map; 2731 debug_dtree(devi, DEVI(devi)->devi_bus_dma_map, "bus_dma_map"); 2732 } 2733 2734 if (ddi_dma_allochdl == b->bus_dma_allochdl) { 2735 DEVI(devi)->devi_bus_dma_allochdl = 2736 pdevi->devi_bus_dma_allochdl; 2737 debug_dtree(devi, DEVI(devi)->devi_bus_dma_allochdl, 2738 "bus_dma_allochdl"); 2739 } 2740 2741 if (ddi_dma_freehdl == b->bus_dma_freehdl) { 2742 DEVI(devi)->devi_bus_dma_freehdl = pdevi->devi_bus_dma_freehdl; 2743 debug_dtree(devi, DEVI(devi)->devi_bus_dma_freehdl, 2744 "bus_dma_freehdl"); 2745 } 2746 2747 if (ddi_dma_bindhdl == b->bus_dma_bindhdl) { 2748 DEVI(devi)->devi_bus_dma_bindhdl = pdevi->devi_bus_dma_bindhdl; 2749 DEVI(devi)->devi_bus_dma_bindfunc = 2750 pdevi->devi_bus_dma_bindhdl->devi_ops-> 2751 devo_bus_ops->bus_dma_bindhdl; 2752 debug_dtree(devi, DEVI(devi)->devi_bus_dma_bindhdl, 2753 "bus_dma_bindhdl"); 2754 } 2755 2756 if (ddi_dma_unbindhdl == b->bus_dma_unbindhdl) { 2757 DEVI(devi)->devi_bus_dma_unbindhdl = 2758 pdevi->devi_bus_dma_unbindhdl; 2759 DEVI(devi)->devi_bus_dma_unbindfunc = 2760 pdevi->devi_bus_dma_unbindhdl->devi_ops-> 2761 devo_bus_ops->bus_dma_unbindhdl; 2762 
debug_dtree(devi, DEVI(devi)->devi_bus_dma_unbindhdl, 2763 "bus_dma_unbindhdl"); 2764 } 2765 2766 if (ddi_dma_flush == b->bus_dma_flush) { 2767 DEVI(devi)->devi_bus_dma_flush = pdevi->devi_bus_dma_flush; 2768 debug_dtree(devi, DEVI(devi)->devi_bus_dma_flush, 2769 "bus_dma_flush"); 2770 } 2771 2772 if (ddi_dma_win == b->bus_dma_win) { 2773 DEVI(devi)->devi_bus_dma_win = pdevi->devi_bus_dma_win; 2774 debug_dtree(devi, DEVI(devi)->devi_bus_dma_win, 2775 "bus_dma_win"); 2776 } 2777 2778 if (ddi_dma_mctl == b->bus_dma_ctl) { 2779 DEVI(devi)->devi_bus_dma_ctl = pdevi->devi_bus_dma_ctl; 2780 debug_dtree(devi, DEVI(devi)->devi_bus_dma_ctl, "bus_dma_ctl"); 2781 } 2782 2783 if (ddi_ctlops == b->bus_ctl) { 2784 DEVI(devi)->devi_bus_ctl = pdevi->devi_bus_ctl; 2785 debug_dtree(devi, DEVI(devi)->devi_bus_ctl, "bus_ctl"); 2786 } 2787 } 2788 2789 #define MIN_DEVINFO_LOG_SIZE max_ncpus 2790 #define MAX_DEVINFO_LOG_SIZE max_ncpus * 10 2791 2792 static void 2793 da_log_init() 2794 { 2795 devinfo_log_header_t *dh; 2796 int logsize = devinfo_log_size; 2797 2798 if (logsize == 0) 2799 logsize = MIN_DEVINFO_LOG_SIZE; 2800 else if (logsize > MAX_DEVINFO_LOG_SIZE) 2801 logsize = MAX_DEVINFO_LOG_SIZE; 2802 2803 dh = kmem_alloc(logsize * PAGESIZE, KM_SLEEP); 2804 mutex_init(&dh->dh_lock, NULL, MUTEX_DEFAULT, NULL); 2805 dh->dh_max = ((logsize * PAGESIZE) - sizeof (*dh)) / 2806 sizeof (devinfo_audit_t) + 1; 2807 dh->dh_curr = -1; 2808 dh->dh_hits = 0; 2809 2810 devinfo_audit_log = dh; 2811 } 2812 2813 /* 2814 * Log the stack trace in per-devinfo audit structure and also enter 2815 * it into a system wide log for recording the time history. 2816 */ 2817 static void 2818 da_log_enter(dev_info_t *dip) 2819 { 2820 devinfo_audit_t *da_log, *da = DEVI(dip)->devi_audit; 2821 devinfo_log_header_t *dh = devinfo_audit_log; 2822 2823 if (devinfo_audit_log == NULL) 2824 return; 2825 2826 ASSERT(da != NULL); 2827 2828 da->da_devinfo = dip; 2829 da->da_timestamp = gethrtime(); 2830 da->da_thread = curthread; 2831 da->da_node_state = DEVI(dip)->devi_node_state; 2832 da->da_device_state = DEVI(dip)->devi_state; 2833 da->da_depth = getpcstack(da->da_stack, DDI_STACK_DEPTH); 2834 2835 /* 2836 * Copy into common log and note the location for tracing history 2837 */ 2838 mutex_enter(&dh->dh_lock); 2839 dh->dh_hits++; 2840 dh->dh_curr++; 2841 if (dh->dh_curr >= dh->dh_max) 2842 dh->dh_curr -= dh->dh_max; 2843 da_log = &dh->dh_entry[dh->dh_curr]; 2844 mutex_exit(&dh->dh_lock); 2845 2846 bcopy(da, da_log, sizeof (devinfo_audit_t)); 2847 da->da_lastlog = da_log; 2848 } 2849 2850 static void 2851 attach_drivers() 2852 { 2853 int i; 2854 for (i = 0; i < devcnt; i++) { 2855 struct devnames *dnp = &devnamesp[i]; 2856 if ((dnp->dn_flags & DN_FORCE_ATTACH) && 2857 (ddi_hold_installed_driver((major_t)i) != NULL)) 2858 ddi_rele_driver((major_t)i); 2859 } 2860 } 2861 2862 /* 2863 * Launch a thread to force attach drivers. This avoids penalty on boot time. 2864 */ 2865 void 2866 i_ddi_forceattach_drivers() 2867 { 2868 /* 2869 * On i386, the USB drivers need to load and take over from the 2870 * SMM BIOS drivers ASAP after consconfig(), so make sure they 2871 * get loaded right here rather than letting the thread do it. 2872 * 2873 * The order here is important. EHCI must be loaded first, as 2874 * we have observed many systems on which hangs occur if the 2875 * {U,O}HCI companion controllers take over control from the BIOS 2876 * before EHCI does. 
These hangs are also caused by BIOSes leaving 2877 * interrupt-on-port-change enabled in the ehci controller, so that 2878 * when uhci/ohci reset themselves, it induces a port change on 2879 * the ehci companion controller. Since there's no interrupt handler 2880 * installed at the time, the moment that interrupt is unmasked, an 2881 * interrupt storm will occur. All this is averted when ehci is 2882 * loaded first. And now you know..... the REST of the story. 2883 * 2884 * Regardless of platform, ehci needs to initialize first to avoid 2885 * unnecessary connects and disconnects on the companion controller 2886 * when ehci sets up the routing. 2887 */ 2888 (void) ddi_hold_installed_driver(ddi_name_to_major("ehci")); 2889 (void) ddi_hold_installed_driver(ddi_name_to_major("uhci")); 2890 (void) ddi_hold_installed_driver(ddi_name_to_major("ohci")); 2891 2892 (void) thread_create(NULL, 0, (void (*)())attach_drivers, NULL, 0, &p0, 2893 TS_RUN, minclsyspri); 2894 } 2895 2896 /* 2897 * This is a private DDI interface for optimizing boot performance. 2898 * I/O subsystem initialization is considered complete when devfsadm 2899 * is executed. 2900 * 2901 * NOTE: The start of syseventd in S60devfsadm happen to be convenient 2902 * indicator for the completion of I/O initialization during boot. 2903 * The implementation should be replaced by something more robust. 2904 */ 2905 int 2906 i_ddi_io_initialized() 2907 { 2908 extern int sysevent_daemon_init; 2909 return (sysevent_daemon_init); 2910 } 2911 2912 2913 /* 2914 * device tree walking 2915 */ 2916 2917 struct walk_elem { 2918 struct walk_elem *next; 2919 dev_info_t *dip; 2920 }; 2921 2922 static void 2923 free_list(struct walk_elem *list) 2924 { 2925 while (list) { 2926 struct walk_elem *next = list->next; 2927 kmem_free(list, sizeof (*list)); 2928 list = next; 2929 } 2930 } 2931 2932 static void 2933 append_node(struct walk_elem **list, dev_info_t *dip) 2934 { 2935 struct walk_elem *tail; 2936 struct walk_elem *elem = kmem_alloc(sizeof (*elem), KM_SLEEP); 2937 2938 elem->next = NULL; 2939 elem->dip = dip; 2940 2941 if (*list == NULL) { 2942 *list = elem; 2943 return; 2944 } 2945 2946 tail = *list; 2947 while (tail->next) 2948 tail = tail->next; 2949 2950 tail->next = elem; 2951 } 2952 2953 /* 2954 * The implementation of ddi_walk_devs(). 2955 */ 2956 static int 2957 walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg, 2958 int do_locking) 2959 { 2960 struct walk_elem *head = NULL; 2961 2962 /* 2963 * Do it in two passes. First pass invoke callback on each 2964 * dip on the sibling list. Second pass invoke callback on 2965 * children of each dip. 
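 *
 * (So the traversal is level-by-level: all siblings at the current
 * level are visited and remembered on 'head' before any child list is
 * descended into, and each descent re-enters walk_devs() with the
 * parent held via ndi_devi_enter() when do_locking is set.)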
2966 */ 2967 while (dip) { 2968 switch ((*f)(dip, arg)) { 2969 case DDI_WALK_TERMINATE: 2970 free_list(head); 2971 return (DDI_WALK_TERMINATE); 2972 2973 case DDI_WALK_PRUNESIB: 2974 /* ignore sibling by setting dip to NULL */ 2975 append_node(&head, dip); 2976 dip = NULL; 2977 break; 2978 2979 case DDI_WALK_PRUNECHILD: 2980 /* don't worry about children */ 2981 dip = ddi_get_next_sibling(dip); 2982 break; 2983 2984 case DDI_WALK_CONTINUE: 2985 default: 2986 append_node(&head, dip); 2987 dip = ddi_get_next_sibling(dip); 2988 break; 2989 } 2990 2991 } 2992 2993 /* second pass */ 2994 while (head) { 2995 int circ; 2996 struct walk_elem *next = head->next; 2997 2998 if (do_locking) 2999 ndi_devi_enter(head->dip, &circ); 3000 if (walk_devs(ddi_get_child(head->dip), f, arg, do_locking) == 3001 DDI_WALK_TERMINATE) { 3002 if (do_locking) 3003 ndi_devi_exit(head->dip, circ); 3004 free_list(head); 3005 return (DDI_WALK_TERMINATE); 3006 } 3007 if (do_locking) 3008 ndi_devi_exit(head->dip, circ); 3009 kmem_free(head, sizeof (*head)); 3010 head = next; 3011 } 3012 3013 return (DDI_WALK_CONTINUE); 3014 } 3015 3016 /* 3017 * This general-purpose routine traverses the tree of dev_info nodes, 3018 * starting from the given node, and calls the given function for each 3019 * node that it finds with the current node and the pointer arg (which 3020 * can point to a structure of information that the function 3021 * needs) as arguments. 3022 * 3023 * It does the walk a layer at a time, not depth-first. The given function 3024 * must return one of the following values: 3025 * DDI_WALK_CONTINUE 3026 * DDI_WALK_PRUNESIB 3027 * DDI_WALK_PRUNECHILD 3028 * DDI_WALK_TERMINATE 3029 * 3030 * N.B. Since we walk the sibling list, the caller must ensure that 3031 * the parent of dip is held against changes, unless the parent 3032 * is rootnode. ndi_devi_enter() on the parent is sufficient. 3033 * 3034 * To avoid deadlock situations, caller must not attempt to 3035 * configure/unconfigure/remove device node in (*f)(), nor should 3036 * it attempt to recurse on other nodes in the system. 3037 * 3038 * This is not callable from device autoconfiguration routines. 3039 * They include, but not limited to, _init(9e), _fini(9e), probe(9e), 3040 * attach(9e), and detach(9e). 3041 */ 3042 3043 void 3044 ddi_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg) 3045 { 3046 3047 ASSERT(dip == NULL || ddi_get_parent(dip) == NULL || 3048 DEVI_BUSY_OWNED(ddi_get_parent(dip))); 3049 3050 (void) walk_devs(dip, f, arg, 1); 3051 } 3052 3053 /* 3054 * This is a general-purpose routine traverses the per-driver list 3055 * and calls the given function for each node. must return one of 3056 * the following values: 3057 * DDI_WALK_CONTINUE 3058 * DDI_WALK_TERMINATE 3059 * 3060 * N.B. The same restrictions from ddi_walk_devs() apply. 
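 *
 * A minimal callback sketch (driver name and callback are
 * illustrative only):
 *
 *	static int
 *	count_cb(dev_info_t *dip, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (DDI_WALK_CONTINUE);
 *	}
 *
 *	int n = 0;
 *	e_ddi_walk_driver("sd", count_cb, &n);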
3061 */ 3062 3063 void 3064 e_ddi_walk_driver(char *drv, int (*f)(dev_info_t *, void *), void *arg) 3065 { 3066 major_t major; 3067 struct devnames *dnp; 3068 dev_info_t *dip; 3069 3070 major = ddi_name_to_major(drv); 3071 if (major == (major_t)-1) 3072 return; 3073 3074 dnp = &devnamesp[major]; 3075 LOCK_DEV_OPS(&dnp->dn_lock); 3076 dip = dnp->dn_head; 3077 while (dip) { 3078 ndi_hold_devi(dip); 3079 UNLOCK_DEV_OPS(&dnp->dn_lock); 3080 if ((*f)(dip, arg) == DDI_WALK_TERMINATE) { 3081 ndi_rele_devi(dip); 3082 return; 3083 } 3084 LOCK_DEV_OPS(&dnp->dn_lock); 3085 ndi_rele_devi(dip); 3086 dip = ddi_get_next(dip); 3087 } 3088 UNLOCK_DEV_OPS(&dnp->dn_lock); 3089 } 3090 3091 /* 3092 * argument to i_find_devi, a devinfo node search callback function. 3093 */ 3094 struct match_info { 3095 dev_info_t *dip; /* result */ 3096 char *nodename; /* if non-null, nodename must match */ 3097 int instance; /* if != -1, instance must match */ 3098 int attached; /* if != 0, state >= DS_ATTACHED */ 3099 }; 3100 3101 static int 3102 i_find_devi(dev_info_t *dip, void *arg) 3103 { 3104 struct match_info *info = (struct match_info *)arg; 3105 3106 if (((info->nodename == NULL) || 3107 (strcmp(ddi_node_name(dip), info->nodename) == 0)) && 3108 ((info->instance == -1) || 3109 (ddi_get_instance(dip) == info->instance)) && 3110 ((info->attached == 0) || 3111 (i_ddi_node_state(dip) >= DS_ATTACHED))) { 3112 info->dip = dip; 3113 ndi_hold_devi(dip); 3114 return (DDI_WALK_TERMINATE); 3115 } 3116 3117 return (DDI_WALK_CONTINUE); 3118 } 3119 3120 /* 3121 * Find dip with a known node name and instance and return with it held 3122 */ 3123 dev_info_t * 3124 ddi_find_devinfo(char *nodename, int instance, int attached) 3125 { 3126 struct match_info info; 3127 3128 info.nodename = nodename; 3129 info.instance = instance; 3130 info.attached = attached; 3131 info.dip = NULL; 3132 3133 ddi_walk_devs(ddi_root_node(), i_find_devi, &info); 3134 return (info.dip); 3135 } 3136 3137 /* 3138 * Parse for name, addr, and minor names. Some args may be NULL. 3139 */ 3140 void 3141 i_ddi_parse_name(char *name, char **nodename, char **addrname, char **minorname) 3142 { 3143 char *cp; 3144 static char nulladdrname[] = ""; 3145 3146 /* default values */ 3147 if (nodename) 3148 *nodename = name; 3149 if (addrname) 3150 *addrname = nulladdrname; 3151 if (minorname) 3152 *minorname = NULL; 3153 3154 cp = name; 3155 while (*cp != '\0') { 3156 if (addrname && *cp == '@') { 3157 *addrname = cp + 1; 3158 *cp = '\0'; 3159 } else if (minorname && *cp == ':') { 3160 *minorname = cp + 1; 3161 *cp = '\0'; 3162 } 3163 ++cp; 3164 } 3165 } 3166 3167 static char * 3168 child_path_to_driver(dev_info_t *parent, char *child_name, char *unit_address) 3169 { 3170 char *p, *drvname = NULL; 3171 major_t maj; 3172 3173 /* 3174 * Construct the pathname and ask the implementation 3175 * if it can do a driver = f(pathname) for us, if not 3176 * we'll just default to using the node-name that 3177 * was given to us. We want to do this first to 3178 * allow the platform to use 'generic' names for 3179 * legacy device drivers. 3180 */ 3181 p = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 3182 (void) ddi_pathname(parent, p); 3183 (void) strcat(p, "/"); 3184 (void) strcat(p, child_name); 3185 if (unit_address && *unit_address) { 3186 (void) strcat(p, "@"); 3187 (void) strcat(p, unit_address); 3188 } 3189 3190 /* 3191 * Get the binding. If there is none, return the child_name 3192 * and let the caller deal with it. 
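 *
 * (Illustrative values: for a parent whose pathname is /pci@1f,0 and a
 * child "disk" at unit address "0,0", the key built above is
 * "/pci@1f,0/disk@0,0"; if path_to_major() has no binding for that
 * path the caller simply falls back to the node name "disk".)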
3193 */ 3194 maj = path_to_major(p); 3195 3196 kmem_free(p, MAXPATHLEN); 3197 3198 if (maj != (major_t)-1) 3199 drvname = ddi_major_to_name(maj); 3200 if (drvname == NULL) 3201 drvname = child_name; 3202 3203 return (drvname); 3204 } 3205 3206 3207 /* 3208 * Given the pathname of a device, fill in the dev_info_t value and/or the 3209 * dev_t value and/or the spectype, depending on which parameters are non-NULL. 3210 * If there is an error, this function returns -1. 3211 * 3212 * NOTE: If this function returns the dev_info_t structure, then it 3213 * does so with a hold on the devi. Caller should ensure that they get 3214 * decremented via ddi_release_devi() or ndi_rele_devi(); 3215 * 3216 * This function can be invoked in the boot case for a pathname without 3217 * device argument (:xxxx), traditionally treated as a minor name. 3218 * In this case, we do the following 3219 * (1) search the minor node of type DDM_DEFAULT. 3220 * (2) if no DDM_DEFAULT minor exists, then the first non-alias minor is chosen. 3221 * (3) if neither exists, a dev_t is faked with minor number = instance. 3222 * As of S9 FCS, no instance of #1 exists. #2 is used by several platforms 3223 * to default the boot partition to :a possibly by other OBP definitions. 3224 * #3 is used for booting off network interfaces, most SPARC network 3225 * drivers support Style-2 only, so only DDM_ALIAS minor exists. 3226 * 3227 * It is possible for OBP to present device args at the end of the path as 3228 * well as in the middle. For example, with IB the following strings are 3229 * valid boot paths. 3230 * a /pci@8,700000/ib@1,2:port=1,pkey=ff,dhcp,... 3231 * b /pci@8,700000/ib@1,1:port=1/ioc@xxxxxx,yyyyyyy:dhcp 3232 * Case (a), we first look for minor node "port=1,pkey...". 3233 * Failing that, we will pass "port=1,pkey..." to the bus_config 3234 * entry point of ib (HCA) driver. 3235 * Case (b), configure ib@1,1 as usual. Then invoke ib's bus_config 3236 * with argument "ioc@xxxxxxx,yyyyyyy:port=1". After configuring 3237 * the ioc, look for minor node dhcp. If not found, pass ":dhcp" 3238 * to ioc's bus_config entry point. 
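 *
 * Typical caller sketch (the path shown is illustrative only):
 *
 *	dev_t devt;
 *	int spectype;
 *	dev_info_t *dip;
 *
 *	if (resolve_pathname("/pci@1f,0/ide@d/dad@0,0:a",
 *	    &dip, &devt, &spectype) == 0) {
 *		-- use devt and spectype, then --
 *		ndi_rele_devi(dip);	-- drop the hold taken above
 *	}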
3239 */ 3240 int 3241 resolve_pathname(char *pathname, 3242 dev_info_t **dipp, dev_t *devtp, int *spectypep) 3243 { 3244 int error; 3245 dev_info_t *parent, *child; 3246 struct pathname pn; 3247 char *component, *config_name; 3248 char *minorname = NULL; 3249 char *prev_minor = NULL; 3250 dev_t devt = NODEV; 3251 int spectype; 3252 struct ddi_minor_data *dmn; 3253 3254 if (*pathname != '/') 3255 return (EINVAL); 3256 parent = ddi_root_node(); /* Begin at the top of the tree */ 3257 3258 if (error = pn_get(pathname, UIO_SYSSPACE, &pn)) 3259 return (error); 3260 pn_skipslash(&pn); 3261 3262 ASSERT(i_ddi_node_state(parent) >= DS_ATTACHED); 3263 ndi_hold_devi(parent); 3264 3265 component = kmem_alloc(MAXNAMELEN, KM_SLEEP); 3266 config_name = kmem_alloc(MAXNAMELEN, KM_SLEEP); 3267 3268 while (pn_pathleft(&pn)) { 3269 /* remember prev minor (:xxx) in the middle of path */ 3270 if (minorname) 3271 prev_minor = i_ddi_strdup(minorname, KM_SLEEP); 3272 3273 /* Get component and chop off minorname */ 3274 (void) pn_getcomponent(&pn, component); 3275 i_ddi_parse_name(component, NULL, NULL, &minorname); 3276 3277 if (prev_minor == NULL) { 3278 (void) snprintf(config_name, MAXNAMELEN, "%s", 3279 component); 3280 } else { 3281 (void) snprintf(config_name, MAXNAMELEN, "%s:%s", 3282 component, prev_minor); 3283 kmem_free(prev_minor, strlen(prev_minor) + 1); 3284 prev_minor = NULL; 3285 } 3286 3287 /* 3288 * Find and configure the child 3289 */ 3290 if (ndi_devi_config_one(parent, config_name, &child, 3291 NDI_PROMNAME | NDI_NO_EVENT) != NDI_SUCCESS) { 3292 ndi_rele_devi(parent); 3293 pn_free(&pn); 3294 kmem_free(component, MAXNAMELEN); 3295 kmem_free(config_name, MAXNAMELEN); 3296 return (-1); 3297 } 3298 3299 ASSERT(i_ddi_node_state(child) >= DS_ATTACHED); 3300 ndi_rele_devi(parent); 3301 parent = child; 3302 pn_skipslash(&pn); 3303 } 3304 3305 /* 3306 * First look for a minor node matching minorname. 3307 * Failing that, try to pass minorname to bus_config(). 
3308 */ 3309 if (minorname && i_ddi_minorname_to_devtspectype(parent, 3310 minorname, &devt, &spectype) == DDI_FAILURE) { 3311 (void) snprintf(config_name, MAXNAMELEN, "%s", minorname); 3312 if (ndi_devi_config_obp_args(parent, 3313 config_name, &child, 0) != NDI_SUCCESS) { 3314 ndi_rele_devi(parent); 3315 pn_free(&pn); 3316 kmem_free(component, MAXNAMELEN); 3317 kmem_free(config_name, MAXNAMELEN); 3318 NDI_CONFIG_DEBUG((CE_NOTE, 3319 "%s: minor node not found\n", pathname)); 3320 return (-1); 3321 } 3322 minorname = NULL; /* look for default minor */ 3323 ASSERT(i_ddi_node_state(child) >= DS_ATTACHED); 3324 ndi_rele_devi(parent); 3325 parent = child; 3326 } 3327 3328 if (devtp || spectypep) { 3329 if (minorname == NULL) { 3330 /* search for a default entry */ 3331 mutex_enter(&(DEVI(parent)->devi_lock)); 3332 for (dmn = DEVI(parent)->devi_minor; dmn; 3333 dmn = dmn->next) { 3334 if (dmn->type == DDM_DEFAULT) { 3335 devt = dmn->ddm_dev; 3336 spectype = dmn->ddm_spec_type; 3337 break; 3338 } 3339 } 3340 3341 if (devt == NODEV) { 3342 /* 3343 * No default minor node, try the first one; 3344 * else, assume 1-1 instance-minor mapping 3345 */ 3346 dmn = DEVI(parent)->devi_minor; 3347 if (dmn && ((dmn->type == DDM_MINOR) || 3348 (dmn->type == DDM_INTERNAL_PATH))) { 3349 devt = dmn->ddm_dev; 3350 spectype = dmn->ddm_spec_type; 3351 } else { 3352 devt = makedevice( 3353 DEVI(parent)->devi_major, 3354 ddi_get_instance(parent)); 3355 spectype = S_IFCHR; 3356 } 3357 } 3358 mutex_exit(&(DEVI(parent)->devi_lock)); 3359 } 3360 if (devtp) 3361 *devtp = devt; 3362 if (spectypep) 3363 *spectypep = spectype; 3364 } 3365 3366 pn_free(&pn); 3367 kmem_free(component, MAXNAMELEN); 3368 kmem_free(config_name, MAXNAMELEN); 3369 3370 /* 3371 * If there is no error, return the appropriate parameters 3372 */ 3373 if (dipp != NULL) 3374 *dipp = parent; 3375 else { 3376 /* 3377 * We should really keep the ref count to keep the node from 3378 * detaching but ddi_pathname_to_dev_t() specifies a NULL dipp, 3379 * so we have no way of passing back the held dip. Not holding 3380 * the dip allows detaches to occur - which can cause problems 3381 * for subsystems which call ddi_pathname_to_dev_t (console). 3382 * 3383 * Instead of holding the dip, we place a ddi-no-autodetach 3384 * property on the node to prevent auto detaching. 3385 * 3386 * The right fix is to remove ddi_pathname_to_dev_t and replace 3387 * it, and all references, with a call that specifies a dipp. 3388 * In addition, the callers of this new interfaces would then 3389 * need to call ndi_rele_devi when the reference is complete. 3390 */ 3391 (void) ddi_prop_update_int(DDI_DEV_T_NONE, parent, 3392 DDI_NO_AUTODETACH, 1); 3393 ndi_rele_devi(parent); 3394 } 3395 3396 return (0); 3397 } 3398 3399 /* 3400 * Given the pathname of a device, return the dev_t of the corresponding 3401 * device. Returns NODEV on failure. 3402 * 3403 * Note that this call sets the DDI_NO_AUTODETACH property on the devinfo node. 3404 */ 3405 dev_t 3406 ddi_pathname_to_dev_t(char *pathname) 3407 { 3408 dev_t devt; 3409 int error; 3410 3411 error = resolve_pathname(pathname, NULL, &devt, NULL); 3412 3413 return (error ? NODEV : devt); 3414 } 3415 3416 /* 3417 * Translate a prom pathname to kernel devfs pathname. 3418 * Caller is assumed to allocate devfspath memory of 3419 * size at least MAXPATHLEN 3420 * 3421 * The prom pathname may not include minor name, but 3422 * devfs pathname has a minor name portion. 
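 *
 * For example (paths and minor are illustrative): a prom path such as
 *	/pci@1f,0/ide@d/dad@0,0
 * typically comes back as
 *	/pci@1f,0/ide@d/dad@0,0:a
 * once the default (or first) minor is appended, while an alias-only
 * Style-2 network node maps to the clone minor path built from
 * CLONE_PATH and the driver name.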
3423 */ 3424 int 3425 i_ddi_prompath_to_devfspath(char *prompath, char *devfspath) 3426 { 3427 dev_t devt = (dev_t)NODEV; 3428 dev_info_t *dip = NULL; 3429 char *minor_name = NULL; 3430 int spectype; 3431 int error; 3432 3433 error = resolve_pathname(prompath, &dip, &devt, &spectype); 3434 if (error) 3435 return (DDI_FAILURE); 3436 ASSERT(dip && devt != NODEV); 3437 3438 /* 3439 * Get in-kernel devfs pathname 3440 */ 3441 (void) ddi_pathname(dip, devfspath); 3442 3443 mutex_enter(&(DEVI(dip)->devi_lock)); 3444 minor_name = i_ddi_devtspectype_to_minorname(dip, devt, spectype); 3445 if (minor_name) { 3446 (void) strcat(devfspath, ":"); 3447 (void) strcat(devfspath, minor_name); 3448 } else { 3449 /* 3450 * If minor_name is NULL, we have an alias minor node. 3451 * So manufacture a path to the corresponding clone minor. 3452 */ 3453 (void) snprintf(devfspath, MAXPATHLEN, "%s:%s", 3454 CLONE_PATH, ddi_driver_name(dip)); 3455 } 3456 mutex_exit(&(DEVI(dip)->devi_lock)); 3457 3458 /* release hold from resolve_pathname() */ 3459 ndi_rele_devi(dip); 3460 return (0); 3461 } 3462 3463 /* 3464 * Reset all the pure leaf drivers on the system at halt time 3465 */ 3466 static int 3467 reset_leaf_device(dev_info_t *dip, void *arg) 3468 { 3469 _NOTE(ARGUNUSED(arg)) 3470 struct dev_ops *ops; 3471 3472 /* if the device doesn't need to be reset then there's nothing to do */ 3473 if (!DEVI_NEED_RESET(dip)) 3474 return (DDI_WALK_CONTINUE); 3475 3476 /* 3477 * if the device isn't a char/block device or doesn't have a 3478 * reset entry point then there's nothing to do. 3479 */ 3480 ops = ddi_get_driver(dip); 3481 if ((ops == NULL) || (ops->devo_cb_ops == NULL) || 3482 (ops->devo_reset == nodev) || (ops->devo_reset == nulldev) || 3483 (ops->devo_reset == NULL)) 3484 return (DDI_WALK_CONTINUE); 3485 3486 if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) { 3487 static char path[MAXPATHLEN]; 3488 3489 /* 3490 * bad news, this device has blocked in it's attach or 3491 * detach routine, which means it not safe to call it's 3492 * devo_reset() entry point. 3493 */ 3494 cmn_err(CE_WARN, "unable to reset device: %s", 3495 ddi_pathname(dip, path)); 3496 return (DDI_WALK_CONTINUE); 3497 } 3498 3499 NDI_CONFIG_DEBUG((CE_NOTE, "resetting %s%d\n", 3500 ddi_driver_name(dip), ddi_get_instance(dip))); 3501 3502 (void) devi_reset(dip, DDI_RESET_FORCE); 3503 return (DDI_WALK_CONTINUE); 3504 } 3505 3506 void 3507 reset_leaves(void) 3508 { 3509 /* 3510 * if we're reached here, the device tree better not be changing. 3511 * so either devinfo_freeze better be set or we better be panicing. 3512 */ 3513 ASSERT(devinfo_freeze || panicstr); 3514 3515 (void) walk_devs(top_devinfo, reset_leaf_device, NULL, 0); 3516 } 3517 3518 /* 3519 * devtree_freeze() must be called before reset_leaves() during a 3520 * normal system shutdown. It attempts to ensure that there are no 3521 * outstanding attach or detach operations in progress when reset_leaves() 3522 * is invoked. It must be called before the system becomes single-threaded 3523 * because device attach and detach are multi-threaded operations. (note 3524 * that during system shutdown the system doesn't actually become 3525 * single-thread since other threads still exist, but the shutdown thread 3526 * will disable preemption for itself, raise it's pil, and stop all the 3527 * other cpus in the system there by effectively making the system 3528 * single-threaded.) 
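 *
 * Shutdown-path ordering, as described above (sketch):
 *
 *	devtree_freeze();	-- block dev_info state changes and wait
 *				-- briefly for in-flight attach/detach
 *	reset_leaves();		-- then reset the pure leaf devices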
3529 */ 3530 void 3531 devtree_freeze(void) 3532 { 3533 int delayed = 0; 3534 3535 /* if we're panicing then the device tree isn't going to be changing */ 3536 if (panicstr) 3537 return; 3538 3539 /* stop all dev_info state changes in the device tree */ 3540 devinfo_freeze = gethrtime(); 3541 3542 /* 3543 * if we're not panicing and there are on-going attach or detach 3544 * operations, wait for up to 3 seconds for them to finish. This 3545 * is a randomly chosen interval but this should be ok because: 3546 * - 3 seconds is very small relative to the deadman timer. 3547 * - normal attach and detach operations should be very quick. 3548 * - attach and detach operations are fairly rare. 3549 */ 3550 while (!panicstr && atomic_add_long_nv(&devinfo_attach_detach, 0) && 3551 (delayed < 3)) { 3552 delayed += 1; 3553 3554 /* do a sleeping wait for one second */ 3555 ASSERT(!servicing_interrupt()); 3556 delay(drv_usectohz(MICROSEC)); 3557 } 3558 } 3559 3560 static int 3561 bind_dip(dev_info_t *dip, void *arg) 3562 { 3563 _NOTE(ARGUNUSED(arg)) 3564 if (i_ddi_node_state(dip) < DS_BOUND) 3565 (void) ndi_devi_bind_driver(dip, 0); 3566 3567 return (DDI_WALK_CONTINUE); 3568 } 3569 3570 void 3571 i_ddi_bind_devs(void) 3572 { 3573 ddi_walk_devs(top_devinfo, bind_dip, (void *)NULL); 3574 } 3575 3576 static int 3577 unbind_children(dev_info_t *dip, void *arg) 3578 { 3579 int circ; 3580 dev_info_t *cdip; 3581 major_t major = (major_t)(uintptr_t)arg; 3582 3583 ndi_devi_enter(dip, &circ); 3584 cdip = ddi_get_child(dip); 3585 /* 3586 * We are called either from rem_drv or update_drv. 3587 * In both cases, we unbind persistent nodes and destroy 3588 * .conf nodes. In the case of rem_drv, this will be the 3589 * final state. In the case of update_drv, i_ddi_bind_devs() 3590 * will be invoked later to reenumerate (new) driver.conf 3591 * rebind persistent nodes. 
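 *
 * (Only children bound to 'major' and at or below DS_INITIALIZED are
 * touched by the loop below; of those, .conf children are removed
 * outright while persistent nodes are merely unbound.)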
3592 */ 3593 while (cdip) { 3594 dev_info_t *next = ddi_get_next_sibling(cdip); 3595 if ((i_ddi_node_state(cdip) > DS_INITIALIZED) || 3596 (ddi_driver_major(cdip) != major)) { 3597 cdip = next; 3598 continue; 3599 } 3600 (void) ndi_devi_unbind_driver(cdip); 3601 if (ndi_dev_is_persistent_node(cdip) == 0) 3602 (void) ddi_remove_child(cdip, 0); 3603 cdip = next; 3604 } 3605 ndi_devi_exit(dip, circ); 3606 3607 return (DDI_WALK_CONTINUE); 3608 } 3609 3610 void 3611 i_ddi_unbind_devs(major_t major) 3612 { 3613 ddi_walk_devs(top_devinfo, unbind_children, (void *)(uintptr_t)major); 3614 } 3615 3616 /* 3617 * I/O Hotplug control 3618 */ 3619 3620 /* 3621 * create and attach a dev_info node from a .conf file spec 3622 */ 3623 static void 3624 init_spec_child(dev_info_t *pdip, struct hwc_spec *specp, uint_t flags) 3625 { 3626 _NOTE(ARGUNUSED(flags)) 3627 dev_info_t *dip; 3628 char *node_name; 3629 3630 if (((node_name = specp->hwc_devi_name) == NULL) || 3631 (ddi_name_to_major(node_name) == (major_t)-1)) { 3632 char *tmp = node_name; 3633 if (tmp == NULL) 3634 tmp = "<none>"; 3635 cmn_err(CE_CONT, 3636 "init_spec_child: parent=%s, bad spec (%s)\n", 3637 ddi_node_name(pdip), tmp); 3638 return; 3639 } 3640 3641 dip = i_ddi_alloc_node(pdip, node_name, (dnode_t)DEVI_PSEUDO_NODEID, 3642 -1, specp->hwc_devi_sys_prop_ptr, KM_SLEEP); 3643 3644 if (dip == NULL) 3645 return; 3646 3647 if (ddi_initchild(pdip, dip) != DDI_SUCCESS) 3648 (void) ddi_remove_child(dip, 0); 3649 } 3650 3651 /* 3652 * Lookup hwc specs from hash tables and make children from the spec 3653 * Because some .conf children are "merge" nodes, we also initialize 3654 * .conf children to merge properties onto hardware nodes. 3655 * 3656 * The pdip must be held busy. 3657 */ 3658 int 3659 i_ndi_make_spec_children(dev_info_t *pdip, uint_t flags) 3660 { 3661 extern struct hwc_spec *hwc_get_child_spec(dev_info_t *, major_t); 3662 int circ; 3663 struct hwc_spec *list, *spec; 3664 3665 ndi_devi_enter(pdip, &circ); 3666 if (DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN) { 3667 ndi_devi_exit(pdip, circ); 3668 return (DDI_SUCCESS); 3669 } 3670 3671 list = hwc_get_child_spec(pdip, (major_t)-1); 3672 for (spec = list; spec != NULL; spec = spec->hwc_next) { 3673 init_spec_child(pdip, spec, flags); 3674 } 3675 hwc_free_spec_list(list); 3676 3677 mutex_enter(&DEVI(pdip)->devi_lock); 3678 DEVI(pdip)->devi_flags |= DEVI_MADE_CHILDREN; 3679 mutex_exit(&DEVI(pdip)->devi_lock); 3680 ndi_devi_exit(pdip, circ); 3681 return (DDI_SUCCESS); 3682 } 3683 3684 /* 3685 * Run initchild on all child nodes such that instance assignment 3686 * for multiport network cards are contiguous. 3687 * 3688 * The pdip must be held busy. 
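 *
 * e.g. (illustrative): the ports of a quad-port network card under the
 * same pdip end up with back-to-back instance numbers because the
 * entire child list is initialized inside one e_ddi_enter_instance()/
 * e_ddi_exit_instance() critical section.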
3689 */ 3690 static void 3691 i_ndi_init_hw_children(dev_info_t *pdip, uint_t flags) 3692 { 3693 dev_info_t *dip; 3694 3695 ASSERT(DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN); 3696 3697 /* contiguous instance assignment */ 3698 e_ddi_enter_instance(); 3699 dip = ddi_get_child(pdip); 3700 while (dip) { 3701 if (ndi_dev_is_persistent_node(dip)) 3702 (void) i_ndi_config_node(dip, DS_INITIALIZED, flags); 3703 dip = ddi_get_next_sibling(dip); 3704 } 3705 e_ddi_exit_instance(); 3706 } 3707 3708 /* 3709 * report device status 3710 */ 3711 static void 3712 i_ndi_devi_report_status_change(dev_info_t *dip, char *path) 3713 { 3714 char *status; 3715 3716 if (!DEVI_NEED_REPORT(dip) || 3717 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 3718 return; 3719 } 3720 3721 if (DEVI_IS_DEVICE_OFFLINE(dip)) { 3722 status = "offline"; 3723 } else if (DEVI_IS_DEVICE_DOWN(dip)) { 3724 status = "down"; 3725 } else if (DEVI_IS_BUS_QUIESCED(dip)) { 3726 status = "quiesced"; 3727 } else if (DEVI_IS_BUS_DOWN(dip)) { 3728 status = "down"; 3729 } else if (i_ddi_node_state(dip) == DS_READY) { 3730 status = "online"; 3731 } else { 3732 status = "unknown"; 3733 } 3734 3735 if (path == NULL) { 3736 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3737 cmn_err(CE_CONT, "?%s (%s%d) %s\n", 3738 ddi_pathname(dip, path), ddi_driver_name(dip), 3739 ddi_get_instance(dip), status); 3740 kmem_free(path, MAXPATHLEN); 3741 } else { 3742 cmn_err(CE_CONT, "?%s (%s%d) %s\n", 3743 path, ddi_driver_name(dip), 3744 ddi_get_instance(dip), status); 3745 } 3746 3747 DEVI_REPORT_DONE(dip); 3748 } 3749 3750 /* 3751 * log a notification that a dev_info node has been configured. 3752 */ 3753 static int 3754 i_log_devfs_add_devinfo(dev_info_t *dip, uint_t flags) 3755 { 3756 int se_err; 3757 char *pathname; 3758 sysevent_t *ev; 3759 sysevent_id_t eid; 3760 sysevent_value_t se_val; 3761 sysevent_attr_list_t *ev_attr_list = NULL; 3762 char *class_name; 3763 int no_transport = 0; 3764 3765 ASSERT(dip); 3766 3767 /* 3768 * Invalidate the devinfo snapshot cache 3769 */ 3770 i_ddi_di_cache_invalidate(KM_SLEEP); 3771 3772 /* do not generate ESC_DEVFS_DEVI_ADD event during boot */ 3773 if (!i_ddi_io_initialized()) 3774 return (DDI_SUCCESS); 3775 3776 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_DEVI_ADD, EP_DDI, SE_SLEEP); 3777 3778 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3779 3780 (void) ddi_pathname(dip, pathname); 3781 ASSERT(strlen(pathname)); 3782 3783 se_val.value_type = SE_DATA_TYPE_STRING; 3784 se_val.value.sv_string = pathname; 3785 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 3786 &se_val, SE_SLEEP) != 0) { 3787 goto fail; 3788 } 3789 3790 /* add the device class attribute */ 3791 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 3792 se_val.value_type = SE_DATA_TYPE_STRING; 3793 se_val.value.sv_string = class_name; 3794 3795 if (sysevent_add_attr(&ev_attr_list, 3796 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 3797 sysevent_free_attr(ev_attr_list); 3798 goto fail; 3799 } 3800 } 3801 3802 /* 3803 * must log a branch event too unless NDI_BRANCH_EVENT_OP is set, 3804 * in which case the branch event will be logged by the caller 3805 * after the entire branch has been configured. 3806 */ 3807 if ((flags & NDI_BRANCH_EVENT_OP) == 0) { 3808 /* 3809 * Instead of logging a separate branch event just add 3810 * DEVFS_BRANCH_EVENT attribute. It indicates devfsadmd to 3811 * generate a EC_DEV_BRANCH event. 
3812 */ 3813 se_val.value_type = SE_DATA_TYPE_INT32; 3814 se_val.value.sv_int32 = 1; 3815 if (sysevent_add_attr(&ev_attr_list, 3816 DEVFS_BRANCH_EVENT, &se_val, SE_SLEEP) != 0) { 3817 sysevent_free_attr(ev_attr_list); 3818 goto fail; 3819 } 3820 } 3821 3822 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 3823 sysevent_free_attr(ev_attr_list); 3824 goto fail; 3825 } 3826 3827 if ((se_err = log_sysevent(ev, SE_SLEEP, &eid)) != 0) { 3828 if (se_err == SE_NO_TRANSPORT) 3829 no_transport = 1; 3830 goto fail; 3831 } 3832 3833 sysevent_free(ev); 3834 kmem_free(pathname, MAXPATHLEN); 3835 3836 return (DDI_SUCCESS); 3837 3838 fail: 3839 cmn_err(CE_WARN, "failed to log ESC_DEVFS_DEVI_ADD event for %s%s", 3840 pathname, (no_transport) ? " (syseventd not responding)" : ""); 3841 3842 cmn_err(CE_WARN, "/dev may not be current for driver %s. " 3843 "Run devfsadm -i %s", 3844 ddi_driver_name(dip), ddi_driver_name(dip)); 3845 3846 sysevent_free(ev); 3847 kmem_free(pathname, MAXPATHLEN); 3848 return (DDI_SUCCESS); 3849 } 3850 3851 /* 3852 * log a notification that a dev_info node has been unconfigured. 3853 */ 3854 static int 3855 i_log_devfs_remove_devinfo(char *pathname, char *class_name, char *driver_name, 3856 int instance, uint_t flags) 3857 { 3858 sysevent_t *ev; 3859 sysevent_id_t eid; 3860 sysevent_value_t se_val; 3861 sysevent_attr_list_t *ev_attr_list = NULL; 3862 int se_err; 3863 int no_transport = 0; 3864 3865 i_ddi_di_cache_invalidate(KM_SLEEP); 3866 3867 if (!i_ddi_io_initialized()) 3868 return (DDI_SUCCESS); 3869 3870 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_DEVI_REMOVE, EP_DDI, SE_SLEEP); 3871 3872 se_val.value_type = SE_DATA_TYPE_STRING; 3873 se_val.value.sv_string = pathname; 3874 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 3875 &se_val, SE_SLEEP) != 0) { 3876 goto fail; 3877 } 3878 3879 if (class_name) { 3880 /* add the device class, driver name and instance attributes */ 3881 3882 se_val.value_type = SE_DATA_TYPE_STRING; 3883 se_val.value.sv_string = class_name; 3884 if (sysevent_add_attr(&ev_attr_list, 3885 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 3886 sysevent_free_attr(ev_attr_list); 3887 goto fail; 3888 } 3889 3890 se_val.value_type = SE_DATA_TYPE_STRING; 3891 se_val.value.sv_string = driver_name; 3892 if (sysevent_add_attr(&ev_attr_list, 3893 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 3894 sysevent_free_attr(ev_attr_list); 3895 goto fail; 3896 } 3897 3898 se_val.value_type = SE_DATA_TYPE_INT32; 3899 se_val.value.sv_int32 = instance; 3900 if (sysevent_add_attr(&ev_attr_list, 3901 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 3902 sysevent_free_attr(ev_attr_list); 3903 goto fail; 3904 } 3905 } 3906 3907 /* 3908 * must log a branch event too unless NDI_BRANCH_EVENT_OP is set, 3909 * in which case the branch event will be logged by the caller 3910 * after the entire branch has been unconfigured. 3911 */ 3912 if ((flags & NDI_BRANCH_EVENT_OP) == 0) { 3913 /* 3914 * Instead of logging a separate branch event just add 3915 * DEVFS_BRANCH_EVENT attribute. It indicates devfsadmd to 3916 * generate a EC_DEV_BRANCH event. 
3917 */ 3918 se_val.value_type = SE_DATA_TYPE_INT32; 3919 se_val.value.sv_int32 = 1; 3920 if (sysevent_add_attr(&ev_attr_list, 3921 DEVFS_BRANCH_EVENT, &se_val, SE_SLEEP) != 0) { 3922 sysevent_free_attr(ev_attr_list); 3923 goto fail; 3924 } 3925 } 3926 3927 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 3928 sysevent_free_attr(ev_attr_list); 3929 goto fail; 3930 } 3931 3932 if ((se_err = log_sysevent(ev, SE_SLEEP, &eid)) != 0) { 3933 if (se_err == SE_NO_TRANSPORT) 3934 no_transport = 1; 3935 goto fail; 3936 } 3937 3938 sysevent_free(ev); 3939 return (DDI_SUCCESS); 3940 3941 fail: 3942 sysevent_free(ev); 3943 cmn_err(CE_WARN, "failed to log ESC_DEVFS_DEVI_REMOVE event for %s%s", 3944 pathname, (no_transport) ? " (syseventd not responding)" : ""); 3945 return (DDI_SUCCESS); 3946 } 3947 3948 /* 3949 * log an event that a dev_info branch has been configured or unconfigured. 3950 */ 3951 static int 3952 i_log_devfs_branch(char *node_path, char *subclass) 3953 { 3954 int se_err; 3955 sysevent_t *ev; 3956 sysevent_id_t eid; 3957 sysevent_value_t se_val; 3958 sysevent_attr_list_t *ev_attr_list = NULL; 3959 int no_transport = 0; 3960 3961 /* do not generate the event during boot */ 3962 if (!i_ddi_io_initialized()) 3963 return (DDI_SUCCESS); 3964 3965 ev = sysevent_alloc(EC_DEVFS, subclass, EP_DDI, SE_SLEEP); 3966 3967 se_val.value_type = SE_DATA_TYPE_STRING; 3968 se_val.value.sv_string = node_path; 3969 3970 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 3971 &se_val, SE_SLEEP) != 0) { 3972 goto fail; 3973 } 3974 3975 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 3976 sysevent_free_attr(ev_attr_list); 3977 goto fail; 3978 } 3979 3980 if ((se_err = log_sysevent(ev, SE_SLEEP, &eid)) != 0) { 3981 if (se_err == SE_NO_TRANSPORT) 3982 no_transport = 1; 3983 goto fail; 3984 } 3985 3986 sysevent_free(ev); 3987 return (DDI_SUCCESS); 3988 3989 fail: 3990 cmn_err(CE_WARN, "failed to log %s branch event for %s%s", 3991 subclass, node_path, 3992 (no_transport) ? " (syseventd not responding)" : ""); 3993 3994 sysevent_free(ev); 3995 return (DDI_FAILURE); 3996 } 3997 3998 /* 3999 * log an event that a dev_info tree branch has been configured. 4000 */ 4001 static int 4002 i_log_devfs_branch_add(dev_info_t *dip) 4003 { 4004 char *node_path; 4005 int rv; 4006 4007 node_path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4008 (void) ddi_pathname(dip, node_path); 4009 rv = i_log_devfs_branch(node_path, ESC_DEVFS_BRANCH_ADD); 4010 kmem_free(node_path, MAXPATHLEN); 4011 4012 return (rv); 4013 } 4014 4015 /* 4016 * log an event that a dev_info tree branch has been unconfigured. 4017 */ 4018 static int 4019 i_log_devfs_branch_remove(char *node_path) 4020 { 4021 return (i_log_devfs_branch(node_path, ESC_DEVFS_BRANCH_REMOVE)); 4022 } 4023 4024 /* 4025 * enqueue the dip's deviname on the branch event queue. 4026 */ 4027 static struct brevq_node * 4028 brevq_enqueue(struct brevq_node **brevqp, dev_info_t *dip, 4029 struct brevq_node *child) 4030 { 4031 struct brevq_node *brn; 4032 char *deviname; 4033 4034 deviname = kmem_alloc(MAXNAMELEN, KM_SLEEP); 4035 (void) ddi_deviname(dip, deviname); 4036 4037 brn = kmem_zalloc(sizeof (*brn), KM_SLEEP); 4038 brn->deviname = i_ddi_strdup(deviname, KM_SLEEP); 4039 kmem_free(deviname, MAXNAMELEN); 4040 brn->child = child; 4041 brn->sibling = *brevqp; 4042 *brevqp = brn; 4043 4044 return (brn); 4045 } 4046 4047 /* 4048 * free the memory allocated for the elements on the branch event queue. 
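 *
 * (Each queue is singly linked through brn->sibling; any per-child
 * queue hanging off brn->child must already have been logged or freed
 * and detached by this point, which the ASSERT below checks.)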
4049 */ 4050 static void 4051 free_brevq(struct brevq_node *brevq) 4052 { 4053 struct brevq_node *brn, *next_brn; 4054 4055 for (brn = brevq; brn != NULL; brn = next_brn) { 4056 next_brn = brn->sibling; 4057 ASSERT(brn->child == NULL); 4058 kmem_free(brn->deviname, strlen(brn->deviname) + 1); 4059 kmem_free(brn, sizeof (*brn)); 4060 } 4061 } 4062 4063 /* 4064 * log the events queued up on the branch event queue and free the 4065 * associated memory. 4066 * 4067 * node_path must have been allocated with at least MAXPATHLEN bytes. 4068 */ 4069 static void 4070 log_and_free_brevq(char *node_path, struct brevq_node *brevq) 4071 { 4072 struct brevq_node *brn; 4073 char *p; 4074 4075 p = node_path + strlen(node_path); 4076 for (brn = brevq; brn != NULL; brn = brn->sibling) { 4077 (void) strcpy(p, brn->deviname); 4078 (void) i_log_devfs_branch_remove(node_path); 4079 } 4080 *p = '\0'; 4081 4082 free_brevq(brevq); 4083 } 4084 4085 /* 4086 * log the events queued up on the branch event queue and free the 4087 * associated memory. Same as the previous function but operates on dip. 4088 */ 4089 static void 4090 log_and_free_brevq_dip(dev_info_t *dip, struct brevq_node *brevq) 4091 { 4092 char *path; 4093 4094 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4095 (void) ddi_pathname(dip, path); 4096 log_and_free_brevq(path, brevq); 4097 kmem_free(path, MAXPATHLEN); 4098 } 4099 4100 /* 4101 * log the outstanding branch remove events for the grand children of the dip 4102 * and free the associated memory. 4103 */ 4104 static void 4105 log_and_free_br_events_on_grand_children(dev_info_t *dip, 4106 struct brevq_node *brevq) 4107 { 4108 struct brevq_node *brn; 4109 char *path; 4110 char *p; 4111 4112 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4113 (void) ddi_pathname(dip, path); 4114 p = path + strlen(path); 4115 for (brn = brevq; brn != NULL; brn = brn->sibling) { 4116 if (brn->child) { 4117 (void) strcpy(p, brn->deviname); 4118 /* now path contains the node path to the dip's child */ 4119 log_and_free_brevq(path, brn->child); 4120 brn->child = NULL; 4121 } 4122 } 4123 kmem_free(path, MAXPATHLEN); 4124 } 4125 4126 /* 4127 * log and cleanup branch remove events for the grand children of the dip. 4128 */ 4129 static void 4130 cleanup_br_events_on_grand_children(dev_info_t *dip, struct brevq_node **brevqp) 4131 { 4132 dev_info_t *child; 4133 struct brevq_node *brevq, *brn, *prev_brn, *next_brn; 4134 char *path; 4135 int circ; 4136 4137 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4138 prev_brn = NULL; 4139 brevq = *brevqp; 4140 4141 ndi_devi_enter(dip, &circ); 4142 for (brn = brevq; brn != NULL; brn = next_brn) { 4143 next_brn = brn->sibling; 4144 for (child = ddi_get_child(dip); child != NULL; 4145 child = ddi_get_next_sibling(child)) { 4146 if (i_ddi_node_state(child) >= DS_INITIALIZED) { 4147 (void) ddi_deviname(child, path); 4148 if (strcmp(path, brn->deviname) == 0) 4149 break; 4150 } 4151 } 4152 4153 if (child != NULL && !(DEVI_EVREMOVE(child))) { 4154 /* 4155 * Event state is not REMOVE. So branch remove event 4156 * is not going be generated on brn->child. 4157 * If any branch remove events were queued up on 4158 * brn->child log them and remove the brn 4159 * from the queue. 
4160 */ 4161 if (brn->child) { 4162 (void) ddi_pathname(dip, path); 4163 (void) strcat(path, brn->deviname); 4164 log_and_free_brevq(path, brn->child); 4165 } 4166 4167 if (prev_brn) 4168 prev_brn->sibling = next_brn; 4169 else 4170 *brevqp = next_brn; 4171 4172 kmem_free(brn->deviname, strlen(brn->deviname) + 1); 4173 kmem_free(brn, sizeof (*brn)); 4174 } else { 4175 /* 4176 * Free up the outstanding branch remove events 4177 * queued on brn->child since brn->child 4178 * itself is eligible for branch remove event. 4179 */ 4180 if (brn->child) { 4181 free_brevq(brn->child); 4182 brn->child = NULL; 4183 } 4184 prev_brn = brn; 4185 } 4186 } 4187 4188 ndi_devi_exit(dip, circ); 4189 kmem_free(path, MAXPATHLEN); 4190 } 4191 4192 static int 4193 need_remove_event(dev_info_t *dip, int flags) 4194 { 4195 if ((flags & (NDI_NO_EVENT | NDI_AUTODETACH)) == 0 && 4196 (flags & (NDI_DEVI_OFFLINE | NDI_UNCONFIG | NDI_DEVI_REMOVE)) && 4197 !(DEVI_EVREMOVE(dip))) 4198 return (1); 4199 else 4200 return (0); 4201 } 4202 4203 /* 4204 * Unconfigure children/descendants of the dip. 4205 * 4206 * If the operation involves a branch event NDI_BRANCH_EVENT_OP is set 4207 * through out the unconfiguration. On successful return *brevqp is set to 4208 * a queue of dip's child devinames for which branch remove events need 4209 * to be generated. 4210 */ 4211 static int 4212 devi_unconfig_branch(dev_info_t *dip, dev_info_t **dipp, int flags, 4213 struct brevq_node **brevqp) 4214 { 4215 int rval; 4216 4217 *brevqp = NULL; 4218 4219 if ((!(flags & NDI_BRANCH_EVENT_OP)) && need_remove_event(dip, flags)) 4220 flags |= NDI_BRANCH_EVENT_OP; 4221 4222 if (flags & NDI_BRANCH_EVENT_OP) { 4223 rval = devi_unconfig_common(dip, dipp, flags, (major_t)-1, 4224 brevqp); 4225 4226 if (rval != NDI_SUCCESS && (*brevqp)) { 4227 log_and_free_brevq_dip(dip, *brevqp); 4228 *brevqp = NULL; 4229 } 4230 } else 4231 rval = devi_unconfig_common(dip, dipp, flags, (major_t)-1, 4232 NULL); 4233 4234 return (rval); 4235 } 4236 4237 /* 4238 * If the dip is already bound to a driver transition to DS_INITIALIZED 4239 * in order to generate an event in the case where the node was left in 4240 * DS_BOUND state since boot (never got attached) and the node is now 4241 * being offlined. 4242 */ 4243 static void 4244 init_bound_node_ev(dev_info_t *pdip, dev_info_t *dip, int flags) 4245 { 4246 if (need_remove_event(dip, flags) && 4247 i_ddi_node_state(dip) == DS_BOUND && 4248 i_ddi_node_state(pdip) >= DS_ATTACHED && 4249 !(DEVI_IS_DEVICE_OFFLINE(dip))) 4250 (void) ddi_initchild(pdip, dip); 4251 } 4252 4253 /* 4254 * attach a node/branch with parent already held busy 4255 */ 4256 static int 4257 devi_attach_node(dev_info_t *dip, uint_t flags) 4258 { 4259 if (flags & NDI_DEVI_ONLINE) { 4260 DEVI_SET_DEVICE_ONLINE(dip); 4261 } 4262 4263 if (DEVI_IS_DEVICE_OFFLINE(dip)) { 4264 return (NDI_FAILURE); 4265 } 4266 4267 if (i_ddi_attachchild(dip) != DDI_SUCCESS) { 4268 DEVI_SET_EVUNINIT(dip); 4269 if (ndi_dev_is_persistent_node(dip)) 4270 (void) ddi_uninitchild(dip); 4271 else { 4272 /* 4273 * Delete .conf nodes and nodes that are not 4274 * well formed. 4275 */ 4276 (void) ddi_remove_child(dip, 0); 4277 } 4278 return (NDI_FAILURE); 4279 } 4280 4281 i_ndi_devi_report_status_change(dip, NULL); 4282 4283 /* 4284 * log an event, but not during devfs lookups in which case 4285 * NDI_NO_EVENT is set. 
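 * Even when the event itself is suppressed, the node's event state is
 * still advanced to EVADD unless NDI_NO_EVENT_STATE_CHNG is also set
 * (ndi_devi_online() sets that flag when called with NDI_NO_EVENT).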
4286 */ 4287 if ((flags & NDI_NO_EVENT) == 0 && !(DEVI_EVADD(dip))) { 4288 (void) i_log_devfs_add_devinfo(dip, flags); 4289 DEVI_SET_EVADD(dip); 4290 } else if (!(flags & NDI_NO_EVENT_STATE_CHNG)) 4291 DEVI_SET_EVADD(dip); 4292 4293 return (NDI_SUCCESS); 4294 } 4295 4296 /* 4297 * Configure all children of a nexus, assuming all spec children have 4298 * been made. 4299 */ 4300 static int 4301 devi_attach_children(dev_info_t *pdip, uint_t flags, major_t major) 4302 { 4303 dev_info_t *dip; 4304 4305 ASSERT(DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN); 4306 4307 dip = ddi_get_child(pdip); 4308 while (dip) { 4309 /* 4310 * NOTE: devi_attach_node() may remove the dip 4311 */ 4312 dev_info_t *next = ddi_get_next_sibling(dip); 4313 4314 /* 4315 * Configure all nexus nodes or leaf nodes with 4316 * matching driver major 4317 */ 4318 if ((major == (major_t)-1) || 4319 (major == ddi_driver_major(dip)) || 4320 ((flags & NDI_CONFIG) && (is_leaf_node(dip) == 0))) 4321 (void) devi_attach_node(dip, flags); 4322 dip = next; 4323 } 4324 4325 return (NDI_SUCCESS); 4326 } 4327 4328 /* internal function to config immediate children */ 4329 static int 4330 config_immediate_children(dev_info_t *pdip, uint_t flags, major_t major) 4331 { 4332 int circ; 4333 ASSERT(i_ddi_node_state(pdip) >= DS_ATTACHED); 4334 4335 if (!NEXUS_DRV(ddi_get_driver(pdip))) 4336 return (NDI_SUCCESS); 4337 4338 NDI_CONFIG_DEBUG((CE_CONT, 4339 "config_immediate_children: %s%d (%p), flags=%x\n", 4340 ddi_driver_name(pdip), ddi_get_instance(pdip), 4341 (void *)pdip, flags)); 4342 4343 ndi_devi_enter(pdip, &circ); 4344 4345 if (flags & NDI_CONFIG_REPROBE) { 4346 mutex_enter(&DEVI(pdip)->devi_lock); 4347 DEVI(pdip)->devi_flags &= ~DEVI_MADE_CHILDREN; 4348 mutex_exit(&DEVI(pdip)->devi_lock); 4349 } 4350 (void) i_ndi_make_spec_children(pdip, flags); 4351 i_ndi_init_hw_children(pdip, flags); 4352 (void) devi_attach_children(pdip, flags, major); 4353 4354 ndi_devi_exit(pdip, circ); 4355 4356 return (NDI_SUCCESS); 4357 } 4358 4359 /* internal function to config grand children */ 4360 static int 4361 config_grand_children(dev_info_t *pdip, uint_t flags, major_t major) 4362 { 4363 struct mt_config_handle *hdl; 4364 4365 /* multi-threaded configuration of child nexus */ 4366 hdl = mt_config_init(pdip, NULL, flags, major, MT_CONFIG_OP, NULL); 4367 mt_config_children(hdl); 4368 4369 return (mt_config_fini(hdl)); /* wait for threads to exit */ 4370 } 4371 4372 /* 4373 * Common function for device tree configuration, 4374 * either BUS_CONFIG_ALL or BUS_CONFIG_DRIVER. 4375 * The NDI_CONFIG flag causes recursive configuration of 4376 * grandchildren, devfs usage should not recurse. 4377 */ 4378 static int 4379 devi_config_common(dev_info_t *dip, int flags, major_t major) 4380 { 4381 int error; 4382 int (*f)(); 4383 4384 if (i_ddi_node_state(dip) < DS_READY) 4385 return (NDI_FAILURE); 4386 4387 if (pm_pre_config(dip, NULL) != DDI_SUCCESS) 4388 return (NDI_FAILURE); 4389 4390 if ((DEVI(dip)->devi_ops->devo_bus_ops == NULL) || 4391 (DEVI(dip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 4392 (f = DEVI(dip)->devi_ops->devo_bus_ops->bus_config) == NULL) { 4393 error = config_immediate_children(dip, flags, major); 4394 } else { 4395 /* call bus_config entry point */ 4396 ddi_bus_config_op_t bus_op = (major == (major_t)-1) ? 
4397 BUS_CONFIG_ALL : BUS_CONFIG_DRIVER; 4398 error = (*f)(dip, 4399 flags, bus_op, (void *)(uintptr_t)major, NULL, 0); 4400 } 4401 4402 if (error) { 4403 pm_post_config(dip, NULL); 4404 return (error); 4405 } 4406 4407 /* 4408 * Some callers, notably SCSI, need to mark the devfs cache 4409 * to be rebuilt together with the config operation. 4410 */ 4411 if (flags & NDI_DEVFS_CLEAN) 4412 (void) devfs_clean(dip, NULL, 0); 4413 4414 if (flags & NDI_CONFIG) 4415 (void) config_grand_children(dip, flags, major); 4416 4417 pm_post_config(dip, NULL); 4418 4419 return (NDI_SUCCESS); 4420 } 4421 4422 /* 4423 * Framework entry point for BUS_CONFIG_ALL 4424 */ 4425 int 4426 ndi_devi_config(dev_info_t *dip, int flags) 4427 { 4428 NDI_CONFIG_DEBUG((CE_CONT, 4429 "ndi_devi_config: par = %s%d (%p), flags = 0x%x\n", 4430 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4431 4432 return (devi_config_common(dip, flags, (major_t)-1)); 4433 } 4434 4435 /* 4436 * Framework entry point for BUS_CONFIG_DRIVER, bound to major 4437 */ 4438 int 4439 ndi_devi_config_driver(dev_info_t *dip, int flags, major_t major) 4440 { 4441 /* don't abuse this function */ 4442 ASSERT(major != (major_t)-1); 4443 4444 NDI_CONFIG_DEBUG((CE_CONT, 4445 "ndi_devi_config_driver: par = %s%d (%p), flags = 0x%x\n", 4446 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4447 4448 return (devi_config_common(dip, flags, major)); 4449 } 4450 4451 /* 4452 * called by nexus drivers to configure/unconfigure their children 4453 */ 4454 static int 4455 devi_config_one(dev_info_t *pdip, char *devnm, dev_info_t **dipp, 4456 uint_t flags, clock_t timeout) 4457 { 4458 int circ, probed, rv; 4459 dev_info_t *dip = NULL; 4460 char *name, *addr, *drivername = NULL; 4461 clock_t end_time; /* 60 sec */ 4462 4463 if (!NEXUS_DRV(ddi_get_driver(pdip))) 4464 return (NDI_FAILURE); 4465 4466 if (MDI_PHCI(pdip)) { 4467 /* Call mdi_ to configure the child */ 4468 rv = mdi_devi_config_one(pdip, devnm, dipp, flags, timeout); 4469 if (rv == MDI_SUCCESS) 4470 return (NDI_SUCCESS); 4471 4472 /* 4473 * Normally, we should return failure here. 4474 * 4475 * Leadville implemented an unfortunate fallback mechanism. 4476 * If a target is non-standard and scsi_vhci doesn't know 4477 * how to do failover, then the node is enumerated under 4478 * phci. Leadville specifies NDI_MDI_FALLBACK flag to 4479 * maintain the old behavior. 4480 */ 4481 if ((flags & NDI_MDI_FALLBACK) == 0) 4482 return (NDI_FAILURE); 4483 } 4484 4485 /* split name into "name@addr" parts */ 4486 i_ddi_parse_name(devnm, &name, &addr, NULL); 4487 4488 if (flags & NDI_PROMNAME) { 4489 /* 4490 * We may have a genericname on a system that creates 4491 * drivername nodes (from .conf files). Find the drivername 4492 * by nodeid. If we can't find a node with devnm as the 4493 * node name then we search by drivername. This allows an 4494 * implementation to supply a generically named boot path (disk) 4495 * and locate drivername nodes (sd). 4496 */ 4497 drivername = child_path_to_driver(pdip, name, addr); 4498 } 4499 4500 if (timeout > 0) { 4501 end_time = ddi_get_lbolt() + timeout; 4502 } 4503 4504 ndi_devi_enter(pdip, &circ); 4505 4506 reprobe: 4507 probed = (DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN); 4508 (void) i_ndi_make_spec_children(pdip, flags); 4509 for (;;) { 4510 dip = find_child_by_name(pdip, name, addr); 4511 /* 4512 * Search for a node bound to the drivername driver with 4513 * the specified "@addr".
4514 */ 4515 if (dip == NULL && drivername) 4516 dip = find_child_by_driver(pdip, drivername, addr); 4517 4518 if (dip || timeout <= 0 || ddi_get_lbolt() >= end_time) 4519 break; 4520 4521 /* 4522 * Wait up to end_time for asynchronous enumeration 4523 */ 4524 ndi_devi_exit(pdip, circ); 4525 NDI_DEBUG(flags, (CE_CONT, 4526 "%s%d: waiting for child %s@%s, timeout %ld", 4527 ddi_driver_name(pdip), ddi_get_instance(pdip), 4528 name, addr, timeout)); 4529 4530 mutex_enter(&DEVI(pdip)->devi_lock); 4531 (void) cv_timedwait(&DEVI(pdip)->devi_cv, 4532 &DEVI(pdip)->devi_lock, end_time); 4533 mutex_exit(&DEVI(pdip)->devi_lock); 4534 ndi_devi_enter(pdip, &circ); 4535 (void) i_ndi_make_spec_children(pdip, flags); 4536 } 4537 4538 if ((dip == NULL) && probed && (flags & NDI_CONFIG_REPROBE) && 4539 i_ddi_io_initialized()) { 4540 /* 4541 * reenumerate .conf nodes and probe again 4542 */ 4543 mutex_enter(&DEVI(pdip)->devi_lock); 4544 DEVI(pdip)->devi_flags &= ~DEVI_MADE_CHILDREN; 4545 mutex_exit(&DEVI(pdip)->devi_lock); 4546 goto reprobe; 4547 } 4548 4549 if (addr[0] != '\0') 4550 *(addr - 1) = '@'; 4551 4552 if (dip == NULL || devi_attach_node(dip, flags) != NDI_SUCCESS) { 4553 ndi_devi_exit(pdip, circ); 4554 return (NDI_FAILURE); 4555 } 4556 4557 *dipp = dip; 4558 ndi_hold_devi(dip); 4559 ndi_devi_exit(pdip, circ); 4560 return (NDI_SUCCESS); 4561 } 4562 4563 /* 4564 * Enumerate and attach a child specified by name 'devnm'. 4565 * Called by devfs lookup and DR to perform a BUS_CONFIG_ONE. 4566 * Note: devfs does not make use of NDI_CONFIG to configure 4567 * an entire branch. 4568 */ 4569 int 4570 ndi_devi_config_one(dev_info_t *dip, char *devnm, dev_info_t **dipp, int flags) 4571 { 4572 int error; 4573 int (*f)(); 4574 int branch_event = 0; 4575 4576 ASSERT(dipp); 4577 ASSERT(i_ddi_node_state(dip) >= DS_ATTACHED); 4578 4579 NDI_CONFIG_DEBUG((CE_CONT, 4580 "ndi_devi_config_one: par = %s%d (%p), child = %s\n", 4581 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, devnm)); 4582 4583 if (pm_pre_config(dip, devnm) != DDI_SUCCESS) 4584 return (NDI_FAILURE); 4585 4586 if ((flags & (NDI_NO_EVENT | NDI_BRANCH_EVENT_OP)) == 0 && 4587 (flags & NDI_CONFIG)) { 4588 flags |= NDI_BRANCH_EVENT_OP; 4589 branch_event = 1; 4590 } 4591 4592 if ((DEVI(dip)->devi_ops->devo_bus_ops == NULL) || 4593 (DEVI(dip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 4594 (f = DEVI(dip)->devi_ops->devo_bus_ops->bus_config) == NULL) { 4595 error = devi_config_one(dip, devnm, dipp, flags, 0); 4596 } else { 4597 /* call bus_config entry point */ 4598 error = (*f)(dip, flags, BUS_CONFIG_ONE, (void *)devnm, dipp); 4599 } 4600 4601 if (error || (flags & NDI_CONFIG) == 0) { 4602 pm_post_config(dip, devnm); 4603 return (error); 4604 } 4605 4606 /* 4607 * DR usage (i.e. a call with NDI_CONFIG) recursively configures 4608 * grandchildren, performing a BUS_CONFIG_ALL from the node attached 4609 * by the BUS_CONFIG_ONE. 4610 */ 4611 ASSERT(*dipp); 4612 4613 error = devi_config_common(*dipp, flags, (major_t)-1); 4614 4615 pm_post_config(dip, devnm); 4616 4617 if (branch_event) 4618 (void) i_log_devfs_branch_add(*dipp); 4619 4620 return (error); 4621 } 4622 4623 4624 /* 4625 * Enumerate and attach a child specified by name 'devnm'. 4626 * Called when configuring the OBP options. This configures 4627 * only one node.
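 * Only a parent nexus at BUSO_REV_5 or later that provides a bus_config
 * entry point can service this request; otherwise NDI_FAILURE is returned.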
4628 */ 4629 static int 4630 ndi_devi_config_obp_args(dev_info_t *parent, char *devnm, 4631 dev_info_t **childp, int flags) 4632 { 4633 int error; 4634 int (*f)(); 4635 4636 ASSERT(childp); 4637 ASSERT(i_ddi_node_state(parent) >= DS_ATTACHED); 4638 4639 NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_config_obp_args: " 4640 "par = %s%d (%p), child = %s\n", ddi_driver_name(parent), 4641 ddi_get_instance(parent), (void *)parent, devnm)); 4642 4643 if ((DEVI(parent)->devi_ops->devo_bus_ops == NULL) || 4644 (DEVI(parent)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 4645 (f = DEVI(parent)->devi_ops->devo_bus_ops->bus_config) == NULL) { 4646 error = NDI_FAILURE; 4647 } else { 4648 /* call bus_config entry point */ 4649 error = (*f)(parent, flags, 4650 BUS_CONFIG_OBP_ARGS, (void *)devnm, childp); 4651 } 4652 return (error); 4653 } 4654 4655 4656 /* 4657 * detach a node with parent already held busy 4658 */ 4659 static int 4660 devi_detach_node(dev_info_t *dip, uint_t flags) 4661 { 4662 dev_info_t *pdip = ddi_get_parent(dip); 4663 int ret = NDI_SUCCESS; 4664 ddi_eventcookie_t cookie; 4665 4666 if (flags & NDI_POST_EVENT) { 4667 if (pdip && i_ddi_node_state(pdip) >= DS_ATTACHED) { 4668 if (ddi_get_eventcookie(dip, DDI_DEVI_REMOVE_EVENT, 4669 &cookie) == NDI_SUCCESS) 4670 (void) ndi_post_event(dip, dip, cookie, NULL); 4671 } 4672 } 4673 4674 if (i_ddi_detachchild(dip, flags) != DDI_SUCCESS) 4675 return (NDI_FAILURE); 4676 4677 if (flags & NDI_AUTODETACH) 4678 return (NDI_SUCCESS); 4679 4680 /* 4681 * For DR, even bound nodes may need to have offline 4682 * flag set. 4683 */ 4684 if (flags & NDI_DEVI_OFFLINE) { 4685 DEVI_SET_DEVICE_OFFLINE(dip); 4686 } 4687 4688 if (i_ddi_node_state(dip) == DS_INITIALIZED) { 4689 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4690 (void) ddi_pathname(dip, path); 4691 if (flags & NDI_DEVI_OFFLINE) 4692 i_ndi_devi_report_status_change(dip, path); 4693 4694 if (need_remove_event(dip, flags)) { 4695 (void) i_log_devfs_remove_devinfo(path, 4696 i_ddi_devi_class(dip), 4697 (char *)ddi_driver_name(dip), 4698 ddi_get_instance(dip), 4699 flags); 4700 DEVI_SET_EVREMOVE(dip); 4701 } 4702 kmem_free(path, MAXPATHLEN); 4703 } 4704 4705 if (flags & (NDI_UNCONFIG | NDI_DEVI_REMOVE)) { 4706 ret = ddi_uninitchild(dip); 4707 if (ret == NDI_SUCCESS) { 4708 /* 4709 * Remove uninitialized pseudo nodes because 4710 * system props are lost and the node cannot be 4711 * reattached. 
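 * Persistent (hardware) nodes are only removed when the caller
 * explicitly passes NDI_DEVI_REMOVE.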
4712 */ 4713 if (!ndi_dev_is_persistent_node(dip)) 4714 flags |= NDI_DEVI_REMOVE; 4715 4716 if (flags & NDI_DEVI_REMOVE) 4717 ret = ddi_remove_child(dip, 0); 4718 } 4719 } 4720 4721 return (ret); 4722 } 4723 4724 /* 4725 * unconfigure immediate children of bus nexus device 4726 */ 4727 static int 4728 unconfig_immediate_children( 4729 dev_info_t *dip, 4730 dev_info_t **dipp, 4731 int flags, 4732 major_t major) 4733 { 4734 int rv = NDI_SUCCESS, circ; 4735 dev_info_t *child; 4736 4737 ASSERT(dipp == NULL || *dipp == NULL); 4738 4739 ndi_devi_enter(dip, &circ); 4740 child = ddi_get_child(dip); 4741 while (child) { 4742 dev_info_t *next = ddi_get_next_sibling(child); 4743 if ((major != (major_t)-1) && 4744 (major != ddi_driver_major(child))) { 4745 child = next; 4746 continue; 4747 } 4748 4749 /* skip nexus nodes during autodetach */ 4750 if ((flags & NDI_AUTODETACH) && !is_leaf_node(child)) { 4751 child = next; 4752 continue; 4753 } 4754 4755 if (devi_detach_node(child, flags) != NDI_SUCCESS) { 4756 if (dipp && *dipp == NULL) { 4757 ndi_hold_devi(child); 4758 *dipp = child; 4759 } 4760 rv = NDI_FAILURE; 4761 } 4762 4763 /* 4764 * Continue upon failure--best effort algorithm 4765 */ 4766 child = next; 4767 } 4768 ndi_devi_exit(dip, circ); 4769 return (rv); 4770 } 4771 4772 /* 4773 * unconfigure grand children of bus nexus device 4774 */ 4775 static int 4776 unconfig_grand_children( 4777 dev_info_t *dip, 4778 dev_info_t **dipp, 4779 int flags, 4780 major_t major, 4781 struct brevq_node **brevqp) 4782 { 4783 struct mt_config_handle *hdl; 4784 4785 if (brevqp) 4786 *brevqp = NULL; 4787 4788 /* multi-threaded configuration of child nexus */ 4789 hdl = mt_config_init(dip, dipp, flags, major, MT_UNCONFIG_OP, brevqp); 4790 mt_config_children(hdl); 4791 4792 return (mt_config_fini(hdl)); /* wait for threads to exit */ 4793 } 4794 4795 /* 4796 * Unconfigure children/descendants of the dip. 4797 * 4798 * If brevqp is not NULL, on return *brevqp is set to a queue of dip's 4799 * child devinames for which branch remove events need to be generated. 4800 */ 4801 static int 4802 devi_unconfig_common( 4803 dev_info_t *dip, 4804 dev_info_t **dipp, 4805 int flags, 4806 major_t major, 4807 struct brevq_node **brevqp) 4808 { 4809 int rv; 4810 int pm_cookie; 4811 int (*f)(); 4812 ddi_bus_config_op_t bus_op; 4813 4814 if (dipp) 4815 *dipp = NULL; 4816 if (brevqp) 4817 *brevqp = NULL; 4818 4819 /* 4820 * Power up the dip if it is powered off. If the flag bit 4821 * NDI_AUTODETACH is set and the dip is not at its full power, 4822 * skip the rest of the branch. 4823 */ 4824 if (pm_pre_unconfig(dip, flags, &pm_cookie, NULL) != DDI_SUCCESS) 4825 return ((flags & NDI_AUTODETACH) ? NDI_SUCCESS : 4826 NDI_FAILURE); 4827 4828 /* 4829 * Some callers, notably SCSI, need to clear out the devfs 4830 * cache together with the unconfig to prevent stale entries. 
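 * This is requested with the NDI_DEVFS_CLEAN flag; the devfs_clean()
 * call below is made before any of the children are detached.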
4831 */ 4832 if (flags & NDI_DEVFS_CLEAN) 4833 (void) devfs_clean(dip, NULL, 0); 4834 4835 rv = unconfig_grand_children(dip, dipp, flags, major, brevqp); 4836 4837 if ((rv != NDI_SUCCESS) && ((flags & NDI_AUTODETACH) == 0)) { 4838 if (brevqp && *brevqp) { 4839 log_and_free_br_events_on_grand_children(dip, *brevqp); 4840 free_brevq(*brevqp); 4841 *brevqp = NULL; 4842 } 4843 pm_post_unconfig(dip, pm_cookie, NULL); 4844 return (rv); 4845 } 4846 4847 if (dipp && *dipp) { 4848 ndi_rele_devi(*dipp); 4849 *dipp = NULL; 4850 } 4851 4852 /* 4853 * It is possible to have a detached nexus with children 4854 * and grandchildren (for example: a branch consisting 4855 * entirely of bound nodes). Since the nexus is detached 4856 * the bus_unconfig entry point cannot be used to remove 4857 * or unconfigure the descendants. 4858 */ 4859 if (i_ddi_node_state(dip) < DS_ATTACHED || 4860 (DEVI(dip)->devi_ops->devo_bus_ops == NULL) || 4861 (DEVI(dip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 4862 (f = DEVI(dip)->devi_ops->devo_bus_ops->bus_unconfig) == NULL) { 4863 rv = unconfig_immediate_children(dip, dipp, flags, major); 4864 } else { 4865 /* 4866 * call the bus_unconfig entry point; 4867 * it should reset nexus flags if the unconfigure succeeds. 4868 */ 4869 bus_op = (major == (major_t)-1) ? 4870 BUS_UNCONFIG_ALL : BUS_UNCONFIG_DRIVER; 4871 rv = (*f)(dip, flags, bus_op, (void *)(uintptr_t)major); 4872 } 4873 4874 pm_post_unconfig(dip, pm_cookie, NULL); 4875 4876 if (brevqp && *brevqp) 4877 cleanup_br_events_on_grand_children(dip, brevqp); 4878 4879 return (rv); 4880 } 4881 4882 /* 4883 * called by devfs/framework to unconfigure children bound to major. 4884 * If NDI_AUTODETACH is specified, this is invoked by either the 4885 * moduninstall daemon or the modunload -i 0 command.
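 * Illustrative call (the flags shown are only an example; real callers
 * pick flags appropriate to their context):
 *	(void) ndi_devi_unconfig_driver(ddi_root_node(),
 *	    NDI_UNCONFIG | NDI_AUTODETACH, major);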
4886 */ 4887 int 4888 ndi_devi_unconfig_driver(dev_info_t *dip, int flags, major_t major) 4889 { 4890 NDI_CONFIG_DEBUG((CE_CONT, 4891 "ndi_devi_unconfig_driver: par = %s%d (%p), flags = 0x%x\n", 4892 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4893 4894 return (devi_unconfig_common(dip, NULL, flags, major, NULL)); 4895 } 4896 4897 int 4898 ndi_devi_unconfig(dev_info_t *dip, int flags) 4899 { 4900 NDI_CONFIG_DEBUG((CE_CONT, 4901 "ndi_devi_unconfig: par = %s%d (%p), flags = 0x%x\n", 4902 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4903 4904 return (devi_unconfig_common(dip, NULL, flags, (major_t)-1, NULL)); 4905 } 4906 4907 int 4908 e_ddi_devi_unconfig(dev_info_t *dip, dev_info_t **dipp, int flags) 4909 { 4910 NDI_CONFIG_DEBUG((CE_CONT, 4911 "e_ddi_devi_unconfig: par = %s%d (%p), flags = 0x%x\n", 4912 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4913 4914 return (devi_unconfig_common(dip, dipp, flags, (major_t)-1, NULL)); 4915 } 4916 4917 /* 4918 * Unconfigure child by name 4919 */ 4920 static int 4921 devi_unconfig_one(dev_info_t *pdip, char *devnm, int flags) 4922 { 4923 int rv, circ; 4924 dev_info_t *child; 4925 4926 ndi_devi_enter(pdip, &circ); 4927 child = ndi_devi_findchild(pdip, devnm); 4928 if (child == NULL) { 4929 NDI_CONFIG_DEBUG((CE_CONT, 4930 "devi_unconfig_one: %s not found\n", devnm)); 4931 ndi_devi_exit(pdip, circ); 4932 return (NDI_SUCCESS); 4933 } 4934 rv = devi_detach_node(child, flags); 4935 ndi_devi_exit(pdip, circ); 4936 return (rv); 4937 } 4938 4939 int 4940 ndi_devi_unconfig_one( 4941 dev_info_t *pdip, 4942 char *devnm, 4943 dev_info_t **dipp, 4944 int flags) 4945 { 4946 int (*f)(); 4947 int circ, rv; 4948 int pm_cookie; 4949 dev_info_t *child; 4950 struct brevq_node *brevq = NULL; 4951 4952 ASSERT(i_ddi_node_state(pdip) >= DS_ATTACHED); 4953 4954 NDI_CONFIG_DEBUG((CE_CONT, 4955 "ndi_devi_unconfig_one: par = %s%d (%p), child = %s\n", 4956 ddi_driver_name(pdip), ddi_get_instance(pdip), 4957 (void *)pdip, devnm)); 4958 4959 if (pm_pre_unconfig(pdip, flags, &pm_cookie, devnm) != DDI_SUCCESS) 4960 return (NDI_FAILURE); 4961 4962 if (dipp) 4963 *dipp = NULL; 4964 4965 ndi_devi_enter(pdip, &circ); 4966 child = ndi_devi_findchild(pdip, devnm); 4967 if (child == NULL) { 4968 NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_unconfig_one: %s" 4969 " not found\n", devnm)); 4970 ndi_devi_exit(pdip, circ); 4971 pm_post_unconfig(pdip, pm_cookie, devnm); 4972 return (NDI_SUCCESS); 4973 } 4974 4975 /* 4976 * Unconfigure children/descendants of named child 4977 */ 4978 rv = devi_unconfig_branch(child, dipp, flags | NDI_UNCONFIG, &brevq); 4979 if (rv != NDI_SUCCESS) 4980 goto out; 4981 4982 init_bound_node_ev(pdip, child, flags); 4983 4984 if ((DEVI(pdip)->devi_ops->devo_bus_ops == NULL) || 4985 (DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 4986 (f = DEVI(pdip)->devi_ops->devo_bus_ops->bus_unconfig) == NULL) { 4987 rv = devi_detach_node(child, flags); 4988 } else { 4989 /* call bus_config entry point */ 4990 rv = (*f)(pdip, flags, BUS_UNCONFIG_ONE, (void *)devnm); 4991 } 4992 4993 if (brevq) { 4994 if (rv != NDI_SUCCESS) 4995 log_and_free_brevq_dip(child, brevq); 4996 else 4997 free_brevq(brevq); 4998 } 4999 5000 if (dipp && rv != NDI_SUCCESS) { 5001 ndi_hold_devi(child); 5002 ASSERT(*dipp == NULL); 5003 *dipp = child; 5004 } 5005 5006 out: 5007 ndi_devi_exit(pdip, circ); 5008 pm_post_unconfig(pdip, pm_cookie, devnm); 5009 5010 return (rv); 5011 } 5012 5013 struct async_arg { 5014 dev_info_t *dip; 5015 uint_t 
flags; 5016 }; 5017 5018 /* 5019 * Common async handler for: 5020 * ndi_devi_bind_driver_async 5021 * ndi_devi_online_async 5022 */ 5023 static int 5024 i_ndi_devi_async_common(dev_info_t *dip, uint_t flags, void (*func)()) 5025 { 5026 int tqflag; 5027 int kmflag; 5028 struct async_arg *arg; 5029 dev_info_t *pdip = ddi_get_parent(dip); 5030 5031 ASSERT(pdip); 5032 ASSERT(DEVI(pdip)->devi_taskq); 5033 ASSERT(ndi_dev_is_persistent_node(dip)); 5034 5035 if (flags & NDI_NOSLEEP) { 5036 kmflag = KM_NOSLEEP; 5037 tqflag = TQ_NOSLEEP; 5038 } else { 5039 kmflag = KM_SLEEP; 5040 tqflag = TQ_SLEEP; 5041 } 5042 5043 arg = kmem_alloc(sizeof (*arg), kmflag); 5044 if (arg == NULL) 5045 goto fail; 5046 5047 arg->flags = flags; 5048 arg->dip = dip; 5049 if (ddi_taskq_dispatch(DEVI(pdip)->devi_taskq, func, arg, tqflag) == 5050 DDI_SUCCESS) { 5051 return (NDI_SUCCESS); 5052 } 5053 5054 fail: 5055 NDI_CONFIG_DEBUG((CE_CONT, "%s%d: ddi_taskq_dispatch failed", 5056 ddi_driver_name(pdip), ddi_get_instance(pdip))); 5057 5058 if (arg) 5059 kmem_free(arg, sizeof (*arg)); 5060 return (NDI_FAILURE); 5061 } 5062 5063 static void 5064 i_ndi_devi_bind_driver_cb(struct async_arg *arg) 5065 { 5066 (void) ndi_devi_bind_driver(arg->dip, arg->flags); 5067 kmem_free(arg, sizeof (*arg)); 5068 } 5069 5070 int 5071 ndi_devi_bind_driver_async(dev_info_t *dip, uint_t flags) 5072 { 5073 return (i_ndi_devi_async_common(dip, flags, 5074 (void (*)())i_ndi_devi_bind_driver_cb)); 5075 } 5076 5077 /* 5078 * place the devinfo in the ONLINE state. 5079 */ 5080 int 5081 ndi_devi_online(dev_info_t *dip, uint_t flags) 5082 { 5083 int circ, rv; 5084 dev_info_t *pdip = ddi_get_parent(dip); 5085 int branch_event = 0; 5086 5087 ASSERT(pdip); 5088 5089 NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_online: %s%d (%p)\n", 5090 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip)); 5091 5092 ndi_devi_enter(pdip, &circ); 5093 /* bind child before merging .conf nodes */ 5094 rv = i_ndi_config_node(dip, DS_BOUND, flags); 5095 if (rv != NDI_SUCCESS) { 5096 ndi_devi_exit(pdip, circ); 5097 return (rv); 5098 } 5099 5100 /* merge .conf properties */ 5101 (void) i_ndi_make_spec_children(pdip, flags); 5102 5103 flags |= (NDI_DEVI_ONLINE | NDI_CONFIG); 5104 5105 if (flags & NDI_NO_EVENT) { 5106 /* 5107 * Caller is specifically asking for not to generate an event. 5108 * Set the following flag so that devi_attach_node() don't 5109 * change the event state. 5110 */ 5111 flags |= NDI_NO_EVENT_STATE_CHNG; 5112 } 5113 5114 if ((flags & (NDI_NO_EVENT | NDI_BRANCH_EVENT_OP)) == 0 && 5115 ((flags & NDI_CONFIG) || DEVI_NEED_NDI_CONFIG(dip))) { 5116 flags |= NDI_BRANCH_EVENT_OP; 5117 branch_event = 1; 5118 } 5119 5120 /* 5121 * devi_attach_node() may remove dip on failure 5122 */ 5123 if ((rv = devi_attach_node(dip, flags)) == NDI_SUCCESS) { 5124 if ((flags & NDI_CONFIG) || DEVI_NEED_NDI_CONFIG(dip)) { 5125 (void) ndi_devi_config(dip, flags); 5126 } 5127 5128 if (branch_event) 5129 (void) i_log_devfs_branch_add(dip); 5130 } 5131 5132 ndi_devi_exit(pdip, circ); 5133 5134 /* 5135 * Notify devfs that we have a new node. Devfs needs to invalidate 5136 * cached directory contents. 5137 * 5138 * For PCMCIA devices, it is possible the pdip is not fully 5139 * attached. In this case, calling back into devfs will 5140 * result in a loop or assertion error. Hence, the check 5141 * on node state. 5142 * 5143 * If we own parent lock, this is part of a branch operation. 5144 * We skip the devfs_clean() step because the cache invalidation 5145 * is done higher up in the device tree. 
5146 */ 5147 if (rv == NDI_SUCCESS && i_ddi_node_state(pdip) == DS_READY && 5148 !DEVI_BUSY_OWNED(pdip)) 5149 (void) devfs_clean(pdip, NULL, 0); 5150 return (rv); 5151 } 5152 5153 static void 5154 i_ndi_devi_online_cb(struct async_arg *arg) 5155 { 5156 (void) ndi_devi_online(arg->dip, arg->flags); 5157 kmem_free(arg, sizeof (*arg)); 5158 } 5159 5160 int 5161 ndi_devi_online_async(dev_info_t *dip, uint_t flags) 5162 { 5163 /* mark child as need config if requested. */ 5164 if (flags & NDI_CONFIG) 5165 DEVI_SET_NDI_CONFIG(dip); 5166 5167 return (i_ndi_devi_async_common(dip, flags, 5168 (void (*)())i_ndi_devi_online_cb)); 5169 } 5170 5171 /* 5172 * Take a device node Offline. 5173 * To take a device Offline means to detach the device instance from 5174 * the driver and prevent devfs requests from re-attaching the device 5175 * instance. 5176 * 5177 * The flag NDI_DEVI_REMOVE causes removal of the device node from 5178 * the driver list and the device tree. In this case, the device 5179 * is assumed to be removed from the system. 5180 */ 5181 int 5182 ndi_devi_offline(dev_info_t *dip, uint_t flags) 5183 { 5184 int circ, rval = 0; 5185 dev_info_t *pdip = ddi_get_parent(dip); 5186 struct brevq_node *brevq = NULL; 5187 5188 ASSERT(pdip); 5189 5190 flags |= NDI_DEVI_OFFLINE; 5191 ndi_devi_enter(pdip, &circ); 5192 if (i_ddi_node_state(dip) == DS_READY) { 5193 /* 5194 * If dip is in DS_READY state, there may be cached dv_nodes 5195 * referencing this dip, so we invoke devfs code path. 5196 * Note that we must release busy changing on pdip to 5197 * avoid deadlock against devfs. 5198 */ 5199 char *devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP); 5200 (void) ddi_deviname(dip, devname); 5201 ndi_devi_exit(pdip, circ); 5202 5203 /* 5204 * If we own parent lock, this is part of a branch 5205 * operation. We skip the devfs_clean() step. 5206 */ 5207 if (!DEVI_BUSY_OWNED(pdip)) 5208 rval = devfs_clean(pdip, devname + 1, DV_CLEAN_FORCE); 5209 kmem_free(devname, MAXNAMELEN + 1); 5210 5211 if (rval == 0) 5212 rval = devi_unconfig_branch(dip, NULL, 5213 flags|NDI_UNCONFIG, &brevq); 5214 if (rval) 5215 return (NDI_FAILURE); 5216 5217 ndi_devi_enter(pdip, &circ); 5218 } 5219 5220 init_bound_node_ev(pdip, dip, flags); 5221 5222 rval = devi_detach_node(dip, flags); 5223 if (brevq) { 5224 if (rval != NDI_SUCCESS) 5225 log_and_free_brevq_dip(dip, brevq); 5226 else 5227 free_brevq(brevq); 5228 } 5229 5230 ndi_devi_exit(pdip, circ); 5231 5232 return (rval); 5233 } 5234 5235 /* 5236 * Find the child dev_info node of parent nexus 'p' whose name 5237 * matches "cname@caddr". Recommend use of ndi_devi_findchild() instead. 5238 */ 5239 dev_info_t * 5240 ndi_devi_find(dev_info_t *pdip, char *cname, char *caddr) 5241 { 5242 dev_info_t *child; 5243 int circ; 5244 5245 if (pdip == NULL || cname == NULL || caddr == NULL) 5246 return ((dev_info_t *)NULL); 5247 5248 ndi_devi_enter(pdip, &circ); 5249 child = find_sibling(ddi_get_child(pdip), cname, caddr, 0, NULL); 5250 ndi_devi_exit(pdip, circ); 5251 return (child); 5252 } 5253 5254 /* 5255 * Find the child dev_info node of parent nexus 'p' whose name 5256 * matches devname "name@addr". Permits caller to hold the parent.
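 * (DEVI_BUSY_OWNED(pdip) is asserted; use ndi_devi_find() above when the
 * caller does not already hold the parent busy.)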
5257 */ 5258 dev_info_t * 5259 ndi_devi_findchild(dev_info_t *pdip, char *devname) 5260 { 5261 dev_info_t *child; 5262 char *cname, *caddr; 5263 char *devstr; 5264 5265 ASSERT(DEVI_BUSY_OWNED(pdip)); 5266 5267 devstr = i_ddi_strdup(devname, KM_SLEEP); 5268 i_ddi_parse_name(devstr, &cname, &caddr, NULL); 5269 5270 if (cname == NULL || caddr == NULL) { 5271 kmem_free(devstr, strlen(devname)+1); 5272 return ((dev_info_t *)NULL); 5273 } 5274 5275 child = find_sibling(ddi_get_child(pdip), cname, caddr, 0, NULL); 5276 kmem_free(devstr, strlen(devname)+1); 5277 return (child); 5278 } 5279 5280 /* 5281 * Misc. routines called by framework only 5282 */ 5283 5284 /* 5285 * Clear the DEVI_MADE_CHILDREN/DEVI_ATTACHED_CHILDREN flags 5286 * if new child spec has been added. 5287 */ 5288 static int 5289 reset_nexus_flags(dev_info_t *dip, void *arg) 5290 { 5291 struct hwc_spec *list; 5292 int circ; 5293 5294 if (((DEVI(dip)->devi_flags & DEVI_MADE_CHILDREN) == 0) || 5295 ((list = hwc_get_child_spec(dip, (major_t)(uintptr_t)arg)) == NULL)) 5296 return (DDI_WALK_CONTINUE); 5297 5298 hwc_free_spec_list(list); 5299 5300 /* coordinate child state update */ 5301 ndi_devi_enter(dip, &circ); 5302 mutex_enter(&DEVI(dip)->devi_lock); 5303 DEVI(dip)->devi_flags &= ~(DEVI_MADE_CHILDREN | DEVI_ATTACHED_CHILDREN); 5304 mutex_exit(&DEVI(dip)->devi_lock); 5305 ndi_devi_exit(dip, circ); 5306 5307 return (DDI_WALK_CONTINUE); 5308 } 5309 5310 /* 5311 * Helper functions, returns NULL if no memory. 5312 */ 5313 5314 /* 5315 * path_to_major: 5316 * 5317 * Return an alternate driver name binding for the leaf device 5318 * of the given pathname, if there is one. The purpose of this 5319 * function is to deal with generic pathnames. The default action 5320 * for platforms that can't do this (ie: x86 or any platform that 5321 * does not have prom_finddevice functionality, which matches 5322 * nodenames and unit-addresses without the drivers participation) 5323 * is to return (major_t)-1. 5324 * 5325 * Used in loadrootmodules() in the swapgeneric module to 5326 * associate a given pathname with a given leaf driver. 5327 * 5328 */ 5329 major_t 5330 path_to_major(char *path) 5331 { 5332 dev_info_t *dip; 5333 char *p, *q; 5334 dnode_t nodeid; 5335 major_t maj; 5336 5337 /* 5338 * Get the nodeid of the given pathname, if such a mapping exists. 5339 */ 5340 dip = NULL; 5341 nodeid = prom_finddevice(path); 5342 if (nodeid != OBP_BADNODE) { 5343 /* 5344 * Find the nodeid in our copy of the device tree and return 5345 * whatever name we used to bind this node to a driver. 5346 */ 5347 dip = e_ddi_nodeid_to_dip(nodeid); 5348 } 5349 5350 if (dip == NULL) { 5351 NDI_CONFIG_DEBUG((CE_WARN, 5352 "path_to_major: can't bind <%s>\n", path)); 5353 return ((major_t)-1); 5354 } 5355 5356 /* 5357 * If we're bound to something other than the nodename, 5358 * note that in the message buffer and system log. 5359 */ 5360 p = ddi_binding_name(dip); 5361 q = ddi_node_name(dip); 5362 if (p && q && (strcmp(p, q) != 0)) 5363 NDI_CONFIG_DEBUG((CE_NOTE, "path_to_major: %s bound to %s\n", 5364 path, p)); 5365 5366 maj = ddi_name_to_major(p); 5367 5368 ndi_rele_devi(dip); /* release node held during walk */ 5369 5370 return (maj); 5371 } 5372 5373 /* 5374 * Return the held dip for the specified major and instance, attempting to do 5375 * an attach if specified. Return NULL if the devi can't be found or put in 5376 * the proper state. The caller must release the hold via ddi_release_devi if 5377 * a non-NULL value is returned. 
5378 * 5379 * Some callers expect to be able to perform a hold_devi() while in a context 5380 * where using ndi_devi_enter() to ensure the hold might cause deadlock (see 5381 * open-from-attach code in consconfig_dacf.c). Such special-case callers 5382 * must ensure that an ndi_devi_enter(parent)/ndi_devi_hold() from a safe 5383 * context is already active. The hold_devi() implementation must accommodate 5384 * these callers. 5385 */ 5386 static dev_info_t * 5387 hold_devi(major_t major, int instance, int flags) 5388 { 5389 struct devnames *dnp; 5390 dev_info_t *dip; 5391 char *path; 5392 5393 if ((major >= devcnt) || (instance == -1)) 5394 return (NULL); 5395 5396 /* try to find the instance in the per driver list */ 5397 dnp = &(devnamesp[major]); 5398 LOCK_DEV_OPS(&(dnp->dn_lock)); 5399 for (dip = dnp->dn_head; dip; 5400 dip = (dev_info_t *)DEVI(dip)->devi_next) { 5401 /* skip node if instance field is not valid */ 5402 if (i_ddi_node_state(dip) < DS_INITIALIZED) 5403 continue; 5404 5405 /* look for instance match */ 5406 if (DEVI(dip)->devi_instance == instance) { 5407 /* 5408 * To accommodate callers that can't block in 5409 * ndi_devi_enter() we do an ndi_devi_hold(), and 5410 * afterwards check that the node is in a state where 5411 * the hold prevents detach(). If we did not manage to 5412 * prevent detach then we ndi_rele_devi() and perform 5413 * the slow path below (which can result in a blocking 5414 * ndi_devi_enter() while driving attach top-down). 5415 * This code depends on the ordering of 5416 * DEVI_SET_DETACHING and the devi_ref check in the 5417 * detach_node() code path. 5418 */ 5419 ndi_hold_devi(dip); 5420 if ((i_ddi_node_state(dip) >= DS_ATTACHED) && 5421 !DEVI_IS_DETACHING(dip)) { 5422 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 5423 return (dip); /* fast-path with devi held */ 5424 } 5425 ndi_rele_devi(dip); 5426 5427 /* try slow-path */ 5428 dip = NULL; 5429 break; 5430 } 5431 } 5432 ASSERT(dip == NULL); 5433 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 5434 5435 if (flags & E_DDI_HOLD_DEVI_NOATTACH) 5436 return (NULL); /* told not to drive attach */ 5437 5438 /* slow-path may block, so it should not occur from interrupt */ 5439 ASSERT(!servicing_interrupt()); 5440 if (servicing_interrupt()) 5441 return (NULL); 5442 5443 /* reconstruct the path and drive attach by path through devfs. */ 5444 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5445 if (e_ddi_majorinstance_to_path(major, instance, path) == 0) 5446 dip = e_ddi_hold_devi_by_path(path, flags); 5447 kmem_free(path, MAXPATHLEN); 5448 return (dip); /* with devi held */ 5449 } 5450 5451 /* 5452 * The {e_}ddi_hold_devi{_by_{instance|dev|path}} routines hold the devinfo node 5453 * associated with the specified arguments. This hold should be released 5454 * by calling ddi_release_devi. 5455 * 5456 * The E_DDI_HOLD_DEVI_NOATTACH flag argument allows the caller to specify 5457 * a failure return if the node is not already attached. 5458 * 5459 * NOTE: by the time we make e_ddi_hold_devi public, we should be able to reuse 5460 * ddi_hold_devi again.
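 * Illustrative use of the public wrappers (error handling elided):
 *	dev_info_t *dip = ddi_hold_devi_by_instance(major, instance, 0);
 *	if (dip != NULL) {
 *		... the hold keeps the node from detaching ...
 *		ddi_release_devi(dip);
 *	}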
5461 */ 5462 dev_info_t * 5463 ddi_hold_devi_by_instance(major_t major, int instance, int flags) 5464 { 5465 return (hold_devi(major, instance, flags)); 5466 } 5467 5468 dev_info_t * 5469 e_ddi_hold_devi_by_dev(dev_t dev, int flags) 5470 { 5471 major_t major = getmajor(dev); 5472 dev_info_t *dip; 5473 struct dev_ops *ops; 5474 dev_info_t *ddip = NULL; 5475 5476 dip = hold_devi(major, dev_to_instance(dev), flags); 5477 5478 /* 5479 * The rest of this routine is legacy support for drivers that 5480 * have broken DDI_INFO_DEVT2INSTANCE implementations but may have 5481 * functional DDI_INFO_DEVT2DEVINFO implementations. This code will 5482 * diagnose inconsistency and, for maximum compatibility with legacy 5483 * drivers, give preference to the driver's DDI_INFO_DEVT2DEVINFO 5484 * implementation over the dip derived above based on the driver's 5485 * DDI_INFO_DEVT2INSTANCE implementation. This legacy support should 5486 * be removed when DDI_INFO_DEVT2DEVINFO is deprecated. 5487 * 5488 * NOTE: The following code has a race condition. DEVT2DEVINFO 5489 * returns a dip which is not held. By the time we ref ddip, 5490 * it could have been freed. The saving grace is that for 5491 * most drivers, the dip returned from hold_devi() is the 5492 * same one as the one returned by DEVT2DEVINFO, so we are 5493 * safe for drivers with the correct getinfo(9e) impl. 5494 */ 5495 if (((ops = ddi_hold_driver(major)) != NULL) && 5496 CB_DRV_INSTALLED(ops) && ops->devo_getinfo) { 5497 if ((*ops->devo_getinfo)(NULL, DDI_INFO_DEVT2DEVINFO, 5498 (void *)dev, (void **)&ddip) != DDI_SUCCESS) 5499 ddip = NULL; 5500 } 5501 5502 /* give preference to the driver returned DEVT2DEVINFO dip */ 5503 if (ddip && (dip != ddip)) { 5504 #ifdef DEBUG 5505 cmn_err(CE_WARN, "%s: inconsistent getinfo(9E) implementation", 5506 ddi_driver_name(ddip)); 5507 #endif /* DEBUG */ 5508 ndi_hold_devi(ddip); 5509 if (dip) 5510 ndi_rele_devi(dip); 5511 dip = ddip; 5512 } 5513 5514 if (ops) 5515 ddi_rele_driver(major); 5516 5517 return (dip); 5518 } 5519 5520 /* 5521 * For compatibility only. Do not call this function! 5522 */ 5523 dev_info_t * 5524 e_ddi_get_dev_info(dev_t dev, vtype_t type) 5525 { 5526 dev_info_t *dip = NULL; 5527 if (getmajor(dev) >= devcnt) 5528 return (NULL); 5529 5530 switch (type) { 5531 case VCHR: 5532 case VBLK: 5533 dip = e_ddi_hold_devi_by_dev(dev, 0); 5534 default: 5535 break; 5536 } 5537 5538 /* 5539 * For compatibility reasons, we can only return the dip with 5540 * the driver ref count held. This is not a safe thing to do. 5541 * For certain broken third-party software, we are willing 5542 * to venture into unknown territory. 5543 */ 5544 if (dip) { 5545 (void) ndi_hold_driver(dip); 5546 ndi_rele_devi(dip); 5547 } 5548 return (dip); 5549 } 5550 5551 dev_info_t * 5552 e_ddi_hold_devi_by_path(char *path, int flags) 5553 { 5554 dev_info_t *dip; 5555 5556 /* can't specify NOATTACH by path */ 5557 ASSERT(!(flags & E_DDI_HOLD_DEVI_NOATTACH)); 5558 5559 return (resolve_pathname(path, &dip, NULL, NULL) ? NULL : dip); 5560 } 5561 5562 void 5563 e_ddi_hold_devi(dev_info_t *dip) 5564 { 5565 ndi_hold_devi(dip); 5566 } 5567 5568 void 5569 ddi_release_devi(dev_info_t *dip) 5570 { 5571 ndi_rele_devi(dip); 5572 } 5573 5574 /* 5575 * Associate a streams queue with a devinfo node 5576 * NOTE: This function is called by a STREAMS driver's put procedure. 5577 * It cannot block.
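 * It tags the read queue _QASSOCIATED and re-associates the stream's
 * vnode with the given dip via spec_assoc_vp_with_devi().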
5578 */ 5579 void 5580 ddi_assoc_queue_with_devi(queue_t *q, dev_info_t *dip) 5581 { 5582 queue_t *rq = _RD(q); 5583 struct stdata *stp; 5584 vnode_t *vp; 5585 5586 /* set flag indicating that ddi_assoc_queue_with_devi was called */ 5587 mutex_enter(QLOCK(rq)); 5588 rq->q_flag |= _QASSOCIATED; 5589 mutex_exit(QLOCK(rq)); 5590 5591 /* get the vnode associated with the queue */ 5592 stp = STREAM(rq); 5593 vp = stp->sd_vnode; 5594 ASSERT(vp); 5595 5596 /* change the hardware association of the vnode */ 5597 spec_assoc_vp_with_devi(vp, dip); 5598 } 5599 5600 /* 5601 * ddi_install_driver(name) 5602 * 5603 * Driver installation is currently a byproduct of driver loading. This 5604 * may change. 5605 */ 5606 int 5607 ddi_install_driver(char *name) 5608 { 5609 major_t major = ddi_name_to_major(name); 5610 5611 if ((major == (major_t)-1) || 5612 (ddi_hold_installed_driver(major) == NULL)) { 5613 return (DDI_FAILURE); 5614 } 5615 ddi_rele_driver(major); 5616 return (DDI_SUCCESS); 5617 } 5618 5619 struct dev_ops * 5620 ddi_hold_driver(major_t major) 5621 { 5622 return (mod_hold_dev_by_major(major)); 5623 } 5624 5625 5626 void 5627 ddi_rele_driver(major_t major) 5628 { 5629 mod_rele_dev_by_major(major); 5630 } 5631 5632 5633 /* 5634 * This is called during boot to force attachment order of special dips 5635 * dip must be referenced via ndi_hold_devi() 5636 */ 5637 int 5638 i_ddi_attach_node_hierarchy(dev_info_t *dip) 5639 { 5640 dev_info_t *parent; 5641 5642 if (i_ddi_node_state(dip) == DS_READY) 5643 return (DDI_SUCCESS); 5644 5645 /* 5646 * Attach parent dip 5647 */ 5648 parent = ddi_get_parent(dip); 5649 if (i_ddi_attach_node_hierarchy(parent) != DDI_SUCCESS) 5650 return (DDI_FAILURE); 5651 5652 /* 5653 * Expand .conf nodes under this parent 5654 */ 5655 (void) i_ndi_make_spec_children(parent, 0); 5656 return (i_ddi_attachchild(dip)); 5657 } 5658 5659 /* keep this function static */ 5660 static int 5661 attach_driver_nodes(major_t major) 5662 { 5663 struct devnames *dnp; 5664 dev_info_t *dip; 5665 int error = DDI_FAILURE; 5666 5667 dnp = &devnamesp[major]; 5668 LOCK_DEV_OPS(&dnp->dn_lock); 5669 dip = dnp->dn_head; 5670 while (dip) { 5671 ndi_hold_devi(dip); 5672 UNLOCK_DEV_OPS(&dnp->dn_lock); 5673 if (i_ddi_attach_node_hierarchy(dip) == DDI_SUCCESS) 5674 error = DDI_SUCCESS; 5675 LOCK_DEV_OPS(&dnp->dn_lock); 5676 ndi_rele_devi(dip); 5677 dip = ddi_get_next(dip); 5678 } 5679 if (error == DDI_SUCCESS) 5680 dnp->dn_flags |= DN_NO_AUTODETACH; 5681 UNLOCK_DEV_OPS(&dnp->dn_lock); 5682 5683 5684 return (error); 5685 } 5686 5687 /* 5688 * i_ddi_attach_hw_nodes configures and attaches all hw nodes 5689 * bound to a specific driver. This function replaces calls to 5690 * ddi_hold_installed_driver() for drivers with no .conf 5691 * enumerated nodes. 5692 * 5693 * This facility is typically called at boot time to attach 5694 * platform-specific hardware nodes, such as ppm nodes on xcal 5695 * and grover and keyswitch nodes on cherrystone. It does not 5696 * deal with .conf enumerated node. Calling it beyond the boot 5697 * process is strongly discouraged. 5698 */ 5699 int 5700 i_ddi_attach_hw_nodes(char *driver) 5701 { 5702 major_t major; 5703 5704 major = ddi_name_to_major(driver); 5705 if (major == (major_t)-1) 5706 return (DDI_FAILURE); 5707 5708 return (attach_driver_nodes(major)); 5709 } 5710 5711 /* 5712 * i_ddi_attach_pseudo_node configures pseudo drivers which 5713 * has a single node. The .conf nodes must be enumerated 5714 * before calling this interface. 
The dip is held attached 5715 * upon returning. 5716 * 5717 * This facility should only be called only at boot time 5718 * by the I/O framework. 5719 */ 5720 dev_info_t * 5721 i_ddi_attach_pseudo_node(char *driver) 5722 { 5723 major_t major; 5724 dev_info_t *dip; 5725 5726 major = ddi_name_to_major(driver); 5727 if (major == (major_t)-1) 5728 return (NULL); 5729 5730 if (attach_driver_nodes(major) != DDI_SUCCESS) 5731 return (NULL); 5732 5733 dip = devnamesp[major].dn_head; 5734 ASSERT(dip && ddi_get_next(dip) == NULL); 5735 ndi_hold_devi(dip); 5736 return (dip); 5737 } 5738 5739 static void 5740 diplist_to_parent_major(dev_info_t *head, char parents[]) 5741 { 5742 major_t major; 5743 dev_info_t *dip, *pdip; 5744 5745 for (dip = head; dip != NULL; dip = ddi_get_next(dip)) { 5746 pdip = ddi_get_parent(dip); 5747 ASSERT(pdip); /* disallow rootnex.conf nodes */ 5748 major = ddi_driver_major(pdip); 5749 if ((major != (major_t)-1) && parents[major] == 0) 5750 parents[major] = 1; 5751 } 5752 } 5753 5754 /* 5755 * Call ddi_hold_installed_driver() on each parent major 5756 * and invoke mt_config_driver() to attach child major. 5757 * This is part of the implementation of ddi_hold_installed_driver. 5758 */ 5759 static int 5760 attach_driver_by_parent(major_t child_major, char parents[]) 5761 { 5762 major_t par_major; 5763 struct mt_config_handle *hdl; 5764 int flags = NDI_DEVI_PERSIST | NDI_NO_EVENT; 5765 5766 hdl = mt_config_init(NULL, NULL, flags, child_major, MT_CONFIG_OP, 5767 NULL); 5768 for (par_major = 0; par_major < devcnt; par_major++) { 5769 /* disallow recursion on the same driver */ 5770 if (parents[par_major] == 0 || par_major == child_major) 5771 continue; 5772 if (ddi_hold_installed_driver(par_major) == NULL) 5773 continue; 5774 hdl->mtc_parmajor = par_major; 5775 mt_config_driver(hdl); 5776 ddi_rele_driver(par_major); 5777 } 5778 (void) mt_config_fini(hdl); 5779 5780 return (i_ddi_devs_attached(child_major)); 5781 } 5782 5783 int 5784 i_ddi_devs_attached(major_t major) 5785 { 5786 dev_info_t *dip; 5787 struct devnames *dnp; 5788 int error = DDI_FAILURE; 5789 5790 /* check for attached instances */ 5791 dnp = &devnamesp[major]; 5792 LOCK_DEV_OPS(&dnp->dn_lock); 5793 for (dip = dnp->dn_head; dip != NULL; dip = ddi_get_next(dip)) { 5794 if (i_ddi_node_state(dip) >= DS_ATTACHED) { 5795 error = DDI_SUCCESS; 5796 break; 5797 } 5798 } 5799 UNLOCK_DEV_OPS(&dnp->dn_lock); 5800 5801 return (error); 5802 } 5803 5804 /* 5805 * ddi_hold_installed_driver configures and attaches all 5806 * instances of the specified driver. To accomplish this 5807 * it configures and attaches all possible parents of 5808 * the driver, enumerated both in h/w nodes and in the 5809 * driver's .conf file. 5810 * 5811 * NOTE: This facility is for compatibility purposes only and will 5812 * eventually go away. Its usage is strongly discouraged. 
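 * Illustrative usage (the driver name "xx" is a placeholder):
 *	major_t maj = ddi_name_to_major("xx");
 *	struct dev_ops *ops;
 *	if (maj != (major_t)-1 &&
 *	    (ops = ddi_hold_installed_driver(maj)) != NULL) {
 *		... all attached instances of "xx" are now held ...
 *		ddi_rele_driver(maj);
 *	}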
5813 */ 5814 static void 5815 enter_driver(struct devnames *dnp) 5816 { 5817 mutex_enter(&dnp->dn_lock); 5818 ASSERT(dnp->dn_busy_thread != curthread); 5819 while (dnp->dn_flags & DN_DRIVER_BUSY) 5820 cv_wait(&dnp->dn_wait, &dnp->dn_lock); 5821 dnp->dn_flags |= DN_DRIVER_BUSY; 5822 dnp->dn_busy_thread = curthread; 5823 mutex_exit(&dnp->dn_lock); 5824 } 5825 5826 static void 5827 exit_driver(struct devnames *dnp) 5828 { 5829 mutex_enter(&dnp->dn_lock); 5830 ASSERT(dnp->dn_busy_thread == curthread); 5831 dnp->dn_flags &= ~DN_DRIVER_BUSY; 5832 dnp->dn_busy_thread = NULL; 5833 cv_broadcast(&dnp->dn_wait); 5834 mutex_exit(&dnp->dn_lock); 5835 } 5836 5837 struct dev_ops * 5838 ddi_hold_installed_driver(major_t major) 5839 { 5840 struct dev_ops *ops; 5841 struct devnames *dnp; 5842 char *parents; 5843 int error; 5844 5845 ops = ddi_hold_driver(major); 5846 if (ops == NULL) 5847 return (NULL); 5848 5849 /* 5850 * Return immediately if all the attach operations associated 5851 * with a ddi_hold_installed_driver() call have already been done. 5852 */ 5853 dnp = &devnamesp[major]; 5854 enter_driver(dnp); 5855 if (dnp->dn_flags & DN_DRIVER_HELD) { 5856 exit_driver(dnp); 5857 if (i_ddi_devs_attached(major) == DDI_SUCCESS) 5858 return (ops); 5859 ddi_rele_driver(major); 5860 return (NULL); 5861 } 5862 5863 LOCK_DEV_OPS(&dnp->dn_lock); 5864 dnp->dn_flags |= (DN_DRIVER_HELD | DN_NO_AUTODETACH); 5865 UNLOCK_DEV_OPS(&dnp->dn_lock); 5866 5867 DCOMPATPRINTF((CE_CONT, 5868 "ddi_hold_installed_driver: %s\n", dnp->dn_name)); 5869 5870 /* 5871 * When the driver has no .conf children, it is sufficient 5872 * to attach existing nodes in the device tree. Nodes not 5873 * enumerated by the OBP are not attached. 5874 */ 5875 if (dnp->dn_pl == NULL) { 5876 if (attach_driver_nodes(major) == DDI_SUCCESS) { 5877 exit_driver(dnp); 5878 return (ops); 5879 } 5880 exit_driver(dnp); 5881 ddi_rele_driver(major); 5882 return (NULL); 5883 } 5884 5885 /* 5886 * Driver has .conf nodes. We find all possible parents 5887 * and recursively call ddi_hold_installed_driver() on each 5888 * parent driver; then we invoke mt_config_driver() 5889 * on all possible parent nodes in parallel to speed up 5890 * performance. 5891 */ 5892 parents = kmem_zalloc(devcnt * sizeof (char), KM_SLEEP); 5893 5894 LOCK_DEV_OPS(&dnp->dn_lock); 5895 /* find .conf parents */ 5896 (void) impl_parlist_to_major(dnp->dn_pl, parents); 5897 /* find hw node parents */ 5898 diplist_to_parent_major(dnp->dn_head, parents); 5899 UNLOCK_DEV_OPS(&dnp->dn_lock); 5900 5901 error = attach_driver_by_parent(major, parents); 5902 kmem_free(parents, devcnt * sizeof (char)); 5903 if (error == DDI_SUCCESS) { 5904 exit_driver(dnp); 5905 return (ops); 5906 } 5907 5908 exit_driver(dnp); 5909 ddi_rele_driver(major); 5910 return (NULL); 5911 } 5912 5913 /* 5914 * Default bus_config entry point for nexus drivers 5915 */ 5916 int 5917 ndi_busop_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op, 5918 void *arg, dev_info_t **child, clock_t timeout) 5919 { 5920 major_t major; 5921 5922 /* 5923 * A timeout of 30 minutes or more is probably a mistake. 5924 * This is intended to catch uses where timeout is in 5925 * the wrong units. timeout must be in units of ticks.
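 * For example, a nexus wanting a 5 second wait should pass
 * SEC_TO_TICK(5) (or drv_usectohz(5000000)), never a raw 5.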
5926 */ 5927 ASSERT(timeout < SEC_TO_TICK(1800)); 5928 5929 major = (major_t)-1; 5930 switch (op) { 5931 case BUS_CONFIG_ONE: 5932 NDI_DEBUG(flags, (CE_CONT, "%s%d: bus config %s timeout=%ld\n", 5933 ddi_driver_name(pdip), ddi_get_instance(pdip), 5934 (char *)arg, timeout)); 5935 return (devi_config_one(pdip, (char *)arg, child, flags, 5936 timeout)); 5937 5938 case BUS_CONFIG_DRIVER: 5939 major = (major_t)(uintptr_t)arg; 5940 /*FALLTHROUGH*/ 5941 case BUS_CONFIG_ALL: 5942 NDI_DEBUG(flags, (CE_CONT, "%s%d: bus config timeout=%ld\n", 5943 ddi_driver_name(pdip), ddi_get_instance(pdip), 5944 timeout)); 5945 if (timeout > 0) { 5946 NDI_DEBUG(flags, (CE_CONT, 5947 "%s%d: bus config all timeout=%ld\n", 5948 ddi_driver_name(pdip), ddi_get_instance(pdip), 5949 timeout)); 5950 delay(timeout); 5951 } 5952 return (config_immediate_children(pdip, flags, major)); 5953 5954 default: 5955 return (NDI_FAILURE); 5956 } 5957 /*NOTREACHED*/ 5958 } 5959 5960 /* 5961 * Default busop bus_unconfig handler for nexus drivers 5962 */ 5963 int 5964 ndi_busop_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op, 5965 void *arg) 5966 { 5967 major_t major; 5968 5969 major = (major_t)-1; 5970 switch (op) { 5971 case BUS_UNCONFIG_ONE: 5972 NDI_DEBUG(flags, (CE_CONT, "%s%d: bus unconfig %s\n", 5973 ddi_driver_name(pdip), ddi_get_instance(pdip), 5974 (char *)arg)); 5975 return (devi_unconfig_one(pdip, (char *)arg, flags)); 5976 5977 case BUS_UNCONFIG_DRIVER: 5978 major = (major_t)(uintptr_t)arg; 5979 /*FALLTHROUGH*/ 5980 case BUS_UNCONFIG_ALL: 5981 NDI_DEBUG(flags, (CE_CONT, "%s%d: bus unconfig all\n", 5982 ddi_driver_name(pdip), ddi_get_instance(pdip))); 5983 return (unconfig_immediate_children(pdip, NULL, flags, major)); 5984 5985 default: 5986 return (NDI_FAILURE); 5987 } 5988 /*NOTREACHED*/ 5989 } 5990 5991 /* 5992 * dummy functions to be removed 5993 */ 5994 void 5995 impl_rem_dev_props(dev_info_t *dip) 5996 { 5997 _NOTE(ARGUNUSED(dip)) 5998 /* do nothing */ 5999 } 6000 6001 /* 6002 * Determine if a node is a leaf node. If not sure, return false (0). 
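 * A node is considered a leaf when the driver it is bound to has
 * DN_LEAF_DRIVER set in its devnames entry; nodes not yet bound to a
 * driver report non-leaf.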
/*
 * dummy functions to be removed
 */
void
impl_rem_dev_props(dev_info_t *dip)
{
	_NOTE(ARGUNUSED(dip))
	/* do nothing */
}

/*
 * Determine if a node is a leaf node. If not sure, return false (0).
 */
static int
is_leaf_node(dev_info_t *dip)
{
	major_t major = ddi_driver_major(dip);

	if (major == (major_t)-1)
		return (0);

	return (devnamesp[major].dn_flags & DN_LEAF_DRIVER);
}

/*
 * Multithreaded [un]configuration
 */
static struct mt_config_handle *
mt_config_init(dev_info_t *pdip, dev_info_t **dipp, int flags,
    major_t major, int op, struct brevq_node **brevqp)
{
	struct mt_config_handle	*hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);

	mutex_init(&hdl->mtc_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&hdl->mtc_cv, NULL, CV_DEFAULT, NULL);
	hdl->mtc_pdip = pdip;
	hdl->mtc_fdip = dipp;
	hdl->mtc_parmajor = (major_t)-1;
	hdl->mtc_flags = flags;
	hdl->mtc_major = major;
	hdl->mtc_thr_count = 0;
	hdl->mtc_op = op;
	hdl->mtc_error = 0;
	hdl->mtc_brevqp = brevqp;

#ifdef DEBUG
	gethrestime(&hdl->start_time);
	hdl->total_time = 0;
#endif /* DEBUG */

	return (hdl);
}

#ifdef DEBUG
static int
time_diff_in_msec(timestruc_t start, timestruc_t end)
{
	int	nsec, sec;

	sec = end.tv_sec - start.tv_sec;
	nsec = end.tv_nsec - start.tv_nsec;
	if (nsec < 0) {
		nsec += NANOSEC;
		sec -= 1;
	}

	return (sec * (NANOSEC >> 20) + (nsec >> 20));
}

#endif /* DEBUG */

static int
mt_config_fini(struct mt_config_handle *hdl)
{
	int		rv;
#ifdef DEBUG
	int		real_time;
	timestruc_t	end_time;
#endif /* DEBUG */

	mutex_enter(&hdl->mtc_lock);
	while (hdl->mtc_thr_count > 0)
		cv_wait(&hdl->mtc_cv, &hdl->mtc_lock);
	rv = hdl->mtc_error;
	mutex_exit(&hdl->mtc_lock);

#ifdef DEBUG
	gethrestime(&end_time);
	real_time = time_diff_in_msec(hdl->start_time, end_time);
	if ((ddidebug & DDI_MTCONFIG) && hdl->mtc_pdip)
		cmn_err(CE_NOTE,
		    "config %s%d: total time %d msec, real time %d msec",
		    ddi_driver_name(hdl->mtc_pdip),
		    ddi_get_instance(hdl->mtc_pdip),
		    hdl->total_time, real_time);
#endif /* DEBUG */

	cv_destroy(&hdl->mtc_cv);
	mutex_destroy(&hdl->mtc_lock);
	kmem_free(hdl, sizeof (*hdl));

	return (rv);
}

struct mt_config_data {
	struct mt_config_handle	*mtc_hdl;
	dev_info_t		*mtc_dip;
	major_t			mtc_major;
	int			mtc_flags;
	struct brevq_node	*mtc_brn;
	struct mt_config_data	*mtc_next;
};

static void
mt_config_thread(void *arg)
{
	struct mt_config_data	*mcd = (struct mt_config_data *)arg;
	struct mt_config_handle	*hdl = mcd->mtc_hdl;
	dev_info_t		*dip = mcd->mtc_dip;
	dev_info_t		*rdip, **dipp;
	major_t			major = mcd->mtc_major;
	int			flags = mcd->mtc_flags;
	int			rv = 0;

#ifdef DEBUG
	timestruc_t start_time, end_time;
	gethrestime(&start_time);
#endif /* DEBUG */

	rdip = NULL;
	dipp = hdl->mtc_fdip ? &rdip : NULL;

	switch (hdl->mtc_op) {
	case MT_CONFIG_OP:
		rv = devi_config_common(dip, flags, major);
		break;
	case MT_UNCONFIG_OP:
		if (mcd->mtc_brn) {
			struct brevq_node *brevq = NULL;
			rv = devi_unconfig_common(dip, dipp, flags, major,
			    &brevq);
			mcd->mtc_brn->child = brevq;
		} else
			rv = devi_unconfig_common(dip, dipp, flags, major,
			    NULL);
		break;
	}

	mutex_enter(&hdl->mtc_lock);
#ifdef DEBUG
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(start_time, end_time);
#endif /* DEBUG */
	if (rv != NDI_SUCCESS)
		hdl->mtc_error = rv;
	if (hdl->mtc_fdip && *hdl->mtc_fdip == NULL) {
		*hdl->mtc_fdip = rdip;
		rdip = NULL;
	}

	if (--hdl->mtc_thr_count == 0)
		cv_broadcast(&hdl->mtc_cv);
	mutex_exit(&hdl->mtc_lock);

	if (rdip) {
		ASSERT(rv != NDI_SUCCESS);
		ndi_rele_devi(rdip);
	}

	ndi_rele_devi(dip);
	kmem_free(mcd, sizeof (*mcd));
}
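/*
 * A minimal sketch (assumed caller pattern, not code in this block) of how
 * the handle, the per-child mt_config_data and the worker thread above fit
 * together: the configuration code builds a handle, fans the per-child work
 * out (each worker runs mt_config_thread()), then waits for every worker
 * and collects the first error.
 *
 *	struct mt_config_handle *hdl;
 *	int error;
 *
 *	hdl = mt_config_init(pdip, dipp, flags, major, MT_CONFIG_OP, NULL);
 *	mt_config_children(hdl);	... dispatches one thread per child
 *	error = mt_config_fini(hdl);	... waits, reports, frees hdl
 */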
/*
 * Multi-threaded config/unconfig of child nexus
 */
static void
mt_config_children(struct mt_config_handle *hdl)
{
	dev_info_t		*pdip = hdl->mtc_pdip;
	major_t			major = hdl->mtc_major;
	dev_info_t		*dip;
	int			circ;
	struct brevq_node	*brn = NULL;
	struct mt_config_data	*mcd_head = NULL;
	struct mt_config_data	*mcd_tail = NULL;
	struct mt_config_data	*mcd;
#ifdef DEBUG
	timestruc_t		end_time;

	/* Update total_time in handle */
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(hdl->start_time, end_time);
#endif

	ndi_devi_enter(pdip, &circ);
	dip = ddi_get_child(pdip);
	while (dip) {
		if (hdl->mtc_op == MT_UNCONFIG_OP && hdl->mtc_brevqp &&
		    !(DEVI_EVREMOVE(dip)) &&
		    i_ddi_node_state(dip) >= DS_INITIALIZED) {
			/*
			 * Enqueue this dip's deviname.
			 * No need to hold a lock while enqueuing since this
			 * is the only thread doing the enqueue and no one
			 * walks the queue while we are in multithreaded
			 * unconfiguration.
			 */
			brn = brevq_enqueue(hdl->mtc_brevqp, dip, NULL);
		}

		/*
		 * Hold the child that we are processing so it does not get
		 * removed. The corresponding ndi_rele_devi() for children
		 * that are not being skipped is done at the end of
		 * mt_config_thread().
		 */
		ndi_hold_devi(dip);

		/*
		 * skip leaf nodes and (for configure) nodes not
		 * fully attached.
		 */
		if (is_leaf_node(dip) ||
		    (hdl->mtc_op == MT_CONFIG_OP &&
		    i_ddi_node_state(dip) < DS_READY)) {
			ndi_rele_devi(dip);
			dip = ddi_get_next_sibling(dip);
			continue;
		}

		mcd = kmem_alloc(sizeof (*mcd), KM_SLEEP);
		mcd->mtc_dip = dip;
		mcd->mtc_hdl = hdl;
		mcd->mtc_brn = brn;

		/*
		 * Switch a 'driver' operation to an 'all' operation below a
		 * node bound to the driver.
		 */
		if ((major == (major_t)-1) ||
		    (major == ddi_driver_major(pdip)))
			mcd->mtc_major = (major_t)-1;
		else
			mcd->mtc_major = major;

		/*
		 * The unconfig-driver to unconfig-all conversion above
		 * constitutes an autodetach for NDI_DETACH_DRIVER calls,
		 * set NDI_AUTODETACH.
		 */
		mcd->mtc_flags = hdl->mtc_flags;
		if ((mcd->mtc_flags & NDI_DETACH_DRIVER) &&
		    (hdl->mtc_op == MT_UNCONFIG_OP) &&
		    (major == ddi_driver_major(pdip)))
			mcd->mtc_flags |= NDI_AUTODETACH;

		mutex_enter(&hdl->mtc_lock);
		hdl->mtc_thr_count++;
		mutex_exit(&hdl->mtc_lock);

		/*
		 * Add to end of list to process after ndi_devi_exit to avoid
		 * locking differences depending on value of mtc_off.
		 */
		mcd->mtc_next = NULL;
		if (mcd_head == NULL)
			mcd_head = mcd;
		else
			mcd_tail->mtc_next = mcd;
		mcd_tail = mcd;

		dip = ddi_get_next_sibling(dip);
	}
	ndi_devi_exit(pdip, circ);

	/* go through the list of held children */
	for (mcd = mcd_head; mcd; mcd = mcd_head) {
		mcd_head = mcd->mtc_next;
		if (mtc_off)
			mt_config_thread(mcd);
		else
			(void) thread_create(NULL, 0, mt_config_thread, mcd,
			    0, &p0, TS_RUN, minclsyspri);
	}
}

static void
mt_config_driver(struct mt_config_handle *hdl)
{
	major_t			par_major = hdl->mtc_parmajor;
	major_t			major = hdl->mtc_major;
	struct devnames		*dnp = &devnamesp[par_major];
	dev_info_t		*dip;
	struct mt_config_data	*mcd_head = NULL;
	struct mt_config_data	*mcd_tail = NULL;
	struct mt_config_data	*mcd;
#ifdef DEBUG
	timestruc_t		end_time;

	/* Update total_time in handle */
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(hdl->start_time, end_time);
#endif
	ASSERT(par_major != (major_t)-1);
	ASSERT(major != (major_t)-1);

	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = devnamesp[par_major].dn_head;
	while (dip) {
		/*
		 * Hold the child that we are processing so it does not get
		 * removed. The corresponding ndi_rele_devi() for children
		 * that are not being skipped is done at the end of
		 * mt_config_thread().
		 */
		ndi_hold_devi(dip);

		/* skip leaf nodes and nodes not fully attached */
		if ((i_ddi_node_state(dip) < DS_READY) || is_leaf_node(dip)) {
			ndi_rele_devi(dip);
			dip = ddi_get_next(dip);
			continue;
		}

		mcd = kmem_alloc(sizeof (*mcd), KM_SLEEP);
		mcd->mtc_dip = dip;
		mcd->mtc_hdl = hdl;
		mcd->mtc_major = major;
		mcd->mtc_flags = hdl->mtc_flags;

		mutex_enter(&hdl->mtc_lock);
		hdl->mtc_thr_count++;
		mutex_exit(&hdl->mtc_lock);

		/*
		 * Add to end of list to process after UNLOCK_DEV_OPS to avoid
		 * locking differences depending on value of mtc_off.
		 */
		mcd->mtc_next = NULL;
		if (mcd_head == NULL)
			mcd_head = mcd;
		else
			mcd_tail->mtc_next = mcd;
		mcd_tail = mcd;

		dip = ddi_get_next(dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* go through the list of held children */
	for (mcd = mcd_head; mcd; mcd = mcd_head) {
		mcd_head = mcd->mtc_next;
		if (mtc_off)
			mt_config_thread(mcd);
		else
			(void) thread_create(NULL, 0, mt_config_thread, mcd,
			    0, &p0, TS_RUN, minclsyspri);
	}
}
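/*
 * A minimal caller sketch (an assumption, not a caller in this block):
 * because mt_config_init() always initializes mtc_parmajor to (major_t)-1
 * and mt_config_driver() asserts that it is valid, code driving a
 * per-driver operation has to fill it in explicitly before dispatching.
 *
 *	hdl = mt_config_init(NULL, dipp, flags, major, MT_UNCONFIG_OP, NULL);
 *	hdl->mtc_parmajor = parmajor;	... nexus driver whose children we walk
 *	mt_config_driver(hdl);
 *	error = mt_config_fini(hdl);
 */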
/*
 * Given the nodeid for a persistent (PROM or SID) node, return
 * the corresponding devinfo node.
 * NOTE: This function will return NULL for .conf nodeids.
 */
dev_info_t *
e_ddi_nodeid_to_dip(dnode_t nodeid)
{
	dev_info_t		*dip = NULL;
	struct devi_nodeid	*prev, *elem;

	mutex_enter(&devimap->dno_lock);

	prev = NULL;
	for (elem = devimap->dno_head; elem; elem = elem->next) {
		if (elem->nodeid == nodeid) {
			ndi_hold_devi(elem->dip);
			dip = elem->dip;
			break;
		}
		prev = elem;
	}

	/*
	 * Move to head for faster lookup next time
	 */
	if (elem && prev) {
		prev->next = elem->next;
		elem->next = devimap->dno_head;
		devimap->dno_head = elem;
	}

	mutex_exit(&devimap->dno_lock);
	return (dip);
}

static void
free_cache_task(void *arg)
{
	ASSERT(arg == NULL);

	mutex_enter(&di_cache.cache_lock);

	/*
	 * The cache can be invalidated without holding the lock
	 * but it can be made valid again only while the lock is held.
	 * So if the cache is invalid when the lock is held, it will
	 * stay invalid until lock is released.
	 */
	if (!di_cache.cache_valid)
		i_ddi_di_cache_free(&di_cache);

	mutex_exit(&di_cache.cache_lock);

	if (di_cache_debug)
		cmn_err(CE_NOTE, "system_taskq: di_cache freed");
}

extern int modrootloaded;

void
i_ddi_di_cache_free(struct di_cache *cache)
{
	int	error;

	ASSERT(mutex_owned(&cache->cache_lock));

	if (cache->cache_size) {
		ASSERT(cache->cache_size > 0);
		ASSERT(cache->cache_data);

		kmem_free(cache->cache_data, cache->cache_size);
		cache->cache_data = NULL;
		cache->cache_size = 0;

		if (di_cache_debug)
			cmn_err(CE_NOTE, "i_ddi_di_cache_free: freed cachemem");
	} else {
		ASSERT(cache->cache_data == NULL);
		if (di_cache_debug)
			cmn_err(CE_NOTE, "i_ddi_di_cache_free: NULL cache");
	}

	if (!modrootloaded || rootvp == NULL || vn_is_readonly(rootvp)) {
		if (di_cache_debug) {
			cmn_err(CE_WARN, "/ not mounted/RDONLY. Skip unlink");
		}
		return;
	}

	error = vn_remove(DI_CACHE_FILE, UIO_SYSSPACE, RMFILE);
	if (di_cache_debug && error && error != ENOENT) {
		cmn_err(CE_WARN, "%s: unlink failed: %d", DI_CACHE_FILE, error);
	} else if (di_cache_debug && !error) {
		cmn_err(CE_NOTE, "i_ddi_di_cache_free: unlinked cache file");
	}
}

void
i_ddi_di_cache_invalidate(int kmflag)
{
	uint_t	flag;

	if (!modrootloaded || !i_ddi_io_initialized()) {
		if (di_cache_debug)
			cmn_err(CE_NOTE, "I/O not inited. Skipping invalidate");
		return;
	}

	/*
	 * Invalidate the in-core cache
	 */
	atomic_and_32(&di_cache.cache_valid, 0);

	flag = (kmflag == KM_SLEEP) ? TQ_SLEEP : TQ_NOSLEEP;

	(void) taskq_dispatch(system_taskq, free_cache_task, NULL, flag);

	if (di_cache_debug) {
		cmn_err(CE_NOTE, "invalidation with km_flag: %s",
		    kmflag == KM_SLEEP ? "KM_SLEEP" : "KM_NOSLEEP");
	}
}
static void
i_bind_vhci_node(dev_info_t *dip)
{
	char	*node_name;

	node_name = i_ddi_strdup(ddi_node_name(dip), KM_SLEEP);
	i_ddi_set_binding_name(dip, node_name);
	DEVI(dip)->devi_major = ddi_name_to_major(node_name);
	i_ddi_set_node_state(dip, DS_BOUND);
}

static void
i_free_vhci_bind_name(dev_info_t *dip)
{
	if (DEVI(dip)->devi_binding_name) {
		/*
		 * The binding name was set up via i_ddi_strdup() of the node
		 * name above, so free the strdup'ed length (the original code
		 * mistakenly used sizeof() on a char pointer here).
		 */
		kmem_free(DEVI(dip)->devi_binding_name,
		    strlen(ddi_node_name(dip)) + 1);
	}
}

static char vhci_node_addr[2];

static int
i_init_vhci_node(dev_info_t *dip)
{
	add_global_props(dip);
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL)
		return (-1);

	DEVI(dip)->devi_instance = e_ddi_assign_instance(dip);
	e_ddi_keep_instance(dip);
	vhci_node_addr[0] = '\0';
	ddi_set_name_addr(dip, vhci_node_addr);
	i_ddi_set_node_state(dip, DS_INITIALIZED);
	return (0);
}

static void
i_link_vhci_node(dev_info_t *dip)
{
	/*
	 * scsi_vhci should be kept leftmost in the device tree.
	 */
	mutex_enter(&global_vhci_lock);
	if (scsi_vhci_dip) {
		DEVI(dip)->devi_sibling = DEVI(scsi_vhci_dip)->devi_sibling;
		DEVI(scsi_vhci_dip)->devi_sibling = DEVI(dip);
	} else {
		DEVI(dip)->devi_sibling = DEVI(top_devinfo)->devi_child;
		DEVI(top_devinfo)->devi_child = DEVI(dip);
	}
	mutex_exit(&global_vhci_lock);
}

/*
 * This is a special routine to enumerate the vhci node (child of the rootnex
 * node) without holding the ndi_devi_enter() lock. The device node is
 * allocated, initialized and brought into the DS_READY state before being
 * inserted into the device tree. The vhci node is handcrafted here to bring
 * the node to DS_READY, similar to the rootnex node.
 *
 * The global_vhci_lock protects linking the node into the device tree,
 * as the same lock is held before linking/unlinking any direct child
 * of the rootnex node.
 *
 * This routine is a workaround to handle a possible deadlock
 * that occurs while trying to enumerate a node in a different sub-tree
 * during _init/_attach entry points.
 */
/*ARGSUSED*/
dev_info_t *
ndi_devi_config_vhci(char *drvname, int flags)
{
	struct devnames		*dnp;
	dev_info_t		*dip;
	major_t			major = ddi_name_to_major(drvname);

	if (major == -1)
		return (NULL);

	/* Make sure we create the VHCI node only once */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	if (dnp->dn_head) {
		dip = dnp->dn_head;
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* Allocate the VHCI node */
	ndi_devi_alloc_sleep(top_devinfo, drvname, DEVI_SID_NODEID, &dip);
	ndi_hold_devi(dip);

	/* Mark the node as VHCI */
	DEVI(dip)->devi_node_attributes |= DDI_VHCI_NODE;

	i_ddi_add_devimap(dip);
	i_bind_vhci_node(dip);
	if (i_init_vhci_node(dip) == -1) {
		i_free_vhci_bind_name(dip);
		ndi_rele_devi(dip);
		(void) ndi_devi_free(dip);
		return (NULL);
	}

	DEVI_SET_ATTACHING(dip);
	if (devi_attach(dip, DDI_ATTACH) != DDI_SUCCESS) {
		cmn_err(CE_CONT, "Could not attach %s driver", drvname);
		e_ddi_free_instance(dip, vhci_node_addr);
		i_free_vhci_bind_name(dip);
		ndi_rele_devi(dip);
		(void) ndi_devi_free(dip);
		return (NULL);
	}
	DEVI_CLR_ATTACHING(dip);

	i_link_vhci_node(dip);
	i_ddi_set_node_state(dip, DS_READY);

	LOCK_DEV_OPS(&dnp->dn_lock);
	dnp->dn_flags |= DN_DRIVER_HELD;
	dnp->dn_head = dip;
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	i_ndi_devi_report_status_change(dip, NULL);

	return (dip);
}

/*
 * ibt_hw_is_present() returns 0 when there is no IB hardware actively
 * running. This is primarily useful for modules like rpcmod which need a
 * quick check to decide whether or not to try to use InfiniBand.
 */
int ib_hw_status = 0;
int
ibt_hw_is_present()
{
	return (ib_hw_status);
}
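/*
 * A hypothetical consumer sketch (an assumption for illustration, mirroring
 * the rpcmod use case described above): callers treat the flag as a cheap
 * hint and fall back to another transport when no IB hardware has attached.
 *
 *	if (ibt_hw_is_present())
 *		... attempt to set up an RDMA/IB transport ...
 *	else
 *		... use the ordinary network path ...
 */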