1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/note.h> 29 #include <sys/t_lock.h> 30 #include <sys/cmn_err.h> 31 #include <sys/instance.h> 32 #include <sys/conf.h> 33 #include <sys/stat.h> 34 #include <sys/ddi.h> 35 #include <sys/hwconf.h> 36 #include <sys/sunddi.h> 37 #include <sys/sunndi.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/ndi_impldefs.h> 40 #include <sys/modctl.h> 41 #include <sys/dacf.h> 42 #include <sys/promif.h> 43 #include <sys/cpuvar.h> 44 #include <sys/pathname.h> 45 #include <sys/taskq.h> 46 #include <sys/sysevent.h> 47 #include <sys/sunmdi.h> 48 #include <sys/stream.h> 49 #include <sys/strsubr.h> 50 #include <sys/fs/snode.h> 51 #include <sys/fs/dv_node.h> 52 #include <sys/reboot.h> 53 54 #ifdef DEBUG 55 int ddidebug = DDI_AUDIT; 56 #else 57 int ddidebug = 0; 58 #endif 59 60 #define MT_CONFIG_OP 0 61 #define MT_UNCONFIG_OP 1 62 63 /* Multi-threaded configuration */ 64 struct mt_config_handle { 65 kmutex_t mtc_lock; 66 kcondvar_t mtc_cv; 67 int mtc_thr_count; 68 dev_info_t *mtc_pdip; /* parent dip for mt_config_children */ 69 dev_info_t **mtc_fdip; /* "a" dip where unconfigure failed */ 70 major_t mtc_parmajor; /* parent major for mt_config_driver */ 71 major_t mtc_major; 72 int mtc_flags; 73 int mtc_op; /* config or unconfig */ 74 int mtc_error; /* operation error */ 75 struct brevq_node **mtc_brevqp; /* outstanding branch events queue */ 76 #ifdef DEBUG 77 int total_time; 78 timestruc_t start_time; 79 #endif /* DEBUG */ 80 }; 81 82 struct devi_nodeid { 83 pnode_t nodeid; 84 dev_info_t *dip; 85 struct devi_nodeid *next; 86 }; 87 88 struct devi_nodeid_list { 89 kmutex_t dno_lock; /* Protects other fields */ 90 struct devi_nodeid *dno_head; /* list of devi nodeid elements */ 91 struct devi_nodeid *dno_free; /* Free list */ 92 uint_t dno_list_length; /* number of dips in list */ 93 }; 94 95 /* used to keep track of branch remove events to be generated */ 96 struct brevq_node { 97 char *brn_deviname; 98 struct brevq_node *brn_sibling; 99 struct brevq_node *brn_child; 100 }; 101 102 static struct devi_nodeid_list devi_nodeid_list; 103 static struct devi_nodeid_list *devimap = &devi_nodeid_list; 104 105 /* 106 * Well known nodes which are attached first at boot time. 107 */ 108 dev_info_t *top_devinfo; /* root of device tree */ 109 dev_info_t *options_dip; 110 dev_info_t *pseudo_dip; 111 dev_info_t *clone_dip; 112 dev_info_t *scsi_vhci_dip; /* MPXIO dip */ 113 major_t clone_major; 114 115 /* 116 * A non-global zone's /dev is derived from the device tree. 
117 * This generation number serves to indicate when a zone's 118 * /dev may need to be updated. 119 */ 120 volatile ulong_t devtree_gen; /* generation number */ 121 122 /* block all future dev_info state changes */ 123 static hrtime_t volatile devinfo_freeze = 0; 124 125 /* number of dev_info attaches/detaches currently in progress */ 126 static ulong_t devinfo_attach_detach = 0; 127 128 extern kmutex_t global_vhci_lock; 129 130 /* bitset of DS_SYSAVAIL & DS_RECONFIG - no races, no lock */ 131 static int devname_state = 0; 132 133 /* 134 * The devinfo snapshot cache and related variables. 135 * The only field in the di_cache structure that needs initialization 136 * is the mutex (cache_lock). However, since this is an adaptive mutex 137 * (MUTEX_DEFAULT) - it is automatically initialized by being allocated 138 * in zeroed memory (static storage class). Therefore no explicit 139 * initialization of the di_cache structure is needed. 140 */ 141 struct di_cache di_cache = {1}; 142 int di_cache_debug = 0; 143 144 /* For ddvis, which needs pseudo children under PCI */ 145 int pci_allow_pseudo_children = 0; 146 147 /* Allow path-oriented alias driver binding on driver.conf enumerated nodes */ 148 int driver_conf_allow_path_alias = 1; 149 150 /* 151 * The following switch is for service people, in case a 152 * 3rd party driver depends on identify(9e) being called. 153 */ 154 int identify_9e = 0; 155 156 int mtc_off; /* turn off mt config */ 157 158 static kmem_cache_t *ddi_node_cache; /* devinfo node cache */ 159 static devinfo_log_header_t *devinfo_audit_log; /* devinfo log */ 160 static int devinfo_log_size; /* size in pages */ 161 162 static int lookup_compatible(dev_info_t *, uint_t); 163 static char *encode_composite_string(char **, uint_t, size_t *, uint_t); 164 static void link_to_driver_list(dev_info_t *); 165 static void unlink_from_driver_list(dev_info_t *); 166 static void add_to_dn_list(struct devnames *, dev_info_t *); 167 static void remove_from_dn_list(struct devnames *, dev_info_t *); 168 static dev_info_t *find_child_by_callback(dev_info_t *, char *, char *, 169 int (*)(dev_info_t *, char *, int)); 170 static dev_info_t *find_duplicate_child(); 171 static void add_global_props(dev_info_t *); 172 static void remove_global_props(dev_info_t *); 173 static int uninit_node(dev_info_t *); 174 static void da_log_init(void); 175 static void da_log_enter(dev_info_t *); 176 static int walk_devs(dev_info_t *, int (*f)(dev_info_t *, void *), void *, int); 177 static int reset_nexus_flags(dev_info_t *, void *); 178 static void ddi_optimize_dtree(dev_info_t *); 179 static int is_leaf_node(dev_info_t *); 180 static struct mt_config_handle *mt_config_init(dev_info_t *, dev_info_t **, 181 int, major_t, int, struct brevq_node **); 182 static void mt_config_children(struct mt_config_handle *); 183 static void mt_config_driver(struct mt_config_handle *); 184 static int mt_config_fini(struct mt_config_handle *); 185 static int devi_unconfig_common(dev_info_t *, dev_info_t **, int, major_t, 186 struct brevq_node **); 187 static int 188 ndi_devi_config_obp_args(dev_info_t *parent, char *devnm, 189 dev_info_t **childp, int flags); 190 static void i_link_vhci_node(dev_info_t *); 191 static void ndi_devi_exit_and_wait(dev_info_t *dip, 192 int circular, clock_t end_time); 193 static int ndi_devi_unbind_driver(dev_info_t *dip); 194 195 /* 196 * dev_info cache and node management 197 */ 198 199 /* initialize dev_info node cache */ 200 void 201 i_ddi_node_cache_init() 202 { 203 ASSERT(ddi_node_cache == NULL); 
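	/*
	 * Create the cache backing all dev_info node allocations. No
	 * constructor, destructor, or reclaim callbacks are supplied
	 * because i_ddi_alloc_node() bzero()s and explicitly initializes
	 * each node it takes from the cache.
	 */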
204 ddi_node_cache = kmem_cache_create("dev_info_node_cache", 205 sizeof (struct dev_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 206 207 if (ddidebug & DDI_AUDIT) 208 da_log_init(); 209 } 210 211 /* 212 * Allocating a dev_info node, callable from interrupt context with KM_NOSLEEP 213 * The allocated node has a reference count of 0. 214 */ 215 dev_info_t * 216 i_ddi_alloc_node(dev_info_t *pdip, char *node_name, pnode_t nodeid, 217 int instance, ddi_prop_t *sys_prop, int flag) 218 { 219 struct dev_info *devi; 220 struct devi_nodeid *elem; 221 static char failed[] = "i_ddi_alloc_node: out of memory"; 222 223 ASSERT(node_name != NULL); 224 225 if ((devi = kmem_cache_alloc(ddi_node_cache, flag)) == NULL) { 226 cmn_err(CE_NOTE, failed); 227 return (NULL); 228 } 229 230 bzero(devi, sizeof (struct dev_info)); 231 232 if (devinfo_audit_log) { 233 devi->devi_audit = kmem_zalloc(sizeof (devinfo_audit_t), flag); 234 if (devi->devi_audit == NULL) 235 goto fail; 236 } 237 238 if ((devi->devi_node_name = i_ddi_strdup(node_name, flag)) == NULL) 239 goto fail; 240 241 /* default binding name is node name */ 242 devi->devi_binding_name = devi->devi_node_name; 243 devi->devi_major = (major_t)-1; /* unbound by default */ 244 245 /* 246 * Make a copy of system property 247 */ 248 if (sys_prop && 249 (devi->devi_sys_prop_ptr = i_ddi_prop_list_dup(sys_prop, flag)) 250 == NULL) 251 goto fail; 252 253 /* 254 * Assign devi_nodeid, devi_node_class, devi_node_attributes 255 * according to the following algorithm: 256 * 257 * nodeid arg node class node attributes 258 * 259 * DEVI_PSEUDO_NODEID DDI_NC_PSEUDO A 260 * DEVI_SID_NODEID DDI_NC_PSEUDO A,P 261 * other DDI_NC_PROM P 262 * 263 * Where A = DDI_AUTO_ASSIGNED_NODEID (auto-assign a nodeid) 264 * and P = DDI_PERSISTENT 265 * 266 * auto-assigned nodeids are also auto-freed. 267 */ 268 switch (nodeid) { 269 case DEVI_SID_NODEID: 270 devi->devi_node_attributes = DDI_PERSISTENT; 271 if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL) 272 goto fail; 273 /*FALLTHROUGH*/ 274 case DEVI_PSEUDO_NODEID: 275 devi->devi_node_attributes |= DDI_AUTO_ASSIGNED_NODEID; 276 devi->devi_node_class = DDI_NC_PSEUDO; 277 if (impl_ddi_alloc_nodeid(&devi->devi_nodeid)) { 278 panic("i_ddi_alloc_node: out of nodeids"); 279 /*NOTREACHED*/ 280 } 281 break; 282 default: 283 if ((elem = kmem_zalloc(sizeof (*elem), flag)) == NULL) 284 goto fail; 285 /* 286 * the nodetype is 'prom', try to 'take' the nodeid now. 287 * This requires memory allocation, so check for failure. 288 */ 289 if (impl_ddi_take_nodeid(nodeid, flag) != 0) { 290 kmem_free(elem, sizeof (*elem)); 291 goto fail; 292 } 293 294 devi->devi_nodeid = nodeid; 295 devi->devi_node_class = DDI_NC_PROM; 296 devi->devi_node_attributes = DDI_PERSISTENT; 297 298 } 299 300 if (ndi_dev_is_persistent_node((dev_info_t *)devi)) { 301 mutex_enter(&devimap->dno_lock); 302 elem->next = devimap->dno_free; 303 devimap->dno_free = elem; 304 mutex_exit(&devimap->dno_lock); 305 } 306 307 /* 308 * Instance is normally initialized to -1. In a few special 309 * cases, the caller may specify an instance (e.g. CPU nodes). 
310 */ 311 devi->devi_instance = instance; 312 313 /* 314 * set parent and bus_ctl parent 315 */ 316 devi->devi_parent = DEVI(pdip); 317 devi->devi_bus_ctl = DEVI(pdip); 318 319 NDI_CONFIG_DEBUG((CE_CONT, 320 "i_ddi_alloc_node: name=%s id=%d\n", node_name, devi->devi_nodeid)); 321 322 cv_init(&(devi->devi_cv), NULL, CV_DEFAULT, NULL); 323 mutex_init(&(devi->devi_lock), NULL, MUTEX_DEFAULT, NULL); 324 mutex_init(&(devi->devi_pm_lock), NULL, MUTEX_DEFAULT, NULL); 325 mutex_init(&(devi->devi_pm_busy_lock), NULL, MUTEX_DEFAULT, NULL); 326 327 i_ddi_set_node_state((dev_info_t *)devi, DS_PROTO); 328 da_log_enter((dev_info_t *)devi); 329 return ((dev_info_t *)devi); 330 331 fail: 332 if (devi->devi_sys_prop_ptr) 333 i_ddi_prop_list_delete(devi->devi_sys_prop_ptr); 334 if (devi->devi_node_name) 335 kmem_free(devi->devi_node_name, strlen(node_name) + 1); 336 if (devi->devi_audit) 337 kmem_free(devi->devi_audit, sizeof (devinfo_audit_t)); 338 kmem_cache_free(ddi_node_cache, devi); 339 cmn_err(CE_NOTE, failed); 340 return (NULL); 341 } 342 343 /* 344 * free a dev_info structure. 345 * NB. Not callable from interrupt since impl_ddi_free_nodeid may block. 346 */ 347 void 348 i_ddi_free_node(dev_info_t *dip) 349 { 350 struct dev_info *devi = DEVI(dip); 351 struct devi_nodeid *elem; 352 353 ASSERT(devi->devi_ref == 0); 354 ASSERT(devi->devi_addr == NULL); 355 ASSERT(devi->devi_node_state == DS_PROTO); 356 ASSERT(devi->devi_child == NULL); 357 358 /* free devi_addr_buf allocated by ddi_set_name_addr() */ 359 if (devi->devi_addr_buf) 360 kmem_free(devi->devi_addr_buf, 2 * MAXNAMELEN); 361 362 if (i_ndi_dev_is_auto_assigned_node(dip)) 363 impl_ddi_free_nodeid(DEVI(dip)->devi_nodeid); 364 365 if (ndi_dev_is_persistent_node(dip)) { 366 mutex_enter(&devimap->dno_lock); 367 ASSERT(devimap->dno_free); 368 elem = devimap->dno_free; 369 devimap->dno_free = elem->next; 370 mutex_exit(&devimap->dno_lock); 371 kmem_free(elem, sizeof (*elem)); 372 } 373 374 if (DEVI(dip)->devi_compat_names) 375 kmem_free(DEVI(dip)->devi_compat_names, 376 DEVI(dip)->devi_compat_length); 377 if (DEVI(dip)->devi_rebinding_name) 378 kmem_free(DEVI(dip)->devi_rebinding_name, 379 strlen(DEVI(dip)->devi_rebinding_name) + 1); 380 381 ddi_prop_remove_all(dip); /* remove driver properties */ 382 if (devi->devi_sys_prop_ptr) 383 i_ddi_prop_list_delete(devi->devi_sys_prop_ptr); 384 if (devi->devi_hw_prop_ptr) 385 i_ddi_prop_list_delete(devi->devi_hw_prop_ptr); 386 387 i_ddi_set_node_state(dip, DS_INVAL); 388 da_log_enter(dip); 389 if (devi->devi_audit) { 390 kmem_free(devi->devi_audit, sizeof (devinfo_audit_t)); 391 } 392 kmem_free(devi->devi_node_name, strlen(devi->devi_node_name) + 1); 393 if (devi->devi_device_class) 394 kmem_free(devi->devi_device_class, 395 strlen(devi->devi_device_class) + 1); 396 cv_destroy(&(devi->devi_cv)); 397 mutex_destroy(&(devi->devi_lock)); 398 mutex_destroy(&(devi->devi_pm_lock)); 399 mutex_destroy(&(devi->devi_pm_busy_lock)); 400 401 kmem_cache_free(ddi_node_cache, devi); 402 } 403 404 405 /* 406 * Node state transitions 407 */ 408 409 /* 410 * Change the node name 411 */ 412 int 413 ndi_devi_set_nodename(dev_info_t *dip, char *name, int flags) 414 { 415 _NOTE(ARGUNUSED(flags)) 416 char *nname, *oname; 417 418 ASSERT(dip && name); 419 420 oname = DEVI(dip)->devi_node_name; 421 if (strcmp(oname, name) == 0) 422 return (DDI_SUCCESS); 423 424 /* 425 * pcicfg_fix_ethernet requires a name change after node 426 * is linked into the tree. When pcicfg is fixed, we 427 * should only allow name change in DS_PROTO state. 
428 */ 429 if (i_ddi_node_state(dip) >= DS_BOUND) { 430 /* 431 * Don't allow name change once node is bound 432 */ 433 cmn_err(CE_NOTE, 434 "ndi_devi_set_nodename: node already bound dip = %p," 435 " %s -> %s", (void *)dip, ddi_node_name(dip), name); 436 return (NDI_FAILURE); 437 } 438 439 nname = i_ddi_strdup(name, KM_SLEEP); 440 DEVI(dip)->devi_node_name = nname; 441 i_ddi_set_binding_name(dip, nname); 442 kmem_free(oname, strlen(oname) + 1); 443 444 da_log_enter(dip); 445 return (NDI_SUCCESS); 446 } 447 448 void 449 i_ddi_add_devimap(dev_info_t *dip) 450 { 451 struct devi_nodeid *elem; 452 453 ASSERT(dip); 454 455 if (!ndi_dev_is_persistent_node(dip)) 456 return; 457 458 ASSERT(ddi_get_parent(dip) == NULL || (DEVI_VHCI_NODE(dip)) || 459 DEVI_BUSY_OWNED(ddi_get_parent(dip))); 460 461 mutex_enter(&devimap->dno_lock); 462 463 ASSERT(devimap->dno_free); 464 465 elem = devimap->dno_free; 466 devimap->dno_free = elem->next; 467 468 elem->nodeid = ddi_get_nodeid(dip); 469 elem->dip = dip; 470 elem->next = devimap->dno_head; 471 devimap->dno_head = elem; 472 473 devimap->dno_list_length++; 474 475 mutex_exit(&devimap->dno_lock); 476 } 477 478 static int 479 i_ddi_remove_devimap(dev_info_t *dip) 480 { 481 struct devi_nodeid *prev, *elem; 482 static const char *fcn = "i_ddi_remove_devimap"; 483 484 ASSERT(dip); 485 486 if (!ndi_dev_is_persistent_node(dip)) 487 return (DDI_SUCCESS); 488 489 mutex_enter(&devimap->dno_lock); 490 491 /* 492 * The following check is done with dno_lock held 493 * to prevent race between dip removal and 494 * e_ddi_prom_node_to_dip() 495 */ 496 if (e_ddi_devi_holdcnt(dip)) { 497 mutex_exit(&devimap->dno_lock); 498 return (DDI_FAILURE); 499 } 500 501 ASSERT(devimap->dno_head); 502 ASSERT(devimap->dno_list_length > 0); 503 504 prev = NULL; 505 for (elem = devimap->dno_head; elem; elem = elem->next) { 506 if (elem->dip == dip) { 507 ASSERT(elem->nodeid == ddi_get_nodeid(dip)); 508 break; 509 } 510 prev = elem; 511 } 512 513 if (elem && prev) 514 prev->next = elem->next; 515 else if (elem) 516 devimap->dno_head = elem->next; 517 else 518 panic("%s: devinfo node(%p) not found", 519 fcn, (void *)dip); 520 521 devimap->dno_list_length--; 522 523 elem->nodeid = 0; 524 elem->dip = NULL; 525 526 elem->next = devimap->dno_free; 527 devimap->dno_free = elem; 528 529 mutex_exit(&devimap->dno_lock); 530 531 return (DDI_SUCCESS); 532 } 533 534 /* 535 * Link this node into the devinfo tree and add to orphan list 536 * Not callable from interrupt context 537 */ 538 static void 539 link_node(dev_info_t *dip) 540 { 541 struct dev_info *devi = DEVI(dip); 542 struct dev_info *parent = devi->devi_parent; 543 dev_info_t **dipp; 544 545 ASSERT(parent); /* never called for root node */ 546 547 NDI_CONFIG_DEBUG((CE_CONT, "link_node: parent = %s child = %s\n", 548 parent->devi_node_name, devi->devi_node_name)); 549 550 /* 551 * Hold the global_vhci_lock before linking any direct 552 * children of rootnex driver. This special lock protects 553 * linking and unlinking for rootnext direct children. 554 */ 555 if ((dev_info_t *)parent == ddi_root_node()) 556 mutex_enter(&global_vhci_lock); 557 558 /* 559 * attach the node to end of the list unless the node is already there 560 */ 561 dipp = (dev_info_t **)(&DEVI(parent)->devi_child); 562 while (*dipp && (*dipp != dip)) { 563 dipp = (dev_info_t **)(&DEVI(*dipp)->devi_sibling); 564 } 565 ASSERT(*dipp == NULL); /* node is not linked */ 566 567 /* 568 * Now that we are in the tree, update the devi-nodeid map. 
569 */ 570 i_ddi_add_devimap(dip); 571 572 /* 573 * This is a temporary workaround for Bug 4618861. 574 * We keep the scsi_vhci nexus node on the left side of the devinfo 575 * tree (under the root nexus driver), so that virtual nodes under 576 * scsi_vhci will be SUSPENDed first and RESUMEd last. This ensures 577 * that the pHCI nodes are active during times when their clients 578 * may be depending on them. This workaround embodies the knowledge 579 * that system PM and CPR both traverse the tree left-to-right during 580 * SUSPEND and right-to-left during RESUME. 581 * Extending the workaround to IB Nexus/VHCI 582 * driver also. 583 */ 584 if (strcmp(devi->devi_binding_name, "scsi_vhci") == 0) { 585 /* Add scsi_vhci to beginning of list */ 586 ASSERT((dev_info_t *)parent == top_devinfo); 587 /* scsi_vhci under rootnex */ 588 devi->devi_sibling = parent->devi_child; 589 parent->devi_child = devi; 590 } else if (strcmp(devi->devi_binding_name, "ib") == 0) { 591 i_link_vhci_node(dip); 592 } else { 593 /* Add to end of list */ 594 *dipp = dip; 595 DEVI(dip)->devi_sibling = NULL; 596 } 597 598 /* 599 * Release the global_vhci_lock before linking any direct 600 * children of rootnex driver. 601 */ 602 if ((dev_info_t *)parent == ddi_root_node()) 603 mutex_exit(&global_vhci_lock); 604 605 /* persistent nodes go on orphan list */ 606 if (ndi_dev_is_persistent_node(dip)) 607 add_to_dn_list(&orphanlist, dip); 608 } 609 610 /* 611 * Unlink this node from the devinfo tree 612 */ 613 static int 614 unlink_node(dev_info_t *dip) 615 { 616 struct dev_info *devi = DEVI(dip); 617 struct dev_info *parent = devi->devi_parent; 618 dev_info_t **dipp; 619 620 ASSERT(parent != NULL); 621 ASSERT(devi->devi_node_state == DS_LINKED); 622 623 NDI_CONFIG_DEBUG((CE_CONT, "unlink_node: name = %s\n", 624 ddi_node_name(dip))); 625 626 /* check references */ 627 if (devi->devi_ref || i_ddi_remove_devimap(dip) != DDI_SUCCESS) 628 return (DDI_FAILURE); 629 630 /* 631 * Hold the global_vhci_lock before linking any direct 632 * children of rootnex driver. 633 */ 634 if ((dev_info_t *)parent == ddi_root_node()) 635 mutex_enter(&global_vhci_lock); 636 637 dipp = (dev_info_t **)(&DEVI(parent)->devi_child); 638 while (*dipp && (*dipp != dip)) { 639 dipp = (dev_info_t **)(&DEVI(*dipp)->devi_sibling); 640 } 641 if (*dipp) { 642 *dipp = (dev_info_t *)(devi->devi_sibling); 643 devi->devi_sibling = NULL; 644 } else { 645 NDI_CONFIG_DEBUG((CE_NOTE, "unlink_node: %s not linked", 646 devi->devi_node_name)); 647 } 648 649 /* 650 * Release the global_vhci_lock before linking any direct 651 * children of rootnex driver. 652 */ 653 if ((dev_info_t *)parent == ddi_root_node()) 654 mutex_exit(&global_vhci_lock); 655 656 /* Remove node from orphan list */ 657 if (ndi_dev_is_persistent_node(dip)) { 658 remove_from_dn_list(&orphanlist, dip); 659 } 660 661 return (DDI_SUCCESS); 662 } 663 664 /* 665 * Bind this devinfo node to a driver. If compat is NON-NULL, try that first. 666 * Else, use the node-name. 667 * 668 * NOTE: IEEE1275 specifies that nodename should be tried before compatible. 669 * Solaris implementation binds nodename after compatible. 
670 * 671 * If we find a binding, 672 * - set the binding name to the the string, 673 * - set major number to driver major 674 * 675 * If we don't find a binding, 676 * - return failure 677 */ 678 static int 679 bind_node(dev_info_t *dip) 680 { 681 char *p = NULL; 682 major_t major = (major_t)(major_t)-1; 683 struct dev_info *devi = DEVI(dip); 684 dev_info_t *parent = ddi_get_parent(dip); 685 686 ASSERT(devi->devi_node_state == DS_LINKED); 687 688 NDI_CONFIG_DEBUG((CE_CONT, "bind_node: 0x%p(name = %s)\n", 689 (void *)dip, ddi_node_name(dip))); 690 691 mutex_enter(&DEVI(dip)->devi_lock); 692 if (DEVI(dip)->devi_flags & DEVI_NO_BIND) { 693 mutex_exit(&DEVI(dip)->devi_lock); 694 return (DDI_FAILURE); 695 } 696 mutex_exit(&DEVI(dip)->devi_lock); 697 698 /* find the driver with most specific binding using compatible */ 699 major = ddi_compatible_driver_major(dip, &p); 700 if (major == (major_t)-1) 701 return (DDI_FAILURE); 702 703 devi->devi_major = major; 704 if (p != NULL) { 705 i_ddi_set_binding_name(dip, p); 706 NDI_CONFIG_DEBUG((CE_CONT, "bind_node: %s bound to %s\n", 707 devi->devi_node_name, p)); 708 } 709 710 /* Link node to per-driver list */ 711 link_to_driver_list(dip); 712 713 /* 714 * reset parent flag so that nexus will merge .conf props 715 */ 716 if (ndi_dev_is_persistent_node(dip)) { 717 mutex_enter(&DEVI(parent)->devi_lock); 718 DEVI(parent)->devi_flags &= 719 ~(DEVI_ATTACHED_CHILDREN|DEVI_MADE_CHILDREN); 720 mutex_exit(&DEVI(parent)->devi_lock); 721 } 722 return (DDI_SUCCESS); 723 } 724 725 /* 726 * Unbind this devinfo node 727 * Called before the node is destroyed or driver is removed from system 728 */ 729 static int 730 unbind_node(dev_info_t *dip) 731 { 732 ASSERT(DEVI(dip)->devi_node_state == DS_BOUND); 733 ASSERT(DEVI(dip)->devi_major != (major_t)-1); 734 735 /* check references */ 736 if (DEVI(dip)->devi_ref) 737 return (DDI_FAILURE); 738 739 NDI_CONFIG_DEBUG((CE_CONT, "unbind_node: 0x%p(name = %s)\n", 740 (void *)dip, ddi_node_name(dip))); 741 742 unlink_from_driver_list(dip); 743 744 DEVI(dip)->devi_major = (major_t)-1; 745 DEVI(dip)->devi_binding_name = DEVI(dip)->devi_node_name; 746 return (DDI_SUCCESS); 747 } 748 749 /* 750 * Initialize a node: calls the parent nexus' bus_ctl ops to do the operation. 751 * Must hold parent and per-driver list while calling this function. 752 * A successful init_node() returns with an active ndi_hold_devi() hold on 753 * the parent. 754 */ 755 static int 756 init_node(dev_info_t *dip) 757 { 758 int error; 759 dev_info_t *pdip = ddi_get_parent(dip); 760 int (*f)(dev_info_t *, dev_info_t *, ddi_ctl_enum_t, void *, void *); 761 char *path; 762 major_t major; 763 764 ASSERT(i_ddi_node_state(dip) == DS_BOUND); 765 766 /* should be DS_READY except for pcmcia ... */ 767 ASSERT(i_ddi_node_state(pdip) >= DS_PROBED); 768 769 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 770 (void) ddi_pathname(dip, path); 771 NDI_CONFIG_DEBUG((CE_CONT, "init_node: entry: path %s 0x%p\n", 772 path, (void *)dip)); 773 774 /* 775 * The parent must have a bus_ctl operation. 776 */ 777 if ((DEVI(pdip)->devi_ops->devo_bus_ops == NULL) || 778 (f = DEVI(pdip)->devi_ops->devo_bus_ops->bus_ctl) == NULL) { 779 error = DDI_FAILURE; 780 goto out; 781 } 782 783 add_global_props(dip); 784 785 /* 786 * Invoke the parent's bus_ctl operation with the DDI_CTLOPS_INITCHILD 787 * command to transform the child to canonical form 1. If there 788 * is an error, ddi_remove_child should be called, to clean up. 
789 */ 790 error = (*f)(pdip, pdip, DDI_CTLOPS_INITCHILD, dip, NULL); 791 if (error != DDI_SUCCESS) { 792 NDI_CONFIG_DEBUG((CE_CONT, "init_node: %s 0x%p failed\n", 793 path, (void *)dip)); 794 remove_global_props(dip); 795 /* in case nexus driver didn't clear this field */ 796 ddi_set_name_addr(dip, NULL); 797 error = DDI_FAILURE; 798 goto out; 799 } 800 801 ndi_hold_devi(pdip); 802 803 /* recompute path after initchild for @addr information */ 804 (void) ddi_pathname(dip, path); 805 806 /* Check for duplicate nodes */ 807 if (find_duplicate_child(pdip, dip) != NULL) { 808 /* 809 * uninit_node() the duplicate - a successful uninit_node() 810 * does a ndi_rele_devi. 811 */ 812 if ((error = uninit_node(dip)) != DDI_SUCCESS) { 813 ndi_rele_devi(pdip); 814 cmn_err(CE_WARN, "init_node: uninit of duplicate " 815 "node %s failed", path); 816 } 817 NDI_CONFIG_DEBUG((CE_CONT, "init_node: duplicate uninit " 818 "%s 0x%p%s\n", path, (void *)dip, 819 (error == DDI_SUCCESS) ? "" : " failed")); 820 error = DDI_FAILURE; 821 goto out; 822 } 823 824 /* 825 * Check to see if we have a path-oriented driver alias that overrides 826 * the current driver binding. If so, we need to rebind. This check 827 * needs to be delayed until after a successful DDI_CTLOPS_INITCHILD, 828 * so the unit-address is established on the last component of the path. 829 * 830 * NOTE: Allowing a path-oriented alias to change the driver binding 831 * of a driver.conf node results in non-intuitive property behavior. 832 * We provide a tunable (driver_conf_allow_path_alias) to control 833 * this behavior. See uninit_node() for more details. 834 * 835 * NOTE: If you are adding a path-oriented alias for the boot device, 836 * and there is mismatch between OBP and the kernel in regard to 837 * generic name use, like "disk" .vs. "ssd", then you will need 838 * to add a path-oriented alias for both paths. 839 */ 840 major = ddi_name_to_major(path); 841 if ((major != (major_t)-1) && 842 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED) && 843 (major != DEVI(dip)->devi_major) && 844 (ndi_dev_is_persistent_node(dip) || driver_conf_allow_path_alias)) { 845 846 /* Mark node for rebind processing. */ 847 mutex_enter(&DEVI(dip)->devi_lock); 848 DEVI(dip)->devi_flags |= DEVI_REBIND; 849 mutex_exit(&DEVI(dip)->devi_lock); 850 851 /* 852 * uninit_node() current binding - a successful uninit_node() 853 * does a ndi_rele_devi. 854 */ 855 if ((error = uninit_node(dip)) != DDI_SUCCESS) { 856 ndi_rele_devi(pdip); 857 cmn_err(CE_WARN, "init_node: uninit for rebind " 858 "of node %s failed", path); 859 goto out; 860 } 861 862 /* Unbind: demote the node back to DS_LINKED. */ 863 if ((error = ndi_devi_unbind_driver(dip)) != DDI_SUCCESS) { 864 cmn_err(CE_WARN, "init_node: unbind for rebind " 865 "of node %s failed", path); 866 goto out; 867 } 868 869 /* establish rebinding name */ 870 if (DEVI(dip)->devi_rebinding_name == NULL) 871 DEVI(dip)->devi_rebinding_name = 872 i_ddi_strdup(path, KM_SLEEP); 873 874 /* 875 * Now that we are demoted and marked for rebind, repromote. 876 * We need to do this in steps, instead of just calling 877 * ddi_initchild, so that we can redo the merge operation 878 * after we are rebound to the path-bound driver. 879 * 880 * Start by rebinding node to the path-bound driver. 
881 */ 882 if ((error = ndi_devi_bind_driver(dip, 0)) != DDI_SUCCESS) { 883 cmn_err(CE_WARN, "init_node: rebind " 884 "of node %s failed", path); 885 goto out; 886 } 887 888 /* 889 * If the node is not a driver.conf node then merge 890 * driver.conf properties from new path-bound driver.conf. 891 */ 892 if (ndi_dev_is_persistent_node(dip)) 893 (void) i_ndi_make_spec_children(pdip, 0); 894 895 /* 896 * Now that we have taken care of merge, repromote back 897 * to DS_INITIALIZED. 898 */ 899 error = ddi_initchild(pdip, dip); 900 NDI_CONFIG_DEBUG((CE_CONT, "init_node: rebind " 901 "%s 0x%p\n", path, (void *)dip)); 902 goto out; 903 } 904 905 /* 906 * Apply multi-parent/deep-nexus optimization to the new node 907 */ 908 DEVI(dip)->devi_instance = e_ddi_assign_instance(dip); 909 ddi_optimize_dtree(dip); 910 error = DDI_SUCCESS; 911 912 out: if (error != DDI_SUCCESS) { 913 /* On failure ensure that DEVI_REBIND is cleared */ 914 mutex_enter(&DEVI(dip)->devi_lock); 915 DEVI(dip)->devi_flags &= ~DEVI_REBIND; 916 mutex_exit(&DEVI(dip)->devi_lock); 917 } 918 kmem_free(path, MAXPATHLEN); 919 return (error); 920 } 921 922 /* 923 * Uninitialize node 924 * The per-driver list must be held busy during the call. 925 * A successful uninit_node() releases the init_node() hold on 926 * the parent by calling ndi_rele_devi(). 927 */ 928 static int 929 uninit_node(dev_info_t *dip) 930 { 931 int node_state_entry; 932 dev_info_t *pdip; 933 struct dev_ops *ops; 934 int (*f)(); 935 int error; 936 char *addr; 937 938 /* 939 * Don't check for references here or else a ref-counted 940 * dip cannot be downgraded by the framework. 941 */ 942 node_state_entry = i_ddi_node_state(dip); 943 ASSERT((node_state_entry == DS_BOUND) || 944 (node_state_entry == DS_INITIALIZED)); 945 pdip = ddi_get_parent(dip); 946 ASSERT(pdip); 947 948 NDI_CONFIG_DEBUG((CE_CONT, "uninit_node: 0x%p(%s%d)\n", 949 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 950 951 if (((ops = ddi_get_driver(pdip)) == NULL) || 952 (ops->devo_bus_ops == NULL) || 953 ((f = ops->devo_bus_ops->bus_ctl) == NULL)) { 954 return (DDI_FAILURE); 955 } 956 957 /* 958 * save the @addr prior to DDI_CTLOPS_UNINITCHILD for use in 959 * freeing the instance if it succeeds. 960 */ 961 if (node_state_entry == DS_INITIALIZED) { 962 addr = ddi_get_name_addr(dip); 963 if (addr) 964 addr = i_ddi_strdup(addr, KM_SLEEP); 965 } else { 966 addr = NULL; 967 } 968 969 error = (*f)(pdip, pdip, DDI_CTLOPS_UNINITCHILD, dip, (void *)NULL); 970 if (error == DDI_SUCCESS) { 971 /* if uninitchild forgot to set devi_addr to NULL do it now */ 972 ddi_set_name_addr(dip, NULL); 973 974 /* 975 * Free instance number. This is a no-op if instance has 976 * been kept by probe_node(). Avoid free when we are called 977 * from init_node (DS_BOUND) because the instance has not yet 978 * been assigned. 979 */ 980 if (node_state_entry == DS_INITIALIZED) { 981 e_ddi_free_instance(dip, addr); 982 DEVI(dip)->devi_instance = -1; 983 } 984 985 /* release the init_node hold */ 986 ndi_rele_devi(pdip); 987 988 remove_global_props(dip); 989 990 /* 991 * NOTE: The decision on whether to allow a path-oriented 992 * rebind of a driver.conf enumerated node is made by 993 * init_node() based on driver_conf_allow_path_alias. The 994 * rebind code below prevents deletion of system properties 995 * on driver.conf nodes. 996 * 997 * When driver_conf_allow_path_alias is set, property behavior 998 * on rebound driver.conf file is non-intuitive. 
For a 999 * driver.conf node, the unit-address properties come from 1000 * the driver.conf file as system properties. Removing system 1001 * properties from a driver.conf node makes the node 1002 * useless (we get node without unit-address properties) - so 1003 * we leave system properties in place. The result is a node 1004 * where system properties come from the node being rebound, 1005 * and global properties come from the driver.conf file 1006 * of the driver we are rebinding to. If we could determine 1007 * that the path-oriented alias driver.conf file defined a 1008 * node at the same unit address, it would be best to use 1009 * that node and avoid the non-intuitive property behavior. 1010 * Unfortunately, the current "merge" code does not support 1011 * this, so we live with the non-intuitive property behavior. 1012 */ 1013 if (!((ndi_dev_is_persistent_node(dip) == 0) && 1014 (DEVI(dip)->devi_flags & DEVI_REBIND))) 1015 e_ddi_prop_remove_all(dip); 1016 } else { 1017 NDI_CONFIG_DEBUG((CE_CONT, "uninit_node failed: 0x%p(%s%d)\n", 1018 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1019 } 1020 1021 if (addr) 1022 kmem_free(addr, strlen(addr) + 1); 1023 return (error); 1024 } 1025 1026 /* 1027 * Invoke driver's probe entry point to probe for existence of hardware. 1028 * Keep instance permanent for successful probe and leaf nodes. 1029 * 1030 * Per-driver list must be held busy while calling this function. 1031 */ 1032 static int 1033 probe_node(dev_info_t *dip) 1034 { 1035 int rv; 1036 1037 ASSERT(i_ddi_node_state(dip) == DS_INITIALIZED); 1038 1039 NDI_CONFIG_DEBUG((CE_CONT, "probe_node: 0x%p(%s%d)\n", 1040 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1041 1042 /* temporarily hold the driver while we probe */ 1043 DEVI(dip)->devi_ops = ndi_hold_driver(dip); 1044 if (DEVI(dip)->devi_ops == NULL) { 1045 NDI_CONFIG_DEBUG((CE_CONT, 1046 "probe_node: 0x%p(%s%d) cannot load driver\n", 1047 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1048 return (DDI_FAILURE); 1049 } 1050 1051 if (identify_9e != 0) 1052 (void) devi_identify(dip); 1053 1054 rv = devi_probe(dip); 1055 1056 /* release the driver now that probe is complete */ 1057 ndi_rele_driver(dip); 1058 DEVI(dip)->devi_ops = NULL; 1059 1060 switch (rv) { 1061 case DDI_PROBE_SUCCESS: /* found */ 1062 case DDI_PROBE_DONTCARE: /* ddi_dev_is_sid */ 1063 e_ddi_keep_instance(dip); /* persist instance */ 1064 rv = DDI_SUCCESS; 1065 break; 1066 1067 case DDI_PROBE_PARTIAL: /* maybe later */ 1068 case DDI_PROBE_FAILURE: /* not found */ 1069 NDI_CONFIG_DEBUG((CE_CONT, 1070 "probe_node: 0x%p(%s%d) no hardware found%s\n", 1071 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip), 1072 (rv == DDI_PROBE_PARTIAL) ? " yet" : "")); 1073 rv = DDI_FAILURE; 1074 break; 1075 1076 default: 1077 #ifdef DEBUG 1078 cmn_err(CE_WARN, "probe_node: %s%d: illegal probe(9E) value", 1079 ddi_driver_name(dip), ddi_get_instance(dip)); 1080 #endif /* DEBUG */ 1081 rv = DDI_FAILURE; 1082 break; 1083 } 1084 return (rv); 1085 } 1086 1087 /* 1088 * Unprobe a node. Simply reset the node state. 1089 * Per-driver list must be held busy while calling this function. 1090 */ 1091 static int 1092 unprobe_node(dev_info_t *dip) 1093 { 1094 ASSERT(i_ddi_node_state(dip) == DS_PROBED); 1095 1096 /* 1097 * Don't check for references here or else a ref-counted 1098 * dip cannot be downgraded by the framework. 
1099 */ 1100 1101 NDI_CONFIG_DEBUG((CE_CONT, "unprobe_node: 0x%p(name = %s)\n", 1102 (void *)dip, ddi_node_name(dip))); 1103 return (DDI_SUCCESS); 1104 } 1105 1106 /* 1107 * Attach devinfo node. 1108 * Per-driver list must be held busy. 1109 */ 1110 static int 1111 attach_node(dev_info_t *dip) 1112 { 1113 int rv; 1114 1115 ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip))); 1116 ASSERT(i_ddi_node_state(dip) == DS_PROBED); 1117 1118 NDI_CONFIG_DEBUG((CE_CONT, "attach_node: 0x%p(%s%d)\n", 1119 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1120 1121 /* 1122 * Tell mpxio framework that a node is about to online. 1123 */ 1124 if ((rv = mdi_devi_online(dip, 0)) != NDI_SUCCESS) { 1125 return (DDI_FAILURE); 1126 } 1127 1128 /* no recursive attachment */ 1129 ASSERT(DEVI(dip)->devi_ops == NULL); 1130 1131 /* 1132 * Hold driver the node is bound to. 1133 */ 1134 DEVI(dip)->devi_ops = ndi_hold_driver(dip); 1135 if (DEVI(dip)->devi_ops == NULL) { 1136 /* 1137 * We were able to load driver for probing, so we should 1138 * not get here unless something really bad happened. 1139 */ 1140 cmn_err(CE_WARN, "attach_node: no driver for major %d", 1141 DEVI(dip)->devi_major); 1142 return (DDI_FAILURE); 1143 } 1144 1145 if (NEXUS_DRV(DEVI(dip)->devi_ops)) 1146 DEVI(dip)->devi_taskq = ddi_taskq_create(dip, 1147 "nexus_enum_tq", 1, 1148 TASKQ_DEFAULTPRI, 0); 1149 1150 mutex_enter(&(DEVI(dip)->devi_lock)); 1151 DEVI_SET_ATTACHING(dip); 1152 DEVI_SET_NEED_RESET(dip); 1153 mutex_exit(&(DEVI(dip)->devi_lock)); 1154 1155 rv = devi_attach(dip, DDI_ATTACH); 1156 1157 mutex_enter(&(DEVI(dip)->devi_lock)); 1158 DEVI_CLR_ATTACHING(dip); 1159 1160 if (rv != DDI_SUCCESS) { 1161 DEVI_CLR_NEED_RESET(dip); 1162 1163 /* ensure that devids are unregistered */ 1164 if (DEVI(dip)->devi_flags & DEVI_REGISTERED_DEVID) { 1165 DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID; 1166 mutex_exit(&DEVI(dip)->devi_lock); 1167 1168 e_devid_cache_unregister(dip); 1169 } else 1170 mutex_exit(&DEVI(dip)->devi_lock); 1171 1172 /* 1173 * Cleanup dacf reservations 1174 */ 1175 mutex_enter(&dacf_lock); 1176 dacf_clr_rsrvs(dip, DACF_OPID_POSTATTACH); 1177 dacf_clr_rsrvs(dip, DACF_OPID_PREDETACH); 1178 mutex_exit(&dacf_lock); 1179 if (DEVI(dip)->devi_taskq) 1180 ddi_taskq_destroy(DEVI(dip)->devi_taskq); 1181 ddi_remove_minor_node(dip, NULL); 1182 1183 /* release the driver if attach failed */ 1184 ndi_rele_driver(dip); 1185 DEVI(dip)->devi_ops = NULL; 1186 NDI_CONFIG_DEBUG((CE_CONT, "attach_node: 0x%p(%s%d) failed\n", 1187 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1188 return (DDI_FAILURE); 1189 } else 1190 mutex_exit(&DEVI(dip)->devi_lock); 1191 1192 /* successful attach, return with driver held */ 1193 1194 return (DDI_SUCCESS); 1195 } 1196 1197 /* 1198 * Detach devinfo node. 1199 * Per-driver list must be held busy. 1200 */ 1201 static int 1202 detach_node(dev_info_t *dip, uint_t flag) 1203 { 1204 struct devnames *dnp; 1205 int rv; 1206 1207 ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip))); 1208 ASSERT(i_ddi_node_state(dip) == DS_ATTACHED); 1209 1210 /* check references */ 1211 if (DEVI(dip)->devi_ref) 1212 return (DDI_FAILURE); 1213 1214 NDI_CONFIG_DEBUG((CE_CONT, "detach_node: 0x%p(%s%d)\n", 1215 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1216 1217 /* 1218 * NOTE: If we are processing a pHCI node then the calling code 1219 * must detect this and ndi_devi_enter() in (vHCI, parent(pHCI)) 1220 * order unless pHCI and vHCI are siblings. 
Code paths leading 1221 * here that must ensure this ordering include: 1222 * unconfig_immediate_children(), devi_unconfig_one(), 1223 * ndi_devi_unconfig_one(), ndi_devi_offline(). 1224 */ 1225 ASSERT(!MDI_PHCI(dip) || 1226 (ddi_get_parent(mdi_devi_get_vdip(dip)) == ddi_get_parent(dip)) || 1227 DEVI_BUSY_OWNED(mdi_devi_get_vdip(dip))); 1228 1229 /* Offline the device node with the mpxio framework. */ 1230 if (mdi_devi_offline(dip, flag) != NDI_SUCCESS) { 1231 return (DDI_FAILURE); 1232 } 1233 1234 /* drain the taskq */ 1235 if (DEVI(dip)->devi_taskq) 1236 ddi_taskq_wait(DEVI(dip)->devi_taskq); 1237 1238 rv = devi_detach(dip, DDI_DETACH); 1239 1240 if (rv != DDI_SUCCESS) { 1241 NDI_CONFIG_DEBUG((CE_CONT, 1242 "detach_node: 0x%p(%s%d) failed\n", 1243 (void *)dip, ddi_driver_name(dip), ddi_get_instance(dip))); 1244 return (DDI_FAILURE); 1245 } 1246 1247 mutex_enter(&(DEVI(dip)->devi_lock)); 1248 DEVI_CLR_NEED_RESET(dip); 1249 mutex_exit(&(DEVI(dip)->devi_lock)); 1250 1251 /* destroy the taskq */ 1252 if (DEVI(dip)->devi_taskq) { 1253 ddi_taskq_destroy(DEVI(dip)->devi_taskq); 1254 DEVI(dip)->devi_taskq = NULL; 1255 } 1256 1257 /* Cleanup dacf reservations */ 1258 mutex_enter(&dacf_lock); 1259 dacf_clr_rsrvs(dip, DACF_OPID_POSTATTACH); 1260 dacf_clr_rsrvs(dip, DACF_OPID_PREDETACH); 1261 mutex_exit(&dacf_lock); 1262 1263 /* Remove properties and minor nodes in case driver forgots */ 1264 ddi_remove_minor_node(dip, NULL); 1265 ddi_prop_remove_all(dip); 1266 1267 /* a detached node can't have attached or .conf children */ 1268 mutex_enter(&DEVI(dip)->devi_lock); 1269 DEVI(dip)->devi_flags &= ~(DEVI_MADE_CHILDREN|DEVI_ATTACHED_CHILDREN); 1270 1271 /* ensure that devids registered during attach are unregistered */ 1272 if (DEVI(dip)->devi_flags & DEVI_REGISTERED_DEVID) { 1273 DEVI(dip)->devi_flags &= ~DEVI_REGISTERED_DEVID; 1274 mutex_exit(&DEVI(dip)->devi_lock); 1275 1276 e_devid_cache_unregister(dip); 1277 } else 1278 mutex_exit(&DEVI(dip)->devi_lock); 1279 1280 /* 1281 * If the instance has successfully detached in detach_driver() context, 1282 * clear DN_DRIVER_HELD for correct ddi_hold_installed_driver() 1283 * behavior. Consumers like qassociate() depend on this (via clnopen()). 1284 */ 1285 if (flag & NDI_DETACH_DRIVER) { 1286 dnp = &(devnamesp[DEVI(dip)->devi_major]); 1287 LOCK_DEV_OPS(&dnp->dn_lock); 1288 dnp->dn_flags &= ~DN_DRIVER_HELD; 1289 UNLOCK_DEV_OPS(&dnp->dn_lock); 1290 } 1291 1292 /* successful detach, release the driver */ 1293 ndi_rele_driver(dip); 1294 DEVI(dip)->devi_ops = NULL; 1295 return (DDI_SUCCESS); 1296 } 1297 1298 /* 1299 * Run dacf post_attach routines 1300 */ 1301 static int 1302 postattach_node(dev_info_t *dip) 1303 { 1304 int rval; 1305 1306 /* 1307 * For hotplug busses like USB, it's possible that devices 1308 * are removed but dip is still around. We don't want to 1309 * run dacf routines as part of detach failure recovery. 1310 * 1311 * Pretend success until we figure out how to prevent 1312 * access to such devinfo nodes. 1313 */ 1314 if (DEVI_IS_DEVICE_REMOVED(dip)) 1315 return (DDI_SUCCESS); 1316 1317 /* 1318 * if dacf_postattach failed, report it to the framework 1319 * so that it can be retried later at the open time. 1320 */ 1321 mutex_enter(&dacf_lock); 1322 rval = dacfc_postattach(dip); 1323 mutex_exit(&dacf_lock); 1324 1325 /* 1326 * Plumbing during postattach may fail because of the 1327 * underlying device is not ready. This will fail ndi_devi_config() 1328 * in dv_filldir() and a warning message is issued. 
The message 1329 * from here will explain what happened 1330 */ 1331 if (rval != DACF_SUCCESS) { 1332 cmn_err(CE_WARN, "Postattach failed for %s%d\n", 1333 ddi_driver_name(dip), ddi_get_instance(dip)); 1334 return (DDI_FAILURE); 1335 } 1336 1337 return (DDI_SUCCESS); 1338 } 1339 1340 /* 1341 * Run dacf pre-detach routines 1342 */ 1343 static int 1344 predetach_node(dev_info_t *dip, uint_t flag) 1345 { 1346 int ret; 1347 1348 /* 1349 * Don't auto-detach if DDI_FORCEATTACH or DDI_NO_AUTODETACH 1350 * properties are set. 1351 */ 1352 if (flag & NDI_AUTODETACH) { 1353 struct devnames *dnp; 1354 int pflag = DDI_PROP_NOTPROM | DDI_PROP_DONTPASS; 1355 1356 if ((ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1357 pflag, DDI_FORCEATTACH, 0) == 1) || 1358 (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1359 pflag, DDI_NO_AUTODETACH, 0) == 1)) 1360 return (DDI_FAILURE); 1361 1362 /* check for driver global version of DDI_NO_AUTODETACH */ 1363 dnp = &devnamesp[DEVI(dip)->devi_major]; 1364 LOCK_DEV_OPS(&dnp->dn_lock); 1365 if (dnp->dn_flags & DN_NO_AUTODETACH) { 1366 UNLOCK_DEV_OPS(&dnp->dn_lock); 1367 return (DDI_FAILURE); 1368 } 1369 UNLOCK_DEV_OPS(&dnp->dn_lock); 1370 } 1371 1372 mutex_enter(&dacf_lock); 1373 ret = dacfc_predetach(dip); 1374 mutex_exit(&dacf_lock); 1375 1376 return (ret); 1377 } 1378 1379 /* 1380 * Wrapper for making multiple state transitions 1381 */ 1382 1383 /* 1384 * i_ndi_config_node: upgrade dev_info node into a specified state. 1385 * It is a bit tricky because the locking protocol changes before and 1386 * after a node is bound to a driver. All locks are held external to 1387 * this function. 1388 */ 1389 int 1390 i_ndi_config_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag) 1391 { 1392 _NOTE(ARGUNUSED(flag)) 1393 int rv = DDI_SUCCESS; 1394 1395 ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip))); 1396 1397 while ((i_ddi_node_state(dip) < state) && (rv == DDI_SUCCESS)) { 1398 1399 /* don't allow any more changes to the device tree */ 1400 if (devinfo_freeze) { 1401 rv = DDI_FAILURE; 1402 break; 1403 } 1404 1405 switch (i_ddi_node_state(dip)) { 1406 case DS_PROTO: 1407 /* 1408 * only caller can reference this node, no external 1409 * locking needed. 1410 */ 1411 link_node(dip); 1412 i_ddi_set_node_state(dip, DS_LINKED); 1413 break; 1414 case DS_LINKED: 1415 /* 1416 * Three code path may attempt to bind a node: 1417 * - boot code 1418 * - add_drv 1419 * - hotplug thread 1420 * Boot code is single threaded, add_drv synchronize 1421 * on a userland lock, and hotplug synchronize on 1422 * hotplug_lk. There could be a race between add_drv 1423 * and hotplug thread. We'll live with this until the 1424 * conversion to top-down loading. 1425 */ 1426 if ((rv = bind_node(dip)) == DDI_SUCCESS) 1427 i_ddi_set_node_state(dip, DS_BOUND); 1428 1429 break; 1430 case DS_BOUND: 1431 /* 1432 * The following transitions synchronizes on the 1433 * per-driver busy changing flag, since we already 1434 * have a driver. 
1435 */ 1436 if ((rv = init_node(dip)) == DDI_SUCCESS) 1437 i_ddi_set_node_state(dip, DS_INITIALIZED); 1438 break; 1439 case DS_INITIALIZED: 1440 if ((rv = probe_node(dip)) == DDI_SUCCESS) 1441 i_ddi_set_node_state(dip, DS_PROBED); 1442 break; 1443 case DS_PROBED: 1444 atomic_add_long(&devinfo_attach_detach, 1); 1445 if ((rv = attach_node(dip)) == DDI_SUCCESS) 1446 i_ddi_set_node_state(dip, DS_ATTACHED); 1447 atomic_add_long(&devinfo_attach_detach, -1); 1448 break; 1449 case DS_ATTACHED: 1450 if ((rv = postattach_node(dip)) == DDI_SUCCESS) 1451 i_ddi_set_node_state(dip, DS_READY); 1452 break; 1453 case DS_READY: 1454 break; 1455 default: 1456 /* should never reach here */ 1457 ASSERT("unknown devinfo state"); 1458 } 1459 } 1460 1461 if (ddidebug & DDI_AUDIT) 1462 da_log_enter(dip); 1463 return (rv); 1464 } 1465 1466 /* 1467 * i_ndi_unconfig_node: downgrade dev_info node into a specified state. 1468 */ 1469 int 1470 i_ndi_unconfig_node(dev_info_t *dip, ddi_node_state_t state, uint_t flag) 1471 { 1472 int rv = DDI_SUCCESS; 1473 1474 ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip))); 1475 1476 while ((i_ddi_node_state(dip) > state) && (rv == DDI_SUCCESS)) { 1477 1478 /* don't allow any more changes to the device tree */ 1479 if (devinfo_freeze) { 1480 rv = DDI_FAILURE; 1481 break; 1482 } 1483 1484 switch (i_ddi_node_state(dip)) { 1485 case DS_PROTO: 1486 break; 1487 case DS_LINKED: 1488 /* 1489 * Persistent nodes are only removed by hotplug code 1490 * .conf nodes synchronizes on per-driver list. 1491 */ 1492 if ((rv = unlink_node(dip)) == DDI_SUCCESS) 1493 i_ddi_set_node_state(dip, DS_PROTO); 1494 break; 1495 case DS_BOUND: 1496 /* 1497 * The following transitions synchronizes on the 1498 * per-driver busy changing flag, since we already 1499 * have a driver. 
1500 */ 1501 if ((rv = unbind_node(dip)) == DDI_SUCCESS) 1502 i_ddi_set_node_state(dip, DS_LINKED); 1503 break; 1504 case DS_INITIALIZED: 1505 if ((rv = uninit_node(dip)) == DDI_SUCCESS) 1506 i_ddi_set_node_state(dip, DS_BOUND); 1507 break; 1508 case DS_PROBED: 1509 if ((rv = unprobe_node(dip)) == DDI_SUCCESS) 1510 i_ddi_set_node_state(dip, DS_INITIALIZED); 1511 break; 1512 case DS_ATTACHED: 1513 atomic_add_long(&devinfo_attach_detach, 1); 1514 1515 mutex_enter(&(DEVI(dip)->devi_lock)); 1516 DEVI_SET_DETACHING(dip); 1517 mutex_exit(&(DEVI(dip)->devi_lock)); 1518 1519 membar_enter(); /* ensure visibility for hold_devi */ 1520 1521 if ((rv = detach_node(dip, flag)) == DDI_SUCCESS) 1522 i_ddi_set_node_state(dip, DS_PROBED); 1523 1524 mutex_enter(&(DEVI(dip)->devi_lock)); 1525 DEVI_CLR_DETACHING(dip); 1526 mutex_exit(&(DEVI(dip)->devi_lock)); 1527 1528 atomic_add_long(&devinfo_attach_detach, -1); 1529 break; 1530 case DS_READY: 1531 if ((rv = predetach_node(dip, flag)) == DDI_SUCCESS) 1532 i_ddi_set_node_state(dip, DS_ATTACHED); 1533 break; 1534 default: 1535 ASSERT("unknown devinfo state"); 1536 } 1537 } 1538 da_log_enter(dip); 1539 return (rv); 1540 } 1541 1542 /* 1543 * ddi_initchild: transform node to DS_INITIALIZED state 1544 */ 1545 int 1546 ddi_initchild(dev_info_t *parent, dev_info_t *proto) 1547 { 1548 int ret, circ; 1549 1550 ndi_devi_enter(parent, &circ); 1551 ret = i_ndi_config_node(proto, DS_INITIALIZED, 0); 1552 ndi_devi_exit(parent, circ); 1553 1554 return (ret); 1555 } 1556 1557 /* 1558 * ddi_uninitchild: transform node down to DS_BOUND state 1559 */ 1560 int 1561 ddi_uninitchild(dev_info_t *dip) 1562 { 1563 int ret, circ; 1564 dev_info_t *parent = ddi_get_parent(dip); 1565 ASSERT(parent); 1566 1567 ndi_devi_enter(parent, &circ); 1568 ret = i_ndi_unconfig_node(dip, DS_BOUND, 0); 1569 ndi_devi_exit(parent, circ); 1570 1571 return (ret); 1572 } 1573 1574 /* 1575 * i_ddi_attachchild: transform node to DS_READY/i_ddi_devi_attached() state 1576 */ 1577 static int 1578 i_ddi_attachchild(dev_info_t *dip) 1579 { 1580 dev_info_t *parent = ddi_get_parent(dip); 1581 int ret; 1582 1583 ASSERT(parent && DEVI_BUSY_OWNED(parent)); 1584 1585 if ((i_ddi_node_state(dip) < DS_BOUND) || DEVI_IS_DEVICE_OFFLINE(dip)) 1586 return (DDI_FAILURE); 1587 1588 ret = i_ndi_config_node(dip, DS_READY, 0); 1589 if (ret == NDI_SUCCESS) { 1590 ret = DDI_SUCCESS; 1591 } else { 1592 /* 1593 * Take it down to DS_INITIALIZED so pm_pre_probe is run 1594 * on the next attach 1595 */ 1596 (void) i_ndi_unconfig_node(dip, DS_INITIALIZED, 0); 1597 ret = DDI_FAILURE; 1598 } 1599 1600 return (ret); 1601 } 1602 1603 /* 1604 * i_ddi_detachchild: transform node down to DS_PROBED state 1605 * If it fails, put it back to DS_READY state. 1606 * NOTE: A node that fails detach may be at DS_ATTACHED instead 1607 * of DS_READY for a small amount of time - this is the source of 1608 * transient DS_READY->DS_ATTACHED->DS_READY state changes. 
1609 */ 1610 static int 1611 i_ddi_detachchild(dev_info_t *dip, uint_t flags) 1612 { 1613 dev_info_t *parent = ddi_get_parent(dip); 1614 int ret; 1615 1616 ASSERT(parent && DEVI_BUSY_OWNED(parent)); 1617 1618 ret = i_ndi_unconfig_node(dip, DS_PROBED, flags); 1619 if (ret != DDI_SUCCESS) 1620 (void) i_ndi_config_node(dip, DS_READY, 0); 1621 else 1622 /* allow pm_pre_probe to reestablish pm state */ 1623 (void) i_ndi_unconfig_node(dip, DS_INITIALIZED, 0); 1624 return (ret); 1625 } 1626 1627 /* 1628 * Add a child and bind to driver 1629 */ 1630 dev_info_t * 1631 ddi_add_child(dev_info_t *pdip, char *name, uint_t nodeid, uint_t unit) 1632 { 1633 int circ; 1634 dev_info_t *dip; 1635 1636 /* allocate a new node */ 1637 dip = i_ddi_alloc_node(pdip, name, nodeid, (int)unit, NULL, KM_SLEEP); 1638 1639 ndi_devi_enter(pdip, &circ); 1640 (void) i_ndi_config_node(dip, DS_BOUND, 0); 1641 ndi_devi_exit(pdip, circ); 1642 return (dip); 1643 } 1644 1645 /* 1646 * ddi_remove_child: remove the dip. The parent must be attached and held 1647 */ 1648 int 1649 ddi_remove_child(dev_info_t *dip, int dummy) 1650 { 1651 _NOTE(ARGUNUSED(dummy)) 1652 int circ, ret; 1653 dev_info_t *parent = ddi_get_parent(dip); 1654 ASSERT(parent); 1655 1656 ndi_devi_enter(parent, &circ); 1657 1658 /* 1659 * If we still have children, for example SID nodes marked 1660 * as persistent but not attached, attempt to remove them. 1661 */ 1662 if (DEVI(dip)->devi_child) { 1663 ret = ndi_devi_unconfig(dip, NDI_DEVI_REMOVE); 1664 if (ret != NDI_SUCCESS) { 1665 ndi_devi_exit(parent, circ); 1666 return (DDI_FAILURE); 1667 } 1668 ASSERT(DEVI(dip)->devi_child == NULL); 1669 } 1670 1671 ret = i_ndi_unconfig_node(dip, DS_PROTO, 0); 1672 ndi_devi_exit(parent, circ); 1673 1674 if (ret != DDI_SUCCESS) 1675 return (ret); 1676 1677 ASSERT(i_ddi_node_state(dip) == DS_PROTO); 1678 i_ddi_free_node(dip); 1679 return (DDI_SUCCESS); 1680 } 1681 1682 /* 1683 * NDI wrappers for ref counting, node allocation, and transitions 1684 */ 1685 1686 /* 1687 * Hold/release the devinfo node itself. 1688 * Caller is assumed to prevent the devi from detaching during this call 1689 */ 1690 void 1691 ndi_hold_devi(dev_info_t *dip) 1692 { 1693 mutex_enter(&DEVI(dip)->devi_lock); 1694 ASSERT(DEVI(dip)->devi_ref >= 0); 1695 DEVI(dip)->devi_ref++; 1696 membar_enter(); /* make sure stores are flushed */ 1697 mutex_exit(&DEVI(dip)->devi_lock); 1698 } 1699 1700 void 1701 ndi_rele_devi(dev_info_t *dip) 1702 { 1703 ASSERT(DEVI(dip)->devi_ref > 0); 1704 1705 mutex_enter(&DEVI(dip)->devi_lock); 1706 DEVI(dip)->devi_ref--; 1707 membar_enter(); /* make sure stores are flushed */ 1708 mutex_exit(&DEVI(dip)->devi_lock); 1709 } 1710 1711 int 1712 e_ddi_devi_holdcnt(dev_info_t *dip) 1713 { 1714 return (DEVI(dip)->devi_ref); 1715 } 1716 1717 /* 1718 * Hold/release the driver the devinfo node is bound to. 1719 */ 1720 struct dev_ops * 1721 ndi_hold_driver(dev_info_t *dip) 1722 { 1723 if (i_ddi_node_state(dip) < DS_BOUND) 1724 return (NULL); 1725 1726 ASSERT(DEVI(dip)->devi_major != -1); 1727 return (mod_hold_dev_by_major(DEVI(dip)->devi_major)); 1728 } 1729 1730 void 1731 ndi_rele_driver(dev_info_t *dip) 1732 { 1733 ASSERT(i_ddi_node_state(dip) >= DS_BOUND); 1734 mod_rele_dev_by_major(DEVI(dip)->devi_major); 1735 } 1736 1737 /* 1738 * Single thread entry into devinfo node for modifying its children. 1739 * To verify in ASSERTS use DEVI_BUSY_OWNED macro. 
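 *
 * Typical usage (cf. ddi_initchild() above; pdip/circ are illustrative names):
 *	int circ;
 *	ndi_devi_enter(pdip, &circ);
 *	... modify children of pdip ...
 *	ndi_devi_exit(pdip, circ);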
1740 */ 1741 void 1742 ndi_devi_enter(dev_info_t *dip, int *circular) 1743 { 1744 struct dev_info *devi = DEVI(dip); 1745 ASSERT(dip != NULL); 1746 1747 /* for vHCI, enforce (vHCI, pHCI) ndi_deve_enter() order */ 1748 ASSERT(!MDI_VHCI(dip) || (mdi_devi_pdip_entered(dip) == 0) || 1749 DEVI_BUSY_OWNED(dip)); 1750 1751 mutex_enter(&devi->devi_lock); 1752 if (devi->devi_busy_thread == curthread) { 1753 devi->devi_circular++; 1754 } else { 1755 while (DEVI_BUSY_CHANGING(devi) && !panicstr) 1756 cv_wait(&(devi->devi_cv), &(devi->devi_lock)); 1757 if (panicstr) { 1758 mutex_exit(&devi->devi_lock); 1759 return; 1760 } 1761 devi->devi_flags |= DEVI_BUSY; 1762 devi->devi_busy_thread = curthread; 1763 } 1764 *circular = devi->devi_circular; 1765 mutex_exit(&devi->devi_lock); 1766 } 1767 1768 /* 1769 * Release ndi_devi_enter or successful ndi_devi_tryenter. 1770 */ 1771 void 1772 ndi_devi_exit(dev_info_t *dip, int circular) 1773 { 1774 struct dev_info *devi = DEVI(dip); 1775 struct dev_info *vdevi; 1776 ASSERT(dip != NULL); 1777 1778 if (panicstr) 1779 return; 1780 1781 mutex_enter(&(devi->devi_lock)); 1782 if (circular != 0) { 1783 devi->devi_circular--; 1784 } else { 1785 devi->devi_flags &= ~DEVI_BUSY; 1786 ASSERT(devi->devi_busy_thread == curthread); 1787 devi->devi_busy_thread = NULL; 1788 cv_broadcast(&(devi->devi_cv)); 1789 } 1790 mutex_exit(&(devi->devi_lock)); 1791 1792 /* 1793 * For pHCI exit we issue a broadcast to vHCI for ndi_devi_config_one() 1794 * doing cv_wait on vHCI. 1795 */ 1796 if (MDI_PHCI(dip)) { 1797 vdevi = DEVI(mdi_devi_get_vdip(dip)); 1798 if (vdevi) { 1799 mutex_enter(&(vdevi->devi_lock)); 1800 if (vdevi->devi_flags & DEVI_PHCI_SIGNALS_VHCI) { 1801 vdevi->devi_flags &= ~DEVI_PHCI_SIGNALS_VHCI; 1802 cv_broadcast(&(vdevi->devi_cv)); 1803 } 1804 mutex_exit(&(vdevi->devi_lock)); 1805 } 1806 } 1807 } 1808 1809 /* 1810 * Release ndi_devi_enter and wait for possibility of new children, avoiding 1811 * possibility of missing broadcast before getting to cv_timedwait(). 1812 */ 1813 static void 1814 ndi_devi_exit_and_wait(dev_info_t *dip, int circular, clock_t end_time) 1815 { 1816 struct dev_info *devi = DEVI(dip); 1817 ASSERT(dip != NULL); 1818 1819 if (panicstr) 1820 return; 1821 1822 /* 1823 * We are called to wait for of a new child, and new child can 1824 * only be added if circular is zero. 1825 */ 1826 ASSERT(circular == 0); 1827 1828 /* like ndi_devi_exit with circular of zero */ 1829 mutex_enter(&(devi->devi_lock)); 1830 devi->devi_flags &= ~DEVI_BUSY; 1831 ASSERT(devi->devi_busy_thread == curthread); 1832 devi->devi_busy_thread = NULL; 1833 cv_broadcast(&(devi->devi_cv)); 1834 1835 /* now wait for new children while still holding devi_lock */ 1836 (void) cv_timedwait(&devi->devi_cv, &(devi->devi_lock), end_time); 1837 mutex_exit(&(devi->devi_lock)); 1838 } 1839 1840 /* 1841 * Attempt to single thread entry into devinfo node for modifying its children. 
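 * Returns 1 if the node was entered (release it later with ndi_devi_exit()),
 * or 0 if another thread currently holds the node busy.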
1842 */ 1843 int 1844 ndi_devi_tryenter(dev_info_t *dip, int *circular) 1845 { 1846 int rval = 1; /* assume we enter */ 1847 struct dev_info *devi = DEVI(dip); 1848 ASSERT(dip != NULL); 1849 1850 mutex_enter(&devi->devi_lock); 1851 if (devi->devi_busy_thread == (void *)curthread) { 1852 devi->devi_circular++; 1853 } else { 1854 if (!DEVI_BUSY_CHANGING(devi)) { 1855 devi->devi_flags |= DEVI_BUSY; 1856 devi->devi_busy_thread = (void *)curthread; 1857 } else { 1858 rval = 0; /* devi is busy */ 1859 } 1860 } 1861 *circular = devi->devi_circular; 1862 mutex_exit(&devi->devi_lock); 1863 return (rval); 1864 } 1865 1866 /* 1867 * Allocate and initialize a new dev_info structure. 1868 * 1869 * This routine may be called at interrupt time by a nexus in 1870 * response to a hotplug event, therefore memory allocations are 1871 * not allowed to sleep. 1872 */ 1873 int 1874 ndi_devi_alloc(dev_info_t *parent, char *node_name, pnode_t nodeid, 1875 dev_info_t **ret_dip) 1876 { 1877 ASSERT(node_name != NULL); 1878 ASSERT(ret_dip != NULL); 1879 1880 *ret_dip = i_ddi_alloc_node(parent, node_name, nodeid, -1, NULL, 1881 KM_NOSLEEP); 1882 if (*ret_dip == NULL) { 1883 return (NDI_NOMEM); 1884 } 1885 1886 return (NDI_SUCCESS); 1887 } 1888 1889 /* 1890 * Allocate and initialize a new dev_info structure 1891 * This routine may sleep and should not be called at interrupt time 1892 */ 1893 void 1894 ndi_devi_alloc_sleep(dev_info_t *parent, char *node_name, pnode_t nodeid, 1895 dev_info_t **ret_dip) 1896 { 1897 ASSERT(node_name != NULL); 1898 ASSERT(ret_dip != NULL); 1899 1900 *ret_dip = i_ddi_alloc_node(parent, node_name, nodeid, -1, NULL, 1901 KM_SLEEP); 1902 ASSERT(*ret_dip); 1903 } 1904 1905 /* 1906 * Remove an initialized (but not yet attached) dev_info 1907 * node from it's parent. 1908 */ 1909 int 1910 ndi_devi_free(dev_info_t *dip) 1911 { 1912 ASSERT(dip != NULL); 1913 1914 if (i_ddi_node_state(dip) >= DS_INITIALIZED) 1915 return (DDI_FAILURE); 1916 1917 NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_free: %s%d (%p)\n", 1918 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip)); 1919 1920 (void) ddi_remove_child(dip, 0); 1921 1922 return (NDI_SUCCESS); 1923 } 1924 1925 /* 1926 * ndi_devi_bind_driver() binds a driver to a given device. If it fails 1927 * to bind the driver, it returns an appropriate error back. Some drivers 1928 * may want to know if the actually failed to bind. 1929 */ 1930 int 1931 ndi_devi_bind_driver(dev_info_t *dip, uint_t flags) 1932 { 1933 int ret = NDI_FAILURE; 1934 int circ; 1935 dev_info_t *pdip = ddi_get_parent(dip); 1936 ASSERT(pdip); 1937 1938 NDI_CONFIG_DEBUG((CE_CONT, 1939 "ndi_devi_bind_driver: %s%d (%p) flags: %x\n", 1940 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 1941 1942 ndi_devi_enter(pdip, &circ); 1943 if (i_ndi_config_node(dip, DS_BOUND, flags) == DDI_SUCCESS) 1944 ret = NDI_SUCCESS; 1945 ndi_devi_exit(pdip, circ); 1946 1947 return (ret); 1948 } 1949 1950 /* 1951 * ndi_devi_unbind_driver: unbind the dip 1952 */ 1953 static int 1954 ndi_devi_unbind_driver(dev_info_t *dip) 1955 { 1956 ASSERT(DEVI_BUSY_OWNED(ddi_get_parent(dip))); 1957 1958 return (i_ndi_unconfig_node(dip, DS_LINKED, 0)); 1959 } 1960 1961 /* 1962 * Misc. 
helper routines called by framework only
1963  */
1964 
1965 /*
1966  * Get the state of node
1967  */
1968 ddi_node_state_t
1969 i_ddi_node_state(dev_info_t *dip)
1970 {
1971     return (DEVI(dip)->devi_node_state);
1972 }
1973 
1974 /*
1975  * Set the state of node
1976  */
1977 void
1978 i_ddi_set_node_state(dev_info_t *dip, ddi_node_state_t state)
1979 {
1980     DEVI(dip)->devi_node_state = state;
1981     membar_enter();            /* make sure stores are flushed */
1982 }
1983 
1984 /*
1985  * Determine if node is attached. The implementation accommodates transient
1986  * DS_READY->DS_ATTACHED->DS_READY state changes.  Outside this file, this
1987  * function should be used instead of i_ddi_node_state() for
1988  * DS_ATTACHED/DS_READY state checks.
1989  */
1990 int
1991 i_ddi_devi_attached(dev_info_t *dip)
1992 {
1993     return (DEVI(dip)->devi_node_state >= DS_ATTACHED);
1994 }
1995 
1996 /*
1997  * Common function for finding a node in a sibling list given name and addr.
1998  *
1999  * By default, name is matched with devi_node_name. The following
2000  * alternative match strategies are supported:
2001  *
2002  * FIND_NODE_BY_NODENAME: Match on node name - typical use.
2003  * FIND_NODE_BY_DRIVER: A match on driver name bound to node is conducted.
2004  *     This support is used to support OBP generic names and
2005  *     for the conversion from driver names to generic names. When
2006  *     more consistency in the generic name environment is achieved
2007  *     (and not needed for upgrade) this support can be removed.
2008  * FIND_NODE_BY_ADDR: Match on just the addr.
2009  *     This support is only used/needed during boot to match
2010  *     a node bound via a path-based driver alias.
2011  *
2012  * If a child is not named (dev_addr == NULL), there are three
2013  * possible actions:
2014  *
2015  *     (1) skip it
2016  *     (2) FIND_ADDR_BY_INIT: bring child to DS_INITIALIZED state
2017  *     (3) FIND_ADDR_BY_CALLBACK: use a caller-supplied callback function
2018  */
2019 #define FIND_NODE_BY_NODENAME   0x01
2020 #define FIND_NODE_BY_DRIVER     0x02
2021 #define FIND_NODE_BY_ADDR       0x04
2022 #define FIND_ADDR_BY_INIT       0x10
2023 #define FIND_ADDR_BY_CALLBACK   0x20
2024 
2025 static dev_info_t *
2026 find_sibling(dev_info_t *head, char *cname, char *caddr, uint_t flag,
2027     int (*callback)(dev_info_t *, char *, int))
2028 {
2029     dev_info_t *dip;
2030     char *addr, *buf;
2031     major_t major;
2032     uint_t by;
2033 
2034     /* only one way to find a node */
2035     by = flag &
2036         (FIND_NODE_BY_DRIVER | FIND_NODE_BY_NODENAME | FIND_NODE_BY_ADDR);
2037     ASSERT(by && BIT_ONLYONESET(by));
2038 
2039     /* only one way to name a node */
2040     ASSERT(((flag & FIND_ADDR_BY_INIT) == 0) ||
2041         ((flag & FIND_ADDR_BY_CALLBACK) == 0));
2042 
2043     if (by == FIND_NODE_BY_DRIVER) {
2044         major = ddi_name_to_major(cname);
2045         if (major == (major_t)-1)
2046             return (NULL);
2047     }
2048 
2049     /* preallocate buffer for naming node by callback */
2050     if (flag & FIND_ADDR_BY_CALLBACK)
2051         buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2052 
2053     /*
2054      * Walk the child list to find a match
2055      */
2056 
2057     for (dip = head; dip; dip = ddi_get_next_sibling(dip)) {
2058         if (by == FIND_NODE_BY_NODENAME) {
2059             /* match node name */
2060             if (strcmp(cname, DEVI(dip)->devi_node_name) != 0)
2061                 continue;
2062         } else if (by == FIND_NODE_BY_DRIVER) {
2063             /* match driver major */
2064             if (DEVI(dip)->devi_major != major)
2065                 continue;
2066         }
2067 
2068         if ((addr = DEVI(dip)->devi_addr) == NULL) {
2069             /* name the child based on the flag */
2070             if (flag & FIND_ADDR_BY_INIT) {
2071                 if (ddi_initchild(ddi_get_parent(dip), dip)
2072                     !=
DDI_SUCCESS) 2073 continue; 2074 addr = DEVI(dip)->devi_addr; 2075 } else if (flag & FIND_ADDR_BY_CALLBACK) { 2076 if ((callback == NULL) || (callback( 2077 dip, buf, MAXNAMELEN) != DDI_SUCCESS)) 2078 continue; 2079 addr = buf; 2080 } else { 2081 continue; /* skip */ 2082 } 2083 } 2084 2085 /* match addr */ 2086 ASSERT(addr != NULL); 2087 if (strcmp(caddr, addr) == 0) 2088 break; /* node found */ 2089 2090 } 2091 if (flag & FIND_ADDR_BY_CALLBACK) 2092 kmem_free(buf, MAXNAMELEN); 2093 return (dip); 2094 } 2095 2096 /* 2097 * Find child of pdip with name: cname@caddr 2098 * Called by init_node() to look for duplicate nodes 2099 */ 2100 static dev_info_t * 2101 find_duplicate_child(dev_info_t *pdip, dev_info_t *dip) 2102 { 2103 dev_info_t *dup; 2104 char *cname = DEVI(dip)->devi_node_name; 2105 char *caddr = DEVI(dip)->devi_addr; 2106 2107 /* search nodes before dip */ 2108 dup = find_sibling(ddi_get_child(pdip), cname, caddr, 2109 FIND_NODE_BY_NODENAME, NULL); 2110 if (dup != dip) 2111 return (dup); 2112 2113 /* 2114 * search nodes after dip; normally this is not needed, 2115 */ 2116 return (find_sibling(ddi_get_next_sibling(dip), cname, caddr, 2117 FIND_NODE_BY_NODENAME, NULL)); 2118 } 2119 2120 /* 2121 * Find a child of a given name and address, using a callback to name 2122 * unnamed children. cname is the binding name. 2123 */ 2124 static dev_info_t * 2125 find_child_by_callback(dev_info_t *pdip, char *cname, char *caddr, 2126 int (*name_node)(dev_info_t *, char *, int)) 2127 { 2128 return (find_sibling(ddi_get_child(pdip), cname, caddr, 2129 FIND_NODE_BY_DRIVER|FIND_ADDR_BY_CALLBACK, name_node)); 2130 } 2131 2132 /* 2133 * Find a child of a given name and address, invoking initchild to name 2134 * unnamed children. cname is the node name. 2135 */ 2136 static dev_info_t * 2137 find_child_by_name(dev_info_t *pdip, char *cname, char *caddr) 2138 { 2139 dev_info_t *dip; 2140 2141 /* attempt search without changing state of preceding siblings */ 2142 dip = find_sibling(ddi_get_child(pdip), cname, caddr, 2143 FIND_NODE_BY_NODENAME, NULL); 2144 if (dip) 2145 return (dip); 2146 2147 return (find_sibling(ddi_get_child(pdip), cname, caddr, 2148 FIND_NODE_BY_NODENAME|FIND_ADDR_BY_INIT, NULL)); 2149 } 2150 2151 /* 2152 * Find a child of a given name and address, invoking initchild to name 2153 * unnamed children. cname is the node name. 2154 */ 2155 static dev_info_t * 2156 find_child_by_driver(dev_info_t *pdip, char *cname, char *caddr) 2157 { 2158 dev_info_t *dip; 2159 2160 /* attempt search without changing state of preceding siblings */ 2161 dip = find_sibling(ddi_get_child(pdip), cname, caddr, 2162 FIND_NODE_BY_DRIVER, NULL); 2163 if (dip) 2164 return (dip); 2165 2166 return (find_sibling(ddi_get_child(pdip), cname, caddr, 2167 FIND_NODE_BY_DRIVER|FIND_ADDR_BY_INIT, NULL)); 2168 } 2169 2170 /* 2171 * Find a child of a given address, invoking initchild to name 2172 * unnamed children. cname is the node name. 2173 * 2174 * NOTE: This function is only used during boot. One would hope that 2175 * unique sibling unit-addresses on hardware branches of the tree would 2176 * be a requirement to avoid two drivers trying to control the same 2177 * piece of hardware. Unfortunately there are some cases where this 2178 * situation exists (/ssm@0,0/pci@1c,700000 /ssm@0,0/sghsc@1c,700000). 2179 * Until unit-address uniqueness of siblings is guaranteed, use of this 2180 * interface for purposes other than boot should be avoided. 
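 *
 * For illustration, given the example above, both pci@1c,700000 and
 * sghsc@1c,700000 share the unit-address "1c,700000", so a by-address
 * lookup of that sibling list simply returns whichever node appears
 * first; find_child_by_name() and find_child_by_driver() avoid the
 * ambiguity by also matching the node name or the bound driver.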
2181 */ 2182 static dev_info_t * 2183 find_child_by_addr(dev_info_t *pdip, char *caddr) 2184 { 2185 dev_info_t *dip; 2186 2187 /* return NULL if called without a unit-address */ 2188 if ((caddr == NULL) || (*caddr == '\0')) 2189 return (NULL); 2190 2191 /* attempt search without changing state of preceding siblings */ 2192 dip = find_sibling(ddi_get_child(pdip), NULL, caddr, 2193 FIND_NODE_BY_ADDR, NULL); 2194 if (dip) 2195 return (dip); 2196 2197 return (find_sibling(ddi_get_child(pdip), NULL, caddr, 2198 FIND_NODE_BY_ADDR|FIND_ADDR_BY_INIT, NULL)); 2199 } 2200 2201 /* 2202 * Deleting a property list. Take care, since some property structures 2203 * may not be fully built. 2204 */ 2205 void 2206 i_ddi_prop_list_delete(ddi_prop_t *prop) 2207 { 2208 while (prop) { 2209 ddi_prop_t *next = prop->prop_next; 2210 if (prop->prop_name) 2211 kmem_free(prop->prop_name, strlen(prop->prop_name) + 1); 2212 if ((prop->prop_len != 0) && prop->prop_val) 2213 kmem_free(prop->prop_val, prop->prop_len); 2214 kmem_free(prop, sizeof (struct ddi_prop)); 2215 prop = next; 2216 } 2217 } 2218 2219 /* 2220 * Duplicate property list 2221 */ 2222 ddi_prop_t * 2223 i_ddi_prop_list_dup(ddi_prop_t *prop, uint_t flag) 2224 { 2225 ddi_prop_t *result, *prev, *copy; 2226 2227 if (prop == NULL) 2228 return (NULL); 2229 2230 result = prev = NULL; 2231 for (; prop != NULL; prop = prop->prop_next) { 2232 ASSERT(prop->prop_name != NULL); 2233 copy = kmem_zalloc(sizeof (struct ddi_prop), flag); 2234 if (copy == NULL) 2235 goto fail; 2236 2237 copy->prop_dev = prop->prop_dev; 2238 copy->prop_flags = prop->prop_flags; 2239 copy->prop_name = i_ddi_strdup(prop->prop_name, flag); 2240 if (copy->prop_name == NULL) 2241 goto fail; 2242 2243 if ((copy->prop_len = prop->prop_len) != 0) { 2244 copy->prop_val = kmem_zalloc(prop->prop_len, flag); 2245 if (copy->prop_val == NULL) 2246 goto fail; 2247 2248 bcopy(prop->prop_val, copy->prop_val, prop->prop_len); 2249 } 2250 2251 if (prev == NULL) 2252 result = prev = copy; 2253 else 2254 prev->prop_next = copy; 2255 prev = copy; 2256 } 2257 return (result); 2258 2259 fail: 2260 i_ddi_prop_list_delete(result); 2261 return (NULL); 2262 } 2263 2264 /* 2265 * Create a reference property list, currently used only for 2266 * driver global properties. Created with ref count of 1. 2267 */ 2268 ddi_prop_list_t * 2269 i_ddi_prop_list_create(ddi_prop_t *props) 2270 { 2271 ddi_prop_list_t *list = kmem_alloc(sizeof (*list), KM_SLEEP); 2272 list->prop_list = props; 2273 list->prop_ref = 1; 2274 return (list); 2275 } 2276 2277 /* 2278 * Increment/decrement reference count. The reference is 2279 * protected by dn_lock. The only interfaces modifying 2280 * dn_global_prop_ptr is in impl_make[free]_parlist(). 
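 *
 * A minimal usage sketch (hypothetical caller; the real users are
 * add_global_props()/remove_global_props() below):
 *
 *     LOCK_DEV_OPS(&dnp->dn_lock);
 *     plist = dnp->dn_global_prop_ptr;
 *     if (plist != NULL)
 *         i_ddi_prop_list_hold(plist, dnp);
 *     UNLOCK_DEV_OPS(&dnp->dn_lock);
 *
 * The matching i_ddi_prop_list_rele() is likewise made under dn_lock,
 * and the list is freed when the reference count drops to zero.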
2281 */ 2282 void 2283 i_ddi_prop_list_hold(ddi_prop_list_t *prop_list, struct devnames *dnp) 2284 { 2285 ASSERT(prop_list->prop_ref >= 0); 2286 ASSERT(mutex_owned(&dnp->dn_lock)); 2287 prop_list->prop_ref++; 2288 } 2289 2290 void 2291 i_ddi_prop_list_rele(ddi_prop_list_t *prop_list, struct devnames *dnp) 2292 { 2293 ASSERT(prop_list->prop_ref > 0); 2294 ASSERT(mutex_owned(&dnp->dn_lock)); 2295 prop_list->prop_ref--; 2296 2297 if (prop_list->prop_ref == 0) { 2298 i_ddi_prop_list_delete(prop_list->prop_list); 2299 kmem_free(prop_list, sizeof (*prop_list)); 2300 } 2301 } 2302 2303 /* 2304 * Free table of classes by drivers 2305 */ 2306 void 2307 i_ddi_free_exported_classes(char **classes, int n) 2308 { 2309 if ((n == 0) || (classes == NULL)) 2310 return; 2311 2312 kmem_free(classes, n * sizeof (char *)); 2313 } 2314 2315 /* 2316 * Get all classes exported by dip 2317 */ 2318 int 2319 i_ddi_get_exported_classes(dev_info_t *dip, char ***classes) 2320 { 2321 extern void lock_hw_class_list(); 2322 extern void unlock_hw_class_list(); 2323 extern int get_class(const char *, char **); 2324 2325 static char *rootclass = "root"; 2326 int n = 0, nclass = 0; 2327 char **buf; 2328 2329 ASSERT(i_ddi_node_state(dip) >= DS_BOUND); 2330 2331 if (dip == ddi_root_node()) /* rootnode exports class "root" */ 2332 nclass = 1; 2333 lock_hw_class_list(); 2334 nclass += get_class(ddi_driver_name(dip), NULL); 2335 if (nclass == 0) { 2336 unlock_hw_class_list(); 2337 return (0); /* no class exported */ 2338 } 2339 2340 *classes = buf = kmem_alloc(nclass * sizeof (char *), KM_SLEEP); 2341 if (dip == ddi_root_node()) { 2342 *buf++ = rootclass; 2343 n = 1; 2344 } 2345 n += get_class(ddi_driver_name(dip), buf); 2346 unlock_hw_class_list(); 2347 2348 ASSERT(n == nclass); /* make sure buf wasn't overrun */ 2349 return (nclass); 2350 } 2351 2352 /* 2353 * Helper functions, returns NULL if no memory. 2354 */ 2355 char * 2356 i_ddi_strdup(char *str, uint_t flag) 2357 { 2358 char *copy; 2359 2360 if (str == NULL) 2361 return (NULL); 2362 2363 copy = kmem_alloc(strlen(str) + 1, flag); 2364 if (copy == NULL) 2365 return (NULL); 2366 2367 (void) strcpy(copy, str); 2368 return (copy); 2369 } 2370 2371 /* 2372 * Load driver.conf file for major. Load all if major == -1. 2373 * 2374 * This is called 2375 * - early in boot after devnames array is initialized 2376 * - from vfs code when certain file systems are mounted 2377 * - from add_drv when a new driver is added 2378 */ 2379 int 2380 i_ddi_load_drvconf(major_t major) 2381 { 2382 extern int modrootloaded; 2383 2384 major_t low, high, m; 2385 2386 if (major == (major_t)-1) { 2387 low = 0; 2388 high = devcnt - 1; 2389 } else { 2390 if (major >= devcnt) 2391 return (EINVAL); 2392 low = high = major; 2393 } 2394 2395 for (m = low; m <= high; m++) { 2396 struct devnames *dnp = &devnamesp[m]; 2397 LOCK_DEV_OPS(&dnp->dn_lock); 2398 dnp->dn_flags &= ~DN_DRIVER_HELD; 2399 (void) impl_make_parlist(m); 2400 UNLOCK_DEV_OPS(&dnp->dn_lock); 2401 } 2402 2403 if (modrootloaded) { 2404 ddi_walk_devs(ddi_root_node(), reset_nexus_flags, 2405 (void *)(uintptr_t)major); 2406 } 2407 2408 /* build dn_list from old entries in path_to_inst */ 2409 e_ddi_unorphan_instance_nos(); 2410 return (0); 2411 } 2412 2413 /* 2414 * Unload a specific driver.conf. 
2415 * Don't support unload all because it doesn't make any sense 2416 */ 2417 int 2418 i_ddi_unload_drvconf(major_t major) 2419 { 2420 int error; 2421 struct devnames *dnp; 2422 2423 if (major >= devcnt) 2424 return (EINVAL); 2425 2426 /* 2427 * Take the per-driver lock while unloading driver.conf 2428 */ 2429 dnp = &devnamesp[major]; 2430 LOCK_DEV_OPS(&dnp->dn_lock); 2431 error = impl_free_parlist(major); 2432 UNLOCK_DEV_OPS(&dnp->dn_lock); 2433 return (error); 2434 } 2435 2436 /* 2437 * Merge a .conf node. This is called by nexus drivers to augment 2438 * hw node with properties specified in driver.conf file. This function 2439 * takes a callback routine to name nexus children. 2440 * The parent node must be held busy. 2441 * 2442 * It returns DDI_SUCCESS if the node is merged and DDI_FAILURE otherwise. 2443 */ 2444 int 2445 ndi_merge_node(dev_info_t *dip, int (*name_node)(dev_info_t *, char *, int)) 2446 { 2447 dev_info_t *hwdip; 2448 2449 ASSERT(ndi_dev_is_persistent_node(dip) == 0); 2450 ASSERT(ddi_get_name_addr(dip) != NULL); 2451 2452 hwdip = find_child_by_callback(ddi_get_parent(dip), 2453 ddi_binding_name(dip), ddi_get_name_addr(dip), name_node); 2454 2455 /* 2456 * Look for the hardware node that is the target of the merge; 2457 * return failure if not found. 2458 */ 2459 if ((hwdip == NULL) || (hwdip == dip)) { 2460 char *buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2461 NDI_CONFIG_DEBUG((CE_WARN, "No HW node to merge conf node %s", 2462 ddi_deviname(dip, buf))); 2463 kmem_free(buf, MAXNAMELEN); 2464 return (DDI_FAILURE); 2465 } 2466 2467 /* 2468 * Make sure the hardware node is uninitialized and has no property. 2469 * This may not be the case if new .conf files are load after some 2470 * hardware nodes have already been initialized and attached. 2471 * 2472 * N.B. We return success here because the node was *intended* 2473 * to be a merge node because there is a hw node with the name. 2474 */ 2475 mutex_enter(&DEVI(hwdip)->devi_lock); 2476 if (ndi_dev_is_persistent_node(hwdip) == 0) { 2477 char *buf; 2478 mutex_exit(&DEVI(hwdip)->devi_lock); 2479 2480 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2481 NDI_CONFIG_DEBUG((CE_NOTE, "Duplicate .conf node %s", 2482 ddi_deviname(dip, buf))); 2483 kmem_free(buf, MAXNAMELEN); 2484 return (DDI_SUCCESS); 2485 } 2486 2487 /* 2488 * If it is possible that the hardware has already been touched 2489 * then don't merge. 2490 */ 2491 if (i_ddi_node_state(hwdip) >= DS_INITIALIZED || 2492 (DEVI(hwdip)->devi_sys_prop_ptr != NULL) || 2493 (DEVI(hwdip)->devi_drv_prop_ptr != NULL)) { 2494 char *buf; 2495 mutex_exit(&DEVI(hwdip)->devi_lock); 2496 2497 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2498 NDI_CONFIG_DEBUG((CE_NOTE, 2499 "!Cannot merge .conf node %s with hw node %p " 2500 "-- not in proper state", 2501 ddi_deviname(dip, buf), (void *)hwdip)); 2502 kmem_free(buf, MAXNAMELEN); 2503 return (DDI_SUCCESS); 2504 } 2505 2506 mutex_enter(&DEVI(dip)->devi_lock); 2507 DEVI(hwdip)->devi_sys_prop_ptr = DEVI(dip)->devi_sys_prop_ptr; 2508 DEVI(hwdip)->devi_drv_prop_ptr = DEVI(dip)->devi_drv_prop_ptr; 2509 DEVI(dip)->devi_sys_prop_ptr = NULL; 2510 DEVI(dip)->devi_drv_prop_ptr = NULL; 2511 mutex_exit(&DEVI(dip)->devi_lock); 2512 mutex_exit(&DEVI(hwdip)->devi_lock); 2513 2514 return (DDI_SUCCESS); 2515 } 2516 2517 /* 2518 * Merge a "wildcard" .conf node. This is called by nexus drivers to 2519 * augment a set of hw node with properties specified in driver.conf file. 2520 * The parent node must be held busy. 
2521  *
2522  * There is no failure mode, since the nexus may or may not have child
2523  * nodes bound to the driver specified by the wildcard node.
2524  */
2525 void
2526 ndi_merge_wildcard_node(dev_info_t *dip)
2527 {
2528     dev_info_t *hwdip;
2529     dev_info_t *pdip = ddi_get_parent(dip);
2530     major_t major = ddi_driver_major(dip);
2531 
2532     /* never attempt to merge a hw node */
2533     ASSERT(ndi_dev_is_persistent_node(dip) == 0);
2534     /* must be bound to a driver major number */
2535     ASSERT(major != (major_t)-1);
2536 
2537     /*
2538      * Walk the child list to find all nodes bound to major
2539      * and copy properties.
2540      */
2541     mutex_enter(&DEVI(dip)->devi_lock);
2542     for (hwdip = ddi_get_child(pdip); hwdip;
2543         hwdip = ddi_get_next_sibling(hwdip)) {
2544         /*
2545          * Skip nodes not bound to same driver
2546          */
2547         if (ddi_driver_major(hwdip) != major)
2548             continue;
2549 
2550         /*
2551          * Skip .conf nodes
2552          */
2553         if (ndi_dev_is_persistent_node(hwdip) == 0)
2554             continue;
2555 
2556         /*
2557          * Make sure the node is uninitialized and has no property.
2558          */
2559         mutex_enter(&DEVI(hwdip)->devi_lock);
2560         if (i_ddi_node_state(hwdip) >= DS_INITIALIZED ||
2561             (DEVI(hwdip)->devi_sys_prop_ptr != NULL) ||
2562             (DEVI(hwdip)->devi_drv_prop_ptr != NULL)) {
2563             mutex_exit(&DEVI(hwdip)->devi_lock);
2564             NDI_CONFIG_DEBUG((CE_NOTE, "HW node %p state not "
2565                 "suitable for merging wildcard conf node %s",
2566                 (void *)hwdip, ddi_node_name(dip)));
2567             continue;
2568         }
2569 
2570         DEVI(hwdip)->devi_sys_prop_ptr =
2571             i_ddi_prop_list_dup(DEVI(dip)->devi_sys_prop_ptr, KM_SLEEP);
2572         DEVI(hwdip)->devi_drv_prop_ptr =
2573             i_ddi_prop_list_dup(DEVI(dip)->devi_drv_prop_ptr, KM_SLEEP);
2574         mutex_exit(&DEVI(hwdip)->devi_lock);
2575     }
2576     mutex_exit(&DEVI(dip)->devi_lock);
2577 }
2578 
2579 /*
2580  * Return the major number based on the compatible property. This interface
2581  * may be used in situations where we are trying to detect if a better driver
2582  * now exists for a device, so it must use the 'compatible' property. If
2583  * a non-NULL formp is specified and the binding was based on compatible then
2584  * return the pointer to the form used in *formp.
2585  */
2586 major_t
2587 ddi_compatible_driver_major(dev_info_t *dip, char **formp)
2588 {
2589     struct dev_info *devi = DEVI(dip);
2590     void *compat;
2591     size_t len;
2592     char *p = NULL;
2593     major_t major = (major_t)-1;
2594 
2595     if (formp)
2596         *formp = NULL;
2597 
2598     /*
2599      * Highest precedence binding is a path-oriented alias. Since this
2600      * requires a 'path', this type of binding occurs via a more obtuse
2601      * 'rebind'.  The need for a path-oriented alias 'rebind' is detected
2602      * after a successful DDI_CTLOPS_INITCHILD to another driver: this
2603      * is the first point at which the unit-address (or instance) of the
2604      * last component of the path is available (even though the path is
2605      * bound to the wrong driver at this point).
2606      */
2607     if (devi->devi_flags & DEVI_REBIND) {
2608         p = devi->devi_rebinding_name;
2609         major = ddi_name_to_major(p);
2610         if ((major != (major_t)-1) &&
2611             !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED)) {
2612             if (formp)
2613                 *formp = p;
2614             return (major);
2615         }
2616 
2617         /*
2618          * If for some reason devi_rebinding_name no longer resolves
2619          * to a proper driver then clear DEVI_REBIND.
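         *
         * As a purely hypothetical example of the precedence applied
         * below: with compatible = { "pci108e,1000", "pciclass,060000" },
         * the first form that ddi_name_to_major() resolves to a driver
         * not marked DN_DRIVER_REMOVED is returned; only when no
         * compatible form (and no rebind above) matches do we fall back
         * to the node name binding.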
2620 */ 2621 mutex_enter(&devi->devi_lock); 2622 devi->devi_flags &= ~DEVI_REBIND; 2623 mutex_exit(&devi->devi_lock); 2624 } 2625 2626 /* look up compatible property */ 2627 (void) lookup_compatible(dip, KM_SLEEP); 2628 compat = (void *)(devi->devi_compat_names); 2629 len = devi->devi_compat_length; 2630 2631 /* find the highest precedence compatible form with a driver binding */ 2632 while ((p = prom_decode_composite_string(compat, len, p)) != NULL) { 2633 major = ddi_name_to_major(p); 2634 if ((major != (major_t)-1) && 2635 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED)) { 2636 if (formp) 2637 *formp = p; 2638 return (major); 2639 } 2640 } 2641 2642 /* 2643 * none of the compatible forms have a driver binding, see if 2644 * the node name has a driver binding. 2645 */ 2646 major = ddi_name_to_major(ddi_node_name(dip)); 2647 if ((major != (major_t)-1) && 2648 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED)) 2649 return (major); 2650 2651 /* no driver */ 2652 return ((major_t)-1); 2653 } 2654 2655 /* 2656 * Static help functions 2657 */ 2658 2659 /* 2660 * lookup the "compatible" property and cache it's contents in the 2661 * device node. 2662 */ 2663 static int 2664 lookup_compatible(dev_info_t *dip, uint_t flag) 2665 { 2666 int rv; 2667 int prop_flags; 2668 uint_t ncompatstrs; 2669 char **compatstrpp; 2670 char *di_compat_strp; 2671 size_t di_compat_strlen; 2672 2673 if (DEVI(dip)->devi_compat_names) { 2674 return (DDI_SUCCESS); 2675 } 2676 2677 prop_flags = DDI_PROP_TYPE_STRING | DDI_PROP_DONTPASS; 2678 2679 if (flag & KM_NOSLEEP) { 2680 prop_flags |= DDI_PROP_DONTSLEEP; 2681 } 2682 2683 if (ndi_dev_is_prom_node(dip) == 0) { 2684 prop_flags |= DDI_PROP_NOTPROM; 2685 } 2686 2687 rv = ddi_prop_lookup_common(DDI_DEV_T_ANY, dip, prop_flags, 2688 "compatible", &compatstrpp, &ncompatstrs, 2689 ddi_prop_fm_decode_strings); 2690 2691 if (rv == DDI_PROP_NOT_FOUND) { 2692 return (DDI_SUCCESS); 2693 } 2694 2695 if (rv != DDI_PROP_SUCCESS) { 2696 return (DDI_FAILURE); 2697 } 2698 2699 /* 2700 * encode the compatible property data in the dev_info node 2701 */ 2702 rv = DDI_SUCCESS; 2703 if (ncompatstrs != 0) { 2704 di_compat_strp = encode_composite_string(compatstrpp, 2705 ncompatstrs, &di_compat_strlen, flag); 2706 if (di_compat_strp != NULL) { 2707 DEVI(dip)->devi_compat_names = di_compat_strp; 2708 DEVI(dip)->devi_compat_length = di_compat_strlen; 2709 } else { 2710 rv = DDI_FAILURE; 2711 } 2712 } 2713 ddi_prop_free(compatstrpp); 2714 return (rv); 2715 } 2716 2717 /* 2718 * Create a composite string from a list of strings. 2719 * 2720 * A composite string consists of a single buffer containing one 2721 * or more NULL terminated strings. 
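 *
 * For example (hypothetical input): the two strings "pci108e,1000" and
 * "glm" are encoded as the 17-byte buffer "pci108e,1000\0glm\0", and
 * *retsz is set to 17.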
2722 */ 2723 static char * 2724 encode_composite_string(char **strings, uint_t nstrings, size_t *retsz, 2725 uint_t flag) 2726 { 2727 uint_t index; 2728 char **strpp; 2729 uint_t slen; 2730 size_t cbuf_sz = 0; 2731 char *cbuf_p; 2732 char *cbuf_ip; 2733 2734 if (strings == NULL || nstrings == 0 || retsz == NULL) { 2735 return (NULL); 2736 } 2737 2738 for (index = 0, strpp = strings; index < nstrings; index++) 2739 cbuf_sz += strlen(*(strpp++)) + 1; 2740 2741 if ((cbuf_p = kmem_alloc(cbuf_sz, flag)) == NULL) { 2742 cmn_err(CE_NOTE, 2743 "?failed to allocate device node compatstr"); 2744 return (NULL); 2745 } 2746 2747 cbuf_ip = cbuf_p; 2748 for (index = 0, strpp = strings; index < nstrings; index++) { 2749 slen = strlen(*strpp); 2750 bcopy(*(strpp++), cbuf_ip, slen); 2751 cbuf_ip += slen; 2752 *(cbuf_ip++) = '\0'; 2753 } 2754 2755 *retsz = cbuf_sz; 2756 return (cbuf_p); 2757 } 2758 2759 static void 2760 link_to_driver_list(dev_info_t *dip) 2761 { 2762 major_t major = DEVI(dip)->devi_major; 2763 struct devnames *dnp; 2764 2765 ASSERT(major != (major_t)-1); 2766 2767 /* 2768 * Remove from orphan list 2769 */ 2770 if (ndi_dev_is_persistent_node(dip)) { 2771 dnp = &orphanlist; 2772 remove_from_dn_list(dnp, dip); 2773 } 2774 2775 /* 2776 * Add to per driver list 2777 */ 2778 dnp = &devnamesp[major]; 2779 add_to_dn_list(dnp, dip); 2780 } 2781 2782 static void 2783 unlink_from_driver_list(dev_info_t *dip) 2784 { 2785 major_t major = DEVI(dip)->devi_major; 2786 struct devnames *dnp; 2787 2788 ASSERT(major != (major_t)-1); 2789 2790 /* 2791 * Remove from per-driver list 2792 */ 2793 dnp = &devnamesp[major]; 2794 remove_from_dn_list(dnp, dip); 2795 2796 /* 2797 * Add to orphan list 2798 */ 2799 if (ndi_dev_is_persistent_node(dip)) { 2800 dnp = &orphanlist; 2801 add_to_dn_list(dnp, dip); 2802 } 2803 } 2804 2805 /* 2806 * scan the per-driver list looking for dev_info "dip" 2807 */ 2808 static dev_info_t * 2809 in_dn_list(struct devnames *dnp, dev_info_t *dip) 2810 { 2811 struct dev_info *idevi; 2812 2813 if ((idevi = DEVI(dnp->dn_head)) == NULL) 2814 return (NULL); 2815 2816 while (idevi) { 2817 if (idevi == DEVI(dip)) 2818 return (dip); 2819 idevi = idevi->devi_next; 2820 } 2821 return (NULL); 2822 } 2823 2824 /* 2825 * insert devinfo node 'dip' into the per-driver instance list 2826 * headed by 'dnp' 2827 * 2828 * Nodes on the per-driver list are ordered: HW - SID - PSEUDO. The order is 2829 * required for merging of .conf file data to work properly. 
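 *
 * For instance (hypothetical list), a newly linked pseudo node is
 * appended at the tail, a SID node is inserted after any existing
 * prom and SID nodes but ahead of pseudo nodes, and a prom node is
 * inserted ahead of SID and pseudo nodes.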
2830 */ 2831 static void 2832 add_to_ordered_dn_list(struct devnames *dnp, dev_info_t *dip) 2833 { 2834 dev_info_t **dipp; 2835 2836 ASSERT(mutex_owned(&(dnp->dn_lock))); 2837 2838 dipp = &dnp->dn_head; 2839 if (ndi_dev_is_prom_node(dip)) { 2840 /* 2841 * Find the first non-prom node or end of list 2842 */ 2843 while (*dipp && (ndi_dev_is_prom_node(*dipp) != 0)) { 2844 dipp = (dev_info_t **)&DEVI(*dipp)->devi_next; 2845 } 2846 } else if (ndi_dev_is_persistent_node(dip)) { 2847 /* 2848 * Find the first non-persistent node 2849 */ 2850 while (*dipp && (ndi_dev_is_persistent_node(*dipp) != 0)) { 2851 dipp = (dev_info_t **)&DEVI(*dipp)->devi_next; 2852 } 2853 } else { 2854 /* 2855 * Find the end of the list 2856 */ 2857 while (*dipp) { 2858 dipp = (dev_info_t **)&DEVI(*dipp)->devi_next; 2859 } 2860 } 2861 2862 DEVI(dip)->devi_next = DEVI(*dipp); 2863 *dipp = dip; 2864 } 2865 2866 /* 2867 * add a list of device nodes to the device node list in the 2868 * devnames structure 2869 */ 2870 static void 2871 add_to_dn_list(struct devnames *dnp, dev_info_t *dip) 2872 { 2873 /* 2874 * Look to see if node already exists 2875 */ 2876 LOCK_DEV_OPS(&(dnp->dn_lock)); 2877 if (in_dn_list(dnp, dip)) { 2878 cmn_err(CE_NOTE, "add_to_dn_list: node %s already in list", 2879 DEVI(dip)->devi_node_name); 2880 } else { 2881 add_to_ordered_dn_list(dnp, dip); 2882 } 2883 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 2884 } 2885 2886 static void 2887 remove_from_dn_list(struct devnames *dnp, dev_info_t *dip) 2888 { 2889 dev_info_t **plist; 2890 2891 LOCK_DEV_OPS(&(dnp->dn_lock)); 2892 2893 plist = (dev_info_t **)&dnp->dn_head; 2894 while (*plist && (*plist != dip)) { 2895 plist = (dev_info_t **)&DEVI(*plist)->devi_next; 2896 } 2897 2898 if (*plist != NULL) { 2899 ASSERT(*plist == dip); 2900 *plist = (dev_info_t *)(DEVI(dip)->devi_next); 2901 DEVI(dip)->devi_next = NULL; 2902 } else { 2903 NDI_CONFIG_DEBUG((CE_NOTE, 2904 "remove_from_dn_list: node %s not found in list", 2905 DEVI(dip)->devi_node_name)); 2906 } 2907 2908 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 2909 } 2910 2911 /* 2912 * Add and remove reference driver global property list 2913 */ 2914 static void 2915 add_global_props(dev_info_t *dip) 2916 { 2917 struct devnames *dnp; 2918 ddi_prop_list_t *plist; 2919 2920 ASSERT(DEVI(dip)->devi_global_prop_list == NULL); 2921 ASSERT(DEVI(dip)->devi_major != (major_t)-1); 2922 2923 dnp = &devnamesp[DEVI(dip)->devi_major]; 2924 LOCK_DEV_OPS(&dnp->dn_lock); 2925 plist = dnp->dn_global_prop_ptr; 2926 if (plist == NULL) { 2927 UNLOCK_DEV_OPS(&dnp->dn_lock); 2928 return; 2929 } 2930 i_ddi_prop_list_hold(plist, dnp); 2931 UNLOCK_DEV_OPS(&dnp->dn_lock); 2932 2933 mutex_enter(&DEVI(dip)->devi_lock); 2934 DEVI(dip)->devi_global_prop_list = plist; 2935 mutex_exit(&DEVI(dip)->devi_lock); 2936 } 2937 2938 static void 2939 remove_global_props(dev_info_t *dip) 2940 { 2941 ddi_prop_list_t *proplist; 2942 2943 mutex_enter(&DEVI(dip)->devi_lock); 2944 proplist = DEVI(dip)->devi_global_prop_list; 2945 DEVI(dip)->devi_global_prop_list = NULL; 2946 mutex_exit(&DEVI(dip)->devi_lock); 2947 2948 if (proplist) { 2949 major_t major; 2950 struct devnames *dnp; 2951 2952 major = ddi_driver_major(dip); 2953 ASSERT(major != (major_t)-1); 2954 dnp = &devnamesp[major]; 2955 LOCK_DEV_OPS(&dnp->dn_lock); 2956 i_ddi_prop_list_rele(proplist, dnp); 2957 UNLOCK_DEV_OPS(&dnp->dn_lock); 2958 } 2959 } 2960 2961 #ifdef DEBUG 2962 /* 2963 * Set this variable to '0' to disable the optimization, 2964 * and to 2 to print debug message. 
2965 */ 2966 static int optimize_dtree = 1; 2967 2968 static void 2969 debug_dtree(dev_info_t *devi, struct dev_info *adevi, char *service) 2970 { 2971 char *adeviname, *buf; 2972 2973 /* 2974 * Don't print unless optimize dtree is set to 2+ 2975 */ 2976 if (optimize_dtree <= 1) 2977 return; 2978 2979 buf = kmem_alloc(MAXNAMELEN, KM_SLEEP); 2980 adeviname = ddi_deviname((dev_info_t *)adevi, buf); 2981 if (*adeviname == '\0') 2982 adeviname = "root"; 2983 2984 cmn_err(CE_CONT, "%s %s -> %s\n", 2985 ddi_deviname(devi, buf), service, adeviname); 2986 2987 kmem_free(buf, MAXNAMELEN); 2988 } 2989 #else /* DEBUG */ 2990 #define debug_dtree(a1, a2, a3) /* nothing */ 2991 #endif /* DEBUG */ 2992 2993 static void 2994 ddi_optimize_dtree(dev_info_t *devi) 2995 { 2996 struct dev_info *pdevi; 2997 struct bus_ops *b; 2998 2999 pdevi = DEVI(devi)->devi_parent; 3000 ASSERT(pdevi); 3001 3002 /* 3003 * Set the unoptimized values 3004 */ 3005 DEVI(devi)->devi_bus_map_fault = pdevi; 3006 DEVI(devi)->devi_bus_dma_map = pdevi; 3007 DEVI(devi)->devi_bus_dma_allochdl = pdevi; 3008 DEVI(devi)->devi_bus_dma_freehdl = pdevi; 3009 DEVI(devi)->devi_bus_dma_bindhdl = pdevi; 3010 DEVI(devi)->devi_bus_dma_bindfunc = 3011 pdevi->devi_ops->devo_bus_ops->bus_dma_bindhdl; 3012 DEVI(devi)->devi_bus_dma_unbindhdl = pdevi; 3013 DEVI(devi)->devi_bus_dma_unbindfunc = 3014 pdevi->devi_ops->devo_bus_ops->bus_dma_unbindhdl; 3015 DEVI(devi)->devi_bus_dma_flush = pdevi; 3016 DEVI(devi)->devi_bus_dma_win = pdevi; 3017 DEVI(devi)->devi_bus_dma_ctl = pdevi; 3018 DEVI(devi)->devi_bus_ctl = pdevi; 3019 3020 #ifdef DEBUG 3021 if (optimize_dtree == 0) 3022 return; 3023 #endif /* DEBUG */ 3024 3025 b = pdevi->devi_ops->devo_bus_ops; 3026 3027 if (i_ddi_map_fault == b->bus_map_fault) { 3028 DEVI(devi)->devi_bus_map_fault = pdevi->devi_bus_map_fault; 3029 debug_dtree(devi, DEVI(devi)->devi_bus_map_fault, 3030 "bus_map_fault"); 3031 } 3032 3033 if (ddi_dma_map == b->bus_dma_map) { 3034 DEVI(devi)->devi_bus_dma_map = pdevi->devi_bus_dma_map; 3035 debug_dtree(devi, DEVI(devi)->devi_bus_dma_map, "bus_dma_map"); 3036 } 3037 3038 if (ddi_dma_allochdl == b->bus_dma_allochdl) { 3039 DEVI(devi)->devi_bus_dma_allochdl = 3040 pdevi->devi_bus_dma_allochdl; 3041 debug_dtree(devi, DEVI(devi)->devi_bus_dma_allochdl, 3042 "bus_dma_allochdl"); 3043 } 3044 3045 if (ddi_dma_freehdl == b->bus_dma_freehdl) { 3046 DEVI(devi)->devi_bus_dma_freehdl = pdevi->devi_bus_dma_freehdl; 3047 debug_dtree(devi, DEVI(devi)->devi_bus_dma_freehdl, 3048 "bus_dma_freehdl"); 3049 } 3050 3051 if (ddi_dma_bindhdl == b->bus_dma_bindhdl) { 3052 DEVI(devi)->devi_bus_dma_bindhdl = pdevi->devi_bus_dma_bindhdl; 3053 DEVI(devi)->devi_bus_dma_bindfunc = 3054 pdevi->devi_bus_dma_bindhdl->devi_ops-> 3055 devo_bus_ops->bus_dma_bindhdl; 3056 debug_dtree(devi, DEVI(devi)->devi_bus_dma_bindhdl, 3057 "bus_dma_bindhdl"); 3058 } 3059 3060 if (ddi_dma_unbindhdl == b->bus_dma_unbindhdl) { 3061 DEVI(devi)->devi_bus_dma_unbindhdl = 3062 pdevi->devi_bus_dma_unbindhdl; 3063 DEVI(devi)->devi_bus_dma_unbindfunc = 3064 pdevi->devi_bus_dma_unbindhdl->devi_ops-> 3065 devo_bus_ops->bus_dma_unbindhdl; 3066 debug_dtree(devi, DEVI(devi)->devi_bus_dma_unbindhdl, 3067 "bus_dma_unbindhdl"); 3068 } 3069 3070 if (ddi_dma_flush == b->bus_dma_flush) { 3071 DEVI(devi)->devi_bus_dma_flush = pdevi->devi_bus_dma_flush; 3072 debug_dtree(devi, DEVI(devi)->devi_bus_dma_flush, 3073 "bus_dma_flush"); 3074 } 3075 3076 if (ddi_dma_win == b->bus_dma_win) { 3077 DEVI(devi)->devi_bus_dma_win = pdevi->devi_bus_dma_win; 3078 
debug_dtree(devi, DEVI(devi)->devi_bus_dma_win, 3079 "bus_dma_win"); 3080 } 3081 3082 if (ddi_dma_mctl == b->bus_dma_ctl) { 3083 DEVI(devi)->devi_bus_dma_ctl = pdevi->devi_bus_dma_ctl; 3084 debug_dtree(devi, DEVI(devi)->devi_bus_dma_ctl, "bus_dma_ctl"); 3085 } 3086 3087 if (ddi_ctlops == b->bus_ctl) { 3088 DEVI(devi)->devi_bus_ctl = pdevi->devi_bus_ctl; 3089 debug_dtree(devi, DEVI(devi)->devi_bus_ctl, "bus_ctl"); 3090 } 3091 } 3092 3093 #define MIN_DEVINFO_LOG_SIZE max_ncpus 3094 #define MAX_DEVINFO_LOG_SIZE max_ncpus * 10 3095 3096 static void 3097 da_log_init() 3098 { 3099 devinfo_log_header_t *dh; 3100 int logsize = devinfo_log_size; 3101 3102 if (logsize == 0) 3103 logsize = MIN_DEVINFO_LOG_SIZE; 3104 else if (logsize > MAX_DEVINFO_LOG_SIZE) 3105 logsize = MAX_DEVINFO_LOG_SIZE; 3106 3107 dh = kmem_alloc(logsize * PAGESIZE, KM_SLEEP); 3108 mutex_init(&dh->dh_lock, NULL, MUTEX_DEFAULT, NULL); 3109 dh->dh_max = ((logsize * PAGESIZE) - sizeof (*dh)) / 3110 sizeof (devinfo_audit_t) + 1; 3111 dh->dh_curr = -1; 3112 dh->dh_hits = 0; 3113 3114 devinfo_audit_log = dh; 3115 } 3116 3117 /* 3118 * Log the stack trace in per-devinfo audit structure and also enter 3119 * it into a system wide log for recording the time history. 3120 */ 3121 static void 3122 da_log_enter(dev_info_t *dip) 3123 { 3124 devinfo_audit_t *da_log, *da = DEVI(dip)->devi_audit; 3125 devinfo_log_header_t *dh = devinfo_audit_log; 3126 3127 if (devinfo_audit_log == NULL) 3128 return; 3129 3130 ASSERT(da != NULL); 3131 3132 da->da_devinfo = dip; 3133 da->da_timestamp = gethrtime(); 3134 da->da_thread = curthread; 3135 da->da_node_state = DEVI(dip)->devi_node_state; 3136 da->da_device_state = DEVI(dip)->devi_state; 3137 da->da_depth = getpcstack(da->da_stack, DDI_STACK_DEPTH); 3138 3139 /* 3140 * Copy into common log and note the location for tracing history 3141 */ 3142 mutex_enter(&dh->dh_lock); 3143 dh->dh_hits++; 3144 dh->dh_curr++; 3145 if (dh->dh_curr >= dh->dh_max) 3146 dh->dh_curr -= dh->dh_max; 3147 da_log = &dh->dh_entry[dh->dh_curr]; 3148 mutex_exit(&dh->dh_lock); 3149 3150 bcopy(da, da_log, sizeof (devinfo_audit_t)); 3151 da->da_lastlog = da_log; 3152 } 3153 3154 static void 3155 attach_drivers() 3156 { 3157 int i; 3158 for (i = 0; i < devcnt; i++) { 3159 struct devnames *dnp = &devnamesp[i]; 3160 if ((dnp->dn_flags & DN_FORCE_ATTACH) && 3161 (ddi_hold_installed_driver((major_t)i) != NULL)) 3162 ddi_rele_driver((major_t)i); 3163 } 3164 } 3165 3166 /* 3167 * Launch a thread to force attach drivers. This avoids penalty on boot time. 3168 */ 3169 void 3170 i_ddi_forceattach_drivers() 3171 { 3172 /* 3173 * On i386, the USB drivers need to load and take over from the 3174 * SMM BIOS drivers ASAP after consconfig(), so make sure they 3175 * get loaded right here rather than letting the thread do it. 3176 * 3177 * The order here is important. EHCI must be loaded first, as 3178 * we have observed many systems on which hangs occur if the 3179 * {U,O}HCI companion controllers take over control from the BIOS 3180 * before EHCI does. These hangs are also caused by BIOSes leaving 3181 * interrupt-on-port-change enabled in the ehci controller, so that 3182 * when uhci/ohci reset themselves, it induces a port change on 3183 * the ehci companion controller. Since there's no interrupt handler 3184 * installed at the time, the moment that interrupt is unmasked, an 3185 * interrupt storm will occur. All this is averted when ehci is 3186 * loaded first. And now you know..... the REST of the story. 
3187 * 3188 * Regardless of platform, ehci needs to initialize first to avoid 3189 * unnecessary connects and disconnects on the companion controller 3190 * when ehci sets up the routing. 3191 */ 3192 (void) ddi_hold_installed_driver(ddi_name_to_major("ehci")); 3193 (void) ddi_hold_installed_driver(ddi_name_to_major("uhci")); 3194 (void) ddi_hold_installed_driver(ddi_name_to_major("ohci")); 3195 3196 /* 3197 * Attach IB VHCI driver before the force-attach thread attaches the 3198 * IB HCA driver. IB HCA driver will fail if IB Nexus has not yet 3199 * been attached. 3200 */ 3201 (void) ddi_hold_installed_driver(ddi_name_to_major("ib")); 3202 3203 (void) thread_create(NULL, 0, (void (*)())attach_drivers, NULL, 0, &p0, 3204 TS_RUN, minclsyspri); 3205 } 3206 3207 /* 3208 * This is a private DDI interface for optimizing boot performance. 3209 * I/O subsystem initialization is considered complete when devfsadm 3210 * is executed. 3211 * 3212 * NOTE: The start of syseventd happens to be a convenient indicator 3213 * of the completion of I/O initialization during boot. 3214 * The implementation should be replaced by something more robust. 3215 */ 3216 int 3217 i_ddi_io_initialized() 3218 { 3219 extern int sysevent_daemon_init; 3220 return (sysevent_daemon_init); 3221 } 3222 3223 /* 3224 * May be used to determine system boot state 3225 * "Available" means the system is for the most part up 3226 * and initialized, with all system services either up or 3227 * capable of being started. This state is set by devfsadm 3228 * during the boot process. The /dev filesystem infers 3229 * from this when implicit reconfig can be performed, 3230 * ie, devfsadm can be invoked. Please avoid making 3231 * further use of this unless it's really necessary. 3232 */ 3233 int 3234 i_ddi_sysavail() 3235 { 3236 return (devname_state & DS_SYSAVAIL); 3237 } 3238 3239 /* 3240 * May be used to determine if boot is a reconfigure boot. 3241 */ 3242 int 3243 i_ddi_reconfig() 3244 { 3245 return (devname_state & DS_RECONFIG); 3246 } 3247 3248 /* 3249 * Note system services are up, inform /dev. 3250 */ 3251 void 3252 i_ddi_set_sysavail() 3253 { 3254 if ((devname_state & DS_SYSAVAIL) == 0) { 3255 devname_state |= DS_SYSAVAIL; 3256 sdev_devstate_change(); 3257 } 3258 } 3259 3260 /* 3261 * Note reconfiguration boot, inform /dev. 3262 */ 3263 void 3264 i_ddi_set_reconfig() 3265 { 3266 if ((devname_state & DS_RECONFIG) == 0) { 3267 devname_state |= DS_RECONFIG; 3268 sdev_devstate_change(); 3269 } 3270 } 3271 3272 3273 /* 3274 * device tree walking 3275 */ 3276 3277 struct walk_elem { 3278 struct walk_elem *next; 3279 dev_info_t *dip; 3280 }; 3281 3282 static void 3283 free_list(struct walk_elem *list) 3284 { 3285 while (list) { 3286 struct walk_elem *next = list->next; 3287 kmem_free(list, sizeof (*list)); 3288 list = next; 3289 } 3290 } 3291 3292 static void 3293 append_node(struct walk_elem **list, dev_info_t *dip) 3294 { 3295 struct walk_elem *tail; 3296 struct walk_elem *elem = kmem_alloc(sizeof (*elem), KM_SLEEP); 3297 3298 elem->next = NULL; 3299 elem->dip = dip; 3300 3301 if (*list == NULL) { 3302 *list = elem; 3303 return; 3304 } 3305 3306 tail = *list; 3307 while (tail->next) 3308 tail = tail->next; 3309 3310 tail->next = elem; 3311 } 3312 3313 /* 3314 * The implementation of ddi_walk_devs(). 3315 */ 3316 static int 3317 walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg, 3318 int do_locking) 3319 { 3320 struct walk_elem *head = NULL; 3321 3322 /* 3323 * Do it in two passes. 
First pass invokes the
3324  * callback on each dip on the sibling list.  Second pass invokes the
3325  * callback on the children of each dip.
3326  */
3327     while (dip) {
3328         switch ((*f)(dip, arg)) {
3329         case DDI_WALK_TERMINATE:
3330             free_list(head);
3331             return (DDI_WALK_TERMINATE);
3332 
3333         case DDI_WALK_PRUNESIB:
3334             /* ignore sibling by setting dip to NULL */
3335             append_node(&head, dip);
3336             dip = NULL;
3337             break;
3338 
3339         case DDI_WALK_PRUNECHILD:
3340             /* don't worry about children */
3341             dip = ddi_get_next_sibling(dip);
3342             break;
3343 
3344         case DDI_WALK_CONTINUE:
3345         default:
3346             append_node(&head, dip);
3347             dip = ddi_get_next_sibling(dip);
3348             break;
3349         }
3350 
3351     }
3352 
3353     /* second pass */
3354     while (head) {
3355         int circ;
3356         struct walk_elem *next = head->next;
3357 
3358         if (do_locking)
3359             ndi_devi_enter(head->dip, &circ);
3360         if (walk_devs(ddi_get_child(head->dip), f, arg, do_locking) ==
3361             DDI_WALK_TERMINATE) {
3362             if (do_locking)
3363                 ndi_devi_exit(head->dip, circ);
3364             free_list(head);
3365             return (DDI_WALK_TERMINATE);
3366         }
3367         if (do_locking)
3368             ndi_devi_exit(head->dip, circ);
3369         kmem_free(head, sizeof (*head));
3370         head = next;
3371     }
3372 
3373     return (DDI_WALK_CONTINUE);
3374 }
3375 
3376 /*
3377  * This general-purpose routine traverses the tree of dev_info nodes,
3378  * starting from the given node, and calls the given function for each
3379  * node that it finds with the current node and the pointer arg (which
3380  * can point to a structure of information that the function
3381  * needs) as arguments.
3382  *
3383  * It does the walk a layer at a time, not depth-first. The given function
3384  * must return one of the following values:
3385  *     DDI_WALK_CONTINUE
3386  *     DDI_WALK_PRUNESIB
3387  *     DDI_WALK_PRUNECHILD
3388  *     DDI_WALK_TERMINATE
3389  *
3390  * N.B. Since we walk the sibling list, the caller must ensure that
3391  * the parent of dip is held against changes, unless the parent
3392  * is rootnode.  ndi_devi_enter() on the parent is sufficient.
3393  *
3394  * To avoid deadlock situations, caller must not attempt to
3395  * configure/unconfigure/remove device node in (*f)(), nor should
3396  * it attempt to recurse on other nodes in the system. Any
3397  * ndi_devi_enter() done by (*f)() must occur 'at-or-below' the
3398  * node entered prior to ddi_walk_devs(). Furthermore, if (*f)()
3399  * does any multi-threading (in framework *or* in driver) then the
3400  * ndi_devi_enter() calls done by dependent threads must be
3401  * 'strictly-below'.
3402  *
3403  * This is not callable from device autoconfiguration routines.
3404  * They include, but are not limited to, _init(9e), _fini(9e), probe(9e),
3405  * attach(9e), and detach(9e).
3406  */
3407 
3408 void
3409 ddi_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
3410 {
3411 
3412     ASSERT(dip == NULL || ddi_get_parent(dip) == NULL ||
3413         DEVI_BUSY_OWNED(ddi_get_parent(dip)));
3414 
3415     (void) walk_devs(dip, f, arg, 1);
3416 }
3417 
3418 /*
3419  * This is a general-purpose routine that traverses the per-driver list
3420  * and calls the given function for each node.  The function must return
3421  * one of the following values:
3422  *     DDI_WALK_CONTINUE
3423  *     DDI_WALK_TERMINATE
3424  *
3425  * N.B. The same restrictions from ddi_walk_devs() apply.
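 *
 * A minimal callback sketch (hypothetical driver name and callback,
 * not code from this file):
 *
 *     static int
 *     count_instances(dev_info_t *dip, void *arg)
 *     {
 *         (*(int *)arg)++;
 *         return (DDI_WALK_CONTINUE);
 *     }
 *
 *     int n = 0;
 *     e_ddi_walk_driver("mydrv", count_instances, &n);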
3426 */ 3427 3428 void 3429 e_ddi_walk_driver(char *drv, int (*f)(dev_info_t *, void *), void *arg) 3430 { 3431 major_t major; 3432 struct devnames *dnp; 3433 dev_info_t *dip; 3434 3435 major = ddi_name_to_major(drv); 3436 if (major == (major_t)-1) 3437 return; 3438 3439 dnp = &devnamesp[major]; 3440 LOCK_DEV_OPS(&dnp->dn_lock); 3441 dip = dnp->dn_head; 3442 while (dip) { 3443 ndi_hold_devi(dip); 3444 UNLOCK_DEV_OPS(&dnp->dn_lock); 3445 if ((*f)(dip, arg) == DDI_WALK_TERMINATE) { 3446 ndi_rele_devi(dip); 3447 return; 3448 } 3449 LOCK_DEV_OPS(&dnp->dn_lock); 3450 ndi_rele_devi(dip); 3451 dip = ddi_get_next(dip); 3452 } 3453 UNLOCK_DEV_OPS(&dnp->dn_lock); 3454 } 3455 3456 /* 3457 * argument to i_find_devi, a devinfo node search callback function. 3458 */ 3459 struct match_info { 3460 dev_info_t *dip; /* result */ 3461 char *nodename; /* if non-null, nodename must match */ 3462 int instance; /* if != -1, instance must match */ 3463 int attached; /* if != 0, i_ddi_devi_attached() */ 3464 }; 3465 3466 static int 3467 i_find_devi(dev_info_t *dip, void *arg) 3468 { 3469 struct match_info *info = (struct match_info *)arg; 3470 3471 if (((info->nodename == NULL) || 3472 (strcmp(ddi_node_name(dip), info->nodename) == 0)) && 3473 ((info->instance == -1) || 3474 (ddi_get_instance(dip) == info->instance)) && 3475 ((info->attached == 0) || i_ddi_devi_attached(dip))) { 3476 info->dip = dip; 3477 ndi_hold_devi(dip); 3478 return (DDI_WALK_TERMINATE); 3479 } 3480 3481 return (DDI_WALK_CONTINUE); 3482 } 3483 3484 /* 3485 * Find dip with a known node name and instance and return with it held 3486 */ 3487 dev_info_t * 3488 ddi_find_devinfo(char *nodename, int instance, int attached) 3489 { 3490 struct match_info info; 3491 3492 info.nodename = nodename; 3493 info.instance = instance; 3494 info.attached = attached; 3495 info.dip = NULL; 3496 3497 ddi_walk_devs(ddi_root_node(), i_find_devi, &info); 3498 return (info.dip); 3499 } 3500 3501 /* 3502 * Parse for name, addr, and minor names. Some args may be NULL. 3503 */ 3504 void 3505 i_ddi_parse_name(char *name, char **nodename, char **addrname, char **minorname) 3506 { 3507 char *cp; 3508 static char nulladdrname[] = ""; 3509 3510 /* default values */ 3511 if (nodename) 3512 *nodename = name; 3513 if (addrname) 3514 *addrname = nulladdrname; 3515 if (minorname) 3516 *minorname = NULL; 3517 3518 cp = name; 3519 while (*cp != '\0') { 3520 if (addrname && *cp == '@') { 3521 *addrname = cp + 1; 3522 *cp = '\0'; 3523 } else if (minorname && *cp == ':') { 3524 *minorname = cp + 1; 3525 *cp = '\0'; 3526 } 3527 ++cp; 3528 } 3529 } 3530 3531 static char * 3532 child_path_to_driver(dev_info_t *parent, char *child_name, char *unit_address) 3533 { 3534 char *p, *drvname = NULL; 3535 major_t maj; 3536 3537 /* 3538 * Construct the pathname and ask the implementation 3539 * if it can do a driver = f(pathname) for us, if not 3540 * we'll just default to using the node-name that 3541 * was given to us. We want to do this first to 3542 * allow the platform to use 'generic' names for 3543 * legacy device drivers. 3544 */ 3545 p = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 3546 (void) ddi_pathname(parent, p); 3547 (void) strcat(p, "/"); 3548 (void) strcat(p, child_name); 3549 if (unit_address && *unit_address) { 3550 (void) strcat(p, "@"); 3551 (void) strcat(p, unit_address); 3552 } 3553 3554 /* 3555 * Get the binding. If there is none, return the child_name 3556 * and let the caller deal with it. 
3557 */ 3558 maj = path_to_major(p); 3559 3560 kmem_free(p, MAXPATHLEN); 3561 3562 if (maj != (major_t)-1) 3563 drvname = ddi_major_to_name(maj); 3564 if (drvname == NULL) 3565 drvname = child_name; 3566 3567 return (drvname); 3568 } 3569 3570 3571 /* 3572 * Given the pathname of a device, fill in the dev_info_t value and/or the 3573 * dev_t value and/or the spectype, depending on which parameters are non-NULL. 3574 * If there is an error, this function returns -1. 3575 * 3576 * NOTE: If this function returns the dev_info_t structure, then it 3577 * does so with a hold on the devi. Caller should ensure that they get 3578 * decremented via ddi_release_devi() or ndi_rele_devi(); 3579 * 3580 * This function can be invoked in the boot case for a pathname without 3581 * device argument (:xxxx), traditionally treated as a minor name. 3582 * In this case, we do the following 3583 * (1) search the minor node of type DDM_DEFAULT. 3584 * (2) if no DDM_DEFAULT minor exists, then the first non-alias minor is chosen. 3585 * (3) if neither exists, a dev_t is faked with minor number = instance. 3586 * As of S9 FCS, no instance of #1 exists. #2 is used by several platforms 3587 * to default the boot partition to :a possibly by other OBP definitions. 3588 * #3 is used for booting off network interfaces, most SPARC network 3589 * drivers support Style-2 only, so only DDM_ALIAS minor exists. 3590 * 3591 * It is possible for OBP to present device args at the end of the path as 3592 * well as in the middle. For example, with IB the following strings are 3593 * valid boot paths. 3594 * a /pci@8,700000/ib@1,2:port=1,pkey=ff,dhcp,... 3595 * b /pci@8,700000/ib@1,1:port=1/ioc@xxxxxx,yyyyyyy:dhcp 3596 * Case (a), we first look for minor node "port=1,pkey...". 3597 * Failing that, we will pass "port=1,pkey..." to the bus_config 3598 * entry point of ib (HCA) driver. 3599 * Case (b), configure ib@1,1 as usual. Then invoke ib's bus_config 3600 * with argument "ioc@xxxxxxx,yyyyyyy:port=1". After configuring 3601 * the ioc, look for minor node dhcp. If not found, pass ":dhcp" 3602 * to ioc's bus_config entry point. 
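 *
 * A minimal usage sketch (hypothetical path and caller): callers that
 * ask for the dip must drop the hold themselves, e.g.
 *
 *     dev_info_t *dip;
 *     dev_t dev;
 *     int spectype;
 *
 *     if (resolve_pathname("/pseudo/foo@0:a", &dip, &dev, &spectype) == 0) {
 *         ... use dip, dev and spectype ...
 *         ndi_rele_devi(dip);
 *     }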
3603 */ 3604 int 3605 resolve_pathname(char *pathname, 3606 dev_info_t **dipp, dev_t *devtp, int *spectypep) 3607 { 3608 int error; 3609 dev_info_t *parent, *child; 3610 struct pathname pn; 3611 char *component, *config_name; 3612 char *minorname = NULL; 3613 char *prev_minor = NULL; 3614 dev_t devt = NODEV; 3615 int spectype; 3616 struct ddi_minor_data *dmn; 3617 3618 if (*pathname != '/') 3619 return (EINVAL); 3620 parent = ddi_root_node(); /* Begin at the top of the tree */ 3621 3622 if (error = pn_get(pathname, UIO_SYSSPACE, &pn)) 3623 return (error); 3624 pn_skipslash(&pn); 3625 3626 ASSERT(i_ddi_devi_attached(parent)); 3627 ndi_hold_devi(parent); 3628 3629 component = kmem_alloc(MAXNAMELEN, KM_SLEEP); 3630 config_name = kmem_alloc(MAXNAMELEN, KM_SLEEP); 3631 3632 while (pn_pathleft(&pn)) { 3633 /* remember prev minor (:xxx) in the middle of path */ 3634 if (minorname) 3635 prev_minor = i_ddi_strdup(minorname, KM_SLEEP); 3636 3637 /* Get component and chop off minorname */ 3638 (void) pn_getcomponent(&pn, component); 3639 i_ddi_parse_name(component, NULL, NULL, &minorname); 3640 3641 if (prev_minor == NULL) { 3642 (void) snprintf(config_name, MAXNAMELEN, "%s", 3643 component); 3644 } else { 3645 (void) snprintf(config_name, MAXNAMELEN, "%s:%s", 3646 component, prev_minor); 3647 kmem_free(prev_minor, strlen(prev_minor) + 1); 3648 prev_minor = NULL; 3649 } 3650 3651 /* 3652 * Find and configure the child 3653 */ 3654 if (ndi_devi_config_one(parent, config_name, &child, 3655 NDI_PROMNAME | NDI_NO_EVENT) != NDI_SUCCESS) { 3656 ndi_rele_devi(parent); 3657 pn_free(&pn); 3658 kmem_free(component, MAXNAMELEN); 3659 kmem_free(config_name, MAXNAMELEN); 3660 return (-1); 3661 } 3662 3663 ASSERT(i_ddi_devi_attached(child)); 3664 ndi_rele_devi(parent); 3665 parent = child; 3666 pn_skipslash(&pn); 3667 } 3668 3669 /* 3670 * First look for a minor node matching minorname. 3671 * Failing that, try to pass minorname to bus_config(). 
3672 */ 3673 if (minorname && i_ddi_minorname_to_devtspectype(parent, 3674 minorname, &devt, &spectype) == DDI_FAILURE) { 3675 (void) snprintf(config_name, MAXNAMELEN, "%s", minorname); 3676 if (ndi_devi_config_obp_args(parent, 3677 config_name, &child, 0) != NDI_SUCCESS) { 3678 ndi_rele_devi(parent); 3679 pn_free(&pn); 3680 kmem_free(component, MAXNAMELEN); 3681 kmem_free(config_name, MAXNAMELEN); 3682 NDI_CONFIG_DEBUG((CE_NOTE, 3683 "%s: minor node not found\n", pathname)); 3684 return (-1); 3685 } 3686 minorname = NULL; /* look for default minor */ 3687 ASSERT(i_ddi_devi_attached(child)); 3688 ndi_rele_devi(parent); 3689 parent = child; 3690 } 3691 3692 if (devtp || spectypep) { 3693 if (minorname == NULL) { 3694 /* search for a default entry */ 3695 mutex_enter(&(DEVI(parent)->devi_lock)); 3696 for (dmn = DEVI(parent)->devi_minor; dmn; 3697 dmn = dmn->next) { 3698 if (dmn->type == DDM_DEFAULT) { 3699 devt = dmn->ddm_dev; 3700 spectype = dmn->ddm_spec_type; 3701 break; 3702 } 3703 } 3704 3705 if (devt == NODEV) { 3706 /* 3707 * No default minor node, try the first one; 3708 * else, assume 1-1 instance-minor mapping 3709 */ 3710 dmn = DEVI(parent)->devi_minor; 3711 if (dmn && ((dmn->type == DDM_MINOR) || 3712 (dmn->type == DDM_INTERNAL_PATH))) { 3713 devt = dmn->ddm_dev; 3714 spectype = dmn->ddm_spec_type; 3715 } else { 3716 devt = makedevice( 3717 DEVI(parent)->devi_major, 3718 ddi_get_instance(parent)); 3719 spectype = S_IFCHR; 3720 } 3721 } 3722 mutex_exit(&(DEVI(parent)->devi_lock)); 3723 } 3724 if (devtp) 3725 *devtp = devt; 3726 if (spectypep) 3727 *spectypep = spectype; 3728 } 3729 3730 pn_free(&pn); 3731 kmem_free(component, MAXNAMELEN); 3732 kmem_free(config_name, MAXNAMELEN); 3733 3734 /* 3735 * If there is no error, return the appropriate parameters 3736 */ 3737 if (dipp != NULL) 3738 *dipp = parent; 3739 else { 3740 /* 3741 * We should really keep the ref count to keep the node from 3742 * detaching but ddi_pathname_to_dev_t() specifies a NULL dipp, 3743 * so we have no way of passing back the held dip. Not holding 3744 * the dip allows detaches to occur - which can cause problems 3745 * for subsystems which call ddi_pathname_to_dev_t (console). 3746 * 3747 * Instead of holding the dip, we place a ddi-no-autodetach 3748 * property on the node to prevent auto detaching. 3749 * 3750 * The right fix is to remove ddi_pathname_to_dev_t and replace 3751 * it, and all references, with a call that specifies a dipp. 3752 * In addition, the callers of this new interfaces would then 3753 * need to call ndi_rele_devi when the reference is complete. 3754 */ 3755 (void) ddi_prop_update_int(DDI_DEV_T_NONE, parent, 3756 DDI_NO_AUTODETACH, 1); 3757 ndi_rele_devi(parent); 3758 } 3759 3760 return (0); 3761 } 3762 3763 /* 3764 * Given the pathname of a device, return the dev_t of the corresponding 3765 * device. Returns NODEV on failure. 3766 * 3767 * Note that this call sets the DDI_NO_AUTODETACH property on the devinfo node. 3768 */ 3769 dev_t 3770 ddi_pathname_to_dev_t(char *pathname) 3771 { 3772 dev_t devt; 3773 int error; 3774 3775 error = resolve_pathname(pathname, NULL, &devt, NULL); 3776 3777 return (error ? NODEV : devt); 3778 } 3779 3780 /* 3781 * Translate a prom pathname to kernel devfs pathname. 3782 * Caller is assumed to allocate devfspath memory of 3783 * size at least MAXPATHLEN 3784 * 3785 * The prom pathname may not include minor name, but 3786 * devfs pathname has a minor name portion. 
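 *
 * For example (hypothetical device): a prom path such as
 * "/pci@0,0/scsi@1/disk@0,0" typically yields a devfs path of
 * "/pci@0,0/scsi@1/disk@0,0:a" once the default (or first) minor name
 * is appended, or a clone path built from CLONE_PATH plus the driver
 * name when the node only has alias minors.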
3787  */
3788 int
3789 i_ddi_prompath_to_devfspath(char *prompath, char *devfspath)
3790 {
3791     dev_t devt = (dev_t)NODEV;
3792     dev_info_t *dip = NULL;
3793     char *minor_name = NULL;
3794     int spectype;
3795     int error;
3796 
3797     error = resolve_pathname(prompath, &dip, &devt, &spectype);
3798     if (error)
3799         return (DDI_FAILURE);
3800     ASSERT(dip && devt != NODEV);
3801 
3802     /*
3803      * Get in-kernel devfs pathname
3804      */
3805     (void) ddi_pathname(dip, devfspath);
3806 
3807     mutex_enter(&(DEVI(dip)->devi_lock));
3808     minor_name = i_ddi_devtspectype_to_minorname(dip, devt, spectype);
3809     if (minor_name) {
3810         (void) strcat(devfspath, ":");
3811         (void) strcat(devfspath, minor_name);
3812     } else {
3813         /*
3814          * If minor_name is NULL, we have an alias minor node.
3815          * So manufacture a path to the corresponding clone minor.
3816          */
3817         (void) snprintf(devfspath, MAXPATHLEN, "%s:%s",
3818             CLONE_PATH, ddi_driver_name(dip));
3819     }
3820     mutex_exit(&(DEVI(dip)->devi_lock));
3821 
3822     /* release hold from resolve_pathname() */
3823     ndi_rele_devi(dip);
3824     return (0);
3825 }
3826 
3827 /*
3828  * Reset all the pure leaf drivers on the system at halt time
3829  */
3830 static int
3831 reset_leaf_device(dev_info_t *dip, void *arg)
3832 {
3833     _NOTE(ARGUNUSED(arg))
3834     struct dev_ops *ops;
3835 
3836     /* if the device doesn't need to be reset then there's nothing to do */
3837     if (!DEVI_NEED_RESET(dip))
3838         return (DDI_WALK_CONTINUE);
3839 
3840     /*
3841      * if the device isn't a char/block device or doesn't have a
3842      * reset entry point then there's nothing to do.
3843      */
3844     ops = ddi_get_driver(dip);
3845     if ((ops == NULL) || (ops->devo_cb_ops == NULL) ||
3846         (ops->devo_reset == nodev) || (ops->devo_reset == nulldev) ||
3847         (ops->devo_reset == NULL))
3848         return (DDI_WALK_CONTINUE);
3849 
3850     if (DEVI_IS_ATTACHING(dip) || DEVI_IS_DETACHING(dip)) {
3851         static char path[MAXPATHLEN];
3852 
3853         /*
3854          * bad news, this device has blocked in its attach or
3855          * detach routine, which means it is not safe to call its
3856          * devo_reset() entry point.
3857          */
3858         cmn_err(CE_WARN, "unable to reset device: %s",
3859             ddi_pathname(dip, path));
3860         return (DDI_WALK_CONTINUE);
3861     }
3862 
3863     NDI_CONFIG_DEBUG((CE_NOTE, "resetting %s%d\n",
3864         ddi_driver_name(dip), ddi_get_instance(dip)));
3865 
3866     (void) devi_reset(dip, DDI_RESET_FORCE);
3867     return (DDI_WALK_CONTINUE);
3868 }
3869 
3870 void
3871 reset_leaves(void)
3872 {
3873     /*
3874      * if we've reached here, the device tree better not be changing,
3875      * so either devinfo_freeze better be set or we better be panicking.
3876      */
3877     ASSERT(devinfo_freeze || panicstr);
3878 
3879     (void) walk_devs(top_devinfo, reset_leaf_device, NULL, 0);
3880 }
3881 
3882 /*
3883  * devtree_freeze() must be called before reset_leaves() during a
3884  * normal system shutdown.  It attempts to ensure that there are no
3885  * outstanding attach or detach operations in progress when reset_leaves()
3886  * is invoked.  It must be called before the system becomes single-threaded
3887  * because device attach and detach are multi-threaded operations.  (note
3888  * that during system shutdown the system doesn't actually become
3889  * single-threaded since other threads still exist, but the shutdown thread
3890  * will disable preemption for itself, raise its PIL, and stop all the
3891  * other cpus in the system, thereby effectively making the system
3892  * single-threaded.)
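 *
 * In other words, the expected shutdown-path ordering is roughly
 * (sketch only):
 *
 *     devtree_freeze();      -- block new attach/detach activity
 *     ... quiesce, stop other cpus ...
 *     reset_leaves();        -- now safe to reset pure leaf devices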
3893 */ 3894 void 3895 devtree_freeze(void) 3896 { 3897 int delayed = 0; 3898 3899 /* if we're panicing then the device tree isn't going to be changing */ 3900 if (panicstr) 3901 return; 3902 3903 /* stop all dev_info state changes in the device tree */ 3904 devinfo_freeze = gethrtime(); 3905 3906 /* 3907 * if we're not panicing and there are on-going attach or detach 3908 * operations, wait for up to 3 seconds for them to finish. This 3909 * is a randomly chosen interval but this should be ok because: 3910 * - 3 seconds is very small relative to the deadman timer. 3911 * - normal attach and detach operations should be very quick. 3912 * - attach and detach operations are fairly rare. 3913 */ 3914 while (!panicstr && atomic_add_long_nv(&devinfo_attach_detach, 0) && 3915 (delayed < 3)) { 3916 delayed += 1; 3917 3918 /* do a sleeping wait for one second */ 3919 ASSERT(!servicing_interrupt()); 3920 delay(drv_usectohz(MICROSEC)); 3921 } 3922 } 3923 3924 static int 3925 bind_dip(dev_info_t *dip, void *arg) 3926 { 3927 _NOTE(ARGUNUSED(arg)) 3928 char *path; 3929 major_t major, pmajor; 3930 3931 /* 3932 * If the node is currently bound to the wrong driver, try to unbind 3933 * so that we can rebind to the correct driver. 3934 */ 3935 if (i_ddi_node_state(dip) >= DS_BOUND) { 3936 major = ddi_compatible_driver_major(dip, NULL); 3937 if ((DEVI(dip)->devi_major == major) && 3938 (i_ddi_node_state(dip) >= DS_INITIALIZED)) { 3939 /* 3940 * Check for a path-oriented driver alias that 3941 * takes precedence over current driver binding. 3942 */ 3943 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3944 (void) ddi_pathname(dip, path); 3945 pmajor = ddi_name_to_major(path); 3946 if ((pmajor != (major_t)-1) && 3947 !(devnamesp[pmajor].dn_flags & DN_DRIVER_REMOVED)) 3948 major = pmajor; 3949 kmem_free(path, MAXPATHLEN); 3950 } 3951 3952 /* attempt unbind if current driver is incorrect */ 3953 if ((major != (major_t)-1) && 3954 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED) && 3955 (major != DEVI(dip)->devi_major)) 3956 (void) ndi_devi_unbind_driver(dip); 3957 } 3958 3959 /* If unbound, try to bind to a driver */ 3960 if (i_ddi_node_state(dip) < DS_BOUND) 3961 (void) ndi_devi_bind_driver(dip, 0); 3962 3963 return (DDI_WALK_CONTINUE); 3964 } 3965 3966 void 3967 i_ddi_bind_devs(void) 3968 { 3969 /* flush devfs so that ndi_devi_unbind_driver will work when possible */ 3970 (void) devfs_clean(top_devinfo, NULL, 0); 3971 3972 ddi_walk_devs(top_devinfo, bind_dip, (void *)NULL); 3973 } 3974 3975 static int 3976 unbind_children(dev_info_t *dip, void *arg) 3977 { 3978 int circ; 3979 dev_info_t *cdip; 3980 major_t major = (major_t)(uintptr_t)arg; 3981 3982 ndi_devi_enter(dip, &circ); 3983 cdip = ddi_get_child(dip); 3984 /* 3985 * We are called either from rem_drv or update_drv. 3986 * In both cases, we unbind persistent nodes and destroy 3987 * .conf nodes. In the case of rem_drv, this will be the 3988 * final state. In the case of update_drv, i_ddi_bind_devs() 3989 * will be invoked later to reenumerate (new) driver.conf 3990 * rebind persistent nodes. 
3991 */ 3992 while (cdip) { 3993 dev_info_t *next = ddi_get_next_sibling(cdip); 3994 if ((i_ddi_node_state(cdip) > DS_INITIALIZED) || 3995 (ddi_driver_major(cdip) != major)) { 3996 cdip = next; 3997 continue; 3998 } 3999 (void) ndi_devi_unbind_driver(cdip); 4000 if (ndi_dev_is_persistent_node(cdip) == 0) 4001 (void) ddi_remove_child(cdip, 0); 4002 cdip = next; 4003 } 4004 ndi_devi_exit(dip, circ); 4005 4006 return (DDI_WALK_CONTINUE); 4007 } 4008 4009 void 4010 i_ddi_unbind_devs(major_t major) 4011 { 4012 ddi_walk_devs(top_devinfo, unbind_children, (void *)(uintptr_t)major); 4013 } 4014 4015 /* 4016 * I/O Hotplug control 4017 */ 4018 4019 /* 4020 * create and attach a dev_info node from a .conf file spec 4021 */ 4022 static void 4023 init_spec_child(dev_info_t *pdip, struct hwc_spec *specp, uint_t flags) 4024 { 4025 _NOTE(ARGUNUSED(flags)) 4026 dev_info_t *dip; 4027 char *node_name; 4028 4029 if (((node_name = specp->hwc_devi_name) == NULL) || 4030 (ddi_name_to_major(node_name) == (major_t)-1)) { 4031 char *tmp = node_name; 4032 if (tmp == NULL) 4033 tmp = "<none>"; 4034 cmn_err(CE_CONT, 4035 "init_spec_child: parent=%s, bad spec (%s)\n", 4036 ddi_node_name(pdip), tmp); 4037 return; 4038 } 4039 4040 dip = i_ddi_alloc_node(pdip, node_name, (pnode_t)DEVI_PSEUDO_NODEID, 4041 -1, specp->hwc_devi_sys_prop_ptr, KM_SLEEP); 4042 4043 if (dip == NULL) 4044 return; 4045 4046 if (ddi_initchild(pdip, dip) != DDI_SUCCESS) 4047 (void) ddi_remove_child(dip, 0); 4048 } 4049 4050 /* 4051 * Lookup hwc specs from hash tables and make children from the spec 4052 * Because some .conf children are "merge" nodes, we also initialize 4053 * .conf children to merge properties onto hardware nodes. 4054 * 4055 * The pdip must be held busy. 4056 */ 4057 int 4058 i_ndi_make_spec_children(dev_info_t *pdip, uint_t flags) 4059 { 4060 extern struct hwc_spec *hwc_get_child_spec(dev_info_t *, major_t); 4061 int circ; 4062 struct hwc_spec *list, *spec; 4063 4064 ndi_devi_enter(pdip, &circ); 4065 if (DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN) { 4066 ndi_devi_exit(pdip, circ); 4067 return (DDI_SUCCESS); 4068 } 4069 4070 list = hwc_get_child_spec(pdip, (major_t)-1); 4071 for (spec = list; spec != NULL; spec = spec->hwc_next) { 4072 init_spec_child(pdip, spec, flags); 4073 } 4074 hwc_free_spec_list(list); 4075 4076 mutex_enter(&DEVI(pdip)->devi_lock); 4077 DEVI(pdip)->devi_flags |= DEVI_MADE_CHILDREN; 4078 mutex_exit(&DEVI(pdip)->devi_lock); 4079 ndi_devi_exit(pdip, circ); 4080 return (DDI_SUCCESS); 4081 } 4082 4083 /* 4084 * Run initchild on all child nodes such that instance assignment 4085 * for multiport network cards are contiguous. 4086 * 4087 * The pdip must be held busy. 
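 *
 * A typical caller (see config_immediate_children() below) builds the
 * .conf spec children first and looks roughly like this sketch:
 *
 *	ndi_devi_enter(pdip, &circ);
 *	(void) i_ndi_make_spec_children(pdip, flags);
 *	i_ndi_init_hw_children(pdip, flags);
 *	ndi_devi_exit(pdip, circ);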
4088 */ 4089 static void 4090 i_ndi_init_hw_children(dev_info_t *pdip, uint_t flags) 4091 { 4092 dev_info_t *dip; 4093 4094 ASSERT(DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN); 4095 4096 /* contiguous instance assignment */ 4097 e_ddi_enter_instance(); 4098 dip = ddi_get_child(pdip); 4099 while (dip) { 4100 if (ndi_dev_is_persistent_node(dip)) 4101 (void) i_ndi_config_node(dip, DS_INITIALIZED, flags); 4102 dip = ddi_get_next_sibling(dip); 4103 } 4104 e_ddi_exit_instance(); 4105 } 4106 4107 /* 4108 * report device status 4109 */ 4110 static void 4111 i_ndi_devi_report_status_change(dev_info_t *dip, char *path) 4112 { 4113 char *status; 4114 4115 if (!DEVI_NEED_REPORT(dip) || 4116 (i_ddi_node_state(dip) < DS_INITIALIZED)) { 4117 return; 4118 } 4119 4120 if (DEVI_IS_DEVICE_OFFLINE(dip)) { 4121 status = "offline"; 4122 } else if (DEVI_IS_DEVICE_DOWN(dip)) { 4123 status = "down"; 4124 } else if (DEVI_IS_BUS_QUIESCED(dip)) { 4125 status = "quiesced"; 4126 } else if (DEVI_IS_BUS_DOWN(dip)) { 4127 status = "down"; 4128 } else if (i_ddi_devi_attached(dip)) { 4129 status = "online"; 4130 } else { 4131 status = "unknown"; 4132 } 4133 4134 if (path == NULL) { 4135 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4136 cmn_err(CE_CONT, "?%s (%s%d) %s\n", 4137 ddi_pathname(dip, path), ddi_driver_name(dip), 4138 ddi_get_instance(dip), status); 4139 kmem_free(path, MAXPATHLEN); 4140 } else { 4141 cmn_err(CE_CONT, "?%s (%s%d) %s\n", 4142 path, ddi_driver_name(dip), 4143 ddi_get_instance(dip), status); 4144 } 4145 4146 mutex_enter(&(DEVI(dip)->devi_lock)); 4147 DEVI_REPORT_DONE(dip); 4148 mutex_exit(&(DEVI(dip)->devi_lock)); 4149 } 4150 4151 /* 4152 * log a notification that a dev_info node has been configured. 4153 */ 4154 static int 4155 i_log_devfs_add_devinfo(dev_info_t *dip, uint_t flags) 4156 { 4157 int se_err; 4158 char *pathname; 4159 sysevent_t *ev; 4160 sysevent_id_t eid; 4161 sysevent_value_t se_val; 4162 sysevent_attr_list_t *ev_attr_list = NULL; 4163 char *class_name; 4164 int no_transport = 0; 4165 4166 ASSERT(dip); 4167 4168 /* 4169 * Invalidate the devinfo snapshot cache 4170 */ 4171 i_ddi_di_cache_invalidate(KM_SLEEP); 4172 4173 /* do not generate ESC_DEVFS_DEVI_ADD event during boot */ 4174 if (!i_ddi_io_initialized()) 4175 return (DDI_SUCCESS); 4176 4177 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_DEVI_ADD, EP_DDI, SE_SLEEP); 4178 4179 pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4180 4181 (void) ddi_pathname(dip, pathname); 4182 ASSERT(strlen(pathname)); 4183 4184 se_val.value_type = SE_DATA_TYPE_STRING; 4185 se_val.value.sv_string = pathname; 4186 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 4187 &se_val, SE_SLEEP) != 0) { 4188 goto fail; 4189 } 4190 4191 /* add the device class attribute */ 4192 if ((class_name = i_ddi_devi_class(dip)) != NULL) { 4193 se_val.value_type = SE_DATA_TYPE_STRING; 4194 se_val.value.sv_string = class_name; 4195 4196 if (sysevent_add_attr(&ev_attr_list, 4197 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 4198 sysevent_free_attr(ev_attr_list); 4199 goto fail; 4200 } 4201 } 4202 4203 /* 4204 * must log a branch event too unless NDI_BRANCH_EVENT_OP is set, 4205 * in which case the branch event will be logged by the caller 4206 * after the entire branch has been configured. 4207 */ 4208 if ((flags & NDI_BRANCH_EVENT_OP) == 0) { 4209 /* 4210 * Instead of logging a separate branch event just add 4211 * DEVFS_BRANCH_EVENT attribute. It indicates devfsadmd to 4212 * generate a EC_DEV_BRANCH event. 
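	 *
	 * (Callers that do set NDI_BRANCH_EVENT_OP log the branch event
	 * themselves once the whole branch is configured; see
	 * ndi_devi_config_one(), which calls i_log_devfs_branch_add() on
	 * the attached child in that case.)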
4213 */ 4214 se_val.value_type = SE_DATA_TYPE_INT32; 4215 se_val.value.sv_int32 = 1; 4216 if (sysevent_add_attr(&ev_attr_list, 4217 DEVFS_BRANCH_EVENT, &se_val, SE_SLEEP) != 0) { 4218 sysevent_free_attr(ev_attr_list); 4219 goto fail; 4220 } 4221 } 4222 4223 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 4224 sysevent_free_attr(ev_attr_list); 4225 goto fail; 4226 } 4227 4228 if ((se_err = log_sysevent(ev, SE_SLEEP, &eid)) != 0) { 4229 if (se_err == SE_NO_TRANSPORT) 4230 no_transport = 1; 4231 goto fail; 4232 } 4233 4234 sysevent_free(ev); 4235 kmem_free(pathname, MAXPATHLEN); 4236 4237 return (DDI_SUCCESS); 4238 4239 fail: 4240 cmn_err(CE_WARN, "failed to log ESC_DEVFS_DEVI_ADD event for %s%s", 4241 pathname, (no_transport) ? " (syseventd not responding)" : ""); 4242 4243 cmn_err(CE_WARN, "/dev may not be current for driver %s. " 4244 "Run devfsadm -i %s", 4245 ddi_driver_name(dip), ddi_driver_name(dip)); 4246 4247 sysevent_free(ev); 4248 kmem_free(pathname, MAXPATHLEN); 4249 return (DDI_SUCCESS); 4250 } 4251 4252 /* 4253 * log a notification that a dev_info node has been unconfigured. 4254 */ 4255 static int 4256 i_log_devfs_remove_devinfo(char *pathname, char *class_name, char *driver_name, 4257 int instance, uint_t flags) 4258 { 4259 sysevent_t *ev; 4260 sysevent_id_t eid; 4261 sysevent_value_t se_val; 4262 sysevent_attr_list_t *ev_attr_list = NULL; 4263 int se_err; 4264 int no_transport = 0; 4265 4266 i_ddi_di_cache_invalidate(KM_SLEEP); 4267 4268 if (!i_ddi_io_initialized()) 4269 return (DDI_SUCCESS); 4270 4271 ev = sysevent_alloc(EC_DEVFS, ESC_DEVFS_DEVI_REMOVE, EP_DDI, SE_SLEEP); 4272 4273 se_val.value_type = SE_DATA_TYPE_STRING; 4274 se_val.value.sv_string = pathname; 4275 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 4276 &se_val, SE_SLEEP) != 0) { 4277 goto fail; 4278 } 4279 4280 if (class_name) { 4281 /* add the device class, driver name and instance attributes */ 4282 4283 se_val.value_type = SE_DATA_TYPE_STRING; 4284 se_val.value.sv_string = class_name; 4285 if (sysevent_add_attr(&ev_attr_list, 4286 DEVFS_DEVI_CLASS, &se_val, SE_SLEEP) != 0) { 4287 sysevent_free_attr(ev_attr_list); 4288 goto fail; 4289 } 4290 4291 se_val.value_type = SE_DATA_TYPE_STRING; 4292 se_val.value.sv_string = driver_name; 4293 if (sysevent_add_attr(&ev_attr_list, 4294 DEVFS_DRIVER_NAME, &se_val, SE_SLEEP) != 0) { 4295 sysevent_free_attr(ev_attr_list); 4296 goto fail; 4297 } 4298 4299 se_val.value_type = SE_DATA_TYPE_INT32; 4300 se_val.value.sv_int32 = instance; 4301 if (sysevent_add_attr(&ev_attr_list, 4302 DEVFS_INSTANCE, &se_val, SE_SLEEP) != 0) { 4303 sysevent_free_attr(ev_attr_list); 4304 goto fail; 4305 } 4306 } 4307 4308 /* 4309 * must log a branch event too unless NDI_BRANCH_EVENT_OP is set, 4310 * in which case the branch event will be logged by the caller 4311 * after the entire branch has been unconfigured. 4312 */ 4313 if ((flags & NDI_BRANCH_EVENT_OP) == 0) { 4314 /* 4315 * Instead of logging a separate branch event just add 4316 * DEVFS_BRANCH_EVENT attribute. It indicates devfsadmd to 4317 * generate a EC_DEV_BRANCH event. 
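	 *
	 * (When NDI_BRANCH_EVENT_OP is set, the branch remove events are
	 * instead queued as devinames on a brevq, see brevq_enqueue(), and
	 * logged later through log_and_free_brevq() and
	 * i_log_devfs_branch_remove().)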
4318 */ 4319 se_val.value_type = SE_DATA_TYPE_INT32; 4320 se_val.value.sv_int32 = 1; 4321 if (sysevent_add_attr(&ev_attr_list, 4322 DEVFS_BRANCH_EVENT, &se_val, SE_SLEEP) != 0) { 4323 sysevent_free_attr(ev_attr_list); 4324 goto fail; 4325 } 4326 } 4327 4328 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 4329 sysevent_free_attr(ev_attr_list); 4330 goto fail; 4331 } 4332 4333 if ((se_err = log_sysevent(ev, SE_SLEEP, &eid)) != 0) { 4334 if (se_err == SE_NO_TRANSPORT) 4335 no_transport = 1; 4336 goto fail; 4337 } 4338 4339 sysevent_free(ev); 4340 return (DDI_SUCCESS); 4341 4342 fail: 4343 sysevent_free(ev); 4344 cmn_err(CE_WARN, "failed to log ESC_DEVFS_DEVI_REMOVE event for %s%s", 4345 pathname, (no_transport) ? " (syseventd not responding)" : ""); 4346 return (DDI_SUCCESS); 4347 } 4348 4349 /* 4350 * log an event that a dev_info branch has been configured or unconfigured. 4351 */ 4352 static int 4353 i_log_devfs_branch(char *node_path, char *subclass) 4354 { 4355 int se_err; 4356 sysevent_t *ev; 4357 sysevent_id_t eid; 4358 sysevent_value_t se_val; 4359 sysevent_attr_list_t *ev_attr_list = NULL; 4360 int no_transport = 0; 4361 4362 /* do not generate the event during boot */ 4363 if (!i_ddi_io_initialized()) 4364 return (DDI_SUCCESS); 4365 4366 ev = sysevent_alloc(EC_DEVFS, subclass, EP_DDI, SE_SLEEP); 4367 4368 se_val.value_type = SE_DATA_TYPE_STRING; 4369 se_val.value.sv_string = node_path; 4370 4371 if (sysevent_add_attr(&ev_attr_list, DEVFS_PATHNAME, 4372 &se_val, SE_SLEEP) != 0) { 4373 goto fail; 4374 } 4375 4376 if (sysevent_attach_attributes(ev, ev_attr_list) != 0) { 4377 sysevent_free_attr(ev_attr_list); 4378 goto fail; 4379 } 4380 4381 if ((se_err = log_sysevent(ev, SE_SLEEP, &eid)) != 0) { 4382 if (se_err == SE_NO_TRANSPORT) 4383 no_transport = 1; 4384 goto fail; 4385 } 4386 4387 sysevent_free(ev); 4388 return (DDI_SUCCESS); 4389 4390 fail: 4391 cmn_err(CE_WARN, "failed to log %s branch event for %s%s", 4392 subclass, node_path, 4393 (no_transport) ? " (syseventd not responding)" : ""); 4394 4395 sysevent_free(ev); 4396 return (DDI_FAILURE); 4397 } 4398 4399 /* 4400 * log an event that a dev_info tree branch has been configured. 4401 */ 4402 static int 4403 i_log_devfs_branch_add(dev_info_t *dip) 4404 { 4405 char *node_path; 4406 int rv; 4407 4408 node_path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4409 (void) ddi_pathname(dip, node_path); 4410 rv = i_log_devfs_branch(node_path, ESC_DEVFS_BRANCH_ADD); 4411 kmem_free(node_path, MAXPATHLEN); 4412 4413 return (rv); 4414 } 4415 4416 /* 4417 * log an event that a dev_info tree branch has been unconfigured. 4418 */ 4419 static int 4420 i_log_devfs_branch_remove(char *node_path) 4421 { 4422 return (i_log_devfs_branch(node_path, ESC_DEVFS_BRANCH_REMOVE)); 4423 } 4424 4425 /* 4426 * enqueue the dip's deviname on the branch event queue. 4427 */ 4428 static struct brevq_node * 4429 brevq_enqueue(struct brevq_node **brevqp, dev_info_t *dip, 4430 struct brevq_node *child) 4431 { 4432 struct brevq_node *brn; 4433 char *deviname; 4434 4435 deviname = kmem_alloc(MAXNAMELEN, KM_SLEEP); 4436 (void) ddi_deviname(dip, deviname); 4437 4438 brn = kmem_zalloc(sizeof (*brn), KM_SLEEP); 4439 brn->brn_deviname = i_ddi_strdup(deviname, KM_SLEEP); 4440 kmem_free(deviname, MAXNAMELEN); 4441 brn->brn_child = child; 4442 brn->brn_sibling = *brevqp; 4443 *brevqp = brn; 4444 4445 return (brn); 4446 } 4447 4448 /* 4449 * free the memory allocated for the elements on the branch event queue. 
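 *
 * Queues are built with brevq_enqueue() and consumed either by
 * log_and_free_brevq()/log_and_free_brevq_dip(), which log the queued
 * ESC_DEVFS_BRANCH_REMOVE events before freeing, or by free_brevq()
 * alone when the queued events need not be logged; see the callers
 * below for which case applies.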
4450 */ 4451 static void 4452 free_brevq(struct brevq_node *brevq) 4453 { 4454 struct brevq_node *brn, *next_brn; 4455 4456 for (brn = brevq; brn != NULL; brn = next_brn) { 4457 next_brn = brn->brn_sibling; 4458 ASSERT(brn->brn_child == NULL); 4459 kmem_free(brn->brn_deviname, strlen(brn->brn_deviname) + 1); 4460 kmem_free(brn, sizeof (*brn)); 4461 } 4462 } 4463 4464 /* 4465 * log the events queued up on the branch event queue and free the 4466 * associated memory. 4467 * 4468 * node_path must have been allocated with at least MAXPATHLEN bytes. 4469 */ 4470 static void 4471 log_and_free_brevq(char *node_path, struct brevq_node *brevq) 4472 { 4473 struct brevq_node *brn; 4474 char *p; 4475 4476 p = node_path + strlen(node_path); 4477 for (brn = brevq; brn != NULL; brn = brn->brn_sibling) { 4478 (void) strcpy(p, brn->brn_deviname); 4479 (void) i_log_devfs_branch_remove(node_path); 4480 } 4481 *p = '\0'; 4482 4483 free_brevq(brevq); 4484 } 4485 4486 /* 4487 * log the events queued up on the branch event queue and free the 4488 * associated memory. Same as the previous function but operates on dip. 4489 */ 4490 static void 4491 log_and_free_brevq_dip(dev_info_t *dip, struct brevq_node *brevq) 4492 { 4493 char *path; 4494 4495 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4496 (void) ddi_pathname(dip, path); 4497 log_and_free_brevq(path, brevq); 4498 kmem_free(path, MAXPATHLEN); 4499 } 4500 4501 /* 4502 * log the outstanding branch remove events for the grand children of the dip 4503 * and free the associated memory. 4504 */ 4505 static void 4506 log_and_free_br_events_on_grand_children(dev_info_t *dip, 4507 struct brevq_node *brevq) 4508 { 4509 struct brevq_node *brn; 4510 char *path; 4511 char *p; 4512 4513 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4514 (void) ddi_pathname(dip, path); 4515 p = path + strlen(path); 4516 for (brn = brevq; brn != NULL; brn = brn->brn_sibling) { 4517 if (brn->brn_child) { 4518 (void) strcpy(p, brn->brn_deviname); 4519 /* now path contains the node path to the dip's child */ 4520 log_and_free_brevq(path, brn->brn_child); 4521 brn->brn_child = NULL; 4522 } 4523 } 4524 kmem_free(path, MAXPATHLEN); 4525 } 4526 4527 /* 4528 * log and cleanup branch remove events for the grand children of the dip. 4529 */ 4530 static void 4531 cleanup_br_events_on_grand_children(dev_info_t *dip, struct brevq_node **brevqp) 4532 { 4533 dev_info_t *child; 4534 struct brevq_node *brevq, *brn, *prev_brn, *next_brn; 4535 char *path; 4536 int circ; 4537 4538 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4539 prev_brn = NULL; 4540 brevq = *brevqp; 4541 4542 ndi_devi_enter(dip, &circ); 4543 for (brn = brevq; brn != NULL; brn = next_brn) { 4544 next_brn = brn->brn_sibling; 4545 for (child = ddi_get_child(dip); child != NULL; 4546 child = ddi_get_next_sibling(child)) { 4547 if (i_ddi_node_state(child) >= DS_INITIALIZED) { 4548 (void) ddi_deviname(child, path); 4549 if (strcmp(path, brn->brn_deviname) == 0) 4550 break; 4551 } 4552 } 4553 4554 if (child != NULL && !(DEVI_EVREMOVE(child))) { 4555 /* 4556 * Event state is not REMOVE. So branch remove event 4557 * is not going be generated on brn->brn_child. 4558 * If any branch remove events were queued up on 4559 * brn->brn_child log them and remove the brn 4560 * from the queue. 
4561 */ 4562 if (brn->brn_child) { 4563 (void) ddi_pathname(dip, path); 4564 (void) strcat(path, brn->brn_deviname); 4565 log_and_free_brevq(path, brn->brn_child); 4566 } 4567 4568 if (prev_brn) 4569 prev_brn->brn_sibling = next_brn; 4570 else 4571 *brevqp = next_brn; 4572 4573 kmem_free(brn->brn_deviname, 4574 strlen(brn->brn_deviname) + 1); 4575 kmem_free(brn, sizeof (*brn)); 4576 } else { 4577 /* 4578 * Free up the outstanding branch remove events 4579 * queued on brn->brn_child since brn->brn_child 4580 * itself is eligible for branch remove event. 4581 */ 4582 if (brn->brn_child) { 4583 free_brevq(brn->brn_child); 4584 brn->brn_child = NULL; 4585 } 4586 prev_brn = brn; 4587 } 4588 } 4589 4590 ndi_devi_exit(dip, circ); 4591 kmem_free(path, MAXPATHLEN); 4592 } 4593 4594 static int 4595 need_remove_event(dev_info_t *dip, int flags) 4596 { 4597 if ((flags & (NDI_NO_EVENT | NDI_AUTODETACH)) == 0 && 4598 (flags & (NDI_DEVI_OFFLINE | NDI_UNCONFIG | NDI_DEVI_REMOVE)) && 4599 !(DEVI_EVREMOVE(dip))) 4600 return (1); 4601 else 4602 return (0); 4603 } 4604 4605 /* 4606 * Unconfigure children/descendants of the dip. 4607 * 4608 * If the operation involves a branch event NDI_BRANCH_EVENT_OP is set 4609 * through out the unconfiguration. On successful return *brevqp is set to 4610 * a queue of dip's child devinames for which branch remove events need 4611 * to be generated. 4612 */ 4613 static int 4614 devi_unconfig_branch(dev_info_t *dip, dev_info_t **dipp, int flags, 4615 struct brevq_node **brevqp) 4616 { 4617 int rval; 4618 4619 *brevqp = NULL; 4620 4621 if ((!(flags & NDI_BRANCH_EVENT_OP)) && need_remove_event(dip, flags)) 4622 flags |= NDI_BRANCH_EVENT_OP; 4623 4624 if (flags & NDI_BRANCH_EVENT_OP) { 4625 rval = devi_unconfig_common(dip, dipp, flags, (major_t)-1, 4626 brevqp); 4627 4628 if (rval != NDI_SUCCESS && (*brevqp)) { 4629 log_and_free_brevq_dip(dip, *brevqp); 4630 *brevqp = NULL; 4631 } 4632 } else 4633 rval = devi_unconfig_common(dip, dipp, flags, (major_t)-1, 4634 NULL); 4635 4636 return (rval); 4637 } 4638 4639 /* 4640 * If the dip is already bound to a driver transition to DS_INITIALIZED 4641 * in order to generate an event in the case where the node was left in 4642 * DS_BOUND state since boot (never got attached) and the node is now 4643 * being offlined. 4644 */ 4645 static void 4646 init_bound_node_ev(dev_info_t *pdip, dev_info_t *dip, int flags) 4647 { 4648 if (need_remove_event(dip, flags) && 4649 i_ddi_node_state(dip) == DS_BOUND && 4650 i_ddi_devi_attached(pdip) && !DEVI_IS_DEVICE_OFFLINE(dip)) 4651 (void) ddi_initchild(pdip, dip); 4652 } 4653 4654 /* 4655 * attach a node/branch with parent already held busy 4656 */ 4657 static int 4658 devi_attach_node(dev_info_t *dip, uint_t flags) 4659 { 4660 dev_info_t *pdip = ddi_get_parent(dip); 4661 4662 ASSERT(pdip && DEVI_BUSY_OWNED(pdip)); 4663 4664 mutex_enter(&(DEVI(dip)->devi_lock)); 4665 if (flags & NDI_DEVI_ONLINE) { 4666 if (!i_ddi_devi_attached(dip)) 4667 DEVI_SET_REPORT(dip); 4668 DEVI_SET_DEVICE_ONLINE(dip); 4669 } 4670 if (DEVI_IS_DEVICE_OFFLINE(dip)) { 4671 mutex_exit(&(DEVI(dip)->devi_lock)); 4672 return (NDI_FAILURE); 4673 } 4674 mutex_exit(&(DEVI(dip)->devi_lock)); 4675 4676 if (i_ddi_attachchild(dip) != DDI_SUCCESS) { 4677 mutex_enter(&(DEVI(dip)->devi_lock)); 4678 DEVI_SET_EVUNINIT(dip); 4679 mutex_exit(&(DEVI(dip)->devi_lock)); 4680 4681 if (ndi_dev_is_persistent_node(dip)) 4682 (void) ddi_uninitchild(dip); 4683 else { 4684 /* 4685 * Delete .conf nodes and nodes that are not 4686 * well formed. 
4687 */ 4688 (void) ddi_remove_child(dip, 0); 4689 } 4690 return (NDI_FAILURE); 4691 } 4692 4693 i_ndi_devi_report_status_change(dip, NULL); 4694 4695 /* 4696 * log an event, but not during devfs lookups in which case 4697 * NDI_NO_EVENT is set. 4698 */ 4699 if ((flags & NDI_NO_EVENT) == 0 && !(DEVI_EVADD(dip))) { 4700 (void) i_log_devfs_add_devinfo(dip, flags); 4701 4702 mutex_enter(&(DEVI(dip)->devi_lock)); 4703 DEVI_SET_EVADD(dip); 4704 mutex_exit(&(DEVI(dip)->devi_lock)); 4705 } else if (!(flags & NDI_NO_EVENT_STATE_CHNG)) { 4706 mutex_enter(&(DEVI(dip)->devi_lock)); 4707 DEVI_SET_EVADD(dip); 4708 mutex_exit(&(DEVI(dip)->devi_lock)); 4709 } 4710 4711 return (NDI_SUCCESS); 4712 } 4713 4714 /* internal function to config immediate children */ 4715 static int 4716 config_immediate_children(dev_info_t *pdip, uint_t flags, major_t major) 4717 { 4718 dev_info_t *child, *next; 4719 int circ; 4720 4721 ASSERT(i_ddi_devi_attached(pdip)); 4722 4723 if (!NEXUS_DRV(ddi_get_driver(pdip))) 4724 return (NDI_SUCCESS); 4725 4726 NDI_CONFIG_DEBUG((CE_CONT, 4727 "config_immediate_children: %s%d (%p), flags=%x\n", 4728 ddi_driver_name(pdip), ddi_get_instance(pdip), 4729 (void *)pdip, flags)); 4730 4731 ndi_devi_enter(pdip, &circ); 4732 4733 if (flags & NDI_CONFIG_REPROBE) { 4734 mutex_enter(&DEVI(pdip)->devi_lock); 4735 DEVI(pdip)->devi_flags &= ~DEVI_MADE_CHILDREN; 4736 mutex_exit(&DEVI(pdip)->devi_lock); 4737 } 4738 (void) i_ndi_make_spec_children(pdip, flags); 4739 i_ndi_init_hw_children(pdip, flags); 4740 4741 child = ddi_get_child(pdip); 4742 while (child) { 4743 /* NOTE: devi_attach_node() may remove the dip */ 4744 next = ddi_get_next_sibling(child); 4745 4746 /* 4747 * Configure all nexus nodes or leaf nodes with 4748 * matching driver major 4749 */ 4750 if ((major == (major_t)-1) || 4751 (major == ddi_driver_major(child)) || 4752 ((flags & NDI_CONFIG) && (is_leaf_node(child) == 0))) 4753 (void) devi_attach_node(child, flags); 4754 child = next; 4755 } 4756 4757 ndi_devi_exit(pdip, circ); 4758 4759 return (NDI_SUCCESS); 4760 } 4761 4762 /* internal function to config grand children */ 4763 static int 4764 config_grand_children(dev_info_t *pdip, uint_t flags, major_t major) 4765 { 4766 struct mt_config_handle *hdl; 4767 4768 /* multi-threaded configuration of child nexus */ 4769 hdl = mt_config_init(pdip, NULL, flags, major, MT_CONFIG_OP, NULL); 4770 mt_config_children(hdl); 4771 4772 return (mt_config_fini(hdl)); /* wait for threads to exit */ 4773 } 4774 4775 /* 4776 * Common function for device tree configuration, 4777 * either BUS_CONFIG_ALL or BUS_CONFIG_DRIVER. 4778 * The NDI_CONFIG flag causes recursive configuration of 4779 * grandchildren, devfs usage should not recurse. 4780 */ 4781 static int 4782 devi_config_common(dev_info_t *dip, int flags, major_t major) 4783 { 4784 int error; 4785 int (*f)(); 4786 4787 if (!i_ddi_devi_attached(dip)) 4788 return (NDI_FAILURE); 4789 4790 if (pm_pre_config(dip, NULL) != DDI_SUCCESS) 4791 return (NDI_FAILURE); 4792 4793 if ((DEVI(dip)->devi_ops->devo_bus_ops == NULL) || 4794 (DEVI(dip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 4795 (f = DEVI(dip)->devi_ops->devo_bus_ops->bus_config) == NULL) { 4796 error = config_immediate_children(dip, flags, major); 4797 } else { 4798 /* call bus_config entry point */ 4799 ddi_bus_config_op_t bus_op = (major == (major_t)-1) ? 
4800 BUS_CONFIG_ALL : BUS_CONFIG_DRIVER; 4801 error = (*f)(dip, 4802 flags, bus_op, (void *)(uintptr_t)major, NULL, 0); 4803 } 4804 4805 if (error) { 4806 pm_post_config(dip, NULL); 4807 return (error); 4808 } 4809 4810 /* 4811 * Some callers, notably SCSI, need to mark the devfs cache 4812 * to be rebuilt together with the config operation. 4813 */ 4814 if (flags & NDI_DEVFS_CLEAN) 4815 (void) devfs_clean(dip, NULL, 0); 4816 4817 if (flags & NDI_CONFIG) 4818 (void) config_grand_children(dip, flags, major); 4819 4820 pm_post_config(dip, NULL); 4821 4822 return (NDI_SUCCESS); 4823 } 4824 4825 /* 4826 * Framework entry point for BUS_CONFIG_ALL 4827 */ 4828 int 4829 ndi_devi_config(dev_info_t *dip, int flags) 4830 { 4831 NDI_CONFIG_DEBUG((CE_CONT, 4832 "ndi_devi_config: par = %s%d (%p), flags = 0x%x\n", 4833 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4834 4835 return (devi_config_common(dip, flags, (major_t)-1)); 4836 } 4837 4838 /* 4839 * Framework entry point for BUS_CONFIG_DRIVER, bound to major 4840 */ 4841 int 4842 ndi_devi_config_driver(dev_info_t *dip, int flags, major_t major) 4843 { 4844 /* don't abuse this function */ 4845 ASSERT(major != (major_t)-1); 4846 4847 NDI_CONFIG_DEBUG((CE_CONT, 4848 "ndi_devi_config_driver: par = %s%d (%p), flags = 0x%x\n", 4849 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 4850 4851 return (devi_config_common(dip, flags, major)); 4852 } 4853 4854 /* 4855 * Called by nexus drivers to configure its children. 4856 */ 4857 static int 4858 devi_config_one(dev_info_t *pdip, char *devnm, dev_info_t **cdipp, 4859 uint_t flags, clock_t timeout) 4860 { 4861 dev_info_t *vdip = NULL; 4862 char *drivername = NULL; 4863 int find_by_addr = 0; 4864 char *name, *addr; 4865 int v_circ, p_circ; 4866 clock_t end_time; /* 60 sec */ 4867 int probed; 4868 dev_info_t *cdip; 4869 mdi_pathinfo_t *cpip; 4870 4871 *cdipp = NULL; 4872 4873 if (!NEXUS_DRV(ddi_get_driver(pdip))) 4874 return (NDI_FAILURE); 4875 4876 /* split name into "name@addr" parts */ 4877 i_ddi_parse_name(devnm, &name, &addr, NULL); 4878 4879 /* 4880 * If the nexus is a pHCI and we are not processing a pHCI from 4881 * mdi bus_config code then we need to know the vHCI. 4882 */ 4883 if (MDI_PHCI(pdip)) 4884 vdip = mdi_devi_get_vdip(pdip); 4885 4886 /* 4887 * We may have a genericname on a system that creates drivername 4888 * nodes (from .conf files). Find the drivername by nodeid. If we 4889 * can't find a node with devnm as the node name then we search by 4890 * drivername. This allows an implementation to supply a genericly 4891 * named boot path (disk) and locate drivename nodes (sd). 4892 */ 4893 if (flags & NDI_PROMNAME) { 4894 drivername = child_path_to_driver(pdip, name, addr); 4895 find_by_addr = 1; 4896 } 4897 4898 /* 4899 * Determine end_time: This routine should *not* be called with a 4900 * constant non-zero timeout argument, the caller should be adjusting 4901 * the timeout argument relative to when it *started* its asynchronous 4902 * enumeration. 4903 */ 4904 if (timeout > 0) 4905 end_time = ddi_get_lbolt() + timeout; 4906 4907 for (;;) { 4908 /* 4909 * For pHCI, enter (vHCI, pHCI) and search for pathinfo/client 4910 * child - break out of for(;;) loop if child found. 4911 * NOTE: Lock order for ndi_devi_enter is (vHCI, pHCI). 
4912 */ 4913 if (vdip) { 4914 /* use mdi_devi_enter ordering */ 4915 ndi_devi_enter(vdip, &v_circ); 4916 ndi_devi_enter(pdip, &p_circ); 4917 cpip = mdi_pi_find(pdip, NULL, addr); 4918 cdip = mdi_pi_get_client(cpip); 4919 if (cdip) 4920 break; 4921 } else 4922 ndi_devi_enter(pdip, &p_circ); 4923 4924 /* 4925 * When not a vHCI or not all pHCI devices are required to 4926 * enumerated under the vHCI (NDI_MDI_FALLBACK) search for 4927 * devinfo child. 4928 */ 4929 if ((vdip == NULL) || (flags & NDI_MDI_FALLBACK)) { 4930 /* determine if .conf nodes already built */ 4931 probed = (DEVI(pdip)->devi_flags & DEVI_MADE_CHILDREN); 4932 4933 /* 4934 * Search for child by name, if not found then search 4935 * for a node bound to the drivername driver with the 4936 * specified "@addr". Break out of for(;;) loop if 4937 * child found. To support path-oriented aliases 4938 * binding on boot-device, we do a search_by_addr too. 4939 */ 4940 again: (void) i_ndi_make_spec_children(pdip, flags); 4941 cdip = find_child_by_name(pdip, name, addr); 4942 if ((cdip == NULL) && drivername) 4943 cdip = find_child_by_driver(pdip, 4944 drivername, addr); 4945 if ((cdip == NULL) && find_by_addr) 4946 cdip = find_child_by_addr(pdip, addr); 4947 if (cdip) 4948 break; 4949 4950 /* 4951 * determine if we should reenumerate .conf nodes 4952 * and look for child again. 4953 */ 4954 if (probed && 4955 i_ddi_io_initialized() && 4956 (flags & NDI_CONFIG_REPROBE) && 4957 ((timeout <= 0) || (ddi_get_lbolt() >= end_time))) { 4958 probed = 0; 4959 mutex_enter(&DEVI(pdip)->devi_lock); 4960 DEVI(pdip)->devi_flags &= ~DEVI_MADE_CHILDREN; 4961 mutex_exit(&DEVI(pdip)->devi_lock); 4962 goto again; 4963 } 4964 } 4965 4966 /* break out of for(;;) if time expired */ 4967 if ((timeout <= 0) || (ddi_get_lbolt() >= end_time)) 4968 break; 4969 4970 /* 4971 * Child not found, exit and wait for asynchronous enumeration 4972 * to add child (or timeout). The addition of a new child (vhci 4973 * or phci) requires the asynchronous enumeration thread to 4974 * ndi_devi_enter/ndi_devi_exit. This exit will signal devi_cv 4975 * and cause us to return from ndi_devi_exit_and_wait, after 4976 * which we loop and search for the requested child again. 4977 */ 4978 NDI_DEBUG(flags, (CE_CONT, 4979 "%s%d: waiting for child %s@%s, timeout %ld", 4980 ddi_driver_name(pdip), ddi_get_instance(pdip), 4981 name, addr, timeout)); 4982 if (vdip) { 4983 /* 4984 * Mark vHCI for pHCI ndi_devi_exit broadcast. 4985 */ 4986 mutex_enter(&DEVI(vdip)->devi_lock); 4987 DEVI(vdip)->devi_flags |= 4988 DEVI_PHCI_SIGNALS_VHCI; 4989 mutex_exit(&DEVI(vdip)->devi_lock); 4990 ndi_devi_exit(pdip, p_circ); 4991 4992 /* 4993 * NB: There is a small race window from above 4994 * ndi_devi_exit() of pdip to cv_wait() in 4995 * ndi_devi_exit_and_wait() which can result in 4996 * not immediately finding a new pHCI child 4997 * of a pHCI that uses NDI_MDI_FAILBACK. 4998 */ 4999 ndi_devi_exit_and_wait(vdip, v_circ, end_time); 5000 } else { 5001 ndi_devi_exit_and_wait(pdip, p_circ, end_time); 5002 } 5003 } 5004 5005 /* done with paddr, fixup i_ddi_parse_name '@'->'\0' change */ 5006 if (addr && *addr != '\0') 5007 *(addr - 1) = '@'; 5008 5009 /* attach and hold the child, returning pointer to child */ 5010 if (cdip && (devi_attach_node(cdip, flags) == NDI_SUCCESS)) { 5011 ndi_hold_devi(cdip); 5012 *cdipp = cdip; 5013 } 5014 5015 ndi_devi_exit(pdip, p_circ); 5016 if (vdip) 5017 ndi_devi_exit(vdip, v_circ); 5018 return (*cdipp ? 
NDI_SUCCESS : NDI_FAILURE); 5019 } 5020 5021 /* 5022 * Enumerate and attach a child specified by name 'devnm'. 5023 * Called by devfs lookup and DR to perform a BUS_CONFIG_ONE. 5024 * Note: devfs does not make use of NDI_CONFIG to configure 5025 * an entire branch. 5026 */ 5027 int 5028 ndi_devi_config_one(dev_info_t *dip, char *devnm, dev_info_t **dipp, int flags) 5029 { 5030 int error; 5031 int (*f)(); 5032 int branch_event = 0; 5033 5034 ASSERT(dipp); 5035 ASSERT(i_ddi_devi_attached(dip)); 5036 5037 NDI_CONFIG_DEBUG((CE_CONT, 5038 "ndi_devi_config_one: par = %s%d (%p), child = %s\n", 5039 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, devnm)); 5040 5041 if (pm_pre_config(dip, devnm) != DDI_SUCCESS) 5042 return (NDI_FAILURE); 5043 5044 if ((flags & (NDI_NO_EVENT | NDI_BRANCH_EVENT_OP)) == 0 && 5045 (flags & NDI_CONFIG)) { 5046 flags |= NDI_BRANCH_EVENT_OP; 5047 branch_event = 1; 5048 } 5049 5050 if ((DEVI(dip)->devi_ops->devo_bus_ops == NULL) || 5051 (DEVI(dip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 5052 (f = DEVI(dip)->devi_ops->devo_bus_ops->bus_config) == NULL) { 5053 error = devi_config_one(dip, devnm, dipp, flags, 0); 5054 } else { 5055 /* call bus_config entry point */ 5056 error = (*f)(dip, flags, BUS_CONFIG_ONE, (void *)devnm, dipp); 5057 } 5058 5059 if (error || (flags & NDI_CONFIG) == 0) { 5060 pm_post_config(dip, devnm); 5061 return (error); 5062 } 5063 5064 /* 5065 * DR usage (i.e. call with NDI_CONFIG) recursively configures 5066 * grandchildren, performing a BUS_CONFIG_ALL from the node attached 5067 * by the BUS_CONFIG_ONE. 5068 */ 5069 ASSERT(*dipp); 5070 5071 error = devi_config_common(*dipp, flags, (major_t)-1); 5072 5073 pm_post_config(dip, devnm); 5074 5075 if (branch_event) 5076 (void) i_log_devfs_branch_add(*dipp); 5077 5078 return (error); 5079 } 5080 5081 5082 /* 5083 * Enumerate and attach a child specified by name 'devnm'. 5084 * Called during configure the OBP options. This configures 5085 * only one node. 
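 *
 * Illustrative call only (the "name@addr" string shown is hypothetical):
 *
 *	dev_info_t *child;
 *
 *	if (ndi_devi_config_obp_args(parent, "disk@0,0", &child, 0) ==
 *	    NDI_SUCCESS) {
 *		... child refers to the configured node ...
 *	}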
5086 */ 5087 static int 5088 ndi_devi_config_obp_args(dev_info_t *parent, char *devnm, 5089 dev_info_t **childp, int flags) 5090 { 5091 int error; 5092 int (*f)(); 5093 5094 ASSERT(childp); 5095 ASSERT(i_ddi_devi_attached(parent)); 5096 5097 NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_config_obp_args: " 5098 "par = %s%d (%p), child = %s\n", ddi_driver_name(parent), 5099 ddi_get_instance(parent), (void *)parent, devnm)); 5100 5101 if ((DEVI(parent)->devi_ops->devo_bus_ops == NULL) || 5102 (DEVI(parent)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 5103 (f = DEVI(parent)->devi_ops->devo_bus_ops->bus_config) == NULL) { 5104 error = NDI_FAILURE; 5105 } else { 5106 /* call bus_config entry point */ 5107 error = (*f)(parent, flags, 5108 BUS_CONFIG_OBP_ARGS, (void *)devnm, childp); 5109 } 5110 return (error); 5111 } 5112 5113 5114 /* 5115 * detach a node with parent already held busy 5116 */ 5117 static int 5118 devi_detach_node(dev_info_t *dip, uint_t flags) 5119 { 5120 dev_info_t *pdip = ddi_get_parent(dip); 5121 int ret = NDI_SUCCESS; 5122 ddi_eventcookie_t cookie; 5123 5124 ASSERT(pdip && DEVI_BUSY_OWNED(pdip)); 5125 5126 if (flags & NDI_POST_EVENT) { 5127 if (i_ddi_devi_attached(pdip)) { 5128 if (ddi_get_eventcookie(dip, DDI_DEVI_REMOVE_EVENT, 5129 &cookie) == NDI_SUCCESS) 5130 (void) ndi_post_event(dip, dip, cookie, NULL); 5131 } 5132 } 5133 5134 if (i_ddi_detachchild(dip, flags) != DDI_SUCCESS) 5135 return (NDI_FAILURE); 5136 5137 if (flags & NDI_AUTODETACH) 5138 return (NDI_SUCCESS); 5139 5140 /* 5141 * For DR, even bound nodes may need to have offline 5142 * flag set. 5143 */ 5144 if (flags & NDI_DEVI_OFFLINE) { 5145 mutex_enter(&(DEVI(dip)->devi_lock)); 5146 DEVI_SET_DEVICE_OFFLINE(dip); 5147 mutex_exit(&(DEVI(dip)->devi_lock)); 5148 } 5149 5150 if (i_ddi_node_state(dip) == DS_INITIALIZED) { 5151 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 5152 (void) ddi_pathname(dip, path); 5153 if (flags & NDI_DEVI_OFFLINE) 5154 i_ndi_devi_report_status_change(dip, path); 5155 5156 if (need_remove_event(dip, flags)) { 5157 (void) i_log_devfs_remove_devinfo(path, 5158 i_ddi_devi_class(dip), 5159 (char *)ddi_driver_name(dip), 5160 ddi_get_instance(dip), 5161 flags); 5162 mutex_enter(&(DEVI(dip)->devi_lock)); 5163 DEVI_SET_EVREMOVE(dip); 5164 mutex_exit(&(DEVI(dip)->devi_lock)); 5165 } 5166 kmem_free(path, MAXPATHLEN); 5167 } 5168 5169 if (flags & (NDI_UNCONFIG | NDI_DEVI_REMOVE)) { 5170 ret = ddi_uninitchild(dip); 5171 if (ret == NDI_SUCCESS) { 5172 /* 5173 * Remove uninitialized pseudo nodes because 5174 * system props are lost and the node cannot be 5175 * reattached. 5176 */ 5177 if (!ndi_dev_is_persistent_node(dip)) 5178 flags |= NDI_DEVI_REMOVE; 5179 5180 if (flags & NDI_DEVI_REMOVE) 5181 ret = ddi_remove_child(dip, 0); 5182 } 5183 } 5184 5185 return (ret); 5186 } 5187 5188 /* 5189 * unconfigure immediate children of bus nexus device 5190 */ 5191 static int 5192 unconfig_immediate_children( 5193 dev_info_t *dip, 5194 dev_info_t **dipp, 5195 int flags, 5196 major_t major) 5197 { 5198 int rv = NDI_SUCCESS; 5199 int circ, vcirc; 5200 dev_info_t *child; 5201 dev_info_t *vdip = NULL; 5202 dev_info_t *next; 5203 5204 ASSERT(dipp == NULL || *dipp == NULL); 5205 5206 /* 5207 * Scan forward to see if we will be processing a pHCI child. If we 5208 * have a child that is a pHCI and vHCI and pHCI are not siblings then 5209 * enter vHCI before parent(pHCI) to prevent deadlock with mpxio 5210 * Client power management operations. 
5211 */ 5212 ndi_devi_enter(dip, &circ); 5213 for (child = ddi_get_child(dip); child; 5214 child = ddi_get_next_sibling(child)) { 5215 /* skip same nodes we skip below */ 5216 if (((major != (major_t)-1) && 5217 (major != ddi_driver_major(child))) || 5218 ((flags & NDI_AUTODETACH) && !is_leaf_node(child))) 5219 continue; 5220 5221 if (MDI_PHCI(child)) { 5222 vdip = mdi_devi_get_vdip(child); 5223 /* 5224 * If vHCI and vHCI is not a sibling of pHCI 5225 * then enter in (vHCI, parent(pHCI)) order. 5226 */ 5227 if (vdip && (ddi_get_parent(vdip) != dip)) { 5228 ndi_devi_exit(dip, circ); 5229 5230 /* use mdi_devi_enter ordering */ 5231 ndi_devi_enter(vdip, &vcirc); 5232 ndi_devi_enter(dip, &circ); 5233 break; 5234 } else 5235 vdip = NULL; 5236 } 5237 } 5238 5239 child = ddi_get_child(dip); 5240 while (child) { 5241 next = ddi_get_next_sibling(child); 5242 5243 if ((major != (major_t)-1) && 5244 (major != ddi_driver_major(child))) { 5245 child = next; 5246 continue; 5247 } 5248 5249 /* skip nexus nodes during autodetach */ 5250 if ((flags & NDI_AUTODETACH) && !is_leaf_node(child)) { 5251 child = next; 5252 continue; 5253 } 5254 5255 if (devi_detach_node(child, flags) != NDI_SUCCESS) { 5256 if (dipp && *dipp == NULL) { 5257 ndi_hold_devi(child); 5258 *dipp = child; 5259 } 5260 rv = NDI_FAILURE; 5261 } 5262 5263 /* 5264 * Continue upon failure--best effort algorithm 5265 */ 5266 child = next; 5267 } 5268 5269 ndi_devi_exit(dip, circ); 5270 if (vdip) 5271 ndi_devi_exit(vdip, vcirc); 5272 5273 return (rv); 5274 } 5275 5276 /* 5277 * unconfigure grand children of bus nexus device 5278 */ 5279 static int 5280 unconfig_grand_children( 5281 dev_info_t *dip, 5282 dev_info_t **dipp, 5283 int flags, 5284 major_t major, 5285 struct brevq_node **brevqp) 5286 { 5287 struct mt_config_handle *hdl; 5288 5289 if (brevqp) 5290 *brevqp = NULL; 5291 5292 /* multi-threaded configuration of child nexus */ 5293 hdl = mt_config_init(dip, dipp, flags, major, MT_UNCONFIG_OP, brevqp); 5294 mt_config_children(hdl); 5295 5296 return (mt_config_fini(hdl)); /* wait for threads to exit */ 5297 } 5298 5299 /* 5300 * Unconfigure children/descendants of the dip. 5301 * 5302 * If brevqp is not NULL, on return *brevqp is set to a queue of dip's 5303 * child devinames for which branch remove events need to be generated. 5304 */ 5305 static int 5306 devi_unconfig_common( 5307 dev_info_t *dip, 5308 dev_info_t **dipp, 5309 int flags, 5310 major_t major, 5311 struct brevq_node **brevqp) 5312 { 5313 int rv; 5314 int pm_cookie; 5315 int (*f)(); 5316 ddi_bus_config_op_t bus_op; 5317 5318 if (dipp) 5319 *dipp = NULL; 5320 if (brevqp) 5321 *brevqp = NULL; 5322 5323 /* 5324 * Power up the dip if it is powered off. If the flag bit 5325 * NDI_AUTODETACH is set and the dip is not at its full power, 5326 * skip the rest of the branch. 5327 */ 5328 if (pm_pre_unconfig(dip, flags, &pm_cookie, NULL) != DDI_SUCCESS) 5329 return ((flags & NDI_AUTODETACH) ? NDI_SUCCESS : 5330 NDI_FAILURE); 5331 5332 /* 5333 * Some callers, notably SCSI, need to clear out the devfs 5334 * cache together with the unconfig to prevent stale entries. 
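	 *
	 * For example, such a caller would pass the flag explicitly in its
	 * unconfig request (hypothetical invocation):
	 *
	 *	(void) ndi_devi_unconfig(dip, NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);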
5335 */ 5336 if (flags & NDI_DEVFS_CLEAN) 5337 (void) devfs_clean(dip, NULL, 0); 5338 5339 rv = unconfig_grand_children(dip, dipp, flags, major, brevqp); 5340 5341 if ((rv != NDI_SUCCESS) && ((flags & NDI_AUTODETACH) == 0)) { 5342 if (brevqp && *brevqp) { 5343 log_and_free_br_events_on_grand_children(dip, *brevqp); 5344 free_brevq(*brevqp); 5345 *brevqp = NULL; 5346 } 5347 pm_post_unconfig(dip, pm_cookie, NULL); 5348 return (rv); 5349 } 5350 5351 if (dipp && *dipp) { 5352 ndi_rele_devi(*dipp); 5353 *dipp = NULL; 5354 } 5355 5356 /* 5357 * It is possible to have a detached nexus with children 5358 * and grandchildren (for example: a branch consisting 5359 * entirely of bound nodes.) Since the nexus is detached 5360 * the bus_unconfig entry point cannot be used to remove 5361 * or unconfigure the descendants. 5362 */ 5363 if (!i_ddi_devi_attached(dip) || 5364 (DEVI(dip)->devi_ops->devo_bus_ops == NULL) || 5365 (DEVI(dip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) || 5366 (f = DEVI(dip)->devi_ops->devo_bus_ops->bus_unconfig) == NULL) { 5367 rv = unconfig_immediate_children(dip, dipp, flags, major); 5368 } else { 5369 /* 5370 * call bus_unconfig entry point 5371 * It should reset nexus flags if unconfigure succeeds. 5372 */ 5373 bus_op = (major == (major_t)-1) ? 5374 BUS_UNCONFIG_ALL : BUS_UNCONFIG_DRIVER; 5375 rv = (*f)(dip, flags, bus_op, (void *)(uintptr_t)major); 5376 } 5377 5378 pm_post_unconfig(dip, pm_cookie, NULL); 5379 5380 if (brevqp && *brevqp) 5381 cleanup_br_events_on_grand_children(dip, brevqp); 5382 5383 return (rv); 5384 } 5385 5386 /* 5387 * called by devfs/framework to unconfigure children bound to major 5388 * If NDI_AUTODETACH is specified, this is invoked by either the 5389 * moduninstall daemon or the modunload -i 0 command. 5390 */ 5391 int 5392 ndi_devi_unconfig_driver(dev_info_t *dip, int flags, major_t major) 5393 { 5394 NDI_CONFIG_DEBUG((CE_CONT, 5395 "ndi_devi_unconfig_driver: par = %s%d (%p), flags = 0x%x\n", 5396 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 5397 5398 return (devi_unconfig_common(dip, NULL, flags, major, NULL)); 5399 } 5400 5401 int 5402 ndi_devi_unconfig(dev_info_t *dip, int flags) 5403 { 5404 NDI_CONFIG_DEBUG((CE_CONT, 5405 "ndi_devi_unconfig: par = %s%d (%p), flags = 0x%x\n", 5406 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 5407 5408 return (devi_unconfig_common(dip, NULL, flags, (major_t)-1, NULL)); 5409 } 5410 5411 int 5412 e_ddi_devi_unconfig(dev_info_t *dip, dev_info_t **dipp, int flags) 5413 { 5414 NDI_CONFIG_DEBUG((CE_CONT, 5415 "e_ddi_devi_unconfig: par = %s%d (%p), flags = 0x%x\n", 5416 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip, flags)); 5417 5418 return (devi_unconfig_common(dip, dipp, flags, (major_t)-1, NULL)); 5419 } 5420 5421 /* 5422 * Unconfigure child by name 5423 */ 5424 static int 5425 devi_unconfig_one(dev_info_t *pdip, char *devnm, int flags) 5426 { 5427 int rv, circ; 5428 dev_info_t *child; 5429 dev_info_t *vdip = NULL; 5430 int v_circ; 5431 5432 ndi_devi_enter(pdip, &circ); 5433 child = ndi_devi_findchild(pdip, devnm); 5434 5435 /* 5436 * If child is pHCI and vHCI and pHCI are not siblings then enter vHCI 5437 * before parent(pHCI) to avoid deadlock with mpxio Client power 5438 * management operations. 
5439  */
5440 	if (child && MDI_PHCI(child)) {
5441 		vdip = mdi_devi_get_vdip(child);
5442 		if (vdip && (ddi_get_parent(vdip) != pdip)) {
5443 			ndi_devi_exit(pdip, circ);
5444 
5445 			/* use mdi_devi_enter ordering */
5446 			ndi_devi_enter(vdip, &v_circ);
5447 			ndi_devi_enter(pdip, &circ);
5448 			child = ndi_devi_findchild(pdip, devnm);
5449 		} else
5450 			vdip = NULL;
5451 	}
5452 
5453 	if (child) {
5454 		rv = devi_detach_node(child, flags);
5455 	} else {
5456 		NDI_CONFIG_DEBUG((CE_CONT,
5457 		    "devi_unconfig_one: %s not found\n", devnm));
5458 		rv = NDI_SUCCESS;
5459 	}
5460 
5461 	ndi_devi_exit(pdip, circ);
5462 	if (vdip)
5463 		ndi_devi_exit(vdip, v_circ);
5464 
5465 	return (rv);
5466 }
5467 
5468 int
5469 ndi_devi_unconfig_one(
5470 	dev_info_t *pdip,
5471 	char *devnm,
5472 	dev_info_t **dipp,
5473 	int flags)
5474 {
5475 	int (*f)();
5476 	int circ, rv;
5477 	int pm_cookie;
5478 	dev_info_t *child;
5479 	dev_info_t *vdip = NULL;
5480 	int v_circ;
5481 	struct brevq_node *brevq = NULL;
5482 
5483 	ASSERT(i_ddi_devi_attached(pdip));
5484 
5485 	NDI_CONFIG_DEBUG((CE_CONT,
5486 	    "ndi_devi_unconfig_one: par = %s%d (%p), child = %s\n",
5487 	    ddi_driver_name(pdip), ddi_get_instance(pdip),
5488 	    (void *)pdip, devnm));
5489 
5490 	if (pm_pre_unconfig(pdip, flags, &pm_cookie, devnm) != DDI_SUCCESS)
5491 		return (NDI_FAILURE);
5492 
5493 	if (dipp)
5494 		*dipp = NULL;
5495 
5496 	ndi_devi_enter(pdip, &circ);
5497 	child = ndi_devi_findchild(pdip, devnm);
5498 
5499 	/*
5500 	 * If child is pHCI and vHCI and pHCI are not siblings then enter vHCI
5501 	 * before parent(pHCI) to avoid deadlock with mpxio Client power
5502 	 * management operations.
5503 	 */
5504 	if (child && MDI_PHCI(child)) {
5505 		vdip = mdi_devi_get_vdip(child);
5506 		if (vdip && (ddi_get_parent(vdip) != pdip)) {
5507 			ndi_devi_exit(pdip, circ);
5508 
5509 			/* use mdi_devi_enter ordering */
5510 			ndi_devi_enter(vdip, &v_circ);
5511 			ndi_devi_enter(pdip, &circ);
5512 			child = ndi_devi_findchild(pdip, devnm);
5513 		} else
5514 			vdip = NULL;
5515 	}
5516 
5517 	if (child == NULL) {
5518 		NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_unconfig_one: %s"
5519 		    " not found\n", devnm));
5520 		rv = NDI_SUCCESS;
5521 		goto out;
5522 	}
5523 
5524 	/*
5525 	 * Unconfigure children/descendants of named child
5526 	 */
5527 	rv = devi_unconfig_branch(child, dipp, flags | NDI_UNCONFIG, &brevq);
5528 	if (rv != NDI_SUCCESS)
5529 		goto out;
5530 
5531 	init_bound_node_ev(pdip, child, flags);
5532 
5533 	if ((DEVI(pdip)->devi_ops->devo_bus_ops == NULL) ||
5534 	    (DEVI(pdip)->devi_ops->devo_bus_ops->busops_rev < BUSO_REV_5) ||
5535 	    (f = DEVI(pdip)->devi_ops->devo_bus_ops->bus_unconfig) == NULL) {
5536 		rv = devi_detach_node(child, flags);
5537 	} else {
5538 		/* call bus_config entry point */
5539 		rv = (*f)(pdip, flags, BUS_UNCONFIG_ONE, (void *)devnm);
5540 	}
5541 
5542 	if (brevq) {
5543 		if (rv != NDI_SUCCESS)
5544 			log_and_free_brevq_dip(child, brevq);
5545 		else
5546 			free_brevq(brevq);
5547 	}
5548 
5549 	if (dipp && rv != NDI_SUCCESS) {
5550 		ndi_hold_devi(child);
5551 		ASSERT(*dipp == NULL);
5552 		*dipp = child;
5553 	}
5554 
5555 out:
5556 	ndi_devi_exit(pdip, circ);
5557 	if (vdip)
5558 		ndi_devi_exit(vdip, v_circ);
5559 
5560 	pm_post_unconfig(pdip, pm_cookie, devnm);
5561 
5562 	return (rv);
5563 }
5564 
5565 struct async_arg {
5566 	dev_info_t *dip;
5567 	uint_t flags;
5568 };
5569 
5570 /*
5571  * Common async handler for:
5572  *	ndi_devi_bind_driver_async
5573  *	ndi_devi_online_async
5574  */
5575 static int
5576 i_ndi_devi_async_common(dev_info_t *dip, uint_t flags, void (*func)())
5577 {
5578 	int tqflag;
5579 	int kmflag;
5580 	struct async_arg *arg;
5581 	dev_info_t
*pdip = ddi_get_parent(dip); 5582 5583 ASSERT(pdip); 5584 ASSERT(DEVI(pdip)->devi_taskq); 5585 ASSERT(ndi_dev_is_persistent_node(dip)); 5586 5587 if (flags & NDI_NOSLEEP) { 5588 kmflag = KM_NOSLEEP; 5589 tqflag = TQ_NOSLEEP; 5590 } else { 5591 kmflag = KM_SLEEP; 5592 tqflag = TQ_SLEEP; 5593 } 5594 5595 arg = kmem_alloc(sizeof (*arg), kmflag); 5596 if (arg == NULL) 5597 goto fail; 5598 5599 arg->flags = flags; 5600 arg->dip = dip; 5601 if (ddi_taskq_dispatch(DEVI(pdip)->devi_taskq, func, arg, tqflag) == 5602 DDI_SUCCESS) { 5603 return (NDI_SUCCESS); 5604 } 5605 5606 fail: 5607 NDI_CONFIG_DEBUG((CE_CONT, "%s%d: ddi_taskq_dispatch failed", 5608 ddi_driver_name(pdip), ddi_get_instance(pdip))); 5609 5610 if (arg) 5611 kmem_free(arg, sizeof (*arg)); 5612 return (NDI_FAILURE); 5613 } 5614 5615 static void 5616 i_ndi_devi_bind_driver_cb(struct async_arg *arg) 5617 { 5618 (void) ndi_devi_bind_driver(arg->dip, arg->flags); 5619 kmem_free(arg, sizeof (*arg)); 5620 } 5621 5622 int 5623 ndi_devi_bind_driver_async(dev_info_t *dip, uint_t flags) 5624 { 5625 return (i_ndi_devi_async_common(dip, flags, 5626 (void (*)())i_ndi_devi_bind_driver_cb)); 5627 } 5628 5629 /* 5630 * place the devinfo in the ONLINE state. 5631 */ 5632 int 5633 ndi_devi_online(dev_info_t *dip, uint_t flags) 5634 { 5635 int circ, rv; 5636 dev_info_t *pdip = ddi_get_parent(dip); 5637 int branch_event = 0; 5638 5639 ASSERT(pdip); 5640 5641 NDI_CONFIG_DEBUG((CE_CONT, "ndi_devi_online: %s%d (%p)\n", 5642 ddi_driver_name(dip), ddi_get_instance(dip), (void *)dip)); 5643 5644 ndi_devi_enter(pdip, &circ); 5645 /* bind child before merging .conf nodes */ 5646 rv = i_ndi_config_node(dip, DS_BOUND, flags); 5647 if (rv != NDI_SUCCESS) { 5648 ndi_devi_exit(pdip, circ); 5649 return (rv); 5650 } 5651 5652 /* merge .conf properties */ 5653 (void) i_ndi_make_spec_children(pdip, flags); 5654 5655 flags |= (NDI_DEVI_ONLINE | NDI_CONFIG); 5656 5657 if (flags & NDI_NO_EVENT) { 5658 /* 5659 * Caller is specifically asking for not to generate an event. 5660 * Set the following flag so that devi_attach_node() don't 5661 * change the event state. 5662 */ 5663 flags |= NDI_NO_EVENT_STATE_CHNG; 5664 } 5665 5666 if ((flags & (NDI_NO_EVENT | NDI_BRANCH_EVENT_OP)) == 0 && 5667 ((flags & NDI_CONFIG) || DEVI_NEED_NDI_CONFIG(dip))) { 5668 flags |= NDI_BRANCH_EVENT_OP; 5669 branch_event = 1; 5670 } 5671 5672 /* 5673 * devi_attach_node() may remove dip on failure 5674 */ 5675 if ((rv = devi_attach_node(dip, flags)) == NDI_SUCCESS) { 5676 if ((flags & NDI_CONFIG) || DEVI_NEED_NDI_CONFIG(dip)) { 5677 (void) ndi_devi_config(dip, flags); 5678 } 5679 5680 if (branch_event) 5681 (void) i_log_devfs_branch_add(dip); 5682 } 5683 5684 ndi_devi_exit(pdip, circ); 5685 5686 /* 5687 * Notify devfs that we have a new node. Devfs needs to invalidate 5688 * cached directory contents. 5689 * 5690 * For PCMCIA devices, it is possible the pdip is not fully 5691 * attached. In this case, calling back into devfs will 5692 * result in a loop or assertion error. Hence, the check 5693 * on node state. 5694 * 5695 * If we own parent lock, this is part of a branch operation. 5696 * We skip the devfs_clean() step because the cache invalidation 5697 * is done higher up in the device tree. 
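	 *
	 * As background, a typical external caller of ndi_devi_online() is
	 * a nexus that has just created and initialized a child node; a
	 * sketch (the node name and flag choice are illustrative only):
	 *
	 *	ndi_devi_alloc_sleep(pdip, "xxchild",
	 *	    (pnode_t)DEVI_SID_NODEID, &cdip);
	 *	... set the child's properties ...
	 *	(void) ndi_devi_online(cdip, NDI_ONLINE_ATTACH);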
5698 */ 5699 if (rv == NDI_SUCCESS && i_ddi_devi_attached(pdip) && 5700 !DEVI_BUSY_OWNED(pdip)) 5701 (void) devfs_clean(pdip, NULL, 0); 5702 return (rv); 5703 } 5704 5705 static void 5706 i_ndi_devi_online_cb(struct async_arg *arg) 5707 { 5708 (void) ndi_devi_online(arg->dip, arg->flags); 5709 kmem_free(arg, sizeof (*arg)); 5710 } 5711 5712 int 5713 ndi_devi_online_async(dev_info_t *dip, uint_t flags) 5714 { 5715 /* mark child as need config if requested. */ 5716 if (flags & NDI_CONFIG) { 5717 mutex_enter(&(DEVI(dip)->devi_lock)); 5718 DEVI_SET_NDI_CONFIG(dip); 5719 mutex_exit(&(DEVI(dip)->devi_lock)); 5720 } 5721 5722 return (i_ndi_devi_async_common(dip, flags, 5723 (void (*)())i_ndi_devi_online_cb)); 5724 } 5725 5726 /* 5727 * Take a device node Offline 5728 * To take a device Offline means to detach the device instance from 5729 * the driver and prevent devfs requests from re-attaching the device 5730 * instance. 5731 * 5732 * The flag NDI_DEVI_REMOVE causes removes the device node from 5733 * the driver list and the device tree. In this case, the device 5734 * is assumed to be removed from the system. 5735 */ 5736 int 5737 ndi_devi_offline(dev_info_t *dip, uint_t flags) 5738 { 5739 int circ, rval = 0; 5740 dev_info_t *pdip = ddi_get_parent(dip); 5741 dev_info_t *vdip = NULL; 5742 int v_circ; 5743 struct brevq_node *brevq = NULL; 5744 5745 ASSERT(pdip); 5746 5747 flags |= NDI_DEVI_OFFLINE; 5748 5749 /* 5750 * If child is pHCI and vHCI and pHCI are not siblings then enter vHCI 5751 * before parent(pHCI) to avoid deadlock with mpxio Client power 5752 * management operations. 5753 */ 5754 if (MDI_PHCI(dip)) { 5755 vdip = mdi_devi_get_vdip(dip); 5756 if (vdip && (ddi_get_parent(vdip) != pdip)) 5757 ndi_devi_enter(vdip, &v_circ); 5758 else 5759 vdip = NULL; 5760 } 5761 ndi_devi_enter(pdip, &circ); 5762 5763 if (i_ddi_node_state(dip) == DS_READY) { 5764 /* 5765 * If dip is in DS_READY state, there may be cached dv_nodes 5766 * referencing this dip, so we invoke devfs code path. 5767 * Note that we must release busy changing on pdip to 5768 * avoid deadlock against devfs. 5769 */ 5770 char *devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP); 5771 (void) ddi_deviname(dip, devname); 5772 5773 ndi_devi_exit(pdip, circ); 5774 if (vdip) 5775 ndi_devi_exit(vdip, v_circ); 5776 5777 /* 5778 * If we own parent lock, this is part of a branch 5779 * operation. We skip the devfs_clean() step. 5780 */ 5781 if (!DEVI_BUSY_OWNED(pdip)) 5782 (void) devfs_clean(pdip, devname + 1, DV_CLEAN_FORCE); 5783 kmem_free(devname, MAXNAMELEN + 1); 5784 5785 rval = devi_unconfig_branch(dip, NULL, flags|NDI_UNCONFIG, 5786 &brevq); 5787 5788 if (rval) 5789 return (NDI_FAILURE); 5790 5791 if (vdip) 5792 ndi_devi_enter(vdip, &v_circ); 5793 ndi_devi_enter(pdip, &circ); 5794 } 5795 5796 init_bound_node_ev(pdip, dip, flags); 5797 5798 rval = devi_detach_node(dip, flags); 5799 if (brevq) { 5800 if (rval != NDI_SUCCESS) 5801 log_and_free_brevq_dip(dip, brevq); 5802 else 5803 free_brevq(brevq); 5804 } 5805 5806 ndi_devi_exit(pdip, circ); 5807 if (vdip) 5808 ndi_devi_exit(vdip, v_circ); 5809 5810 return (rval); 5811 } 5812 5813 /* 5814 * Find the child dev_info node of parent nexus 'p' whose name 5815 * matches "cname@caddr". Recommend use of ndi_devi_findchild() instead. 
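 *
 * ndi_devi_findchild() takes a single "name@addr" string and expects
 * the caller to already hold the parent busy, e.g. (sketch):
 *
 *	ndi_devi_enter(pdip, &circ);
 *	cdip = ndi_devi_findchild(pdip, "name@addr");
 *	...
 *	ndi_devi_exit(pdip, circ);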
5816 */ 5817 dev_info_t * 5818 ndi_devi_find(dev_info_t *pdip, char *cname, char *caddr) 5819 { 5820 dev_info_t *child; 5821 int circ; 5822 5823 if (pdip == NULL || cname == NULL || caddr == NULL) 5824 return ((dev_info_t *)NULL); 5825 5826 ndi_devi_enter(pdip, &circ); 5827 child = find_sibling(ddi_get_child(pdip), cname, caddr, 5828 FIND_NODE_BY_NODENAME, NULL); 5829 ndi_devi_exit(pdip, circ); 5830 return (child); 5831 } 5832 5833 /* 5834 * Find the child dev_info node of parent nexus 'p' whose name 5835 * matches devname "name@addr". Permits caller to hold the parent. 5836 */ 5837 dev_info_t * 5838 ndi_devi_findchild(dev_info_t *pdip, char *devname) 5839 { 5840 dev_info_t *child; 5841 char *cname, *caddr; 5842 char *devstr; 5843 5844 ASSERT(DEVI_BUSY_OWNED(pdip)); 5845 5846 devstr = i_ddi_strdup(devname, KM_SLEEP); 5847 i_ddi_parse_name(devstr, &cname, &caddr, NULL); 5848 5849 if (cname == NULL || caddr == NULL) { 5850 kmem_free(devstr, strlen(devname)+1); 5851 return ((dev_info_t *)NULL); 5852 } 5853 5854 child = find_sibling(ddi_get_child(pdip), cname, caddr, 5855 FIND_NODE_BY_NODENAME, NULL); 5856 kmem_free(devstr, strlen(devname)+1); 5857 return (child); 5858 } 5859 5860 /* 5861 * Misc. routines called by framework only 5862 */ 5863 5864 /* 5865 * Clear the DEVI_MADE_CHILDREN/DEVI_ATTACHED_CHILDREN flags 5866 * if new child spec has been added. 5867 */ 5868 static int 5869 reset_nexus_flags(dev_info_t *dip, void *arg) 5870 { 5871 struct hwc_spec *list; 5872 int circ; 5873 5874 if (((DEVI(dip)->devi_flags & DEVI_MADE_CHILDREN) == 0) || 5875 ((list = hwc_get_child_spec(dip, (major_t)(uintptr_t)arg)) == NULL)) 5876 return (DDI_WALK_CONTINUE); 5877 5878 hwc_free_spec_list(list); 5879 5880 /* coordinate child state update */ 5881 ndi_devi_enter(dip, &circ); 5882 mutex_enter(&DEVI(dip)->devi_lock); 5883 DEVI(dip)->devi_flags &= ~(DEVI_MADE_CHILDREN | DEVI_ATTACHED_CHILDREN); 5884 mutex_exit(&DEVI(dip)->devi_lock); 5885 ndi_devi_exit(dip, circ); 5886 5887 return (DDI_WALK_CONTINUE); 5888 } 5889 5890 /* 5891 * Helper functions, returns NULL if no memory. 5892 */ 5893 5894 /* 5895 * path_to_major: 5896 * 5897 * Return an alternate driver name binding for the leaf device 5898 * of the given pathname, if there is one. The purpose of this 5899 * function is to deal with generic pathnames. The default action 5900 * for platforms that can't do this (ie: x86 or any platform that 5901 * does not have prom_finddevice functionality, which matches 5902 * nodenames and unit-addresses without the drivers participation) 5903 * is to return (major_t)-1. 5904 * 5905 * Used in loadrootmodules() in the swapgeneric module to 5906 * associate a given pathname with a given leaf driver. 5907 * 5908 */ 5909 major_t 5910 path_to_major(char *path) 5911 { 5912 dev_info_t *dip; 5913 char *p, *q; 5914 pnode_t nodeid; 5915 major_t major; 5916 5917 /* check for path-oriented alias */ 5918 major = ddi_name_to_major(path); 5919 if ((major != (major_t)-1) && 5920 !(devnamesp[major].dn_flags & DN_DRIVER_REMOVED)) { 5921 NDI_CONFIG_DEBUG((CE_NOTE, "path_to_major: %s path bound %s\n", 5922 path, ddi_major_to_name(major))); 5923 return (major); 5924 } 5925 5926 /* 5927 * Get the nodeid of the given pathname, if such a mapping exists. 5928 */ 5929 dip = NULL; 5930 nodeid = prom_finddevice(path); 5931 if (nodeid != OBP_BADNODE) { 5932 /* 5933 * Find the nodeid in our copy of the device tree and return 5934 * whatever name we used to bind this node to a driver. 
5935 */ 5936 dip = e_ddi_nodeid_to_dip(nodeid); 5937 } 5938 5939 if (dip == NULL) { 5940 NDI_CONFIG_DEBUG((CE_WARN, 5941 "path_to_major: can't bind <%s>\n", path)); 5942 return ((major_t)-1); 5943 } 5944 5945 /* 5946 * If we're bound to something other than the nodename, 5947 * note that in the message buffer and system log. 5948 */ 5949 p = ddi_binding_name(dip); 5950 q = ddi_node_name(dip); 5951 if (p && q && (strcmp(p, q) != 0)) 5952 NDI_CONFIG_DEBUG((CE_NOTE, "path_to_major: %s bound to %s\n", 5953 path, p)); 5954 5955 major = ddi_name_to_major(p); 5956 5957 ndi_rele_devi(dip); /* release e_ddi_nodeid_to_dip hold */ 5958 5959 return (major); 5960 } 5961 5962 /* 5963 * Return the held dip for the specified major and instance, attempting to do 5964 * an attach if specified. Return NULL if the devi can't be found or put in 5965 * the proper state. The caller must release the hold via ddi_release_devi if 5966 * a non-NULL value is returned. 5967 * 5968 * Some callers expect to be able to perform a hold_devi() while in a context 5969 * where using ndi_devi_enter() to ensure the hold might cause deadlock (see 5970 * open-from-attach code in consconfig_dacf.c). Such special-case callers 5971 * must ensure that an ndi_devi_enter(parent)/ndi_devi_hold() from a safe 5972 * context is already active. The hold_devi() implementation must accommodate 5973 * these callers. 5974 */ 5975 static dev_info_t * 5976 hold_devi(major_t major, int instance, int flags) 5977 { 5978 struct devnames *dnp; 5979 dev_info_t *dip; 5980 char *path; 5981 5982 if ((major >= devcnt) || (instance == -1)) 5983 return (NULL); 5984 5985 /* try to find the instance in the per driver list */ 5986 dnp = &(devnamesp[major]); 5987 LOCK_DEV_OPS(&(dnp->dn_lock)); 5988 for (dip = dnp->dn_head; dip; 5989 dip = (dev_info_t *)DEVI(dip)->devi_next) { 5990 /* skip node if instance field is not valid */ 5991 if (i_ddi_node_state(dip) < DS_INITIALIZED) 5992 continue; 5993 5994 /* look for instance match */ 5995 if (DEVI(dip)->devi_instance == instance) { 5996 /* 5997 * To accommodate callers that can't block in 5998 * ndi_devi_enter() we do an ndi_devi_hold(), and 5999 * afterwards check that the node is in a state where 6000 * the hold prevents detach(). If we did not manage to 6001 * prevent detach then we ndi_rele_devi() and perform 6002 * the slow path below (which can result in a blocking 6003 * ndi_devi_enter() while driving attach top-down). 6004 * This code depends on the ordering of 6005 * DEVI_SET_DETACHING and the devi_ref check in the 6006 * detach_node() code path. 6007 */ 6008 ndi_hold_devi(dip); 6009 if (i_ddi_devi_attached(dip) && 6010 !DEVI_IS_DETACHING(dip)) { 6011 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 6012 return (dip); /* fast-path with devi held */ 6013 } 6014 ndi_rele_devi(dip); 6015 6016 /* try slow-path */ 6017 dip = NULL; 6018 break; 6019 } 6020 } 6021 ASSERT(dip == NULL); 6022 UNLOCK_DEV_OPS(&(dnp->dn_lock)); 6023 6024 if (flags & E_DDI_HOLD_DEVI_NOATTACH) 6025 return (NULL); /* told not to drive attach */ 6026 6027 /* slow-path may block, so it should not occur from interrupt */ 6028 ASSERT(!servicing_interrupt()); 6029 if (servicing_interrupt()) 6030 return (NULL); 6031 6032 /* reconstruct the path and drive attach by path through devfs. 

/*
 * The {e_}ddi_hold_devi{_by_{instance|dev|path}} hold the devinfo node
 * associated with the specified arguments.  This hold should be released
 * by calling ddi_release_devi.
 *
 * The E_DDI_HOLD_DEVI_NOATTACH flag argument allows the caller to specify
 * a failure return if the node is not already attached.
 *
 * NOTE: by the time we make e_ddi_hold_devi public, we should be able to reuse
 * ddi_hold_devi again.
 */
dev_info_t *
ddi_hold_devi_by_instance(major_t major, int instance, int flags)
{
	return (hold_devi(major, instance, flags));
}

dev_info_t *
e_ddi_hold_devi_by_dev(dev_t dev, int flags)
{
	major_t		major = getmajor(dev);
	dev_info_t	*dip;
	struct dev_ops	*ops;
	dev_info_t	*ddip = NULL;

	dip = hold_devi(major, dev_to_instance(dev), flags);

	/*
	 * The rest of this routine is legacy support for drivers that
	 * have broken DDI_INFO_DEVT2INSTANCE implementations but may have
	 * functional DDI_INFO_DEVT2DEVINFO implementations.  This code will
	 * diagnose inconsistency and, for maximum compatibility with legacy
	 * drivers, give preference to the driver's DDI_INFO_DEVT2DEVINFO
	 * implementation over the dip derived above from the driver's
	 * DDI_INFO_DEVT2INSTANCE implementation.  This legacy support should
	 * be removed when DDI_INFO_DEVT2DEVINFO is deprecated.
	 *
	 * NOTE: The following code has a race condition.  DEVT2DEVINFO
	 *	returns a dip which is not held.  By the time we ref ddip,
	 *	it could have been freed.  The saving grace is that for
	 *	most drivers, the dip returned from hold_devi() is the
	 *	same one as the one returned by DEVT2DEVINFO, so we are
	 *	safe for drivers with the correct getinfo(9e) impl.
	 */
	if (((ops = ddi_hold_driver(major)) != NULL) &&
	    CB_DRV_INSTALLED(ops) && ops->devo_getinfo) {
		if ((*ops->devo_getinfo)(NULL, DDI_INFO_DEVT2DEVINFO,
		    (void *)dev, (void **)&ddip) != DDI_SUCCESS)
			ddip = NULL;
	}

	/* give preference to the driver returned DEVT2DEVINFO dip */
	if (ddip && (dip != ddip)) {
#ifdef	DEBUG
		cmn_err(CE_WARN, "%s: inconsistent getinfo(9E) implementation",
		    ddi_driver_name(ddip));
#endif	/* DEBUG */
		ndi_hold_devi(ddip);
		if (dip)
			ndi_rele_devi(dip);
		dip = ddip;
	}

	if (ops)
		ddi_rele_driver(major);

	return (dip);
}

/*
 * For compatibility only.  Do not call this function!
 */
dev_info_t *
e_ddi_get_dev_info(dev_t dev, vtype_t type)
{
	dev_info_t	*dip = NULL;

	if (getmajor(dev) >= devcnt)
		return (NULL);

	switch (type) {
	case VCHR:
	case VBLK:
		dip = e_ddi_hold_devi_by_dev(dev, 0);
	default:
		break;
	}

	/*
	 * For compatibility reasons, we can only return the dip with
	 * the driver ref count held.  This is not a safe thing to do.
	 * For certain broken third-party software, we are willing
	 * to venture into unknown territory.
	 */
	if (dip) {
		(void) ndi_hold_driver(dip);
		ndi_rele_devi(dip);
	}
	return (dip);
}
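
/*
 * Usage sketch (illustrative only): holding and releasing a devinfo node
 * by dev_t, for a caller that has a dev_t in hand and needs information
 * off the node.  ddi_release_devi() is defined below.  The function name
 * example_devt_to_instance is hypothetical.
 */
static int
example_devt_to_instance(dev_t dev)
{
	dev_info_t	*dip;
	int		instance;

	/* fail rather than drive attach from this context */
	dip = e_ddi_hold_devi_by_dev(dev, E_DDI_HOLD_DEVI_NOATTACH);
	if (dip == NULL)
		return (-1);

	instance = ddi_get_instance(dip);
	ddi_release_devi(dip);		/* drop the hold taken above */

	return (instance);
}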

dev_info_t *
e_ddi_hold_devi_by_path(char *path, int flags)
{
	dev_info_t	*dip;

	/* can't specify NOATTACH by path */
	ASSERT(!(flags & E_DDI_HOLD_DEVI_NOATTACH));

	return (resolve_pathname(path, &dip, NULL, NULL) ? NULL : dip);
}

void
e_ddi_hold_devi(dev_info_t *dip)
{
	ndi_hold_devi(dip);
}

void
ddi_release_devi(dev_info_t *dip)
{
	ndi_rele_devi(dip);
}

/*
 * Associate a streams queue with a devinfo node
 * NOTE: This function is called by a STREAMS driver's put procedure.
 *	It cannot block.
 */
void
ddi_assoc_queue_with_devi(queue_t *q, dev_info_t *dip)
{
	queue_t		*rq = _RD(q);
	struct stdata	*stp;
	vnode_t		*vp;

	/* set flag indicating that ddi_assoc_queue_with_devi was called */
	mutex_enter(QLOCK(rq));
	rq->q_flag |= _QASSOCIATED;
	mutex_exit(QLOCK(rq));

	/* get the vnode associated with the queue */
	stp = STREAM(rq);
	vp = stp->sd_vnode;
	ASSERT(vp);

	/* change the hardware association of the vnode */
	spec_assoc_vp_with_devi(vp, dip);
}

/*
 * ddi_install_driver(name)
 *
 * Driver installation is currently a byproduct of driver loading.  This
 * may change.
 */
int
ddi_install_driver(char *name)
{
	major_t major = ddi_name_to_major(name);

	if ((major == (major_t)-1) ||
	    (ddi_hold_installed_driver(major) == NULL)) {
		return (DDI_FAILURE);
	}
	ddi_rele_driver(major);
	return (DDI_SUCCESS);
}

struct dev_ops *
ddi_hold_driver(major_t major)
{
	return (mod_hold_dev_by_major(major));
}

void
ddi_rele_driver(major_t major)
{
	mod_rele_dev_by_major(major);
}

/*
 * This is called during boot to force attachment order of special dips.
 * dip must be referenced via ndi_hold_devi().
 */
int
i_ddi_attach_node_hierarchy(dev_info_t *dip)
{
	dev_info_t	*parent;
	int		ret, circ;

	/*
	 * Recurse up until attached parent is found.
	 */
	if (i_ddi_devi_attached(dip))
		return (DDI_SUCCESS);
	parent = ddi_get_parent(dip);
	if (i_ddi_attach_node_hierarchy(parent) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Come top-down, expanding .conf nodes under this parent
	 * and driving attach.
	 */
	ndi_devi_enter(parent, &circ);
	(void) i_ndi_make_spec_children(parent, 0);
	ret = i_ddi_attachchild(dip);
	ndi_devi_exit(parent, circ);

	return (ret);
}
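
/*
 * Usage sketch (illustrative only) for ddi_assoc_queue_with_devi()
 * above: a STREAMS driver associating its read queue with a devinfo
 * node from put(9E) context, which must not block.  The sketch assumes
 * the driver stashed its dip in q_ptr at open time; the function name
 * example_rput is hypothetical.
 */
static int
example_rput(queue_t *q, mblk_t *mp)
{
	dev_info_t *dip = (dev_info_t *)q->q_ptr;	/* set at open(9E) */

	if (dip != NULL)
		ddi_assoc_queue_with_devi(q, dip);	/* does not block */

	putnext(q, mp);
	return (0);
}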
6244 */ 6245 ndi_devi_enter(parent, &circ); 6246 (void) i_ndi_make_spec_children(parent, 0); 6247 ret = i_ddi_attachchild(dip); 6248 ndi_devi_exit(parent, circ); 6249 6250 return (ret); 6251 } 6252 6253 /* keep this function static */ 6254 static int 6255 attach_driver_nodes(major_t major) 6256 { 6257 struct devnames *dnp; 6258 dev_info_t *dip; 6259 int error = DDI_FAILURE; 6260 6261 dnp = &devnamesp[major]; 6262 LOCK_DEV_OPS(&dnp->dn_lock); 6263 dip = dnp->dn_head; 6264 while (dip) { 6265 ndi_hold_devi(dip); 6266 UNLOCK_DEV_OPS(&dnp->dn_lock); 6267 if (i_ddi_attach_node_hierarchy(dip) == DDI_SUCCESS) 6268 error = DDI_SUCCESS; 6269 LOCK_DEV_OPS(&dnp->dn_lock); 6270 ndi_rele_devi(dip); 6271 dip = ddi_get_next(dip); 6272 } 6273 if (error == DDI_SUCCESS) 6274 dnp->dn_flags |= DN_NO_AUTODETACH; 6275 UNLOCK_DEV_OPS(&dnp->dn_lock); 6276 6277 6278 return (error); 6279 } 6280 6281 /* 6282 * i_ddi_attach_hw_nodes configures and attaches all hw nodes 6283 * bound to a specific driver. This function replaces calls to 6284 * ddi_hold_installed_driver() for drivers with no .conf 6285 * enumerated nodes. 6286 * 6287 * This facility is typically called at boot time to attach 6288 * platform-specific hardware nodes, such as ppm nodes on xcal 6289 * and grover and keyswitch nodes on cherrystone. It does not 6290 * deal with .conf enumerated node. Calling it beyond the boot 6291 * process is strongly discouraged. 6292 */ 6293 int 6294 i_ddi_attach_hw_nodes(char *driver) 6295 { 6296 major_t major; 6297 6298 major = ddi_name_to_major(driver); 6299 if (major == (major_t)-1) 6300 return (DDI_FAILURE); 6301 6302 return (attach_driver_nodes(major)); 6303 } 6304 6305 /* 6306 * i_ddi_attach_pseudo_node configures pseudo drivers which 6307 * has a single node. The .conf nodes must be enumerated 6308 * before calling this interface. The dip is held attached 6309 * upon returning. 6310 * 6311 * This facility should only be called only at boot time 6312 * by the I/O framework. 6313 */ 6314 dev_info_t * 6315 i_ddi_attach_pseudo_node(char *driver) 6316 { 6317 major_t major; 6318 dev_info_t *dip; 6319 6320 major = ddi_name_to_major(driver); 6321 if (major == (major_t)-1) 6322 return (NULL); 6323 6324 if (attach_driver_nodes(major) != DDI_SUCCESS) 6325 return (NULL); 6326 6327 dip = devnamesp[major].dn_head; 6328 ASSERT(dip && ddi_get_next(dip) == NULL); 6329 ndi_hold_devi(dip); 6330 return (dip); 6331 } 6332 6333 static void 6334 diplist_to_parent_major(dev_info_t *head, char parents[]) 6335 { 6336 major_t major; 6337 dev_info_t *dip, *pdip; 6338 6339 for (dip = head; dip != NULL; dip = ddi_get_next(dip)) { 6340 pdip = ddi_get_parent(dip); 6341 ASSERT(pdip); /* disallow rootnex.conf nodes */ 6342 major = ddi_driver_major(pdip); 6343 if ((major != (major_t)-1) && parents[major] == 0) 6344 parents[major] = 1; 6345 } 6346 } 6347 6348 /* 6349 * Call ddi_hold_installed_driver() on each parent major 6350 * and invoke mt_config_driver() to attach child major. 6351 * This is part of the implementation of ddi_hold_installed_driver. 

/*
 * Call ddi_hold_installed_driver() on each parent major
 * and invoke mt_config_driver() to attach child major.
 * This is part of the implementation of ddi_hold_installed_driver.
 */
static int
attach_driver_by_parent(major_t child_major, char parents[])
{
	major_t			par_major;
	struct mt_config_handle	*hdl;
	int			flags = NDI_DEVI_PERSIST | NDI_NO_EVENT;

	hdl = mt_config_init(NULL, NULL, flags, child_major, MT_CONFIG_OP,
	    NULL);
	for (par_major = 0; par_major < devcnt; par_major++) {
		/* disallow recursion on the same driver */
		if (parents[par_major] == 0 || par_major == child_major)
			continue;
		if (ddi_hold_installed_driver(par_major) == NULL)
			continue;
		hdl->mtc_parmajor = par_major;
		mt_config_driver(hdl);
		ddi_rele_driver(par_major);
	}
	(void) mt_config_fini(hdl);

	return (i_ddi_devs_attached(child_major));
}

int
i_ddi_devs_attached(major_t major)
{
	dev_info_t	*dip;
	struct devnames	*dnp;
	int		error = DDI_FAILURE;

	/* check for attached instances */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	for (dip = dnp->dn_head; dip != NULL; dip = ddi_get_next(dip)) {
		if (i_ddi_devi_attached(dip)) {
			error = DDI_SUCCESS;
			break;
		}
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	return (error);
}

/*
 * ddi_hold_installed_driver configures and attaches all
 * instances of the specified driver.  To accomplish this
 * it configures and attaches all possible parents of
 * the driver, enumerated both in h/w nodes and in the
 * driver's .conf file.
 *
 * NOTE: This facility is for compatibility purposes only and will
 *	eventually go away.  Its usage is strongly discouraged.
 */
static void
enter_driver(struct devnames *dnp)
{
	mutex_enter(&dnp->dn_lock);
	ASSERT(dnp->dn_busy_thread != curthread);
	while (dnp->dn_flags & DN_DRIVER_BUSY)
		cv_wait(&dnp->dn_wait, &dnp->dn_lock);
	dnp->dn_flags |= DN_DRIVER_BUSY;
	dnp->dn_busy_thread = curthread;
	mutex_exit(&dnp->dn_lock);
}

static void
exit_driver(struct devnames *dnp)
{
	mutex_enter(&dnp->dn_lock);
	ASSERT(dnp->dn_busy_thread == curthread);
	dnp->dn_flags &= ~DN_DRIVER_BUSY;
	dnp->dn_busy_thread = NULL;
	cv_broadcast(&dnp->dn_wait);
	mutex_exit(&dnp->dn_lock);
}

struct dev_ops *
ddi_hold_installed_driver(major_t major)
{
	struct dev_ops	*ops;
	struct devnames	*dnp;
	char		*parents;
	int		error;

	ops = ddi_hold_driver(major);
	if (ops == NULL)
		return (NULL);

	/*
	 * Return immediately if all the attach operations associated
	 * with a ddi_hold_installed_driver() call have already been done.
	 */
	dnp = &devnamesp[major];
	enter_driver(dnp);
	if (dnp->dn_flags & DN_DRIVER_HELD) {
		exit_driver(dnp);
		if (i_ddi_devs_attached(major) == DDI_SUCCESS)
			return (ops);
		ddi_rele_driver(major);
		return (NULL);
	}

	LOCK_DEV_OPS(&dnp->dn_lock);
	dnp->dn_flags |= (DN_DRIVER_HELD | DN_NO_AUTODETACH);
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	DCOMPATPRINTF((CE_CONT,
	    "ddi_hold_installed_driver: %s\n", dnp->dn_name));

	/*
	 * When the driver has no .conf children, it is sufficient
	 * to attach existing nodes in the device tree.  Nodes not
	 * enumerated by the OBP are not attached.
	 */
	if (dnp->dn_pl == NULL) {
		if (attach_driver_nodes(major) == DDI_SUCCESS) {
			exit_driver(dnp);
			return (ops);
		}
		exit_driver(dnp);
		ddi_rele_driver(major);
		return (NULL);
	}

	/*
	 * The driver has .conf nodes.  We find all possible parents,
	 * recursively call ddi_hold_installed_driver() on each parent
	 * driver, and then invoke mt_config_driver() on all possible
	 * parent nodes in parallel to improve performance.
	 */
	parents = kmem_zalloc(devcnt * sizeof (char), KM_SLEEP);

	LOCK_DEV_OPS(&dnp->dn_lock);
	/* find .conf parents */
	(void) impl_parlist_to_major(dnp->dn_pl, parents);
	/* find hw node parents */
	diplist_to_parent_major(dnp->dn_head, parents);
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	error = attach_driver_by_parent(major, parents);
	kmem_free(parents, devcnt * sizeof (char));
	if (error == DDI_SUCCESS) {
		exit_driver(dnp);
		return (ops);
	}

	exit_driver(dnp);
	ddi_rele_driver(major);
	return (NULL);
}
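
/*
 * Usage sketch (illustrative only) for ddi_hold_installed_driver()
 * above.  A legacy consumer that must have every instance of another
 * driver attached before proceeding would hold the driver, do its work,
 * and then release it.  The driver name "exampledrv" and the function
 * name example_need_driver are placeholders.
 */
static int
example_need_driver(void)
{
	major_t major = ddi_name_to_major("exampledrv");

	if (major == (major_t)-1)
		return (DDI_FAILURE);

	if (ddi_hold_installed_driver(major) == NULL)
		return (DDI_FAILURE);

	/* ... all attached instances of exampledrv may be used here ... */

	ddi_rele_driver(major);	/* undo ddi_hold_installed_driver() */
	return (DDI_SUCCESS);
}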

/*
 * Default bus_config entry point for nexus drivers
 */
int
ndi_busop_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **child, clock_t timeout)
{
	major_t major;

	/*
	 * A timeout of 30 minutes or more is probably a mistake.
	 * This is intended to catch uses where timeout is in
	 * the wrong units.  timeout must be in units of ticks.
	 */
	ASSERT(timeout < SEC_TO_TICK(1800));

	major = (major_t)-1;
	switch (op) {
	case BUS_CONFIG_ONE:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus config %s timeout=%ld\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    (char *)arg, timeout));
		return (devi_config_one(pdip, (char *)arg, child, flags,
		    timeout));

	case BUS_CONFIG_DRIVER:
		major = (major_t)(uintptr_t)arg;
		/*FALLTHROUGH*/
	case BUS_CONFIG_ALL:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus config timeout=%ld\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    timeout));
		if (timeout > 0) {
			NDI_DEBUG(flags, (CE_CONT,
			    "%s%d: bus config all timeout=%ld\n",
			    ddi_driver_name(pdip), ddi_get_instance(pdip),
			    timeout));
			delay(timeout);
		}
		return (config_immediate_children(pdip, flags, major));

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}

/*
 * Default busop bus_unconfig handler for nexus drivers
 */
int
ndi_busop_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	major_t major;

	major = (major_t)-1;
	switch (op) {
	case BUS_UNCONFIG_ONE:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus unconfig %s\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    (char *)arg));
		return (devi_unconfig_one(pdip, (char *)arg, flags));

	case BUS_UNCONFIG_DRIVER:
		major = (major_t)(uintptr_t)arg;
		/*FALLTHROUGH*/
	case BUS_UNCONFIG_ALL:
		NDI_DEBUG(flags, (CE_CONT, "%s%d: bus unconfig all\n",
		    ddi_driver_name(pdip), ddi_get_instance(pdip)));
		return (unconfig_immediate_children(pdip, NULL, flags, major));

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}

/*
 * dummy functions to be removed
 */
void
impl_rem_dev_props(dev_info_t *dip)
{
	_NOTE(ARGUNUSED(dip))
	/* do nothing */
}
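
/*
 * Usage sketch (illustrative only) for the default busops above: a nexus
 * driver with no special enumeration needs can implement its
 * bus_config(9E)/bus_unconfig(9E) entries as thin wrappers that delegate
 * to ndi_busop_bus_config()/ndi_busop_bus_unconfig().  The example_*
 * function names are placeholders for a real nexus's bus_ops entries.
 */
static int
example_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **childp)
{
	/* no delay: configure immediately (timeout is in ticks) */
	return (ndi_busop_bus_config(pdip, flags, op, arg, childp, 0));
}

static int
example_bus_unconfig(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	return (ndi_busop_bus_unconfig(pdip, flags, op, arg));
}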

/*
 * Determine if a node is a leaf node.  If not sure, return false (0).
 */
static int
is_leaf_node(dev_info_t *dip)
{
	major_t major = ddi_driver_major(dip);

	if (major == (major_t)-1)
		return (0);

	return (devnamesp[major].dn_flags & DN_LEAF_DRIVER);
}

/*
 * Multithreaded [un]configuration
 */
static struct mt_config_handle *
mt_config_init(dev_info_t *pdip, dev_info_t **dipp, int flags,
    major_t major, int op, struct brevq_node **brevqp)
{
	struct mt_config_handle	*hdl = kmem_alloc(sizeof (*hdl), KM_SLEEP);

	mutex_init(&hdl->mtc_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&hdl->mtc_cv, NULL, CV_DEFAULT, NULL);
	hdl->mtc_pdip = pdip;
	hdl->mtc_fdip = dipp;
	hdl->mtc_parmajor = (major_t)-1;
	hdl->mtc_flags = flags;
	hdl->mtc_major = major;
	hdl->mtc_thr_count = 0;
	hdl->mtc_op = op;
	hdl->mtc_error = 0;
	hdl->mtc_brevqp = brevqp;

#ifdef DEBUG
	gethrestime(&hdl->start_time);
	hdl->total_time = 0;
#endif /* DEBUG */

	return (hdl);
}

#ifdef DEBUG
static int
time_diff_in_msec(timestruc_t start, timestruc_t end)
{
	int	nsec, sec;

	sec = end.tv_sec - start.tv_sec;
	nsec = end.tv_nsec - start.tv_nsec;
	if (nsec < 0) {
		nsec += NANOSEC;
		sec -= 1;
	}

	return (sec * (NANOSEC >> 20) + (nsec >> 20));
}

#endif /* DEBUG */

static int
mt_config_fini(struct mt_config_handle *hdl)
{
	int		rv;
#ifdef DEBUG
	int		real_time;
	timestruc_t	end_time;
#endif /* DEBUG */

	mutex_enter(&hdl->mtc_lock);
	while (hdl->mtc_thr_count > 0)
		cv_wait(&hdl->mtc_cv, &hdl->mtc_lock);
	rv = hdl->mtc_error;
	mutex_exit(&hdl->mtc_lock);

#ifdef DEBUG
	gethrestime(&end_time);
	real_time = time_diff_in_msec(hdl->start_time, end_time);
	if ((ddidebug & DDI_MTCONFIG) && hdl->mtc_pdip)
		cmn_err(CE_NOTE,
		    "config %s%d: total time %d msec, real time %d msec",
		    ddi_driver_name(hdl->mtc_pdip),
		    ddi_get_instance(hdl->mtc_pdip),
		    hdl->total_time, real_time);
#endif /* DEBUG */

	cv_destroy(&hdl->mtc_cv);
	mutex_destroy(&hdl->mtc_lock);
	kmem_free(hdl, sizeof (*hdl));

	return (rv);
}

struct mt_config_data {
	struct mt_config_handle	*mtc_hdl;
	dev_info_t		*mtc_dip;
	major_t			mtc_major;
	int			mtc_flags;
	struct brevq_node	*mtc_brn;
	struct mt_config_data	*mtc_next;
};

static void
mt_config_thread(void *arg)
{
	struct mt_config_data	*mcd = (struct mt_config_data *)arg;
	struct mt_config_handle	*hdl = mcd->mtc_hdl;
	dev_info_t		*dip = mcd->mtc_dip;
	dev_info_t		*rdip, **dipp;
	major_t			major = mcd->mtc_major;
	int			flags = mcd->mtc_flags;
	int			rv = 0;

#ifdef DEBUG
	timestruc_t start_time, end_time;
	gethrestime(&start_time);
#endif /* DEBUG */

	rdip = NULL;
	dipp = hdl->mtc_fdip ? &rdip : NULL;

	switch (hdl->mtc_op) {
	case MT_CONFIG_OP:
		rv = devi_config_common(dip, flags, major);
		break;
	case MT_UNCONFIG_OP:
		if (mcd->mtc_brn) {
			struct brevq_node *brevq = NULL;
			rv = devi_unconfig_common(dip, dipp, flags, major,
			    &brevq);
			mcd->mtc_brn->brn_child = brevq;
		} else
			rv = devi_unconfig_common(dip, dipp, flags, major,
			    NULL);
		break;
	}

	mutex_enter(&hdl->mtc_lock);
#ifdef DEBUG
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(start_time, end_time);
#endif /* DEBUG */

	if ((rv != NDI_SUCCESS) && (hdl->mtc_error == 0)) {
		hdl->mtc_error = rv;
#ifdef DEBUG
		if ((ddidebug & DDI_DEBUG) && (major != (major_t)-1)) {
			char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

			(void) ddi_pathname(dip, path);
			cmn_err(CE_NOTE, "mt_config_thread: "
			    "op %d.%d.%x at %s failed %d",
			    hdl->mtc_op, major, flags, path, rv);
			kmem_free(path, MAXPATHLEN);
		}
#endif /* DEBUG */
	}

	if (hdl->mtc_fdip && *hdl->mtc_fdip == NULL) {
		*hdl->mtc_fdip = rdip;
		rdip = NULL;
	}

	if (rdip) {
		ASSERT(rv != NDI_SUCCESS);
		ndi_rele_devi(rdip);
	}

	ndi_rele_devi(dip);

	if (--hdl->mtc_thr_count == 0)
		cv_broadcast(&hdl->mtc_cv);
	mutex_exit(&hdl->mtc_lock);
	kmem_free(mcd, sizeof (*mcd));
}

/*
 * Multi-threaded config/unconfig of child nexus
 */
static void
mt_config_children(struct mt_config_handle *hdl)
{
	dev_info_t		*pdip = hdl->mtc_pdip;
	major_t			major = hdl->mtc_major;
	dev_info_t		*dip;
	int			circ;
	struct brevq_node	*brn;
	struct mt_config_data	*mcd_head = NULL;
	struct mt_config_data	*mcd_tail = NULL;
	struct mt_config_data	*mcd;
#ifdef DEBUG
	timestruc_t		end_time;

	/* Update total_time in handle */
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(hdl->start_time, end_time);
#endif

	ndi_devi_enter(pdip, &circ);
	dip = ddi_get_child(pdip);
	while (dip) {
		if (hdl->mtc_op == MT_UNCONFIG_OP && hdl->mtc_brevqp &&
		    !(DEVI_EVREMOVE(dip)) &&
		    i_ddi_node_state(dip) >= DS_INITIALIZED) {
			/*
			 * Enqueue this dip's deviname.
			 * No need to hold a lock while enqueuing since this
			 * is the only thread doing the enqueue and no one
			 * walks the queue while we are in multithreaded
			 * unconfiguration.
			 */
			brn = brevq_enqueue(hdl->mtc_brevqp, dip, NULL);
		} else
			brn = NULL;

		/*
		 * Hold the child that we are processing so it does not get
		 * removed.  The corresponding ndi_rele_devi() for children
		 * that are not being skipped is done at the end of
		 * mt_config_thread().
		 */
		ndi_hold_devi(dip);

		/*
		 * skip leaf nodes and (for configure) nodes not
		 * fully attached.
		 */
		if (is_leaf_node(dip) ||
		    (hdl->mtc_op == MT_CONFIG_OP &&
		    i_ddi_node_state(dip) < DS_READY)) {
			ndi_rele_devi(dip);
			dip = ddi_get_next_sibling(dip);
			continue;
		}

		mcd = kmem_alloc(sizeof (*mcd), KM_SLEEP);
		mcd->mtc_dip = dip;
		mcd->mtc_hdl = hdl;
		mcd->mtc_brn = brn;

		/*
		 * Switch a 'driver' operation to an 'all' operation below a
		 * node bound to the driver.
		 */
		if ((major == (major_t)-1) || (major == ddi_driver_major(dip)))
			mcd->mtc_major = (major_t)-1;
		else
			mcd->mtc_major = major;

		/*
		 * The unconfig-driver to unconfig-all conversion above
		 * constitutes an autodetach for NDI_DETACH_DRIVER calls;
		 * set NDI_AUTODETACH.
		 */
		mcd->mtc_flags = hdl->mtc_flags;
		if ((mcd->mtc_flags & NDI_DETACH_DRIVER) &&
		    (hdl->mtc_op == MT_UNCONFIG_OP) &&
		    (major == ddi_driver_major(pdip)))
			mcd->mtc_flags |= NDI_AUTODETACH;

		mutex_enter(&hdl->mtc_lock);
		hdl->mtc_thr_count++;
		mutex_exit(&hdl->mtc_lock);

		/*
		 * Add to end of list to process after ndi_devi_exit to avoid
		 * locking differences depending on value of mtc_off.
		 */
		mcd->mtc_next = NULL;
		if (mcd_head == NULL)
			mcd_head = mcd;
		else
			mcd_tail->mtc_next = mcd;
		mcd_tail = mcd;

		dip = ddi_get_next_sibling(dip);
	}
	ndi_devi_exit(pdip, circ);

	/* go through the list of held children */
	for (mcd = mcd_head; mcd; mcd = mcd_head) {
		mcd_head = mcd->mtc_next;
		if (mtc_off || (mcd->mtc_flags & NDI_MTC_OFF))
			mt_config_thread(mcd);
		else
			(void) thread_create(NULL, 0, mt_config_thread, mcd,
			    0, &p0, TS_RUN, minclsyspri);
	}
}

static void
mt_config_driver(struct mt_config_handle *hdl)
{
	major_t			par_major = hdl->mtc_parmajor;
	major_t			major = hdl->mtc_major;
	struct devnames		*dnp = &devnamesp[par_major];
	dev_info_t		*dip;
	struct mt_config_data	*mcd_head = NULL;
	struct mt_config_data	*mcd_tail = NULL;
	struct mt_config_data	*mcd;
#ifdef DEBUG
	timestruc_t		end_time;

	/* Update total_time in handle */
	gethrestime(&end_time);
	hdl->total_time += time_diff_in_msec(hdl->start_time, end_time);
#endif
	ASSERT(par_major != (major_t)-1);
	ASSERT(major != (major_t)-1);

	LOCK_DEV_OPS(&dnp->dn_lock);
	dip = devnamesp[par_major].dn_head;
	while (dip) {
		/*
		 * Hold the child that we are processing so it does not get
		 * removed.  The corresponding ndi_rele_devi() for children
		 * that are not being skipped is done at the end of
		 * mt_config_thread().
		 */
		ndi_hold_devi(dip);

		/* skip leaf nodes and nodes not fully attached */
		if (!i_ddi_devi_attached(dip) || is_leaf_node(dip)) {
			ndi_rele_devi(dip);
			dip = ddi_get_next(dip);
			continue;
		}

		mcd = kmem_alloc(sizeof (*mcd), KM_SLEEP);
		mcd->mtc_dip = dip;
		mcd->mtc_hdl = hdl;
		mcd->mtc_major = major;
		mcd->mtc_flags = hdl->mtc_flags;

		mutex_enter(&hdl->mtc_lock);
		hdl->mtc_thr_count++;
		mutex_exit(&hdl->mtc_lock);

		/*
		 * Add to end of list to process after UNLOCK_DEV_OPS to avoid
		 * locking differences depending on value of mtc_off.
6937 */ 6938 mcd->mtc_next = NULL; 6939 if (mcd_head == NULL) 6940 mcd_head = mcd; 6941 else 6942 mcd_tail->mtc_next = mcd; 6943 mcd_tail = mcd; 6944 6945 dip = ddi_get_next(dip); 6946 } 6947 UNLOCK_DEV_OPS(&dnp->dn_lock); 6948 6949 /* go through the list of held children */ 6950 for (mcd = mcd_head; mcd; mcd = mcd_head) { 6951 mcd_head = mcd->mtc_next; 6952 if (mtc_off || (mcd->mtc_flags & NDI_MTC_OFF)) 6953 mt_config_thread(mcd); 6954 else 6955 (void) thread_create(NULL, 0, mt_config_thread, mcd, 6956 0, &p0, TS_RUN, minclsyspri); 6957 } 6958 } 6959 6960 /* 6961 * Given the nodeid for a persistent (PROM or SID) node, return 6962 * the corresponding devinfo node 6963 * NOTE: This function will return NULL for .conf nodeids. 6964 */ 6965 dev_info_t * 6966 e_ddi_nodeid_to_dip(pnode_t nodeid) 6967 { 6968 dev_info_t *dip = NULL; 6969 struct devi_nodeid *prev, *elem; 6970 6971 mutex_enter(&devimap->dno_lock); 6972 6973 prev = NULL; 6974 for (elem = devimap->dno_head; elem; elem = elem->next) { 6975 if (elem->nodeid == nodeid) { 6976 ndi_hold_devi(elem->dip); 6977 dip = elem->dip; 6978 break; 6979 } 6980 prev = elem; 6981 } 6982 6983 /* 6984 * Move to head for faster lookup next time 6985 */ 6986 if (elem && prev) { 6987 prev->next = elem->next; 6988 elem->next = devimap->dno_head; 6989 devimap->dno_head = elem; 6990 } 6991 6992 mutex_exit(&devimap->dno_lock); 6993 return (dip); 6994 } 6995 6996 static void 6997 free_cache_task(void *arg) 6998 { 6999 ASSERT(arg == NULL); 7000 7001 mutex_enter(&di_cache.cache_lock); 7002 7003 /* 7004 * The cache can be invalidated without holding the lock 7005 * but it can be made valid again only while the lock is held. 7006 * So if the cache is invalid when the lock is held, it will 7007 * stay invalid until lock is released. 7008 */ 7009 if (!di_cache.cache_valid) 7010 i_ddi_di_cache_free(&di_cache); 7011 7012 mutex_exit(&di_cache.cache_lock); 7013 7014 if (di_cache_debug) 7015 cmn_err(CE_NOTE, "system_taskq: di_cache freed"); 7016 } 7017 7018 extern int modrootloaded; 7019 7020 void 7021 i_ddi_di_cache_free(struct di_cache *cache) 7022 { 7023 int error; 7024 7025 ASSERT(mutex_owned(&cache->cache_lock)); 7026 7027 if (cache->cache_size) { 7028 ASSERT(cache->cache_size > 0); 7029 ASSERT(cache->cache_data); 7030 7031 kmem_free(cache->cache_data, cache->cache_size); 7032 cache->cache_data = NULL; 7033 cache->cache_size = 0; 7034 7035 if (di_cache_debug) 7036 cmn_err(CE_NOTE, "i_ddi_di_cache_free: freed cachemem"); 7037 } else { 7038 ASSERT(cache->cache_data == NULL); 7039 if (di_cache_debug) 7040 cmn_err(CE_NOTE, "i_ddi_di_cache_free: NULL cache"); 7041 } 7042 7043 if (!modrootloaded || rootvp == NULL || vn_is_readonly(rootvp)) { 7044 if (di_cache_debug) { 7045 cmn_err(CE_WARN, "/ not mounted/RDONLY. Skip unlink"); 7046 } 7047 return; 7048 } 7049 7050 error = vn_remove(DI_CACHE_FILE, UIO_SYSSPACE, RMFILE); 7051 if (di_cache_debug && error && error != ENOENT) { 7052 cmn_err(CE_WARN, "%s: unlink failed: %d", DI_CACHE_FILE, error); 7053 } else if (di_cache_debug && !error) { 7054 cmn_err(CE_NOTE, "i_ddi_di_cache_free: unlinked cache file"); 7055 } 7056 } 7057 7058 void 7059 i_ddi_di_cache_invalidate(int kmflag) 7060 { 7061 uint_t flag; 7062 7063 if (!modrootloaded || !i_ddi_io_initialized()) { 7064 if (di_cache_debug) 7065 cmn_err(CE_NOTE, "I/O not inited. 

static void
i_bind_vhci_node(dev_info_t *dip)
{
	DEVI(dip)->devi_major = ddi_name_to_major(ddi_node_name(dip));
	i_ddi_set_node_state(dip, DS_BOUND);
}

static char vhci_node_addr[2];

static int
i_init_vhci_node(dev_info_t *dip)
{
	add_global_props(dip);
	DEVI(dip)->devi_ops = ndi_hold_driver(dip);
	if (DEVI(dip)->devi_ops == NULL)
		return (-1);

	DEVI(dip)->devi_instance = e_ddi_assign_instance(dip);
	e_ddi_keep_instance(dip);
	vhci_node_addr[0] = '\0';
	ddi_set_name_addr(dip, vhci_node_addr);
	i_ddi_set_node_state(dip, DS_INITIALIZED);
	return (0);
}

static void
i_link_vhci_node(dev_info_t *dip)
{
	ASSERT(MUTEX_HELD(&global_vhci_lock));

	/*
	 * scsi_vhci should be kept leftmost in the device tree.
	 */
	if (scsi_vhci_dip) {
		DEVI(dip)->devi_sibling = DEVI(scsi_vhci_dip)->devi_sibling;
		DEVI(scsi_vhci_dip)->devi_sibling = DEVI(dip);
	} else {
		DEVI(dip)->devi_sibling = DEVI(top_devinfo)->devi_child;
		DEVI(top_devinfo)->devi_child = DEVI(dip);
	}
}

/*
 * This is a special routine to enumerate the vhci node (child of the
 * rootnex node) without holding the ndi_devi_enter() lock.  The device
 * node is allocated, initialized and brought into DS_READY state before
 * inserting it into the device tree.  The VHCI node is handcrafted
 * here to bring the node to DS_READY, similar to the rootnex node.
 *
 * The global_vhci_lock protects linking the node into the device tree,
 * as the same lock is held before linking/unlinking any direct child
 * of rootnex.
 *
 * This routine is a workaround to handle a possible deadlock
 * that occurs while trying to enumerate a node in a different sub-tree
 * during _init/_attach entry points.
 */
/*ARGSUSED*/
dev_info_t *
ndi_devi_config_vhci(char *drvname, int flags)
{
	struct devnames		*dnp;
	dev_info_t		*dip;
	major_t			major = ddi_name_to_major(drvname);

	if (major == (major_t)-1)
		return (NULL);

	/* Make sure we create the VHCI node only once */
	dnp = &devnamesp[major];
	LOCK_DEV_OPS(&dnp->dn_lock);
	if (dnp->dn_head) {
		dip = dnp->dn_head;
		UNLOCK_DEV_OPS(&dnp->dn_lock);
		return (dip);
	}
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	/* Allocate the VHCI node */
	ndi_devi_alloc_sleep(top_devinfo, drvname, DEVI_SID_NODEID, &dip);
	ndi_hold_devi(dip);

	/* Mark the node as VHCI */
	DEVI(dip)->devi_node_attributes |= DDI_VHCI_NODE;

	i_ddi_add_devimap(dip);
	i_bind_vhci_node(dip);
	if (i_init_vhci_node(dip) == -1) {
		ndi_rele_devi(dip);
		(void) ndi_devi_free(dip);
		return (NULL);
	}

	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_SET_ATTACHING(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	if (devi_attach(dip, DDI_ATTACH) != DDI_SUCCESS) {
		cmn_err(CE_CONT, "Could not attach %s driver", drvname);
		e_ddi_free_instance(dip, vhci_node_addr);
		ndi_rele_devi(dip);
		(void) ndi_devi_free(dip);
		return (NULL);
	}
	mutex_enter(&(DEVI(dip)->devi_lock));
	DEVI_CLR_ATTACHING(dip);
	mutex_exit(&(DEVI(dip)->devi_lock));

	mutex_enter(&global_vhci_lock);
	i_link_vhci_node(dip);
	mutex_exit(&global_vhci_lock);
	i_ddi_set_node_state(dip, DS_READY);

	LOCK_DEV_OPS(&dnp->dn_lock);
	dnp->dn_flags |= DN_DRIVER_HELD;
	dnp->dn_head = dip;
	UNLOCK_DEV_OPS(&dnp->dn_lock);

	i_ndi_devi_report_status_change(dip, NULL);

	return (dip);
}

/*
 * ibt_hw_is_present() returns 0 when there is no IB hardware actively
 * running.  This is primarily useful for modules like rpcmod which
 * need a quick check to decide whether or not they should try to use
 * InfiniBand.
 */
int ib_hw_status = 0;
int
ibt_hw_is_present()
{
	return (ib_hw_status);
}
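
/*
 * Usage sketch (illustrative only) for ibt_hw_is_present() above: a
 * consumer such as rpcmod can use it as a cheap gate before attempting
 * any InfiniBand-specific setup.  The function name
 * example_transport_setup is hypothetical.
 */
static void
example_transport_setup(void)
{
	if (ibt_hw_is_present() == 0)
		return;		/* no IB HCA active; skip IB setup */

	/* ... InfiniBand-specific initialization would go here ... */
}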