/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/note.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/instance.h>
#include <sys/conf.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/hwconf.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/sunmdi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/kobj.h>
#include <sys/devcache.h>
#include <sys/devid_cache.h>
#include <sys/sysmacros.h>

/*
 * Discovery refers to the heroic effort made to discover a device which
 * cannot be accessed at the physical path where it once resided.  Discovery
 * involves walking the entire device tree, attaching all possible disk
 * instances, to search for the device referenced by a devid.  Obviously,
 * full device discovery is something to be avoided where possible.
 * Note that simply invoking devfsadm(1M) is equivalent to running full
 * discovery at the devid cache level.
 *
 * Reasons why a disk may not be accessible:
 *	disk powered off
 *	disk removed or cable disconnected
 *	disk or adapter broken
 *
 * Note that discovery is not needed and cannot succeed in any of these
 * cases.
 *
 * When discovery may succeed:
 *	Discovery will result in success when a device has been moved
 *	to a different address.  Note that it's recommended that
 *	devfsadm(1M) be invoked (no arguments required) whenever a system's
 *	h/w configuration has been updated.  Alternatively, a
 *	reconfiguration boot can be used to accomplish the same result.
 *
 * Note that discovery is not necessary to be able to correct an access
 * failure for a device which was powered off.  Assuming the cache has an
 * entry for such a device, simply powering it on should permit the system
 * to access it.  If problems persist after powering it on, invoke
 * devfsadm(1M).
 *
 * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which not
 * all filesystems do.
 *
 * Tunables
 *
 * devid_discovery_boot (default 1)
 *	Number of times discovery will be attempted prior to mounting root.
 *	Must be done at least once to recover from corrupted or missing
 *	devid cache backing store.  Probably there's no reason to ever
 *	set this to greater than one as a missing device will remain
 *	unavailable no matter how often the system searches for it.
 *
 * devid_discovery_postboot (default 1)
 *	Number of times discovery will be attempted after mounting root.
 *	This must be performed at least once to discover any devices
 *	needed after root is mounted which may have been powered
 *	off and moved before booting.
 *	Setting this to a larger positive number will introduce
 *	some inconsistency in system operation.  Searching for a device
 *	will take an indeterminate amount of time, sometimes slower,
 *	sometimes faster.  In addition, the system will sometimes
 *	discover a newly powered on device, sometimes it won't.
 *	Use of this option is therefore not recommended.
 *
 * devid_discovery_postboot_always (default 0)
 *	Set to 1, the system will always attempt full discovery.
 *
 * devid_discovery_secs (default 0)
 *	Set to a positive value, the system will attempt full discovery
 *	but with a minimum delay between attempts.  A device search
 *	within the period of time specified will result in failure.
 *
 * devid_cache_read_disable (default 0)
 *	Set to 1 to disable reading /etc/devices/devid_cache.
 *	Devid cache will continue to operate normally but
 *	at least one discovery attempt will be required.
 *
 * devid_cache_write_disable (default 0)
 *	Set to 1 to disable updates to /etc/devices/devid_cache.
 *	Any updates to the devid cache will not be preserved across a reboot.
 *
 * devid_report_error (default 0)
 *	Set to 1 to enable some error messages related to devid
 *	cache failures.
 *
 * The devid is packed in the cache file as a byte array.  For
 * portability, this could be done in the encoded string format.
 */
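
/*
 * Illustrative only (not part of the cache implementation): since the
 * tunables above are ordinary kernel globals, on a typical Solaris/illumos
 * system they would be set from /etc/system with the standard "set"
 * directive, for example:
 *
 *	set devid_discovery_postboot_always = 1
 *	set devid_cache_write_disable = 1
 *
 * The values shown are examples, not recommendations.
 */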

int devid_discovery_boot = 1;
int devid_discovery_postboot = 1;
int devid_discovery_postboot_always = 0;
int devid_discovery_secs = 0;

int devid_cache_read_disable = 0;
int devid_cache_write_disable = 0;

int devid_report_error = 0;


/*
 * State to manage discovery of devices providing a devid
 */
static int		devid_discovery_busy = 0;
static kmutex_t		devid_discovery_mutex;
static kcondvar_t	devid_discovery_cv;
static clock_t		devid_last_discovery = 0;


#ifdef	DEBUG
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",	/* path to cache */
	devid_cache_unpack_nvlist,	/* read: nvlist to nvp */
	devid_cache_pack_list,		/* write: nvp to nvlist */
	devid_list_free,		/* free data list */
	NULL				/* write complete callback */
};

/*
 * handle to registered devid cache handlers
 */
nvf_handle_t	dcfd_handle;


/*
 * Initialize devid cache file management
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while (np = list_head(listp)) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Free an nvp element in a list
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading /etc/devices/devid_cache
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}
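
/*
 * For reference, the cache file payload handled by the unpack routine above
 * and the pack routine below is a single NV_UNIQUE_NAME nvlist keyed by
 * device path, with one sub-nvlist per path carrying the packed devid as a
 * byte array, roughly:
 *
 *	"/pci@0,0/pci8086,2829@d/disk@0,0" -> {
 *		DP_DEVID_ID: <byte array of ddi_devid_sizeof() bytes>
 *	}
 *
 * The device path shown is hypothetical.
 */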

/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}

static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}

static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

/* legacy support - see below */
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))

static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t	*id = (impl_devid_t *)devid;
	major_t		major, hint_major;
	char		hint[DEVID_HINT_SIZE + 1];
	struct devnames	*dnp;
	char		**drvp;
	int		i;

	/* Count non-null bytes */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/* Make a copy of the driver hint */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	/*
	 * search for the devid with each driver declaring
	 * itself as a devid registrant.
	 */
	for (major = 0; major < devcnt; major++) {
		if (major == hint_major)
			continue;
		dnp = &devnamesp[major];
		if (dnp->dn_flags & DN_DEVID_REGISTRANT) {
			e_ddi_devid_hold_by_major(major);
		}
	}

	/*
	 * Legacy support: may be removed once an upgrade mechanism
	 * for driver conf files is available.
	 */
	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}

/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}

/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination has registered.
 *
 * If a devpath is provided it will be used as the path to register the
 * devid against, otherwise we use ddi_pathname(dip).  In both cases
 * we duplicate the path string so that it can be cached/freed independently
 * of the original owner.
 */
static int
e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;
	int pathlen;
	list_t *listp;
	int is_dirty = 0;


	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	if (devpath) {
		pathlen = strlen(devpath) + 1;
		path = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(devpath, path, pathlen);
	} else {
		/*
		 * We are willing to accept DS_BOUND nodes if we can form a
		 * full ddi_pathname (i.e. the node is part way to becoming
		 * DS_INITIALIZED and devi_addr/ddi_get_name_addr are
		 * non-NULL).
		 */
		if (ddi_get_name_addr(dip) == NULL)
			return (DDI_FAILURE);

		fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(dip, fullpath);
		pathlen = strlen(fullpath) + 1;
		path = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(fullpath, path, pathlen);
		kmem_free(fullpath, MAXPATHLEN);
	}

	DEVID_LOG_REG(("register", devid, path));

	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}

int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	return (e_devid_cache_register_cmn(dip, devid, NULL));
}
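
/*
 * Illustrative sketch (not an interface of this file): cache entries are
 * normally created when a driver registers its devid through the DDI during
 * attach, which ends up in e_devid_cache_register() above.  The soft-state
 * pointer, serial-number buffer and devid type below are hypothetical
 * placeholders.
 *
 *	if (ddi_devid_init(dip, DEVID_ENCAP, (ushort_t)serial_len,
 *	    serial, &xsp->xx_devid) == DDI_SUCCESS)
 *		(void) ddi_devid_register(dip, xsp->xx_devid);
 *
 * The devid would typically be unregistered and freed again in the driver's
 * detach path via ddi_devid_unregister() and ddi_devid_free().
 */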

/*
 * Unregister a device's devid; the devinfo may hit on multiple entries
 * arising from both pHCI and vHCI paths.
 * Called as an instance detaches.
 * Invalidate the devid's devinfo reference.
 * Devid-path remains in the cache.
 */
void
e_devid_cache_unregister(dev_info_t *dip)
{
	nvp_devid_t *np;
	list_t *listp;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
			DEVID_LOG_UNREG((CE_CONT,
			    "unregister: %s\n", np->nvp_devpath));
			np->nvp_flags &= ~NVP_DEVID_DIP;
			np->nvp_dip = NULL;
		}
	}

	rw_exit(nvf_lock(dcfd_handle));
}

int
e_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid)
{
	char *path = mdi_pi_pathname(pip);

	return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid,
	    path));
}

/*
 * Purge devid cache of stale devids
 */
void
devid_cache_cleanup(void)
{
	nvp_devid_t *np, *next;
	list_t *listp;
	int is_dirty = 0;

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = next) {
		next = list_next(listp, np);
		if (np->nvp_devid == NULL)
			continue;
		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
			DEVID_LOG_REMOVE((CE_CONT,
			    "cleanup: %s\n", np->nvp_devpath));
			if (!devid_cache_write_disable) {
				nvf_mark_dirty(dcfd_handle);
				is_dirty = 1;
			}
			devid_nvp_unlink_and_free(dcfd_handle, np);
		}
	}

	rw_exit(nvf_lock(dcfd_handle));

	if (is_dirty)
		nvf_wake_daemon();
}


/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int			circ;
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}

/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
    int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach.
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
    int *retndevts, dev_t **retdevts)
{
	char		*path, **paths;
	int		i, j, n;
	dev_t		*devts, *udevts;
	dev_t		tdevt;
	int		ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int		ndevis, npaths, nalloced;
	ddi_devid_t	match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(nvf_lock(dcfd_handle), RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(nvf_lock(dcfd_handle));
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(nvf_lock(dcfd_handle));

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent.  This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}

void
e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
{
	kmem_free(devt_list, ndevts * sizeof (dev_t));
}
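
/*
 * Illustrative sketch of the layered consumer path noted above: a layered
 * driver resolves a devid to the dev_t list built by
 * e_devid_cache_to_devt_list() through ddi_lyr_devid_to_devlist(9F) and
 * releases the result with ddi_lyr_free_devlist(9F).  Error handling and
 * how the devid was obtained are omitted.
 *
 *	dev_t	*devs;
 *	int	ndevs;
 *
 *	if (ddi_lyr_devid_to_devlist(devid, DEVID_MINOR_NAME_ALL,
 *	    &ndevs, &devs) == DDI_SUCCESS) {
 *		(use devs[0] .. devs[ndevs - 1])
 *		ddi_lyr_free_devlist(devs, ndevs);
 *	}
 */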

/*
 * If given a full path and NULL ua, search for a cache entry
 * whose path matches the full path.  On a cache hit duplicate the
 * devid of the matched entry into the given devid (caller
 * must free); nodenamebuf is not touched for this usage.
 *
 * Given a path and a non-NULL unit address, search the cache for any entry
 * matching "<path>/%@<unit-address>" where '%' is a wildcard meaning
 * any node name.  The path should not end in a '/'.  On a cache hit
 * duplicate the devid as before (caller must free) and copy into
 * the caller-provided nodenamebuf (if not NULL) the nodename of the
 * matched entry.
 *
 * We must not make use of nvp_dip since that may be NULL for cached
 * entries that are not present in the current tree.
 */
int
e_devid_cache_path_to_devid(char *path, char *ua,
    char *nodenamebuf, ddi_devid_t *devidp)
{
	size_t pathlen, ualen;
	int rv = DDI_FAILURE;
	nvp_devid_t *np;
	list_t *listp;
	char *cand;

	if (path == NULL || *path == '\0' || (ua && *ua == '\0') ||
	    devidp == NULL)
		return (DDI_FAILURE);

	*devidp = NULL;

	if (ua) {
		pathlen = strlen(path);
		ualen = strlen(ua);
	}

	rw_enter(nvf_lock(dcfd_handle), RW_READER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		size_t nodelen, candlen, n;
		ddi_devid_t devid_dup;
		char *uasep, *node;

		if (np->nvp_devid == NULL)
			continue;

		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "pathsearch: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}

		cand = np->nvp_devpath;		/* candidate path */

		/* If a full pathname was provided the compare is easy */
		if (ua == NULL) {
			if (strcmp(cand, path) == 0)
				goto match;
			else
				continue;
		}

		/*
		 * The compare for initial path plus ua and unknown nodename
		 * is trickier.
		 *
		 * Does the initial path component match 'path'?
		 */
		if (strncmp(path, cand, pathlen) != 0)
			continue;

		candlen = strlen(cand);

		/*
		 * The next character must be a '/' and there must be no
		 * further '/' thereafter.  Begin by checking that the
		 * candidate is long enough to include at minimum a
		 * "/<nodename>@<ua>" after the initial portion already
		 * matched, assuming a nodename length of 1.
		 */
		if (candlen < pathlen + 1 + 1 + 1 + ualen ||
		    cand[pathlen] != '/' ||
		    strchr(cand + pathlen + 1, '/') != NULL)
			continue;

		node = cand + pathlen + 1;	/* <node>@<ua> string */

		/*
		 * Find the '@' before the unit address.  Check for
		 * unit address match.
		 */
		if ((uasep = strchr(node, '@')) == NULL)
			continue;

		/*
		 * Check we still have enough length and that ua matches
		 */
		nodelen = (uintptr_t)uasep - (uintptr_t)node;
		if (candlen < pathlen + 1 + nodelen + 1 + ualen ||
		    strncmp(ua, uasep + 1, ualen) != 0)
			continue;
match:
		n = ddi_devid_sizeof(np->nvp_devid);
		devid_dup = kmem_alloc(n, KM_SLEEP);	/* caller must free */
		(void) bcopy(np->nvp_devid, devid_dup, n);
		*devidp = devid_dup;

		if (ua && nodenamebuf) {
			(void) strncpy(nodenamebuf, node, nodelen);
			nodenamebuf[nodelen] = '\0';
		}

		rv = DDI_SUCCESS;
		break;
	}

	rw_exit(nvf_lock(dcfd_handle));

	return (rv);
}
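
/*
 * Illustrative sketch of the wildcard form of the lookup above; the path,
 * unit address and node name are hypothetical.  Given a cached entry for
 * "/pci@0,0/pci8086,2829@d/disk@1,0", the following would match it:
 *
 *	char		nodename[MAXNAMELEN];
 *	ddi_devid_t	devid;
 *
 *	if (e_devid_cache_path_to_devid("/pci@0,0/pci8086,2829@d", "1,0",
 *	    nodename, &devid) == DDI_SUCCESS) {
 *		(nodename now holds "disk"; devid holds a copy which the
 *		caller must free)
 *	}
 */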

#ifdef	DEBUG
static void
devid_log(char *fmt, ddi_devid_t devid, char *path)
{
	char *devidstr = ddi_devid_str_encode(devid, NULL);
	if (path) {
		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
	} else {
		cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
	}
	ddi_devid_str_free(devidstr);
}
#endif	/* DEBUG */