183c4dfe9Sjg /*
283c4dfe9Sjg * CDDL HEADER START
383c4dfe9Sjg *
483c4dfe9Sjg * The contents of this file are subject to the terms of the
583c4dfe9Sjg * Common Development and Distribution License (the "License").
683c4dfe9Sjg * You may not use this file except in compliance with the License.
783c4dfe9Sjg *
883c4dfe9Sjg * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
983c4dfe9Sjg * or http://www.opensolaris.org/os/licensing.
1083c4dfe9Sjg * See the License for the specific language governing permissions
1183c4dfe9Sjg * and limitations under the License.
1283c4dfe9Sjg *
1383c4dfe9Sjg * When distributing Covered Code, include this CDDL HEADER in each
1483c4dfe9Sjg * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1583c4dfe9Sjg * If applicable, add the following below this CDDL HEADER, with the
1683c4dfe9Sjg * fields enclosed by brackets "[]" replaced with your own identifying
1783c4dfe9Sjg * information: Portions Copyright [yyyy] [name of copyright owner]
1883c4dfe9Sjg *
1983c4dfe9Sjg * CDDL HEADER END
2083c4dfe9Sjg */
2183c4dfe9Sjg /*
224f1e984dSReed * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
236fe4f300SPavel Zakharov * Copyright (c) 2018 by Delphix. All rights reserved.
243fe80ca4SDan Cross * Copyright 2023 Oxide Computer Company
2583c4dfe9Sjg */
2683c4dfe9Sjg
2783c4dfe9Sjg #include <sys/note.h>
2883c4dfe9Sjg #include <sys/t_lock.h>
2983c4dfe9Sjg #include <sys/cmn_err.h>
3083c4dfe9Sjg #include <sys/instance.h>
3183c4dfe9Sjg #include <sys/conf.h>
3283c4dfe9Sjg #include <sys/stat.h>
3383c4dfe9Sjg #include <sys/ddi.h>
3483c4dfe9Sjg #include <sys/hwconf.h>
3583c4dfe9Sjg #include <sys/sunddi.h>
3683c4dfe9Sjg #include <sys/sunndi.h>
37392e836bSGavin Maltby #include <sys/sunmdi.h>
3883c4dfe9Sjg #include <sys/ddi_impldefs.h>
3983c4dfe9Sjg #include <sys/ndi_impldefs.h>
4083c4dfe9Sjg #include <sys/kobj.h>
4183c4dfe9Sjg #include <sys/devcache.h>
4283c4dfe9Sjg #include <sys/devid_cache.h>
4383c4dfe9Sjg #include <sys/sysmacros.h>
4483c4dfe9Sjg
4583c4dfe9Sjg /*
4683c4dfe9Sjg * Discovery refers to the heroic effort made to discover a device which
4783c4dfe9Sjg * cannot be accessed at the physical path where it once resided. Discovery
4883c4dfe9Sjg * involves walking the entire device tree attaching all possible disk
4983c4dfe9Sjg * instances, to search for the device referenced by a devid. Obviously,
5083c4dfe9Sjg * full device discovery is something to be avoided where possible.
51bbf21555SRichard Lowe * Note that simply invoking devfsadm(8) is equivalent to running full
5283c4dfe9Sjg * discovery at the devid cache level.
5383c4dfe9Sjg *
5483c4dfe9Sjg * Reasons why a disk may not be accessible:
5583c4dfe9Sjg * disk powered off
5683c4dfe9Sjg * disk removed or cable disconnected
5783c4dfe9Sjg * disk or adapter broken
5883c4dfe9Sjg *
5983c4dfe9Sjg * Note that discovery is not needed and cannot succeed in any of these
6083c4dfe9Sjg * cases.
6183c4dfe9Sjg *
6283c4dfe9Sjg * When discovery may succeed:
6383c4dfe9Sjg * Discovery will result in success when a device has been moved
6483c4dfe9Sjg * to a different address. Note that it's recommended that
65bbf21555SRichard Lowe * devfsadm(8) be invoked (no arguments required) whenever a system's
6683c4dfe9Sjg * h/w configuration has been updated. Alternatively, a
6783c4dfe9Sjg * reconfiguration boot can be used to accomplish the same result.
6883c4dfe9Sjg *
6983c4dfe9Sjg * Note that discovery is not necessary to be able to correct an access
7083c4dfe9Sjg * failure for a device which was powered off. Assuming the cache has an
7183c4dfe9Sjg * entry for such a device, simply powering it on should permit the system
7283c4dfe9Sjg * to access it. If problems persist after powering it on, invoke
73bbf21555SRichard Lowe * devfsadm(8).
7483c4dfe9Sjg *
7583c4dfe9Sjg * Discovery prior to mounting root is only of interest when booting
 7683c4dfe9Sjg  * from a filesystem which accesses devices by device id, which
 7783c4dfe9Sjg  * not all do.
7883c4dfe9Sjg *
7983c4dfe9Sjg * Tunables
8083c4dfe9Sjg *
8183c4dfe9Sjg * devid_discovery_boot (default 1)
8283c4dfe9Sjg * Number of times discovery will be attempted prior to mounting root.
8383c4dfe9Sjg * Must be done at least once to recover from corrupted or missing
8483c4dfe9Sjg * devid cache backing store. Probably there's no reason to ever
8583c4dfe9Sjg * set this to greater than one as a missing device will remain
8683c4dfe9Sjg * unavailable no matter how often the system searches for it.
8783c4dfe9Sjg *
8883c4dfe9Sjg * devid_discovery_postboot (default 1)
8983c4dfe9Sjg * Number of times discovery will be attempted after mounting root.
9083c4dfe9Sjg * This must be performed at least once to discover any devices
9183c4dfe9Sjg * needed after root is mounted which may have been powered
9283c4dfe9Sjg * off and moved before booting.
9383c4dfe9Sjg * Setting this to a larger positive number will introduce
9483c4dfe9Sjg * some inconsistency in system operation. Searching for a device
9583c4dfe9Sjg * will take an indeterminate amount of time, sometimes slower,
9683c4dfe9Sjg * sometimes faster. In addition, the system will sometimes
9783c4dfe9Sjg * discover a newly powered on device, sometimes it won't.
9883c4dfe9Sjg * Use of this option is not therefore recommended.
9983c4dfe9Sjg *
10083c4dfe9Sjg * devid_discovery_postboot_always (default 0)
10183c4dfe9Sjg * Set to 1, the system will always attempt full discovery.
10283c4dfe9Sjg *
10383c4dfe9Sjg * devid_discovery_secs (default 0)
10483c4dfe9Sjg * Set to a positive value, the system will attempt full discovery
10583c4dfe9Sjg * but with a minimum delay between attempts. A device search
10683c4dfe9Sjg * within the period of time specified will result in failure.
10783c4dfe9Sjg *
10883c4dfe9Sjg * devid_cache_read_disable (default 0)
10983c4dfe9Sjg * Set to 1 to disable reading /etc/devices/devid_cache.
11083c4dfe9Sjg * Devid cache will continue to operate normally but
11183c4dfe9Sjg * at least one discovery attempt will be required.
11283c4dfe9Sjg *
11383c4dfe9Sjg * devid_cache_write_disable (default 0)
11483c4dfe9Sjg * Set to 1 to disable updates to /etc/devices/devid_cache.
11583c4dfe9Sjg * Any updates to the devid cache will not be preserved across a reboot.
11683c4dfe9Sjg *
11783c4dfe9Sjg * devid_report_error (default 0)
11883c4dfe9Sjg * Set to 1 to enable some error messages related to devid
11983c4dfe9Sjg * cache failures.
12083c4dfe9Sjg *
12183c4dfe9Sjg * The devid is packed in the cache file as a byte array. For
12283c4dfe9Sjg * portability, this could be done in the encoded string format.
12383c4dfe9Sjg */
12483c4dfe9Sjg
12583c4dfe9Sjg
12683c4dfe9Sjg int devid_discovery_boot = 1;
12783c4dfe9Sjg int devid_discovery_postboot = 1;
12883c4dfe9Sjg int devid_discovery_postboot_always = 0;
12983c4dfe9Sjg int devid_discovery_secs = 0;
13083c4dfe9Sjg
13183c4dfe9Sjg int devid_cache_read_disable = 0;
13283c4dfe9Sjg int devid_cache_write_disable = 0;
13383c4dfe9Sjg
13483c4dfe9Sjg int devid_report_error = 0;
13583c4dfe9Sjg
13683c4dfe9Sjg
13783c4dfe9Sjg /*
13883c4dfe9Sjg * State to manage discovery of devices providing a devid
13983c4dfe9Sjg */
14083c4dfe9Sjg static int devid_discovery_busy = 0;
14183c4dfe9Sjg static kmutex_t devid_discovery_mutex;
14283c4dfe9Sjg static kcondvar_t devid_discovery_cv;
14383c4dfe9Sjg static clock_t devid_last_discovery = 0;
14483c4dfe9Sjg
14583c4dfe9Sjg
14683c4dfe9Sjg #ifdef DEBUG
14783c4dfe9Sjg int nvp_devid_debug = 0;
14883c4dfe9Sjg int devid_debug = 0;
14983c4dfe9Sjg int devid_log_registers = 0;
15083c4dfe9Sjg int devid_log_finds = 0;
15183c4dfe9Sjg int devid_log_lookups = 0;
15283c4dfe9Sjg int devid_log_discovery = 0;
15383c4dfe9Sjg int devid_log_matches = 0;
15483c4dfe9Sjg int devid_log_paths = 0;
15583c4dfe9Sjg int devid_log_failures = 0;
15683c4dfe9Sjg int devid_log_hold = 0;
15783c4dfe9Sjg int devid_log_unregisters = 0;
15883c4dfe9Sjg int devid_log_removes = 0;
15983c4dfe9Sjg int devid_register_debug = 0;
16083c4dfe9Sjg int devid_log_stale = 0;
16183c4dfe9Sjg int devid_log_detaches = 0;
16283c4dfe9Sjg #endif /* DEBUG */
16383c4dfe9Sjg
16483c4dfe9Sjg /*
16583c4dfe9Sjg * devid cache file registration for cache reads and updates
16683c4dfe9Sjg */
16783c4dfe9Sjg static nvf_ops_t devid_cache_ops = {
16883c4dfe9Sjg "/etc/devices/devid_cache", /* path to cache */
16983c4dfe9Sjg devid_cache_unpack_nvlist, /* read: nvlist to nvp */
17083c4dfe9Sjg devid_cache_pack_list, /* write: nvp to nvlist */
17183c4dfe9Sjg devid_list_free, /* free data list */
17283c4dfe9Sjg NULL /* write complete callback */
17383c4dfe9Sjg };
17483c4dfe9Sjg
17583c4dfe9Sjg /*
17683c4dfe9Sjg * handle to registered devid cache handlers
17783c4dfe9Sjg */
17883c4dfe9Sjg nvf_handle_t dcfd_handle;
17983c4dfe9Sjg
18083c4dfe9Sjg
18183c4dfe9Sjg /*
18283c4dfe9Sjg * Initialize devid cache file management
18383c4dfe9Sjg */
18483c4dfe9Sjg void
devid_cache_init(void)18583c4dfe9Sjg devid_cache_init(void)
18683c4dfe9Sjg {
18783c4dfe9Sjg dcfd_handle = nvf_register_file(&devid_cache_ops);
18883c4dfe9Sjg ASSERT(dcfd_handle);
18983c4dfe9Sjg
19083c4dfe9Sjg list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
19183c4dfe9Sjg offsetof(nvp_devid_t, nvp_link));
19283c4dfe9Sjg
19383c4dfe9Sjg mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
19483c4dfe9Sjg cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
19583c4dfe9Sjg }
19683c4dfe9Sjg
19783c4dfe9Sjg /*
19883c4dfe9Sjg * Read and initialize the devid cache from the persistent store
19983c4dfe9Sjg */
20083c4dfe9Sjg void
devid_cache_read(void)20183c4dfe9Sjg devid_cache_read(void)
20283c4dfe9Sjg {
20383c4dfe9Sjg if (!devid_cache_read_disable) {
20483c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
20583c4dfe9Sjg ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
20683c4dfe9Sjg (void) nvf_read_file(dcfd_handle);
20783c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
20883c4dfe9Sjg }
20983c4dfe9Sjg }
21083c4dfe9Sjg
21183c4dfe9Sjg static void
devid_nvp_free(nvp_devid_t * dp)21283c4dfe9Sjg devid_nvp_free(nvp_devid_t *dp)
21383c4dfe9Sjg {
21483c4dfe9Sjg if (dp->nvp_devpath)
21583c4dfe9Sjg kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
21683c4dfe9Sjg if (dp->nvp_devid)
21783c4dfe9Sjg kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));
21883c4dfe9Sjg
21983c4dfe9Sjg kmem_free(dp, sizeof (nvp_devid_t));
22083c4dfe9Sjg }
22183c4dfe9Sjg
22283c4dfe9Sjg static void
devid_list_free(nvf_handle_t fd)22383c4dfe9Sjg devid_list_free(nvf_handle_t fd)
22483c4dfe9Sjg {
22583c4dfe9Sjg list_t *listp;
22683c4dfe9Sjg nvp_devid_t *np;
22783c4dfe9Sjg
22883c4dfe9Sjg ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
22983c4dfe9Sjg
23083c4dfe9Sjg listp = nvf_list(fd);
23183c4dfe9Sjg while (np = list_head(listp)) {
23283c4dfe9Sjg list_remove(listp, np);
23383c4dfe9Sjg devid_nvp_free(np);
23483c4dfe9Sjg }
23583c4dfe9Sjg }
23683c4dfe9Sjg
23783c4dfe9Sjg /*
23883c4dfe9Sjg * Free an nvp element in a list
23983c4dfe9Sjg */
24083c4dfe9Sjg static void
devid_nvp_unlink_and_free(nvf_handle_t fd,nvp_devid_t * np)24183c4dfe9Sjg devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
24283c4dfe9Sjg {
24383c4dfe9Sjg list_remove(nvf_list(fd), np);
24483c4dfe9Sjg devid_nvp_free(np);
24583c4dfe9Sjg }
24683c4dfe9Sjg
24783c4dfe9Sjg /*
24883c4dfe9Sjg * Unpack a device path/nvlist pair to the list of devid cache elements.
24983c4dfe9Sjg * Used to parse the nvlist format when reading
25083c4dfe9Sjg * /etc/devices/devid_cache
25183c4dfe9Sjg */
25283c4dfe9Sjg static int
devid_cache_unpack_nvlist(nvf_handle_t fd,nvlist_t * nvl,char * name)25383c4dfe9Sjg devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
25483c4dfe9Sjg {
25583c4dfe9Sjg nvp_devid_t *np;
25683c4dfe9Sjg ddi_devid_t devidp;
25783c4dfe9Sjg int rval;
25883c4dfe9Sjg uint_t n;
25983c4dfe9Sjg
26083c4dfe9Sjg NVP_DEVID_DEBUG_PATH((name));
26183c4dfe9Sjg ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
26283c4dfe9Sjg
26383c4dfe9Sjg /*
26483c4dfe9Sjg * check path for a devid
26583c4dfe9Sjg */
26683c4dfe9Sjg rval = nvlist_lookup_byte_array(nvl,
26783c4dfe9Sjg DP_DEVID_ID, (uchar_t **)&devidp, &n);
26883c4dfe9Sjg if (rval == 0) {
26983c4dfe9Sjg if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
27083c4dfe9Sjg ASSERT(n == ddi_devid_sizeof(devidp));
27183c4dfe9Sjg np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
27283c4dfe9Sjg np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
27383c4dfe9Sjg np->nvp_devid = kmem_alloc(n, KM_SLEEP);
27483c4dfe9Sjg (void) bcopy(devidp, np->nvp_devid, n);
27583c4dfe9Sjg list_insert_tail(nvf_list(fd), np);
27683c4dfe9Sjg NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
27783c4dfe9Sjg } else {
27883c4dfe9Sjg DEVIDERR((CE_CONT,
27983c4dfe9Sjg "%s: invalid devid\n", name));
28083c4dfe9Sjg }
28183c4dfe9Sjg } else {
28283c4dfe9Sjg DEVIDERR((CE_CONT,
28383c4dfe9Sjg "%s: devid not available\n", name));
28483c4dfe9Sjg }
28583c4dfe9Sjg
28683c4dfe9Sjg return (0);
28783c4dfe9Sjg }
28883c4dfe9Sjg
28983c4dfe9Sjg /*
29083c4dfe9Sjg * Pack the list of devid cache elements into a single nvlist
29183c4dfe9Sjg * Used when writing the nvlist file.
29283c4dfe9Sjg */
29383c4dfe9Sjg static int
devid_cache_pack_list(nvf_handle_t fd,nvlist_t ** ret_nvl)29483c4dfe9Sjg devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
29583c4dfe9Sjg {
29683c4dfe9Sjg nvlist_t *nvl, *sub_nvl;
29783c4dfe9Sjg nvp_devid_t *np;
29883c4dfe9Sjg int rval;
29983c4dfe9Sjg list_t *listp;
30083c4dfe9Sjg
30183c4dfe9Sjg ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
30283c4dfe9Sjg
30383c4dfe9Sjg rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
30483c4dfe9Sjg if (rval != 0) {
30583c4dfe9Sjg nvf_error("%s: nvlist alloc error %d\n",
30683c4dfe9Sjg nvf_cache_name(fd), rval);
30783c4dfe9Sjg return (DDI_FAILURE);
30883c4dfe9Sjg }
30983c4dfe9Sjg
31083c4dfe9Sjg listp = nvf_list(fd);
31183c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) {
31283c4dfe9Sjg if (np->nvp_devid == NULL)
31383c4dfe9Sjg continue;
31483c4dfe9Sjg NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
31583c4dfe9Sjg rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
31683c4dfe9Sjg if (rval != 0) {
31783c4dfe9Sjg nvf_error("%s: nvlist alloc error %d\n",
31883c4dfe9Sjg nvf_cache_name(fd), rval);
31983c4dfe9Sjg sub_nvl = NULL;
32083c4dfe9Sjg goto err;
32183c4dfe9Sjg }
32283c4dfe9Sjg
32383c4dfe9Sjg rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
32483c4dfe9Sjg (uchar_t *)np->nvp_devid,
32583c4dfe9Sjg ddi_devid_sizeof(np->nvp_devid));
32683c4dfe9Sjg if (rval == 0) {
32783c4dfe9Sjg NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
32883c4dfe9Sjg } else {
32983c4dfe9Sjg nvf_error(
33083c4dfe9Sjg "%s: nvlist add error %d (devid)\n",
33183c4dfe9Sjg nvf_cache_name(fd), rval);
33283c4dfe9Sjg goto err;
33383c4dfe9Sjg }
33483c4dfe9Sjg
33583c4dfe9Sjg rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
33683c4dfe9Sjg if (rval != 0) {
33783c4dfe9Sjg nvf_error("%s: nvlist add error %d (sublist)\n",
33883c4dfe9Sjg nvf_cache_name(fd), rval);
33983c4dfe9Sjg goto err;
34083c4dfe9Sjg }
34183c4dfe9Sjg nvlist_free(sub_nvl);
34283c4dfe9Sjg }
34383c4dfe9Sjg
34483c4dfe9Sjg *ret_nvl = nvl;
34583c4dfe9Sjg return (DDI_SUCCESS);
34683c4dfe9Sjg
34783c4dfe9Sjg err:
34883c4dfe9Sjg nvlist_free(sub_nvl);
34983c4dfe9Sjg nvlist_free(nvl);
35083c4dfe9Sjg *ret_nvl = NULL;
35183c4dfe9Sjg return (DDI_FAILURE);
35283c4dfe9Sjg }
35383c4dfe9Sjg
35483c4dfe9Sjg static int
e_devid_do_discovery(void)35583c4dfe9Sjg e_devid_do_discovery(void)
35683c4dfe9Sjg {
35783c4dfe9Sjg ASSERT(mutex_owned(&devid_discovery_mutex));
35883c4dfe9Sjg
35983c4dfe9Sjg if (i_ddi_io_initialized() == 0) {
36083c4dfe9Sjg if (devid_discovery_boot > 0) {
36183c4dfe9Sjg devid_discovery_boot--;
36283c4dfe9Sjg return (1);
36383c4dfe9Sjg }
36483c4dfe9Sjg } else {
36583c4dfe9Sjg if (devid_discovery_postboot_always > 0)
36683c4dfe9Sjg return (1);
36783c4dfe9Sjg if (devid_discovery_postboot > 0) {
36883c4dfe9Sjg devid_discovery_postboot--;
36983c4dfe9Sjg return (1);
37083c4dfe9Sjg }
37183c4dfe9Sjg if (devid_discovery_secs > 0) {
37283c4dfe9Sjg if ((ddi_get_lbolt() - devid_last_discovery) >
37383c4dfe9Sjg drv_usectohz(devid_discovery_secs * MICROSEC)) {
37483c4dfe9Sjg return (1);
37583c4dfe9Sjg }
37683c4dfe9Sjg }
37783c4dfe9Sjg }
37883c4dfe9Sjg
37983c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
38083c4dfe9Sjg return (0);
38183c4dfe9Sjg }
38283c4dfe9Sjg
38383c4dfe9Sjg static void
e_ddi_devid_hold_by_major(major_t major)38483c4dfe9Sjg e_ddi_devid_hold_by_major(major_t major)
38583c4dfe9Sjg {
38683c4dfe9Sjg DEVID_LOG_DISC((CE_CONT,
38783c4dfe9Sjg "devid_discovery: ddi_hold_installed_driver %d\n", major));
38883c4dfe9Sjg
38983c4dfe9Sjg if (ddi_hold_installed_driver(major) == NULL)
39083c4dfe9Sjg return;
39183c4dfe9Sjg
39283c4dfe9Sjg ddi_rele_driver(major);
39383c4dfe9Sjg }
39483c4dfe9Sjg
3956f25ad7fSJerry Gilliam /* legacy support - see below */
3966f25ad7fSJerry Gilliam static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd" };
39783c4dfe9Sjg
39883c4dfe9Sjg #define N_DRIVERS_TO_HOLD \
39983c4dfe9Sjg (sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))
40083c4dfe9Sjg
40183c4dfe9Sjg static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)40283c4dfe9Sjg e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
40383c4dfe9Sjg {
40483c4dfe9Sjg impl_devid_t *id = (impl_devid_t *)devid;
40583c4dfe9Sjg major_t major, hint_major;
40683c4dfe9Sjg char hint[DEVID_HINT_SIZE + 1];
4076f25ad7fSJerry Gilliam struct devnames *dnp;
40883c4dfe9Sjg char **drvp;
40983c4dfe9Sjg int i;
41083c4dfe9Sjg
41183c4dfe9Sjg /* Count non-null bytes */
41283c4dfe9Sjg for (i = 0; i < DEVID_HINT_SIZE; i++)
41383c4dfe9Sjg if (id->did_driver[i] == '\0')
41483c4dfe9Sjg break;
41583c4dfe9Sjg
41683c4dfe9Sjg /* Make a copy of the driver hint */
41783c4dfe9Sjg bcopy(id->did_driver, hint, i);
41883c4dfe9Sjg hint[i] = '\0';
41983c4dfe9Sjg
42083c4dfe9Sjg /* search for the devid using the hint driver */
42183c4dfe9Sjg hint_major = ddi_name_to_major(hint);
422a204de77Scth if (hint_major != DDI_MAJOR_T_NONE) {
42383c4dfe9Sjg e_ddi_devid_hold_by_major(hint_major);
42483c4dfe9Sjg }
42583c4dfe9Sjg
4266f25ad7fSJerry Gilliam /*
4276f25ad7fSJerry Gilliam * search for the devid with each driver declaring
4286f25ad7fSJerry Gilliam * itself as a devid registrant.
4296f25ad7fSJerry Gilliam */
4306f25ad7fSJerry Gilliam for (major = 0; major < devcnt; major++) {
4316f25ad7fSJerry Gilliam if (major == hint_major)
4326f25ad7fSJerry Gilliam continue;
4336f25ad7fSJerry Gilliam dnp = &devnamesp[major];
4346f25ad7fSJerry Gilliam if (dnp->dn_flags & DN_DEVID_REGISTRANT) {
4356f25ad7fSJerry Gilliam e_ddi_devid_hold_by_major(major);
4366f25ad7fSJerry Gilliam }
4376f25ad7fSJerry Gilliam }
4386f25ad7fSJerry Gilliam
4396f25ad7fSJerry Gilliam /*
4406f25ad7fSJerry Gilliam * Legacy support: may be removed once an upgrade mechanism
4416f25ad7fSJerry Gilliam * for driver conf files is available.
4426f25ad7fSJerry Gilliam */
44383c4dfe9Sjg drvp = e_ddi_devid_hold_driver_list;
44483c4dfe9Sjg for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
44583c4dfe9Sjg major = ddi_name_to_major(*drvp);
446a204de77Scth if (major != DDI_MAJOR_T_NONE && major != hint_major) {
44783c4dfe9Sjg e_ddi_devid_hold_by_major(major);
44883c4dfe9Sjg }
44983c4dfe9Sjg }
45083c4dfe9Sjg }
45183c4dfe9Sjg
45283c4dfe9Sjg /*
45383c4dfe9Sjg * Return success if discovery was attempted, to indicate
45483c4dfe9Sjg * that the desired device may now be available.
45583c4dfe9Sjg */
45683c4dfe9Sjg int
e_ddi_devid_discovery(ddi_devid_t devid)45783c4dfe9Sjg e_ddi_devid_discovery(ddi_devid_t devid)
45883c4dfe9Sjg {
45983c4dfe9Sjg int flags;
46083c4dfe9Sjg int rval = DDI_SUCCESS;
46183c4dfe9Sjg
46283c4dfe9Sjg mutex_enter(&devid_discovery_mutex);
46383c4dfe9Sjg
46483c4dfe9Sjg if (devid_discovery_busy) {
46583c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
46683c4dfe9Sjg while (devid_discovery_busy) {
46783c4dfe9Sjg cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
46883c4dfe9Sjg }
46983c4dfe9Sjg } else if (e_devid_do_discovery()) {
47083c4dfe9Sjg devid_discovery_busy = 1;
47183c4dfe9Sjg mutex_exit(&devid_discovery_mutex);
47283c4dfe9Sjg
47383c4dfe9Sjg if (i_ddi_io_initialized() == 0) {
47483c4dfe9Sjg e_ddi_devid_hold_installed_driver(devid);
47583c4dfe9Sjg } else {
47683c4dfe9Sjg DEVID_LOG_DISC((CE_CONT,
47783c4dfe9Sjg "devid_discovery: ndi_devi_config\n"));
47883c4dfe9Sjg flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
47983c4dfe9Sjg if (i_ddi_io_initialized())
48083c4dfe9Sjg flags |= NDI_DRV_CONF_REPROBE;
48183c4dfe9Sjg (void) ndi_devi_config(ddi_root_node(), flags);
48283c4dfe9Sjg }
48383c4dfe9Sjg
48483c4dfe9Sjg mutex_enter(&devid_discovery_mutex);
48583c4dfe9Sjg devid_discovery_busy = 0;
48683c4dfe9Sjg cv_broadcast(&devid_discovery_cv);
48783c4dfe9Sjg if (devid_discovery_secs > 0)
48883c4dfe9Sjg devid_last_discovery = ddi_get_lbolt();
48983c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
49083c4dfe9Sjg } else {
49183c4dfe9Sjg rval = DDI_FAILURE;
49283c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
49383c4dfe9Sjg }
49483c4dfe9Sjg
49583c4dfe9Sjg mutex_exit(&devid_discovery_mutex);
49683c4dfe9Sjg
49783c4dfe9Sjg return (rval);
49883c4dfe9Sjg }
49983c4dfe9Sjg
50083c4dfe9Sjg /*
50183c4dfe9Sjg * As part of registering a devid for a device,
50283c4dfe9Sjg * update the devid cache with this device/devid pair
50383c4dfe9Sjg * or note that this combination has registered.
504392e836bSGavin Maltby *
505392e836bSGavin Maltby * If a devpath is provided it will be used as the path to register the
506392e836bSGavin Maltby * devid against, otherwise we use ddi_pathname(dip). In both cases
 507392e836bSGavin Maltby  * we duplicate the path string so that it can be cached/freed independently
508392e836bSGavin Maltby * of the original owner.
50983c4dfe9Sjg */
510392e836bSGavin Maltby static int
e_devid_cache_register_cmn(dev_info_t * dip,ddi_devid_t devid,char * devpath)511392e836bSGavin Maltby e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath)
51283c4dfe9Sjg {
51383c4dfe9Sjg nvp_devid_t *np;
51483c4dfe9Sjg nvp_devid_t *new_nvp;
51583c4dfe9Sjg ddi_devid_t new_devid;
51683c4dfe9Sjg int new_devid_size;
51783c4dfe9Sjg char *path, *fullpath;
51883c4dfe9Sjg ddi_devid_t free_devid = NULL;
51983c4dfe9Sjg int pathlen;
52083c4dfe9Sjg list_t *listp;
52183c4dfe9Sjg int is_dirty = 0;
52283c4dfe9Sjg
523392e836bSGavin Maltby
524392e836bSGavin Maltby ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
525392e836bSGavin Maltby
526392e836bSGavin Maltby if (devpath) {
527392e836bSGavin Maltby pathlen = strlen(devpath) + 1;
528392e836bSGavin Maltby path = kmem_alloc(pathlen, KM_SLEEP);
529392e836bSGavin Maltby bcopy(devpath, path, pathlen);
530392e836bSGavin Maltby } else {
5314f1e984dSReed /*
5324f1e984dSReed * We are willing to accept DS_BOUND nodes if we can form a full
5334f1e984dSReed * ddi_pathname (i.e. the node is part way to becomming
5344f1e984dSReed * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL).
5354f1e984dSReed */
536392e836bSGavin Maltby if (ddi_get_name_addr(dip) == NULL)
5374f1e984dSReed return (DDI_FAILURE);
53883c4dfe9Sjg
53983c4dfe9Sjg fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
54083c4dfe9Sjg (void) ddi_pathname(dip, fullpath);
54183c4dfe9Sjg pathlen = strlen(fullpath) + 1;
54283c4dfe9Sjg path = kmem_alloc(pathlen, KM_SLEEP);
54383c4dfe9Sjg bcopy(fullpath, path, pathlen);
54483c4dfe9Sjg kmem_free(fullpath, MAXPATHLEN);
545392e836bSGavin Maltby }
54683c4dfe9Sjg
54783c4dfe9Sjg DEVID_LOG_REG(("register", devid, path));
54883c4dfe9Sjg
54983c4dfe9Sjg new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
55083c4dfe9Sjg new_devid_size = ddi_devid_sizeof(devid);
55183c4dfe9Sjg new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
55283c4dfe9Sjg (void) bcopy(devid, new_devid, new_devid_size);
55383c4dfe9Sjg
55483c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
55583c4dfe9Sjg
55683c4dfe9Sjg listp = nvf_list(dcfd_handle);
55783c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) {
55883c4dfe9Sjg if (strcmp(path, np->nvp_devpath) == 0) {
55983c4dfe9Sjg DEVID_DEBUG2((CE_CONT,
56083c4dfe9Sjg "register: %s path match\n", path));
56183c4dfe9Sjg if (np->nvp_devid == NULL) {
562a204de77Scth replace: np->nvp_devid = new_devid;
56383c4dfe9Sjg np->nvp_flags |=
56483c4dfe9Sjg NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
56583c4dfe9Sjg np->nvp_dip = dip;
56683c4dfe9Sjg if (!devid_cache_write_disable) {
56783c4dfe9Sjg nvf_mark_dirty(dcfd_handle);
56883c4dfe9Sjg is_dirty = 1;
56983c4dfe9Sjg }
57083c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
57183c4dfe9Sjg kmem_free(new_nvp, sizeof (nvp_devid_t));
57283c4dfe9Sjg kmem_free(path, pathlen);
57383c4dfe9Sjg goto exit;
57483c4dfe9Sjg }
57583c4dfe9Sjg if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
57683c4dfe9Sjg /* replace invalid devid */
57783c4dfe9Sjg free_devid = np->nvp_devid;
57883c4dfe9Sjg goto replace;
57983c4dfe9Sjg }
58083c4dfe9Sjg /*
58183c4dfe9Sjg * We're registering an already-cached path
58283c4dfe9Sjg * Does the device's devid match the cache?
58383c4dfe9Sjg */
58483c4dfe9Sjg if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
58583c4dfe9Sjg DEVID_DEBUG((CE_CONT, "devid register: "
58683c4dfe9Sjg "devid %s does not match\n", path));
58783c4dfe9Sjg /*
5886fe4f300SPavel Zakharov * We do not expect devids to change, log it.
5896fe4f300SPavel Zakharov */
5906fe4f300SPavel Zakharov char *devid_stored =
5916fe4f300SPavel Zakharov ddi_devid_str_encode(np->nvp_devid, NULL);
5926fe4f300SPavel Zakharov char *devid_new =
5936fe4f300SPavel Zakharov ddi_devid_str_encode(devid, NULL);
5946fe4f300SPavel Zakharov
5956fe4f300SPavel Zakharov cmn_err(CE_CONT, "devid register: devid for "
5966fe4f300SPavel Zakharov "%s does not match. stored: %s, new: %s.",
5976fe4f300SPavel Zakharov path, devid_stored, devid_new);
5986fe4f300SPavel Zakharov
5996fe4f300SPavel Zakharov ddi_devid_str_free(devid_stored);
6006fe4f300SPavel Zakharov ddi_devid_str_free(devid_new);
6016fe4f300SPavel Zakharov
6026fe4f300SPavel Zakharov /*
60383c4dfe9Sjg * Replace cached devid for this path
60483c4dfe9Sjg * with newly registered devid. A devid
60583c4dfe9Sjg * may map to multiple paths but one path
60683c4dfe9Sjg * should only map to one devid.
60783c4dfe9Sjg */
60883c4dfe9Sjg devid_nvp_unlink_and_free(dcfd_handle, np);
60983c4dfe9Sjg np = NULL;
61083c4dfe9Sjg break;
61183c4dfe9Sjg } else {
61283c4dfe9Sjg DEVID_DEBUG2((CE_CONT,
61383c4dfe9Sjg "devid register: %s devid match\n", path));
61483c4dfe9Sjg np->nvp_flags |=
61583c4dfe9Sjg NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
61683c4dfe9Sjg np->nvp_dip = dip;
61783c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
61883c4dfe9Sjg kmem_free(new_nvp, sizeof (nvp_devid_t));
61983c4dfe9Sjg kmem_free(path, pathlen);
62083c4dfe9Sjg kmem_free(new_devid, new_devid_size);
62183c4dfe9Sjg return (DDI_SUCCESS);
62283c4dfe9Sjg }
62383c4dfe9Sjg }
62483c4dfe9Sjg }
62583c4dfe9Sjg
62683c4dfe9Sjg /*
62783c4dfe9Sjg * Add newly registered devid to the cache
62883c4dfe9Sjg */
62983c4dfe9Sjg ASSERT(np == NULL);
63083c4dfe9Sjg
63183c4dfe9Sjg new_nvp->nvp_devpath = path;
63283c4dfe9Sjg new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
63383c4dfe9Sjg new_nvp->nvp_dip = dip;
63483c4dfe9Sjg new_nvp->nvp_devid = new_devid;
63583c4dfe9Sjg
63683c4dfe9Sjg if (!devid_cache_write_disable) {
63783c4dfe9Sjg is_dirty = 1;
63883c4dfe9Sjg nvf_mark_dirty(dcfd_handle);
63983c4dfe9Sjg }
64083c4dfe9Sjg list_insert_tail(nvf_list(dcfd_handle), new_nvp);
64183c4dfe9Sjg
64283c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
64383c4dfe9Sjg
64483c4dfe9Sjg exit:
64583c4dfe9Sjg if (free_devid)
64683c4dfe9Sjg kmem_free(free_devid, ddi_devid_sizeof(free_devid));
64783c4dfe9Sjg
64883c4dfe9Sjg if (is_dirty)
64983c4dfe9Sjg nvf_wake_daemon();
65083c4dfe9Sjg
65183c4dfe9Sjg return (DDI_SUCCESS);
65283c4dfe9Sjg }
65383c4dfe9Sjg
654392e836bSGavin Maltby int
e_devid_cache_register(dev_info_t * dip,ddi_devid_t devid)655392e836bSGavin Maltby e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
656392e836bSGavin Maltby {
657392e836bSGavin Maltby return (e_devid_cache_register_cmn(dip, devid, NULL));
658392e836bSGavin Maltby }
659392e836bSGavin Maltby
66083c4dfe9Sjg /*
66188acca04SGavin Maltby * Unregister a device's devid; the devinfo may hit on multiple entries
66288acca04SGavin Maltby * arising from both pHCI and vHCI paths.
 66388acca04SGavin Maltby  * Called as an instance detaches.
66488acca04SGavin Maltby * Invalidate the devid's devinfo reference.
66588acca04SGavin Maltby * Devid-path remains in the cache.
66683c4dfe9Sjg */
66788acca04SGavin Maltby
66883c4dfe9Sjg void
e_devid_cache_unregister(dev_info_t * dip)66983c4dfe9Sjg e_devid_cache_unregister(dev_info_t *dip)
67083c4dfe9Sjg {
67183c4dfe9Sjg nvp_devid_t *np;
67283c4dfe9Sjg list_t *listp;
67383c4dfe9Sjg
67483c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
67583c4dfe9Sjg
67683c4dfe9Sjg listp = nvf_list(dcfd_handle);
67783c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) {
67883c4dfe9Sjg if (np->nvp_devid == NULL)
67983c4dfe9Sjg continue;
68083c4dfe9Sjg if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
68183c4dfe9Sjg DEVID_LOG_UNREG((CE_CONT,
68283c4dfe9Sjg "unregister: %s\n", np->nvp_devpath));
68383c4dfe9Sjg np->nvp_flags &= ~NVP_DEVID_DIP;
68483c4dfe9Sjg np->nvp_dip = NULL;
68583c4dfe9Sjg }
68683c4dfe9Sjg }
68783c4dfe9Sjg
68883c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
68983c4dfe9Sjg }
69083c4dfe9Sjg
691392e836bSGavin Maltby int
e_devid_cache_pathinfo(mdi_pathinfo_t * pip,ddi_devid_t devid)692392e836bSGavin Maltby e_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid)
693392e836bSGavin Maltby {
694392e836bSGavin Maltby char *path = mdi_pi_pathname(pip);
695392e836bSGavin Maltby
696392e836bSGavin Maltby return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid,
697392e836bSGavin Maltby path));
698392e836bSGavin Maltby }
699392e836bSGavin Maltby
70083c4dfe9Sjg /*
70183c4dfe9Sjg * Purge devid cache of stale devids
70283c4dfe9Sjg */
70383c4dfe9Sjg void
devid_cache_cleanup(void)70483c4dfe9Sjg devid_cache_cleanup(void)
70583c4dfe9Sjg {
70683c4dfe9Sjg nvp_devid_t *np, *next;
70783c4dfe9Sjg list_t *listp;
70883c4dfe9Sjg int is_dirty = 0;
70983c4dfe9Sjg
71083c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
71183c4dfe9Sjg
71283c4dfe9Sjg listp = nvf_list(dcfd_handle);
71383c4dfe9Sjg for (np = list_head(listp); np; np = next) {
71483c4dfe9Sjg next = list_next(listp, np);
71583c4dfe9Sjg if (np->nvp_devid == NULL)
71683c4dfe9Sjg continue;
71783c4dfe9Sjg if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
71883c4dfe9Sjg DEVID_LOG_REMOVE((CE_CONT,
71983c4dfe9Sjg "cleanup: %s\n", np->nvp_devpath));
72083c4dfe9Sjg if (!devid_cache_write_disable) {
72183c4dfe9Sjg nvf_mark_dirty(dcfd_handle);
72283c4dfe9Sjg is_dirty = 0;
72383c4dfe9Sjg }
72483c4dfe9Sjg devid_nvp_unlink_and_free(dcfd_handle, np);
72583c4dfe9Sjg }
72683c4dfe9Sjg }
72783c4dfe9Sjg
72883c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
72983c4dfe9Sjg
73083c4dfe9Sjg if (is_dirty)
73183c4dfe9Sjg nvf_wake_daemon();
73283c4dfe9Sjg }
73383c4dfe9Sjg
73483c4dfe9Sjg
73583c4dfe9Sjg /*
73683c4dfe9Sjg * Build a list of dev_t's for a device/devid
73783c4dfe9Sjg *
73883c4dfe9Sjg * The effect of this function is cumulative, adding dev_t's
73983c4dfe9Sjg * for the device to the list of all dev_t's for a given
74083c4dfe9Sjg * devid.
74183c4dfe9Sjg */
74283c4dfe9Sjg static void
e_devid_minor_to_devlist(dev_info_t * dip,const char * minor_name,int ndevts_alloced,int * devtcntp,dev_t * devtsp)74383c4dfe9Sjg e_devid_minor_to_devlist(
74483c4dfe9Sjg dev_info_t *dip,
745*5f61829aSRobert Mustacchi const char *minor_name,
74683c4dfe9Sjg int ndevts_alloced,
74783c4dfe9Sjg int *devtcntp,
74883c4dfe9Sjg dev_t *devtsp)
74983c4dfe9Sjg {
75083c4dfe9Sjg struct ddi_minor_data *dmdp;
75183c4dfe9Sjg int minor_all = 0;
75283c4dfe9Sjg int ndevts = *devtcntp;
75383c4dfe9Sjg
75483c4dfe9Sjg ASSERT(i_ddi_devi_attached(dip));
75583c4dfe9Sjg
75683c4dfe9Sjg /* are we looking for a set of minor nodes? */
75783c4dfe9Sjg if ((minor_name == DEVID_MINOR_NAME_ALL) ||
75883c4dfe9Sjg (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
75983c4dfe9Sjg (minor_name == DEVID_MINOR_NAME_ALL_BLK))
76083c4dfe9Sjg minor_all = 1;
76183c4dfe9Sjg
76283c4dfe9Sjg /* Find matching minor names */
7633fe80ca4SDan Cross ndi_devi_enter(dip);
76483c4dfe9Sjg for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {
76583c4dfe9Sjg
76683c4dfe9Sjg /* Skip non-minors, and non matching minor names */
76783c4dfe9Sjg if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
76883c4dfe9Sjg strcmp(dmdp->ddm_name, minor_name)))
76983c4dfe9Sjg continue;
77083c4dfe9Sjg
77183c4dfe9Sjg /* filter out minor_all mismatches */
77283c4dfe9Sjg if (minor_all &&
77383c4dfe9Sjg (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
77483c4dfe9Sjg (dmdp->ddm_spec_type != S_IFCHR)) ||
77583c4dfe9Sjg ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
77683c4dfe9Sjg (dmdp->ddm_spec_type != S_IFBLK))))
77783c4dfe9Sjg continue;
77883c4dfe9Sjg
77983c4dfe9Sjg if (ndevts < ndevts_alloced)
78083c4dfe9Sjg devtsp[ndevts] = dmdp->ddm_dev;
78183c4dfe9Sjg ndevts++;
78283c4dfe9Sjg }
7833fe80ca4SDan Cross ndi_devi_exit(dip);
78483c4dfe9Sjg
78583c4dfe9Sjg *devtcntp = ndevts;
78683c4dfe9Sjg }
78783c4dfe9Sjg
78883c4dfe9Sjg /*
78983c4dfe9Sjg * Search for cached entries matching a devid
79083c4dfe9Sjg * Return two lists:
79183c4dfe9Sjg * a list of dev_info nodes, for those devices in the attached state
79283c4dfe9Sjg * a list of pathnames whose instances registered the given devid
79383c4dfe9Sjg * If the lists passed in are not sufficient to return the matching
79483c4dfe9Sjg * references, return the size of lists required.
79583c4dfe9Sjg * The dev_info nodes are returned with a hold that the caller must release.
79683c4dfe9Sjg */
79783c4dfe9Sjg static int
e_devid_cache_devi_path_lists(ddi_devid_t devid,int retmax,int * retndevis,dev_info_t ** retdevis,int * retnpaths,char ** retpaths)79883c4dfe9Sjg e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
79983c4dfe9Sjg int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
80083c4dfe9Sjg {
80183c4dfe9Sjg nvp_devid_t *np;
80283c4dfe9Sjg int ndevis, npaths;
80383c4dfe9Sjg dev_info_t *dip, *pdip;
80483c4dfe9Sjg int maxdevis = 0;
80583c4dfe9Sjg int maxpaths = 0;
80683c4dfe9Sjg list_t *listp;
80783c4dfe9Sjg
80883c4dfe9Sjg ndevis = 0;
80983c4dfe9Sjg npaths = 0;
81083c4dfe9Sjg listp = nvf_list(dcfd_handle);
81183c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) {
81283c4dfe9Sjg if (np->nvp_devid == NULL)
81383c4dfe9Sjg continue;
81483c4dfe9Sjg if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
81583c4dfe9Sjg DEVIDERR((CE_CONT,
81683c4dfe9Sjg "find: invalid devid %s\n",
81783c4dfe9Sjg np->nvp_devpath));
81883c4dfe9Sjg continue;
81983c4dfe9Sjg }
82083c4dfe9Sjg if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
82183c4dfe9Sjg DEVID_DEBUG2((CE_CONT,
82283c4dfe9Sjg "find: devid match: %s 0x%x\n",
82383c4dfe9Sjg np->nvp_devpath, np->nvp_flags));
82483c4dfe9Sjg DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
82583c4dfe9Sjg DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));
82683c4dfe9Sjg
82783c4dfe9Sjg /*
82883c4dfe9Sjg * Check if we have a cached devinfo reference for this
82983c4dfe9Sjg * devid. Place a hold on it to prevent detach
83083c4dfe9Sjg * Otherwise, use the path instead.
83183c4dfe9Sjg * Note: returns with a hold on each dev_info
83283c4dfe9Sjg * node in the list.
83383c4dfe9Sjg */
83483c4dfe9Sjg dip = NULL;
83583c4dfe9Sjg if (np->nvp_flags & NVP_DEVID_DIP) {
83683c4dfe9Sjg pdip = ddi_get_parent(np->nvp_dip);
8373fe80ca4SDan Cross if (ndi_devi_tryenter(pdip)) {
83883c4dfe9Sjg dip = np->nvp_dip;
83983c4dfe9Sjg ndi_hold_devi(dip);
8403fe80ca4SDan Cross ndi_devi_exit(pdip);
84183c4dfe9Sjg ASSERT(!DEVI_IS_ATTACHING(dip));
84283c4dfe9Sjg ASSERT(!DEVI_IS_DETACHING(dip));
84383c4dfe9Sjg } else {
84483c4dfe9Sjg DEVID_LOG_DETACH((CE_CONT,
84583c4dfe9Sjg "may be detaching: %s\n",
84683c4dfe9Sjg np->nvp_devpath));
84783c4dfe9Sjg }
84883c4dfe9Sjg }
84983c4dfe9Sjg
85083c4dfe9Sjg if (dip) {
85183c4dfe9Sjg if (ndevis < retmax) {
85283c4dfe9Sjg retdevis[ndevis++] = dip;
85383c4dfe9Sjg } else {
85483c4dfe9Sjg ndi_rele_devi(dip);
85583c4dfe9Sjg }
85683c4dfe9Sjg maxdevis++;
85783c4dfe9Sjg } else {
85883c4dfe9Sjg if (npaths < retmax)
85983c4dfe9Sjg retpaths[npaths++] = np->nvp_devpath;
86083c4dfe9Sjg maxpaths++;
86183c4dfe9Sjg }
86283c4dfe9Sjg }
86383c4dfe9Sjg }
86483c4dfe9Sjg
86583c4dfe9Sjg *retndevis = ndevis;
86683c4dfe9Sjg *retnpaths = npaths;
86783c4dfe9Sjg return (maxdevis > maxpaths ? maxdevis : maxpaths);
86883c4dfe9Sjg }
86983c4dfe9Sjg
87083c4dfe9Sjg
87183c4dfe9Sjg /*
87283c4dfe9Sjg * Search the devid cache, returning dev_t list for all
87383c4dfe9Sjg * device paths mapping to the device identified by the
87483c4dfe9Sjg * given devid.
87583c4dfe9Sjg *
87683c4dfe9Sjg * Primary interface used by ddi_lyr_devid_to_devlist()
87783c4dfe9Sjg */
87883c4dfe9Sjg int
e_devid_cache_to_devt_list(ddi_devid_t devid,const char * minor_name,int * retndevts,dev_t ** retdevts)879*5f61829aSRobert Mustacchi e_devid_cache_to_devt_list(ddi_devid_t devid, const char *minor_name,
88083c4dfe9Sjg int *retndevts, dev_t **retdevts)
88183c4dfe9Sjg {
88283c4dfe9Sjg char *path, **paths;
88383c4dfe9Sjg int i, j, n;
88483c4dfe9Sjg dev_t *devts, *udevts;
88583c4dfe9Sjg dev_t tdevt;
88683c4dfe9Sjg int ndevts, undevts, ndevts_alloced;
88783c4dfe9Sjg dev_info_t *devi, **devis;
88883c4dfe9Sjg int ndevis, npaths, nalloced;
88983c4dfe9Sjg ddi_devid_t match_devid;
89083c4dfe9Sjg
89183c4dfe9Sjg DEVID_LOG_FIND(("find", devid, NULL));
89283c4dfe9Sjg
89383c4dfe9Sjg ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
89483c4dfe9Sjg if (ddi_devid_valid(devid) != DDI_SUCCESS) {
89583c4dfe9Sjg DEVID_LOG_ERR(("invalid devid", devid, NULL));
89683c4dfe9Sjg return (DDI_FAILURE);
89783c4dfe9Sjg }
89883c4dfe9Sjg
89983c4dfe9Sjg nalloced = 128;
90083c4dfe9Sjg
90183c4dfe9Sjg for (;;) {
90283c4dfe9Sjg paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
90383c4dfe9Sjg devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);
90483c4dfe9Sjg
90583c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_READER);
90683c4dfe9Sjg n = e_devid_cache_devi_path_lists(devid, nalloced,
90783c4dfe9Sjg &ndevis, devis, &npaths, paths);
90883c4dfe9Sjg if (n <= nalloced)
90983c4dfe9Sjg break;
91083c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
91183c4dfe9Sjg for (i = 0; i < ndevis; i++)
91283c4dfe9Sjg ndi_rele_devi(devis[i]);
91383c4dfe9Sjg kmem_free(paths, nalloced * sizeof (char *));
91483c4dfe9Sjg kmem_free(devis, nalloced * sizeof (dev_info_t *));
91583c4dfe9Sjg nalloced = n + 128;
91683c4dfe9Sjg }
91783c4dfe9Sjg
91883c4dfe9Sjg for (i = 0; i < npaths; i++) {
91983c4dfe9Sjg path = i_ddi_strdup(paths[i], KM_SLEEP);
92083c4dfe9Sjg paths[i] = path;
92183c4dfe9Sjg }
92283c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle));
92383c4dfe9Sjg
92483c4dfe9Sjg if (ndevis == 0 && npaths == 0) {
92583c4dfe9Sjg DEVID_LOG_ERR(("no devid found", devid, NULL));
92683c4dfe9Sjg kmem_free(paths, nalloced * sizeof (char *));
92783c4dfe9Sjg kmem_free(devis, nalloced * sizeof (dev_info_t *));
92883c4dfe9Sjg return (DDI_FAILURE);
92983c4dfe9Sjg }
93083c4dfe9Sjg
93183c4dfe9Sjg ndevts_alloced = 128;
93283c4dfe9Sjg restart:
93383c4dfe9Sjg ndevts = 0;
93483c4dfe9Sjg devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
93583c4dfe9Sjg for (i = 0; i < ndevis; i++) {
93683c4dfe9Sjg ASSERT(!DEVI_IS_ATTACHING(devis[i]));
93783c4dfe9Sjg ASSERT(!DEVI_IS_DETACHING(devis[i]));
93883c4dfe9Sjg e_devid_minor_to_devlist(devis[i], minor_name,
93983c4dfe9Sjg ndevts_alloced, &ndevts, devts);
94083c4dfe9Sjg if (ndevts > ndevts_alloced) {
94183c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t));
94283c4dfe9Sjg ndevts_alloced += 128;
94383c4dfe9Sjg goto restart;
94483c4dfe9Sjg }
94583c4dfe9Sjg }
94683c4dfe9Sjg for (i = 0; i < npaths; i++) {
94783c4dfe9Sjg DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
94883c4dfe9Sjg devi = e_ddi_hold_devi_by_path(paths[i], 0);
94983c4dfe9Sjg if (devi == NULL) {
95083c4dfe9Sjg DEVID_LOG_STALE(("stale device reference",
95183c4dfe9Sjg devid, paths[i]));
95283c4dfe9Sjg continue;
95383c4dfe9Sjg }
95483c4dfe9Sjg /*
95583c4dfe9Sjg * Verify the newly attached device registered a matching devid
95683c4dfe9Sjg */
95783c4dfe9Sjg if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
95883c4dfe9Sjg &match_devid) != DDI_SUCCESS) {
95983c4dfe9Sjg DEVIDERR((CE_CONT,
96083c4dfe9Sjg "%s: no devid registered on attach\n",
96183c4dfe9Sjg paths[i]));
96283c4dfe9Sjg ddi_release_devi(devi);
96383c4dfe9Sjg continue;
96483c4dfe9Sjg }
96583c4dfe9Sjg
96683c4dfe9Sjg if (ddi_devid_compare(devid, match_devid) != 0) {
96783c4dfe9Sjg DEVID_LOG_STALE(("new devid registered",
96883c4dfe9Sjg devid, paths[i]));
96983c4dfe9Sjg ddi_release_devi(devi);
97083c4dfe9Sjg ddi_devid_free(match_devid);
97183c4dfe9Sjg continue;
97283c4dfe9Sjg }
97383c4dfe9Sjg ddi_devid_free(match_devid);
97483c4dfe9Sjg
97583c4dfe9Sjg e_devid_minor_to_devlist(devi, minor_name,
97683c4dfe9Sjg ndevts_alloced, &ndevts, devts);
97783c4dfe9Sjg ddi_release_devi(devi);
97883c4dfe9Sjg if (ndevts > ndevts_alloced) {
97983c4dfe9Sjg kmem_free(devts,
98083c4dfe9Sjg ndevts_alloced * sizeof (dev_t));
98183c4dfe9Sjg ndevts_alloced += 128;
98283c4dfe9Sjg goto restart;
98383c4dfe9Sjg }
98483c4dfe9Sjg }
98583c4dfe9Sjg
98683c4dfe9Sjg /* drop hold from e_devid_cache_devi_path_lists */
98783c4dfe9Sjg for (i = 0; i < ndevis; i++) {
98883c4dfe9Sjg ndi_rele_devi(devis[i]);
98983c4dfe9Sjg }
99083c4dfe9Sjg for (i = 0; i < npaths; i++) {
99183c4dfe9Sjg kmem_free(paths[i], strlen(paths[i]) + 1);
99283c4dfe9Sjg }
99383c4dfe9Sjg kmem_free(paths, nalloced * sizeof (char *));
99483c4dfe9Sjg kmem_free(devis, nalloced * sizeof (dev_info_t *));
99583c4dfe9Sjg
99683c4dfe9Sjg if (ndevts == 0) {
99783c4dfe9Sjg DEVID_LOG_ERR(("no devid found", devid, NULL));
99883c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t));
99983c4dfe9Sjg return (DDI_FAILURE);
100083c4dfe9Sjg }
100183c4dfe9Sjg
100283c4dfe9Sjg /*
100383c4dfe9Sjg * Build the final list of sorted dev_t's with duplicates collapsed so
100483c4dfe9Sjg * returned results are consistent. This prevents implementation
100583c4dfe9Sjg * artifacts from causing unnecessary changes in SVM namespace.
100683c4dfe9Sjg */
100783c4dfe9Sjg /* bubble sort */
100883c4dfe9Sjg for (i = 0; i < (ndevts - 1); i++) {
100983c4dfe9Sjg for (j = 0; j < ((ndevts - 1) - i); j++) {
101083c4dfe9Sjg if (devts[j + 1] < devts[j]) {
101183c4dfe9Sjg tdevt = devts[j];
101283c4dfe9Sjg devts[j] = devts[j + 1];
101383c4dfe9Sjg devts[j + 1] = tdevt;
101483c4dfe9Sjg }
101583c4dfe9Sjg }
101683c4dfe9Sjg }
101783c4dfe9Sjg
101883c4dfe9Sjg /* determine number of unique values */
101983c4dfe9Sjg for (undevts = ndevts, i = 1; i < ndevts; i++) {
102083c4dfe9Sjg if (devts[i - 1] == devts[i])
102183c4dfe9Sjg undevts--;
102283c4dfe9Sjg }
102383c4dfe9Sjg
102483c4dfe9Sjg /* allocate unique */
102583c4dfe9Sjg udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);
102683c4dfe9Sjg
102783c4dfe9Sjg /* copy unique */
102883c4dfe9Sjg udevts[0] = devts[0];
102983c4dfe9Sjg for (i = 1, j = 1; i < ndevts; i++) {
103083c4dfe9Sjg if (devts[i - 1] != devts[i])
103183c4dfe9Sjg udevts[j++] = devts[i];
103283c4dfe9Sjg }
103383c4dfe9Sjg ASSERT(j == undevts);
103483c4dfe9Sjg
103583c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t));
103683c4dfe9Sjg
103783c4dfe9Sjg *retndevts = undevts;
103883c4dfe9Sjg *retdevts = udevts;
103983c4dfe9Sjg
104083c4dfe9Sjg return (DDI_SUCCESS);
104183c4dfe9Sjg }
104283c4dfe9Sjg
104383c4dfe9Sjg void
e_devid_cache_free_devt_list(int ndevts,dev_t * devt_list)104483c4dfe9Sjg e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
104583c4dfe9Sjg {
104683c4dfe9Sjg kmem_free(devt_list, ndevts * sizeof (dev_t *));
104783c4dfe9Sjg }
104883c4dfe9Sjg
1049392e836bSGavin Maltby /*
1050392e836bSGavin Maltby * If given a full path and NULL ua, search for a cache entry
1051392e836bSGavin Maltby * whose path matches the full path. On a cache hit duplicate the
1052392e836bSGavin Maltby * devid of the matched entry into the given devid (caller
1053392e836bSGavin Maltby * must free); nodenamebuf is not touched for this usage.
1054392e836bSGavin Maltby *
1055392e836bSGavin Maltby * Given a path and a non-NULL unit address, search the cache for any entry
1056392e836bSGavin Maltby * matching "<path>/%@<unit-address>" where '%' is a wildcard meaning
1057392e836bSGavin Maltby * any node name. The path should not end a '/'. On a cache hit
1058392e836bSGavin Maltby * duplicate the devid as before (caller must free) and copy into
1059392e836bSGavin Maltby * the caller-provided nodenamebuf (if not NULL) the nodename of the
1060392e836bSGavin Maltby * matched entry.
1061392e836bSGavin Maltby *
1062392e836bSGavin Maltby * We must not make use of nvp_dip since that may be NULL for cached
1063392e836bSGavin Maltby * entries that are not present in the current tree.
1064392e836bSGavin Maltby */
1065392e836bSGavin Maltby int
e_devid_cache_path_to_devid(char * path,char * ua,char * nodenamebuf,ddi_devid_t * devidp)1066392e836bSGavin Maltby e_devid_cache_path_to_devid(char *path, char *ua,
1067392e836bSGavin Maltby char *nodenamebuf, ddi_devid_t *devidp)
1068392e836bSGavin Maltby {
1069392e836bSGavin Maltby size_t pathlen, ualen;
1070392e836bSGavin Maltby int rv = DDI_FAILURE;
1071392e836bSGavin Maltby nvp_devid_t *np;
1072392e836bSGavin Maltby list_t *listp;
1073392e836bSGavin Maltby char *cand;
1074392e836bSGavin Maltby
1075392e836bSGavin Maltby if (path == NULL || *path == '\0' || (ua && *ua == '\0') ||
1076392e836bSGavin Maltby devidp == NULL)
1077392e836bSGavin Maltby return (DDI_FAILURE);
1078392e836bSGavin Maltby
1079392e836bSGavin Maltby *devidp = NULL;
1080392e836bSGavin Maltby
1081c6f039c7SToomas Soome pathlen = 0;
1082c6f039c7SToomas Soome ualen = 0;
1083392e836bSGavin Maltby if (ua) {
1084392e836bSGavin Maltby pathlen = strlen(path);
1085392e836bSGavin Maltby ualen = strlen(ua);
1086392e836bSGavin Maltby }
1087392e836bSGavin Maltby
1088392e836bSGavin Maltby rw_enter(nvf_lock(dcfd_handle), RW_READER);
1089392e836bSGavin Maltby
1090392e836bSGavin Maltby listp = nvf_list(dcfd_handle);
1091392e836bSGavin Maltby for (np = list_head(listp); np; np = list_next(listp, np)) {
1092392e836bSGavin Maltby size_t nodelen, candlen, n;
1093392e836bSGavin Maltby ddi_devid_t devid_dup;
1094392e836bSGavin Maltby char *uasep, *node;
1095392e836bSGavin Maltby
1096392e836bSGavin Maltby if (np->nvp_devid == NULL)
1097392e836bSGavin Maltby continue;
1098392e836bSGavin Maltby
1099392e836bSGavin Maltby if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
1100392e836bSGavin Maltby DEVIDERR((CE_CONT,
1101392e836bSGavin Maltby "pathsearch: invalid devid %s\n",
1102392e836bSGavin Maltby np->nvp_devpath));
1103392e836bSGavin Maltby continue;
1104392e836bSGavin Maltby }
1105392e836bSGavin Maltby
1106392e836bSGavin Maltby cand = np->nvp_devpath; /* candidate path */
1107392e836bSGavin Maltby
1108392e836bSGavin Maltby /* If a full pathname was provided the compare is easy */
1109392e836bSGavin Maltby if (ua == NULL) {
1110392e836bSGavin Maltby if (strcmp(cand, path) == 0)
1111392e836bSGavin Maltby goto match;
1112392e836bSGavin Maltby else
1113392e836bSGavin Maltby continue;
1114392e836bSGavin Maltby }
1115392e836bSGavin Maltby
1116392e836bSGavin Maltby /*
1117392e836bSGavin Maltby * The compare for initial path plus ua and unknown nodename
1118392e836bSGavin Maltby * is trickier.
1119392e836bSGavin Maltby *
1120392e836bSGavin Maltby * Does the initial path component match 'path'?
1121392e836bSGavin Maltby */
1122392e836bSGavin Maltby if (strncmp(path, cand, pathlen) != 0)
1123392e836bSGavin Maltby continue;
1124392e836bSGavin Maltby
1125392e836bSGavin Maltby candlen = strlen(cand);
1126392e836bSGavin Maltby
1127392e836bSGavin Maltby /*
1128392e836bSGavin Maltby * The next character must be a '/' and there must be no
1129392e836bSGavin Maltby * further '/' thereafter. Begin by checking that the
1130392e836bSGavin Maltby * candidate is long enough to include at mininum a
1131392e836bSGavin Maltby * "/<nodename>@<ua>" after the initial portion already
1132392e836bSGavin Maltby * matched assuming a nodename length of 1.
1133392e836bSGavin Maltby */
1134392e836bSGavin Maltby if (candlen < pathlen + 1 + 1 + 1 + ualen ||
1135392e836bSGavin Maltby cand[pathlen] != '/' ||
1136392e836bSGavin Maltby strchr(cand + pathlen + 1, '/') != NULL)
1137392e836bSGavin Maltby continue;
1138392e836bSGavin Maltby
1139392e836bSGavin Maltby node = cand + pathlen + 1; /* <node>@<ua> string */
1140392e836bSGavin Maltby
1141392e836bSGavin Maltby /*
1142392e836bSGavin Maltby * Find the '@' before the unit address. Check for
1143392e836bSGavin Maltby * unit address match.
1144392e836bSGavin Maltby */
1145392e836bSGavin Maltby if ((uasep = strchr(node, '@')) == NULL)
1146392e836bSGavin Maltby continue;
1147392e836bSGavin Maltby
1148392e836bSGavin Maltby /*
1149392e836bSGavin Maltby * Check we still have enough length and that ua matches
1150392e836bSGavin Maltby */
1151392e836bSGavin Maltby nodelen = (uintptr_t)uasep - (uintptr_t)node;
1152392e836bSGavin Maltby if (candlen < pathlen + 1 + nodelen + 1 + ualen ||
1153392e836bSGavin Maltby strncmp(ua, uasep + 1, ualen) != 0)
1154392e836bSGavin Maltby continue;
1155392e836bSGavin Maltby match:
1156392e836bSGavin Maltby n = ddi_devid_sizeof(np->nvp_devid);
1157392e836bSGavin Maltby devid_dup = kmem_alloc(n, KM_SLEEP); /* caller must free */
1158392e836bSGavin Maltby (void) bcopy(np->nvp_devid, devid_dup, n);
1159392e836bSGavin Maltby *devidp = devid_dup;
1160392e836bSGavin Maltby
1161392e836bSGavin Maltby if (ua && nodenamebuf) {
1162392e836bSGavin Maltby (void) strncpy(nodenamebuf, node, nodelen);
1163392e836bSGavin Maltby nodenamebuf[nodelen] = '\0';
1164392e836bSGavin Maltby }
1165392e836bSGavin Maltby
1166392e836bSGavin Maltby rv = DDI_SUCCESS;
1167392e836bSGavin Maltby break;
1168392e836bSGavin Maltby }
1169392e836bSGavin Maltby
1170392e836bSGavin Maltby rw_exit(nvf_lock(dcfd_handle));
1171392e836bSGavin Maltby
1172392e836bSGavin Maltby return (rv);
1173392e836bSGavin Maltby }
1174392e836bSGavin Maltby
117583c4dfe9Sjg #ifdef DEBUG
117683c4dfe9Sjg static void
devid_log(char * fmt,ddi_devid_t devid,char * path)117783c4dfe9Sjg devid_log(char *fmt, ddi_devid_t devid, char *path)
117883c4dfe9Sjg {
117983c4dfe9Sjg char *devidstr = ddi_devid_str_encode(devid, NULL);
118083c4dfe9Sjg if (path) {
118183c4dfe9Sjg cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
118283c4dfe9Sjg } else {
118383c4dfe9Sjg cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
118483c4dfe9Sjg }
118583c4dfe9Sjg ddi_devid_str_free(devidstr);
118683c4dfe9Sjg }
118783c4dfe9Sjg #endif /* DEBUG */
1188