1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * Copyright 2023 Oxide Computer Company
28 */
29
30 /*
31 * Driver for accessing the kernel devinfo tree.
32 */
33 #include <sys/types.h>
34 #include <sys/pathname.h>
35 #include <sys/debug.h>
36 #include <sys/autoconf.h>
37 #include <sys/vmsystm.h>
38 #include <sys/conf.h>
39 #include <sys/file.h>
40 #include <sys/kmem.h>
41 #include <sys/modctl.h>
42 #include <sys/stat.h>
43 #include <sys/ddi.h>
44 #include <sys/sunddi.h>
45 #include <sys/sunldi_impl.h>
46 #include <sys/sunndi.h>
47 #include <sys/esunddi.h>
48 #include <sys/sunmdi.h>
49 #include <sys/ddi_impldefs.h>
50 #include <sys/ndi_impldefs.h>
51 #include <sys/mdi_impldefs.h>
52 #include <sys/devinfo_impl.h>
53 #include <sys/thread.h>
54 #include <sys/modhash.h>
55 #include <sys/bitmap.h>
56 #include <util/qsort.h>
57 #include <sys/disp.h>
58 #include <sys/kobj.h>
59 #include <sys/crc32.h>
60 #include <sys/ddi_hp.h>
61 #include <sys/ddi_hp_impl.h>
62 #include <sys/sysmacros.h>
63 #include <sys/list.h>
64
65
66 #ifdef DEBUG
67 static int di_debug;
68 #define dcmn_err(args) if (di_debug >= 1) cmn_err args
69 #define dcmn_err2(args) if (di_debug >= 2) cmn_err args
70 #define dcmn_err3(args) if (di_debug >= 3) cmn_err args
71 #else
72 #define dcmn_err(args) /* nothing */
73 #define dcmn_err2(args) /* nothing */
74 #define dcmn_err3(args) /* nothing */
75 #endif
76
77 /*
78 * We partition the space of devinfo minor nodes equally between the full and
79 * unprivileged versions of the driver. The even-numbered minor nodes are the
80 * full version, while the odd-numbered ones are the read-only version.
81 */
82 static int di_max_opens = 32;
83
84 static int di_prop_dyn = 1; /* enable dynamic property support */
85
86 #define DI_FULL_PARENT 0
87 #define DI_READONLY_PARENT 1
88 #define DI_NODE_SPECIES 2
89 #define DI_UNPRIVILEGED_NODE(x) (((x) % 2) != 0)
90
91 #define IOC_IDLE 0 /* snapshot ioctl states */
92 #define IOC_SNAP 1 /* snapshot in progress */
93 #define IOC_DONE 2 /* snapshot done, but not copied out */
94 #define IOC_COPY 3 /* copyout in progress */
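
/*
 * The typical life cycle, driven by di_setstate(), is:
 *
 *	IOC_IDLE -> IOC_SNAP	snapshot ioctl received
 *	IOC_SNAP -> IOC_DONE	snapshot built and size recorded
 *	IOC_DONE -> IOC_COPY	DINFOUSRLD copyout in progress
 *	IOC_COPY -> IOC_IDLE	copyout finished, memory freed
 *
 * Any failure along the way drops the state back to IOC_IDLE.
 */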
95
96 /*
97 * Keep maximum alignment so we can move the snapshot across platforms.
98 *
99 * NOTE: Most callers should rely on the di_checkmem return value
100 * being aligned, and reestablish *off_p with the aligned value,
101 * instead of trying to align the size of their allocations: this
102 * approach minimizes memory use.
103 */
104 #define DI_ALIGN(addr) ((addr + 7l) & ~7l)
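
/*
 * For example, DI_ALIGN(0x39) yields 0x40 while DI_ALIGN(0x40) is
 * unchanged: offsets are rounded up to the next 8-byte boundary.
 */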
105
106 /*
107 * To avoid wasting memory, make a linked list of memory chunks.
108 * Size of each chunk is buf_size.
109 */
110 struct di_mem {
111 struct di_mem *next; /* link to next chunk */
112 char *buf; /* contiguous kernel memory */
113 size_t buf_size; /* size of buf in bytes */
114 devmap_cookie_t cook; /* cookie from ddi_umem_alloc */
115 };
116
117 /*
118 * This is a stack for walking the tree without using recursion.
119 * When the devinfo tree height is above some small size, one
120 * gets watchdog resets on sun4m.
121 */
122 struct di_stack {
123 void *offset[MAX_TREE_DEPTH];
124 struct dev_info *dip[MAX_TREE_DEPTH];
125 int depth; /* depth of current node to be copied */
126 };
127
128 #define TOP_OFFSET(stack) \
129 ((di_off_t *)(stack)->offset[(stack)->depth - 1])
130 #define TOP_NODE(stack) \
131 ((stack)->dip[(stack)->depth - 1])
132 #define PARENT_OFFSET(stack) \
133 ((di_off_t *)(stack)->offset[(stack)->depth - 2])
134 #define EMPTY_STACK(stack) ((stack)->depth == 0)
135 #define POP_STACK(stack) { \
136 ndi_devi_exit((dev_info_t *)TOP_NODE(stack)); \
137 ((stack)->depth--); \
138 }
139 #define PUSH_STACK(stack, node, off_p) { \
140 ASSERT(node != NULL); \
141 ndi_devi_enter((dev_info_t *)node); \
142 (stack)->dip[(stack)->depth] = (node); \
143 (stack)->offset[(stack)->depth] = (void *)(off_p); \
144 ((stack)->depth)++; \
145 }
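
/*
 * A minimal sketch of how these macros cooperate (see di_copytree()
 * and di_copynode() for the real walk):
 *
 *	PUSH_STACK(dsp, root, off_p);
 *	while (!EMPTY_STACK(dsp))
 *		off = di_copynode(TOP_NODE(dsp), dsp, st);
 *
 * PUSH_STACK() takes ndi_devi_enter() on the node and POP_STACK()
 * releases it, so every node on the stack stays busy-held while its
 * subtree is being copied.
 */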
146
147 #define DI_ALL_PTR(s) DI_ALL(di_mem_addr((s), 0))
148
149 /*
150 * With devfs, the device tree has no global locks. The device tree is
151 * dynamic and dips may come and go if they are not locked locally. Under
152 * these conditions, pointers are no longer reliable as unique IDs.
153 * Specifically, these pointers cannot be used as keys for hash tables
154 * as the same devinfo structure may be freed in one part of the tree only
155 * to be allocated as the structure for a different device in another
156 * part of the tree. This can happen if DR and the snapshot are
157 * happening concurrently.
158 * The following data structures act as keys for devinfo nodes and
159 * pathinfo nodes.
160 */
161
162 enum di_ktype {
163 DI_DKEY = 1,
164 DI_PKEY = 2
165 };
166
167 struct di_dkey {
168 dev_info_t *dk_dip;
169 major_t dk_major;
170 int dk_inst;
171 pnode_t dk_nodeid;
172 };
173
174 struct di_pkey {
175 mdi_pathinfo_t *pk_pip;
176 char *pk_path_addr;
177 dev_info_t *pk_client;
178 dev_info_t *pk_phci;
179 };
180
181 struct di_key {
182 enum di_ktype k_type;
183 union {
184 struct di_dkey dkey;
185 struct di_pkey pkey;
186 } k_u;
187 };
188
189
190 struct i_lnode;
191
192 typedef struct i_link {
193 /*
194 * If a di_link struct representing this i_link struct makes it
195 * into the snapshot, then self will point to the offset of
196 * the di_link struct in the snapshot
197 */
198 di_off_t self;
199
200 int spec_type; /* block or char access type */
201 struct i_lnode *src_lnode; /* src i_lnode */
202 struct i_lnode *tgt_lnode; /* tgt i_lnode */
203 struct i_link *src_link_next; /* next src i_link w/ same i_lnode */
204 struct i_link *tgt_link_next; /* next tgt i_link w/ same i_lnode */
205 } i_link_t;
206
207 typedef struct i_lnode {
208 /*
209 * If a di_lnode struct representing this i_lnode struct makes it
210 * into the snapshot, then self will point to the offset of
211 * the di_lnode struct in the snapshot
212 */
213 di_off_t self;
214
215 /*
216 * used for hashing and comparing i_lnodes
217 */
218 int modid;
219
220 /*
221 * public information describing a link endpoint
222 */
223 struct di_node *di_node; /* di_node in snapshot */
224 dev_t devt; /* devt */
225
226 /*
227 * i_link ptr to links coming into this i_lnode node
228 * (this i_lnode is the target of these i_links)
229 */
230 i_link_t *link_in;
231
232 /*
233 * i_link ptr to links going out of this i_lnode node
234 * (this i_lnode is the source of these i_links)
235 */
236 i_link_t *link_out;
237 } i_lnode_t;
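
/*
 * Together, i_lnode_t and i_link_t form an in-core graph of layered
 * driver usage: each i_lnode is a <module, devt> endpoint and each
 * i_link is a directed edge from a consumer to the device it has
 * open. For example, a volume manager holding a disk device open
 * would be the src_lnode of an i_link whose tgt_lnode describes the
 * disk's devt.
 */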
238
239 typedef struct i_hp {
240 di_off_t hp_off; /* Offset of di_hp_t in snapshot */
241 dev_info_t *hp_child; /* Child devinfo node of the di_hp_t */
242 list_node_t hp_link; /* List linkage */
243 } i_hp_t;
244
245 /*
246 * Soft state associated with each instance of driver open.
247 */
248 static struct di_state {
249 di_off_t mem_size; /* total # bytes in memlist */
250 struct di_mem *memlist; /* head of memlist */
251 uint_t command; /* command from ioctl */
252 int di_iocstate; /* snapshot ioctl state */
253 mod_hash_t *reg_dip_hash;
254 mod_hash_t *reg_pip_hash;
255 int lnode_count;
256 int link_count;
257
258 mod_hash_t *lnode_hash;
259 mod_hash_t *link_hash;
260
261 list_t hp_list;
262 } **di_states;
263
264 static kmutex_t di_lock; /* serialize instance assignment */
265
266 typedef enum {
267 DI_QUIET = 0, /* DI_QUIET must always be 0 */
268 DI_ERR,
269 DI_INFO,
270 DI_TRACE,
271 DI_TRACE1,
272 DI_TRACE2
273 } di_cache_debug_t;
274
275 static uint_t di_chunk = 32; /* I/O chunk size in pages */
276
277 #define DI_CACHE_LOCK(c) (mutex_enter(&(c).cache_lock))
278 #define DI_CACHE_UNLOCK(c) (mutex_exit(&(c).cache_lock))
279 #define DI_CACHE_LOCKED(c) (mutex_owned(&(c).cache_lock))
280
281 /*
282 * Check that whole device tree is being configured as a pre-condition for
283 * cleaning up /etc/devices files.
284 */
285 #define DEVICES_FILES_CLEANABLE(st) \
286 (((st)->command & DINFOSUBTREE) && ((st)->command & DINFOFORCE) && \
287 strcmp(DI_ALL_PTR(st)->root_path, "/") == 0)
288
289 #define CACHE_DEBUG(args) \
290 { if (di_cache_debug != DI_QUIET) di_cache_print args; }
291
292 typedef struct phci_walk_arg {
293 di_off_t off;
294 struct di_state *st;
295 } phci_walk_arg_t;
296
297 static int di_open(dev_t *, int, int, cred_t *);
298 static int di_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
299 static int di_close(dev_t, int, int, cred_t *);
300 static int di_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
301 static int di_attach(dev_info_t *, ddi_attach_cmd_t);
302 static int di_detach(dev_info_t *, ddi_detach_cmd_t);
303
304 static di_off_t di_copyformat(di_off_t, struct di_state *, intptr_t, int);
305 static di_off_t di_snapshot_and_clean(struct di_state *);
306 static di_off_t di_copydevnm(di_off_t *, struct di_state *);
307 static di_off_t di_copytree(struct dev_info *, di_off_t *, struct di_state *);
308 static di_off_t di_copynode(struct dev_info *, struct di_stack *,
309 struct di_state *);
310 static di_off_t di_getmdata(struct ddi_minor_data *, di_off_t *, di_off_t,
311 struct di_state *);
312 static di_off_t di_getppdata(struct dev_info *, di_off_t *, struct di_state *);
313 static di_off_t di_getdpdata(struct dev_info *, di_off_t *, struct di_state *);
314 static di_off_t di_gethpdata(ddi_hp_cn_handle_t *, di_off_t *,
315 struct di_state *);
316 static di_off_t di_getprop(int, struct ddi_prop **, di_off_t *,
317 struct di_state *, struct dev_info *);
318 static void di_allocmem(struct di_state *, size_t);
319 static void di_freemem(struct di_state *);
320 static void di_copymem(struct di_state *st, caddr_t buf, size_t bufsiz);
321 static di_off_t di_checkmem(struct di_state *, di_off_t, size_t);
322 static void *di_mem_addr(struct di_state *, di_off_t);
323 static int di_setstate(struct di_state *, int);
324 static void di_register_dip(struct di_state *, dev_info_t *, di_off_t);
325 static void di_register_pip(struct di_state *, mdi_pathinfo_t *, di_off_t);
326 static di_off_t di_getpath_data(dev_info_t *, di_off_t *, di_off_t,
327 struct di_state *, int);
328 static di_off_t di_getlink_data(di_off_t, struct di_state *);
329 static int di_dip_find(struct di_state *st, dev_info_t *node, di_off_t *off_p);
330
331 static int cache_args_valid(struct di_state *st, int *error);
332 static int snapshot_is_cacheable(struct di_state *st);
333 static int di_cache_lookup(struct di_state *st);
334 static int di_cache_update(struct di_state *st);
335 static void di_cache_print(di_cache_debug_t msglevel, char *fmt, ...);
336 static int build_vhci_list(dev_info_t *vh_devinfo, void *arg);
337 static int build_phci_list(dev_info_t *ph_devinfo, void *arg);
338 static void di_hotplug_children(struct di_state *st);
339
340 extern int modrootloaded;
341 extern void mdi_walk_vhcis(int (*)(dev_info_t *, void *), void *);
342 extern void mdi_vhci_walk_phcis(dev_info_t *,
343 int (*)(dev_info_t *, void *), void *);
344
345
346 static struct cb_ops di_cb_ops = {
347 di_open, /* open */
348 di_close, /* close */
349 nodev, /* strategy */
350 nodev, /* print */
351 nodev, /* dump */
352 nodev, /* read */
353 nodev, /* write */
354 di_ioctl, /* ioctl */
355 nodev, /* devmap */
356 nodev, /* mmap */
357 nodev, /* segmap */
358 nochpoll, /* poll */
359 ddi_prop_op, /* prop_op */
360 NULL, /* streamtab */
361 D_NEW | D_MP /* Driver compatibility flag */
362 };
363
364 static struct dev_ops di_ops = {
365 DEVO_REV, /* devo_rev, */
366 0, /* refcnt */
367 di_info, /* info */
368 nulldev, /* identify */
369 nulldev, /* probe */
370 di_attach, /* attach */
371 di_detach, /* detach */
372 nodev, /* reset */
373 &di_cb_ops, /* driver operations */
374 NULL /* bus operations */
375 };
376
377 /*
378 * Module linkage information for the kernel.
379 */
380 static struct modldrv modldrv = {
381 &mod_driverops,
382 "DEVINFO Driver",
383 &di_ops
384 };
385
386 static struct modlinkage modlinkage = {
387 MODREV_1,
388 &modldrv,
389 NULL
390 };
391
392 int
393 _init(void)
394 {
395 int error;
396
397 mutex_init(&di_lock, NULL, MUTEX_DRIVER, NULL);
398
399 error = mod_install(&modlinkage);
400 if (error != 0) {
401 mutex_destroy(&di_lock);
402 return (error);
403 }
404
405 return (0);
406 }
407
408 int
409 _info(struct modinfo *modinfop)
410 {
411 return (mod_info(&modlinkage, modinfop));
412 }
413
414 int
415 _fini(void)
416 {
417 int error;
418
419 error = mod_remove(&modlinkage);
420 if (error != 0) {
421 return (error);
422 }
423
424 mutex_destroy(&di_lock);
425 return (0);
426 }
427
428 static dev_info_t *di_dip;
429
430 /*ARGSUSED*/
431 static int
432 di_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
433 {
434 int error = DDI_FAILURE;
435
436 switch (infocmd) {
437 case DDI_INFO_DEVT2DEVINFO:
438 *result = (void *)di_dip;
439 error = DDI_SUCCESS;
440 break;
441 case DDI_INFO_DEVT2INSTANCE:
442 /*
443 * All dev_t's map to the same, single instance.
444 */
445 *result = (void *)0;
446 error = DDI_SUCCESS;
447 break;
448 default:
449 break;
450 }
451
452 return (error);
453 }
454
455 static int
456 di_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
457 {
458 int error = DDI_FAILURE;
459
460 switch (cmd) {
461 case DDI_ATTACH:
462 di_states = kmem_zalloc(
463 di_max_opens * sizeof (struct di_state *), KM_SLEEP);
464
465 if (ddi_create_minor_node(dip, "devinfo", S_IFCHR,
466 DI_FULL_PARENT, DDI_PSEUDO, 0) == DDI_FAILURE ||
467 ddi_create_minor_node(dip, "devinfo,ro", S_IFCHR,
468 DI_READONLY_PARENT, DDI_PSEUDO, 0) == DDI_FAILURE) {
469 kmem_free(di_states,
470 di_max_opens * sizeof (struct di_state *));
471 ddi_remove_minor_node(dip, NULL);
472 error = DDI_FAILURE;
473 } else {
474 di_dip = dip;
475 ddi_report_dev(dip);
476
477 error = DDI_SUCCESS;
478 }
479 break;
480 default:
481 error = DDI_FAILURE;
482 break;
483 }
484
485 return (error);
486 }
487
488 static int
489 di_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
490 {
491 int error = DDI_FAILURE;
492
493 switch (cmd) {
494 case DDI_DETACH:
495 ddi_remove_minor_node(dip, NULL);
496 di_dip = NULL;
497 kmem_free(di_states, di_max_opens * sizeof (struct di_state *));
498
499 error = DDI_SUCCESS;
500 break;
501 default:
502 error = DDI_FAILURE;
503 break;
504 }
505
506 return (error);
507 }
508
509 /*
510 * Allow multiple opens by tweaking the dev_t such that it looks like each
511 * open is getting a different minor device. Each minor gets a separate
512 * entry in the di_states[] table. Based on the original minor number, we
513 * discriminate opens of the full and read-only nodes. If all of the instances
514 * of the selected minor node are currently open, we return EAGAIN.
515 */
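
/*
 * For example, the first open of the full minor (DI_FULL_PARENT, 0)
 * claims slot 0 of di_states[] and the caller's dev_t is rewritten
 * to minor 2 (slot + DI_NODE_SPECIES); a concurrent second open
 * claims slot 2 and sees minor 4. Read-only opens use the odd
 * slots 1, 3, 5, ..., for which DI_UNPRIVILEGED_NODE() is true.
 */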
516 /*ARGSUSED*/
517 static int
518 di_open(dev_t *devp, int flag, int otyp, cred_t *credp)
519 {
520 int m;
521 minor_t minor_parent = getminor(*devp);
522
523 if (minor_parent != DI_FULL_PARENT &&
524 minor_parent != DI_READONLY_PARENT)
525 return (ENXIO);
526
527 mutex_enter(&di_lock);
528
529 for (m = minor_parent; m < di_max_opens; m += DI_NODE_SPECIES) {
530 if (di_states[m] != NULL)
531 continue;
532
533 di_states[m] = kmem_zalloc(sizeof (struct di_state), KM_SLEEP);
534 break; /* It's ours. */
535 }
536
537 if (m >= di_max_opens) {
538 /*
539 * maximum number of open instances for the device reached
540 */
541 mutex_exit(&di_lock);
542 dcmn_err((CE_WARN, "devinfo: maximum devinfo open reached"));
543 return (EAGAIN);
544 }
545 mutex_exit(&di_lock);
546
547 ASSERT(m < di_max_opens);
548 *devp = makedevice(getmajor(*devp), (minor_t)(m + DI_NODE_SPECIES));
549
550 dcmn_err((CE_CONT, "di_open: thread = %p, assigned minor = %d\n",
551 (void *)curthread, m + DI_NODE_SPECIES));
552
553 return (0);
554 }
555
556 /*ARGSUSED*/
557 static int
558 di_close(dev_t dev, int flag, int otype, cred_t *cred_p)
559 {
560 struct di_state *st;
561 int m = (int)getminor(dev) - DI_NODE_SPECIES;
562
563 if (m < 0) {
564 cmn_err(CE_WARN, "closing non-existent devinfo minor %d",
565 m + DI_NODE_SPECIES);
566 return (ENXIO);
567 }
568
569 st = di_states[m];
570 ASSERT(m < di_max_opens && st != NULL);
571
572 di_freemem(st);
573 kmem_free(st, sizeof (struct di_state));
574
575 /*
576 * vacate the slot in the state table
577 */
578 mutex_enter(&di_lock);
579 di_states[m] = NULL;
580 dcmn_err((CE_CONT, "di_close: thread = %p, assigned minor = %d\n",
581 (void *)curthread, m + DI_NODE_SPECIES));
582 mutex_exit(&di_lock);
583
584 return (0);
585 }
586
587
588 /*ARGSUSED*/
589 static int
590 di_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
591 {
592 int rv, error;
593 di_off_t off;
594 struct di_all *all;
595 struct di_state *st;
596 int m = (int)getminor(dev) - DI_NODE_SPECIES;
597 major_t i;
598 char *drv_name;
599 size_t map_size, size;
600 struct di_mem *dcp;
601 int ndi_flags;
602
603 if (m < 0 || m >= di_max_opens) {
604 return (ENXIO);
605 }
606
607 st = di_states[m];
608 ASSERT(st != NULL);
609
610 dcmn_err2((CE_CONT, "di_ioctl: mode = %x, cmd = %x\n", mode, cmd));
611
612 switch (cmd) {
613 case DINFOIDENT:
614 /*
615 * This is called from di_init to verify that the driver
616 * opened is indeed devinfo. The purpose is to guard against
617 * sending an ioctl to an unknown driver in case of an
618 * unresolved major number conflict during bfu.
619 */
620 *rvalp = DI_MAGIC;
621 return (0);
622
623 case DINFOLODRV:
624 /*
625 * Hold an installed driver and return the result
626 */
627 if (DI_UNPRIVILEGED_NODE(m)) {
628 /*
629 * Only the fully enabled instances may issue
630 * DINFOLODRV.
631 */
632 return (EACCES);
633 }
634
635 drv_name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
636 if (ddi_copyin((void *)arg, drv_name, MAXNAMELEN, mode) != 0) {
637 kmem_free(drv_name, MAXNAMELEN);
638 return (EFAULT);
639 }
640
641 /*
642 * Some third-party drivers' _init() walks the device tree,
643 * so we load the driver module before configuring the driver.
644 */
645 i = ddi_name_to_major(drv_name);
646 if (ddi_hold_driver(i) == NULL) {
647 kmem_free(drv_name, MAXNAMELEN);
648 return (ENXIO);
649 }
650
651 ndi_flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
652
653 /*
654 * i_ddi_load_drvconf() below will trigger a reprobe
655 * via reset_nexus_flags(). NDI_DRV_CONF_REPROBE isn't
656 * needed here.
657 */
658 modunload_disable();
659 (void) i_ddi_load_drvconf(i);
660 (void) ndi_devi_config_driver(ddi_root_node(), ndi_flags, i);
661 kmem_free(drv_name, MAXNAMELEN);
662 ddi_rele_driver(i);
663 rv = i_ddi_devs_attached(i);
664 modunload_enable();
665
666 i_ddi_di_cache_invalidate();
667
668 return ((rv == DDI_SUCCESS)? 0 : ENXIO);
669
670 case DINFOUSRLD:
671 /*
672 * Copy the snapshot to userland.
673 */
674 if (di_setstate(st, IOC_COPY) == -1)
675 return (EBUSY);
676
677 map_size = DI_ALL_PTR(st)->map_size;
678 if (map_size == 0) {
679 (void) di_setstate(st, IOC_DONE);
680 return (EFAULT);
681 }
682
683 /*
684 * copyout the snapshot
685 */
686 map_size = (map_size + PAGEOFFSET) & PAGEMASK;
687
688 /*
689 * Return the map size so the caller may sanity-check it
690 * against the return value of the snapshot ioctl()
691 */
692 *rvalp = (int)map_size;
693
694 /*
695 * Copy one chunk at a time
696 */
697 off = 0;
698 dcp = st->memlist;
699 while (map_size) {
700 size = dcp->buf_size;
701 if (map_size <= size) {
702 size = map_size;
703 }
704
705 if (ddi_copyout(di_mem_addr(st, off),
706 (void *)(arg + off), size, mode) != 0) {
707 (void) di_setstate(st, IOC_DONE);
708 return (EFAULT);
709 }
710
711 map_size -= size;
712 off += size;
713 dcp = dcp->next;
714 }
715
716 di_freemem(st);
717 (void) di_setstate(st, IOC_IDLE);
718 return (0);
719
720 default:
721 if ((cmd & ~DIIOC_MASK) != DIIOC) {
722 /*
723 * Invalid ioctl command
724 */
725 return (ENOTTY);
726 }
727 /*
728 * take a snapshot
729 */
730 st->command = cmd & DIIOC_MASK;
731 /*FALLTHROUGH*/
732 }
733
734 /*
735 * Obtain enough memory to hold header + rootpath. We prevent kernel
736 * memory exhaustion by freeing any previously allocated snapshot and
737 * refusing the operation; otherwise we would be allowing ioctl(),
738 * ioctl(), ioctl(), ..., panic.
739 */
740 if (di_setstate(st, IOC_SNAP) == -1)
741 return (EBUSY);
742
743 /*
744 * The initial memlist always holds di_all and the root_path - and
745 * is at least a page in size.
746 */
747 size = sizeof (struct di_all) +
748 sizeof (((struct dinfo_io *)(NULL))->root_path);
749 if (size < PAGESIZE)
750 size = PAGESIZE;
751 off = di_checkmem(st, 0, size);
752 all = DI_ALL_PTR(st);
753 off += sizeof (struct di_all); /* real length of di_all */
754
755 all->devcnt = devcnt;
756 all->command = st->command;
757 all->version = DI_SNAPSHOT_VERSION;
758 all->top_vhci_devinfo = 0; /* filled by build_vhci_list. */
759
760 /*
761 * Note the endianness in case we need to transport the snapshot
762 * over the network.
763 */
764 #if defined(_LITTLE_ENDIAN)
765 all->endianness = DI_LITTLE_ENDIAN;
766 #else
767 all->endianness = DI_BIG_ENDIAN;
768 #endif
769
770 /* Copyin ioctl args, store in the snapshot. */
771 if (copyinstr((void *)arg, all->req_path,
772 sizeof (((struct dinfo_io *)(NULL))->root_path), &size) != 0) {
773 di_freemem(st);
774 (void) di_setstate(st, IOC_IDLE);
775 return (EFAULT);
776 }
777 (void) strcpy(all->root_path, all->req_path);
778 off += size; /* real length of root_path */
779
780 if ((st->command & DINFOCLEANUP) && !DEVICES_FILES_CLEANABLE(st)) {
781 di_freemem(st);
782 (void) di_setstate(st, IOC_IDLE);
783 return (EINVAL);
784 }
785
786 error = 0;
787 if ((st->command & DINFOCACHE) && !cache_args_valid(st, &error)) {
788 di_freemem(st);
789 (void) di_setstate(st, IOC_IDLE);
790 return (error);
791 }
792
793 /*
794 * Only the fully enabled version may force load drivers or read
795 * the parent private data from a driver.
796 */
797 if ((st->command & (DINFOPRIVDATA | DINFOFORCE)) != 0 &&
798 DI_UNPRIVILEGED_NODE(m)) {
799 di_freemem(st);
800 (void) di_setstate(st, IOC_IDLE);
801 return (EACCES);
802 }
803
804 /* Do we need private data? */
805 if (st->command & DINFOPRIVDATA) {
806 arg += sizeof (((struct dinfo_io *)(NULL))->root_path);
807
808 #ifdef _MULTI_DATAMODEL
809 switch (ddi_model_convert_from(mode & FMODELS)) {
810 case DDI_MODEL_ILP32: {
811 /*
812 * Cannot copy private data from 64-bit kernel
813 * to 32-bit app
814 */
815 di_freemem(st);
816 (void) di_setstate(st, IOC_IDLE);
817 return (EINVAL);
818 }
819 case DDI_MODEL_NONE:
820 if ((off = di_copyformat(off, st, arg, mode)) == 0) {
821 di_freemem(st);
822 (void) di_setstate(st, IOC_IDLE);
823 return (EFAULT);
824 }
825 break;
826 }
827 #else /* !_MULTI_DATAMODEL */
828 if ((off = di_copyformat(off, st, arg, mode)) == 0) {
829 di_freemem(st);
830 (void) di_setstate(st, IOC_IDLE);
831 return (EFAULT);
832 }
833 #endif /* _MULTI_DATAMODEL */
834 }
835
836 all->top_devinfo = DI_ALIGN(off);
837
838 /*
839 * For cache lookups we reallocate memory from scratch,
840 * so the value of "all" is no longer valid.
841 */
842 all = NULL;
843
844 if (st->command & DINFOCACHE) {
845 *rvalp = di_cache_lookup(st);
846 } else if (snapshot_is_cacheable(st)) {
847 DI_CACHE_LOCK(di_cache);
848 *rvalp = di_cache_update(st);
849 DI_CACHE_UNLOCK(di_cache);
850 } else
851 *rvalp = di_snapshot_and_clean(st);
852
853 if (*rvalp) {
854 DI_ALL_PTR(st)->map_size = *rvalp;
855 (void) di_setstate(st, IOC_DONE);
856 } else {
857 di_freemem(st);
858 (void) di_setstate(st, IOC_IDLE);
859 }
860
861 return (0);
862 }
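
/*
 * For reference, a minimal userland sketch of the two-step protocol
 * di_ioctl() implements; libdevinfo's di_init() is the supported
 * consumer, and the device path and rounding helper below are
 * illustrative assumptions rather than a stable interface:
 *
 *	int fd = open("/devices/pseudo/devinfo@0:devinfo,ro", O_RDONLY);
 *	// Take the snapshot: the arg is the root path string and the
 *	// return value is the snapshot's map size.
 *	int size = ioctl(fd, DINFOSUBTREE | DINFOPROP | DINFOMINOR, "/");
 *	// Copy it out: the buffer must cover the size rounded up to a
 *	// whole page, because the copyout above is page-rounded.
 *	char *buf = malloc(P2ROUNDUP(size, PAGESIZE));
 *	(void) ioctl(fd, DINFOUSRLD, buf);
 */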
863
864 /*
865 * Get a chunk of memory >= size for the snapshot
866 */
867 static void
868 di_allocmem(struct di_state *st, size_t size)
869 {
870 struct di_mem *mem = kmem_zalloc(sizeof (struct di_mem), KM_SLEEP);
871
872 /*
873 * Round up size to nearest power of 2. If it is less
874 * than st->mem_size, set it to st->mem_size (i.e.,
875 * the mem_size is doubled every time) to reduce the
876 * number of memory allocations.
877 */
878 size_t tmp = 1;
879 while (tmp < size) {
880 tmp <<= 1;
881 }
882 size = (tmp > st->mem_size) ? tmp : st->mem_size;
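
/*
 * Example: a 3000-byte request rounds up to 4096; if the snapshot
 * already holds 8K, this chunk is 8K instead, doubling the total
 * to 16K.
 */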
883
884 mem->buf = ddi_umem_alloc(size, DDI_UMEM_SLEEP, &mem->cook);
885 mem->buf_size = size;
886
887 dcmn_err2((CE_CONT, "di_allocmem: mem_size=%x\n", st->mem_size));
888
889 if (st->mem_size == 0) { /* first chunk */
890 st->memlist = mem;
891 } else {
892 /*
893 * locate end of linked list and add a chunk at the end
894 */
895 struct di_mem *dcp = st->memlist;
896 while (dcp->next != NULL) {
897 dcp = dcp->next;
898 }
899
900 dcp->next = mem;
901 }
902
903 st->mem_size += size;
904 }
905
906 /*
907 * Copy up to bufsiz bytes of the memlist to buf
908 */
909 static void
910 di_copymem(struct di_state *st, caddr_t buf, size_t bufsiz)
911 {
912 struct di_mem *dcp;
913 size_t copysz;
914
915 if (st->mem_size == 0) {
916 ASSERT(st->memlist == NULL);
917 return;
918 }
919
920 copysz = 0;
921 for (dcp = st->memlist; dcp; dcp = dcp->next) {
922
923 ASSERT(bufsiz > 0);
924
925 if (bufsiz <= dcp->buf_size)
926 copysz = bufsiz;
927 else
928 copysz = dcp->buf_size;
929
930 bcopy(dcp->buf, buf, copysz);
931
932 buf += copysz;
933 bufsiz -= copysz;
934
935 if (bufsiz == 0)
936 break;
937 }
938 }
939
940 /*
941 * Free all memory for the snapshot
942 */
943 static void
944 di_freemem(struct di_state *st)
945 {
946 struct di_mem *dcp, *tmp;
947
948 dcmn_err2((CE_CONT, "di_freemem\n"));
949
950 if (st->mem_size) {
951 dcp = st->memlist;
952 while (dcp) { /* traverse the linked list */
953 tmp = dcp;
954 dcp = dcp->next;
955 ddi_umem_free(tmp->cook);
956 kmem_free(tmp, sizeof (struct di_mem));
957 }
958 st->mem_size = 0;
959 st->memlist = NULL;
960 }
961
962 ASSERT(st->mem_size == 0);
963 ASSERT(st->memlist == NULL);
964 }
965
966 /*
967 * Copies cached data to the di_state structure.
968 * Returns:
969 * - size of data copied, on SUCCESS
970 * - 0 on failure
971 */
972 static int
973 di_cache2mem(struct di_cache *cache, struct di_state *st)
974 {
975 caddr_t pa;
976
977 ASSERT(st->mem_size == 0);
978 ASSERT(st->memlist == NULL);
979 ASSERT(!servicing_interrupt());
980 ASSERT(DI_CACHE_LOCKED(*cache));
981
982 if (cache->cache_size == 0) {
983 ASSERT(cache->cache_data == NULL);
984 CACHE_DEBUG((DI_ERR, "Empty cache. Skipping copy"));
985 return (0);
986 }
987
988 ASSERT(cache->cache_data);
989
990 di_allocmem(st, cache->cache_size);
991
992 pa = di_mem_addr(st, 0);
993
994 ASSERT(pa);
995
996 /*
997 * Verify that di_allocmem() allocates contiguous memory,
998 * so that it is safe to do straight bcopy()
999 */
1000 ASSERT(st->memlist != NULL);
1001 ASSERT(st->memlist->next == NULL);
1002 bcopy(cache->cache_data, pa, cache->cache_size);
1003
1004 return (cache->cache_size);
1005 }
1006
1007 /*
1008 * Copies a snapshot from di_state to the cache
1009 * Returns:
1010 * - 0 on failure
1011 * - size of copied data on success
1012 */
1013 static size_t
1014 di_mem2cache(struct di_state *st, struct di_cache *cache)
1015 {
1016 size_t map_size;
1017
1018 ASSERT(cache->cache_size == 0);
1019 ASSERT(cache->cache_data == NULL);
1020 ASSERT(!servicing_interrupt());
1021 ASSERT(DI_CACHE_LOCKED(*cache));
1022
1023 if (st->mem_size == 0) {
1024 ASSERT(st->memlist == NULL);
1025 CACHE_DEBUG((DI_ERR, "Empty memlist. Skipping copy"));
1026 return (0);
1027 }
1028
1029 ASSERT(st->memlist);
1030
1031 /*
1032 * The size of the memory list may be much larger than the
1033 * size of valid data (map_size). Cache only the valid data
1034 */
1035 map_size = DI_ALL_PTR(st)->map_size;
1036 if (map_size == 0 || map_size < sizeof (struct di_all) ||
1037 map_size > st->mem_size) {
1038 CACHE_DEBUG((DI_ERR, "cannot cache: bad size: 0x%x", map_size));
1039 return (0);
1040 }
1041
1042 cache->cache_data = kmem_alloc(map_size, KM_SLEEP);
1043 cache->cache_size = map_size;
1044 di_copymem(st, cache->cache_data, cache->cache_size);
1045
1046 return (map_size);
1047 }
1048
1049 /*
1050 * Make sure there is at least "size" bytes memory left before
1051 * going on. Otherwise, start on a new chunk.
1052 */
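
/*
 * Example: with mem_size = 0x2000 and off = 0x1f40, a request for
 * 0x200 bytes does not fit in the remaining 0xc0 bytes, so
 * di_allocmem() appends a chunk and the aligned offset 0x2000 is
 * returned.
 */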
1053 static di_off_t
1054 di_checkmem(struct di_state *st, di_off_t off, size_t size)
1055 {
1056 dcmn_err3((CE_CONT, "di_checkmem: off=%x size=%x\n",
1057 off, (int)size));
1058
1059 /*
1060 * di_checkmem() shouldn't be called with a size of zero.
1061 * But in case it is, we want to make sure we return a valid
1062 * offset within the memlist and not an offset that points us
1063 * at the end of the memlist.
1064 */
1065 if (size == 0) {
1066 dcmn_err((CE_WARN, "di_checkmem: invalid zero size used"));
1067 size = 1;
1068 }
1069
1070 off = DI_ALIGN(off);
1071 if ((st->mem_size - off) < size) {
1072 off = st->mem_size;
1073 di_allocmem(st, size);
1074 }
1075
1076 /* verify that return value is aligned */
1077 ASSERT(off == DI_ALIGN(off));
1078 return (off);
1079 }
1080
1081 /*
1082 * Copy the private data format from ioctl arg.
1083 * On success, the ending offset is returned. On error 0 is returned.
1084 */
1085 static di_off_t
1086 di_copyformat(di_off_t off, struct di_state *st, intptr_t arg, int mode)
1087 {
1088 di_off_t size;
1089 struct di_priv_data *priv;
1090 struct di_all *all = DI_ALL_PTR(st);
1091
1092 dcmn_err2((CE_CONT, "di_copyformat: off=%x, arg=%p mode=%x\n",
1093 off, (void *)arg, mode));
1094
1095 /*
1096 * Copyin data and check version.
1097 * We only handle private data version 0.
1098 */
1099 priv = kmem_alloc(sizeof (struct di_priv_data), KM_SLEEP);
1100 if ((ddi_copyin((void *)arg, priv, sizeof (struct di_priv_data),
1101 mode) != 0) || (priv->version != DI_PRIVDATA_VERSION_0)) {
1102 kmem_free(priv, sizeof (struct di_priv_data));
1103 return (0);
1104 }
1105
1106 /*
1107 * Save di_priv_data copied from userland in snapshot.
1108 */
1109 all->pd_version = priv->version;
1110 all->n_ppdata = priv->n_parent;
1111 all->n_dpdata = priv->n_driver;
1112
1113 /*
1114 * copyin private data format, modify offset accordingly
1115 */
1116 if (all->n_ppdata) { /* parent private data format */
1117 /*
1118 * check memory
1119 */
1120 size = all->n_ppdata * sizeof (struct di_priv_format);
1121 all->ppdata_format = off = di_checkmem(st, off, size);
1122 if (ddi_copyin(priv->parent, di_mem_addr(st, off), size,
1123 mode) != 0) {
1124 kmem_free(priv, sizeof (struct di_priv_data));
1125 return (0);
1126 }
1127
1128 off += size;
1129 }
1130
1131 if (all->n_dpdata) { /* driver private data format */
1132 /*
1133 * check memory
1134 */
1135 size = all->n_dpdata * sizeof (struct di_priv_format);
1136 all->dpdata_format = off = di_checkmem(st, off, size);
1137 if (ddi_copyin(priv->driver, di_mem_addr(st, off), size,
1138 mode) != 0) {
1139 kmem_free(priv, sizeof (struct di_priv_data));
1140 return (0);
1141 }
1142
1143 off += size;
1144 }
1145
1146 kmem_free(priv, sizeof (struct di_priv_data));
1147 return (off);
1148 }
1149
1150 /*
1151 * Return the real address based on the offset (off) within the snapshot
1152 */
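
/*
 * Example: if the memlist is a 0x2000-byte chunk followed by a
 * 0x4000-byte chunk, offset 0x3000 resolves to 0x1000 bytes into
 * the second chunk's buf.
 */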
1153 static void *
1154 di_mem_addr(struct di_state *st, di_off_t off)
1155 {
1156 struct di_mem *dcp = st->memlist;
1157
1158 dcmn_err3((CE_CONT, "di_mem_addr: dcp=%p off=%x\n",
1159 (void *)dcp, off));
1160
1161 ASSERT(off < st->mem_size);
1162
1163 while (off >= dcp->buf_size) {
1164 off -= dcp->buf_size;
1165 dcp = dcp->next;
1166 }
1167
1168 dcmn_err3((CE_CONT, "di_mem_addr: new off=%x, return = %p\n",
1169 off, (void *)(dcp->buf + off)));
1170
1171 return (dcp->buf + off);
1172 }
1173
1174 /*
1175 * Ideally we would use the whole key to derive the hash
1176 * value. However, the probability that two distinct keys
1177 * will share the same dip (or pip) pointer is very low, so
1178 * hashing by the dip (or pip) pointer should suffice.
1179 */
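
/*
 * mod_hash_byptr() interprets its first argument as a right-shift
 * count; shifting by highbit() of the structure size discards the
 * pointer's low-order bits, which carry little entropy because the
 * structures are at least that size apart.
 */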
1180 static uint_t
1181 di_hash_byptr(void *arg, mod_hash_key_t key)
1182 {
1183 struct di_key *dik = key;
1184 size_t rshift;
1185 void *ptr;
1186
1187 ASSERT(arg == NULL);
1188
1189 switch (dik->k_type) {
1190 case DI_DKEY:
1191 ptr = dik->k_u.dkey.dk_dip;
1192 rshift = highbit(sizeof (struct dev_info));
1193 break;
1194 case DI_PKEY:
1195 ptr = dik->k_u.pkey.pk_pip;
1196 rshift = highbit(sizeof (struct mdi_pathinfo));
1197 break;
1198 default:
1199 panic("devinfo: unknown key type");
1200 /*NOTREACHED*/
1201 }
1202 return (mod_hash_byptr((void *)rshift, ptr));
1203 }
1204
1205 static void
1206 di_key_dtor(mod_hash_key_t key)
1207 {
1208 char *path_addr;
1209 struct di_key *dik = key;
1210
1211 switch (dik->k_type) {
1212 case DI_DKEY:
1213 break;
1214 case DI_PKEY:
1215 path_addr = dik->k_u.pkey.pk_path_addr;
1216 if (path_addr)
1217 kmem_free(path_addr, strlen(path_addr) + 1);
1218 break;
1219 default:
1220 panic("devinfo: unknown key type");
1221 /*NOTREACHED*/
1222 }
1223
1224 kmem_free(dik, sizeof (struct di_key));
1225 }
1226
1227 static int
1228 di_dkey_cmp(struct di_dkey *dk1, struct di_dkey *dk2)
1229 {
1230 if (dk1->dk_dip != dk2->dk_dip)
1231 return (dk1->dk_dip > dk2->dk_dip ? 1 : -1);
1232
1233 if (dk1->dk_major != DDI_MAJOR_T_NONE &&
1234 dk2->dk_major != DDI_MAJOR_T_NONE) {
1235 if (dk1->dk_major != dk2->dk_major)
1236 return (dk1->dk_major > dk2->dk_major ? 1 : -1);
1237
1238 if (dk1->dk_inst != dk2->dk_inst)
1239 return (dk1->dk_inst > dk2->dk_inst ? 1 : -1);
1240 }
1241
1242 if (dk1->dk_nodeid != dk2->dk_nodeid)
1243 return (dk1->dk_nodeid > dk2->dk_nodeid ? 1 : -1);
1244
1245 return (0);
1246 }
1247
1248 static int
1249 di_pkey_cmp(struct di_pkey *pk1, struct di_pkey *pk2)
1250 {
1251 char *p1, *p2;
1252 int rv;
1253
1254 if (pk1->pk_pip != pk2->pk_pip)
1255 return (pk1->pk_pip > pk2->pk_pip ? 1 : -1);
1256
1257 p1 = pk1->pk_path_addr;
1258 p2 = pk2->pk_path_addr;
1259
1260 p1 = p1 ? p1 : "";
1261 p2 = p2 ? p2 : "";
1262
1263 rv = strcmp(p1, p2);
1264 if (rv)
1265 return (rv > 0 ? 1 : -1);
1266
1267 if (pk1->pk_client != pk2->pk_client)
1268 return (pk1->pk_client > pk2->pk_client ? 1 : -1);
1269
1270 if (pk1->pk_phci != pk2->pk_phci)
1271 return (pk1->pk_phci > pk2->pk_phci ? 1 : -1);
1272
1273 return (0);
1274 }
1275
1276 static int
1277 di_key_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
1278 {
1279 struct di_key *dik1, *dik2;
1280
1281 dik1 = key1;
1282 dik2 = key2;
1283
1284 if (dik1->k_type != dik2->k_type) {
1285 panic("devinfo: mismatched keys");
1286 /*NOTREACHED*/
1287 }
1288
1289 switch (dik1->k_type) {
1290 case DI_DKEY:
1291 return (di_dkey_cmp(&(dik1->k_u.dkey), &(dik2->k_u.dkey)));
1292 case DI_PKEY:
1293 return (di_pkey_cmp(&(dik1->k_u.pkey), &(dik2->k_u.pkey)));
1294 default:
1295 panic("devinfo: unknown key type");
1296 /*NOTREACHED*/
1297 }
1298 }
1299
1300 static void
1301 di_copy_aliases(struct di_state *st, alias_pair_t *apair, di_off_t *offp)
1302 {
1303 di_off_t off;
1304 struct di_all *all = DI_ALL_PTR(st);
1305 struct di_alias *di_alias;
1306 di_off_t curroff;
1307 dev_info_t *currdip;
1308 size_t size;
1309
1310 currdip = NULL;
1311 if (resolve_pathname(apair->pair_alias, &currdip, NULL, NULL) != 0) {
1312 return;
1313 }
1314
1315 if (di_dip_find(st, currdip, &curroff) != 0) {
1316 ndi_rele_devi(currdip);
1317 return;
1318 }
1319 ndi_rele_devi(currdip);
1320
1321 off = *offp;
1322 size = sizeof (struct di_alias);
1323 size += strlen(apair->pair_alias) + 1;
1324 off = di_checkmem(st, off, size);
1325 di_alias = DI_ALIAS(di_mem_addr(st, off));
1326
1327 di_alias->self = off;
1328 di_alias->next = all->aliases;
1329 all->aliases = off;
1330 (void) strcpy(di_alias->alias, apair->pair_alias);
1331 di_alias->curroff = curroff;
1332
1333 off += size;
1334
1335 *offp = off;
1336 }
1337
1338 /*
1339 * This is the main function that takes a snapshot
1340 */
1341 static di_off_t
1342 di_snapshot(struct di_state *st)
1343 {
1344 di_off_t off;
1345 struct di_all *all;
1346 dev_info_t *rootnode;
1347 char buf[80];
1348 int plen;
1349 char *path;
1350 vnode_t *vp;
1351 int i;
1352
1353 all = DI_ALL_PTR(st);
1354 dcmn_err((CE_CONT, "Taking a snapshot of devinfo tree...\n"));
1355
1356 /*
1357 * Translate the requested root path if it is an alias and snap-root != "/"
1358 */
1359 if (ddi_aliases_present == B_TRUE && strcmp(all->root_path, "/") != 0) {
1360 /* If there is no redirected alias, use root_path as is */
1361 rootnode = ddi_alias_redirect(all->root_path);
1362 if (rootnode) {
1363 (void) ddi_pathname(rootnode, all->root_path);
1364 goto got_root;
1365 }
1366 }
1367
1368 /*
1369 * Verify path before entrusting it to e_ddi_hold_devi_by_path because
1370 * some platforms have OBP bugs where executing the NDI_PROMNAME code
1371 * path against an invalid path results in panic. The lookupnameat
1372 * is done relative to rootdir without a leading '/' on "devices/"
1373 * to force the lookup to occur in the global zone.
1374 */
1375 plen = strlen("devices/") + strlen(all->root_path) + 1;
1376 path = kmem_alloc(plen, KM_SLEEP);
1377 (void) snprintf(path, plen, "devices/%s", all->root_path);
1378 if (lookupnameat(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp, rootdir)) {
1379 dcmn_err((CE_CONT, "Devinfo node %s not found\n",
1380 all->root_path));
1381 kmem_free(path, plen);
1382 return (0);
1383 }
1384 kmem_free(path, plen);
1385 VN_RELE(vp);
1386
1387 /*
1388 * Hold the devinfo node referred by the path.
1389 */
1390 rootnode = e_ddi_hold_devi_by_path(all->root_path, 0);
1391 if (rootnode == NULL) {
1392 dcmn_err((CE_CONT, "Devinfo node %s not found\n",
1393 all->root_path));
1394 return (0);
1395 }
1396
1397 got_root:
1398 (void) snprintf(buf, sizeof (buf),
1399 "devinfo registered dips (statep=%p)", (void *)st);
1400
1401 st->reg_dip_hash = mod_hash_create_extended(buf, 64,
1402 di_key_dtor, mod_hash_null_valdtor, di_hash_byptr,
1403 NULL, di_key_cmp, KM_SLEEP);
1404
1405
1406 (void) snprintf(buf, sizeof (buf),
1407 "devinfo registered pips (statep=%p)", (void *)st);
1408
1409 st->reg_pip_hash = mod_hash_create_extended(buf, 64,
1410 di_key_dtor, mod_hash_null_valdtor, di_hash_byptr,
1411 NULL, di_key_cmp, KM_SLEEP);
1412
1413 if (DINFOHP & st->command) {
1414 list_create(&st->hp_list, sizeof (i_hp_t),
1415 offsetof(i_hp_t, hp_link));
1416 }
1417
1418 /*
1419 * copy the device tree
1420 */
1421 off = di_copytree(DEVI(rootnode), &all->top_devinfo, st);
1422
1423 if (DINFOPATH & st->command) {
1424 mdi_walk_vhcis(build_vhci_list, st);
1425 }
1426
1427 if (DINFOHP & st->command) {
1428 di_hotplug_children(st);
1429 }
1430
1431 ddi_release_devi(rootnode);
1432
1433 /*
1434 * copy the devnames array
1435 */
1436 all->devnames = off;
1437 off = di_copydevnm(&all->devnames, st);
1438
1439
1440 /* initialize the hash tables */
1441 st->lnode_count = 0;
1442 st->link_count = 0;
1443
1444 if (DINFOLYR & st->command) {
1445 off = di_getlink_data(off, st);
1446 }
1447
1448 all->aliases = 0;
1449 if (ddi_aliases_present == B_FALSE)
1450 goto done;
1451
1452 for (i = 0; i < ddi_aliases.dali_num_pairs; i++) {
1453 di_copy_aliases(st, &(ddi_aliases.dali_alias_pairs[i]), &off);
1454 }
1455
1456 done:
1457 /*
1458 * Free up hash tables
1459 */
1460 mod_hash_destroy_hash(st->reg_dip_hash);
1461 mod_hash_destroy_hash(st->reg_pip_hash);
1462
1463 /*
1464 * Record the timestamp now that we are done with the snapshot.
1465 *
1466 * We compute the checksum later, and only if we cache the
1467 * snapshot, since checksumming adds some overhead. The
1468 * checksum is checked later when we read the cache file
1469 * from disk.
1470 *
1471 * Set checksum field to 0 as CRC is calculated with that
1472 * field set to 0.
1473 */
1474 all->snapshot_time = ddi_get_time();
1475 all->cache_checksum = 0;
1476
1477 ASSERT(all->snapshot_time != 0);
1478
1479 return (off);
1480 }
1481
1482 /*
1483 * Take a snapshot and clean /etc/devices files if DINFOCLEANUP is set
1484 */
1485 static di_off_t
1486 di_snapshot_and_clean(struct di_state *st)
1487 {
1488 di_off_t off;
1489
1490 modunload_disable();
1491 off = di_snapshot(st);
1492 if (off != 0 && (st->command & DINFOCLEANUP)) {
1493 ASSERT(DEVICES_FILES_CLEANABLE(st));
1494 /*
1495 * Clean up /etc/devices files:
1496 * In order to accurately account for the system configuration
1497 * in /etc/devices files, the appropriate drivers must be
1498 * fully configured before the cleanup starts.
1499 * So enable modunload only after the cleanup.
1500 */
1501 i_ddi_clean_devices_files();
1502 /*
1503 * Remove backing store nodes for unused devices,
1504 * which retain past permissions customizations
1505 * and may be undesired for newly configured devices.
1506 */
1507 dev_devices_cleanup();
1508 }
1509 modunload_enable();
1510
1511 return (off);
1512 }
1513
1514 /*
1515 * construct vhci linkage in the snapshot.
1516 */
1517 static int
1518 build_vhci_list(dev_info_t *vh_devinfo, void *arg)
1519 {
1520 struct di_all *all;
1521 struct di_node *me;
1522 struct di_state *st;
1523 di_off_t off;
1524 phci_walk_arg_t pwa;
1525
1526 dcmn_err3((CE_CONT, "build_vhci list\n"));
1527
1528 dcmn_err3((CE_CONT, "vhci node %s%d\n",
1529 ddi_driver_name(vh_devinfo), ddi_get_instance(vh_devinfo)));
1530
1531 st = (struct di_state *)arg;
1532 if (di_dip_find(st, vh_devinfo, &off) != 0) {
1533 dcmn_err((CE_WARN, "di_dip_find error for the given node\n"));
1534 return (DDI_WALK_TERMINATE);
1535 }
1536
1537 dcmn_err3((CE_CONT, "st->mem_size: %d vh_devinfo off: 0x%x\n",
1538 st->mem_size, off));
1539
1540 all = DI_ALL_PTR(st);
1541 if (all->top_vhci_devinfo == 0) {
1542 all->top_vhci_devinfo = off;
1543 } else {
1544 me = DI_NODE(di_mem_addr(st, all->top_vhci_devinfo));
1545
1546 while (me->next_vhci != 0) {
1547 me = DI_NODE(di_mem_addr(st, me->next_vhci));
1548 }
1549
1550 me->next_vhci = off;
1551 }
1552
1553 pwa.off = off;
1554 pwa.st = st;
1555 mdi_vhci_walk_phcis(vh_devinfo, build_phci_list, &pwa);
1556
1557 return (DDI_WALK_CONTINUE);
1558 }
1559
1560 /*
1561 * construct phci linkage for the given vhci in the snapshot.
1562 */
1563 static int
1564 build_phci_list(dev_info_t *ph_devinfo, void *arg)
1565 {
1566 struct di_node *vh_di_node;
1567 struct di_node *me;
1568 phci_walk_arg_t *pwa;
1569 di_off_t off;
1570
1571 pwa = (phci_walk_arg_t *)arg;
1572
1573 dcmn_err3((CE_CONT, "build_phci list for vhci at offset: 0x%x\n",
1574 pwa->off));
1575
1576 vh_di_node = DI_NODE(di_mem_addr(pwa->st, pwa->off));
1577 if (di_dip_find(pwa->st, ph_devinfo, &off) != 0) {
1578 dcmn_err((CE_WARN, "di_dip_find error for the given node\n"));
1579 return (DDI_WALK_TERMINATE);
1580 }
1581
1582 dcmn_err3((CE_CONT, "phci node %s%d, at offset 0x%x\n",
1583 ddi_driver_name(ph_devinfo), ddi_get_instance(ph_devinfo), off));
1584
1585 if (vh_di_node->top_phci == 0) {
1586 vh_di_node->top_phci = off;
1587 return (DDI_WALK_CONTINUE);
1588 }
1589
1590 me = DI_NODE(di_mem_addr(pwa->st, vh_di_node->top_phci));
1591
1592 while (me->next_phci != 0) {
1593 me = DI_NODE(di_mem_addr(pwa->st, me->next_phci));
1594 }
1595 me->next_phci = off;
1596
1597 return (DDI_WALK_CONTINUE);
1598 }
1599
1600 /*
1601 * Assumes all devinfo nodes in device tree have been snapshotted
1602 */
1603 static void
1604 snap_driver_list(struct di_state *st, struct devnames *dnp, di_off_t *off_p)
1605 {
1606 struct dev_info *node;
1607 struct di_node *me;
1608 di_off_t off;
1609
1610 ASSERT(mutex_owned(&dnp->dn_lock));
1611
1612 node = DEVI(dnp->dn_head);
1613 for (; node; node = node->devi_next) {
1614 if (di_dip_find(st, (dev_info_t *)node, &off) != 0)
1615 continue;
1616
1617 ASSERT(off > 0);
1618 me = DI_NODE(di_mem_addr(st, off));
1619 ASSERT(me->next == 0 || me->next == -1);
1620 /*
1621 * Only nodes which were BOUND when they were
1622 * snapshotted will be added to the per-driver list.
1623 */
1624 if (me->next != -1)
1625 continue;
1626
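/*
 * Append this node to the per-driver list: store its offset
 * through the tail pointer, then advance the tail pointer to
 * this node's next field.
 */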
1627 *off_p = off;
1628 off_p = &me->next;
1629 }
1630
1631 *off_p = 0;
1632 }
1633
1634 /*
1635 * Copy the devnames array, so we have a list of drivers in the snapshot.
1636 * Also makes it possible to locate the per-driver devinfo nodes.
1637 */
1638 static di_off_t
1639 di_copydevnm(di_off_t *off_p, struct di_state *st)
1640 {
1641 int i;
1642 di_off_t off;
1643 size_t size;
1644 struct di_devnm *dnp;
1645
1646 dcmn_err2((CE_CONT, "di_copydevnm: *off_p = %p\n", (void *)off_p));
1647
1648 /*
1649 * make sure there is some allocated memory
1650 */
1651 size = devcnt * sizeof (struct di_devnm);
1652 *off_p = off = di_checkmem(st, *off_p, size);
1653 dnp = DI_DEVNM(di_mem_addr(st, off));
1654 off += size;
1655
1656 dcmn_err((CE_CONT, "Start copying devnamesp[%d] at offset 0x%x\n",
1657 devcnt, off));
1658
1659 for (i = 0; i < devcnt; i++) {
1660 if (devnamesp[i].dn_name == NULL) {
1661 continue;
1662 }
1663
1664 /*
1665 * dn_name is not freed during driver unload or removal.
1666 *
1667 * There is a race condition when make_devname() changes
1668 * dn_name during our strcpy. This should be rare since
1669 * only add_drv does this. At any rate, we have never had a
1670 * problem with ddi_name_to_major(), which is exposed to
1671 * the same race.
1672 */
1673 dcmn_err2((CE_CONT, "di_copydevnm: %s%d, off=%x\n",
1674 devnamesp[i].dn_name, devnamesp[i].dn_instance, off));
1675
1676 size = strlen(devnamesp[i].dn_name) + 1;
1677 dnp[i].name = off = di_checkmem(st, off, size);
1678 (void) strcpy((char *)di_mem_addr(st, off),
1679 devnamesp[i].dn_name);
1680 off += size;
1681
1682 mutex_enter(&devnamesp[i].dn_lock);
1683
1684 /*
1685 * Snapshot per-driver node list
1686 */
1687 snap_driver_list(st, &devnamesp[i], &dnp[i].head);
1688
1689 /*
1690 * This is not used by libdevinfo, leave it for now
1691 */
1692 dnp[i].flags = devnamesp[i].dn_flags;
1693 dnp[i].instance = devnamesp[i].dn_instance;
1694
1695 /*
1696 * get global properties
1697 */
1698 if ((DINFOPROP & st->command) &&
1699 devnamesp[i].dn_global_prop_ptr) {
1700 dnp[i].global_prop = off;
1701 off = di_getprop(DI_PROP_GLB_LIST,
1702 &devnamesp[i].dn_global_prop_ptr->prop_list,
1703 &dnp[i].global_prop, st, NULL);
1704 }
1705
1706 /*
1707 * Bit encode driver ops: & bus_ops, cb_ops, & cb_ops->cb_str
1708 */
1709 if (CB_DRV_INSTALLED(devopsp[i])) {
1710 if (devopsp[i]->devo_cb_ops) {
1711 dnp[i].ops |= DI_CB_OPS;
1712 if (devopsp[i]->devo_cb_ops->cb_str)
1713 dnp[i].ops |= DI_STREAM_OPS;
1714 }
1715 if (NEXUS_DRV(devopsp[i])) {
1716 dnp[i].ops |= DI_BUS_OPS;
1717 }
1718 }
1719
1720 mutex_exit(&devnamesp[i].dn_lock);
1721 }
1722
1723 dcmn_err((CE_CONT, "End copying devnamesp at offset 0x%x\n", off));
1724
1725 return (off);
1726 }
1727
1728 /*
1729 * Copy the kernel devinfo tree. The tree and the devnames array form
1730 * the entire snapshot (see also di_copydevnm).
1731 */
1732 static di_off_t
1733 di_copytree(struct dev_info *root, di_off_t *off_p, struct di_state *st)
1734 {
1735 di_off_t off;
1736 struct dev_info *node;
1737 struct di_stack *dsp = kmem_zalloc(sizeof (struct di_stack), KM_SLEEP);
1738
1739 dcmn_err((CE_CONT, "di_copytree: root = %p, *off_p = %x\n",
1740 (void *)root, *off_p));
1741
1742 /* force attach drivers */
1743 if (i_ddi_devi_attached((dev_info_t *)root) &&
1744 (st->command & DINFOSUBTREE) && (st->command & DINFOFORCE)) {
1745 (void) ndi_devi_config((dev_info_t *)root,
1746 NDI_CONFIG | NDI_DEVI_PERSIST | NDI_NO_EVENT |
1747 NDI_DRV_CONF_REPROBE);
1748 }
1749
1750 /*
1751 * Push top_devinfo onto a stack
1752 *
1753 * The stack is necessary to avoid recursion, which can overrun
1754 * the kernel stack.
1755 */
1756 PUSH_STACK(dsp, root, off_p);
1757
1758 /*
1759 * As long as there is a node on the stack, copy the node.
1760 * di_copynode() is responsible for pushing and popping
1761 * child and sibling nodes on the stack.
1762 */
1763 while (!EMPTY_STACK(dsp)) {
1764 node = TOP_NODE(dsp);
1765 off = di_copynode(node, dsp, st);
1766 }
1767
1768 /*
1769 * Free the stack structure
1770 */
1771 kmem_free(dsp, sizeof (struct di_stack));
1772
1773 return (off);
1774 }
1775
1776 /*
1777 * This is the core function, which copies all data associated with a single
1778 * node into the snapshot. The amount of information is determined by the
1779 * ioctl command.
1780 */
1781 static di_off_t
1782 di_copynode(struct dev_info *node, struct di_stack *dsp, struct di_state *st)
1783 {
1784 di_off_t off;
1785 struct di_node *me;
1786 size_t size;
1787 struct dev_info *n;
1788
1789 dcmn_err2((CE_CONT, "di_copynode: depth = %x\n", dsp->depth));
1790 ASSERT((node != NULL) && (node == TOP_NODE(dsp)));
1791
1792 /*
1793 * check memory usage, and fix offsets accordingly.
1794 */
1795 size = sizeof (struct di_node);
1796 *(TOP_OFFSET(dsp)) = off = di_checkmem(st, *(TOP_OFFSET(dsp)), size);
1797 me = DI_NODE(di_mem_addr(st, off));
1798 me->self = off;
1799 off += size;
1800
1801 dcmn_err((CE_CONT, "copy node %s, instance #%d, at offset 0x%x\n",
1802 node->devi_node_name, node->devi_instance, off));
1803
1804 /*
1805 * Node parameters:
1806 * self -- offset of current node within snapshot
1807 * nodeid -- pointer to PROM node (tri-valued)
1808 * state -- hot plugging device state
1809 * node_state -- devinfo node state
1810 */
1811 me->instance = node->devi_instance;
1812 me->nodeid = node->devi_nodeid;
1813 me->node_class = node->devi_node_class;
1814 me->attributes = node->devi_node_attributes;
1815 me->state = node->devi_state;
1816 me->flags = node->devi_flags;
1817 me->node_state = node->devi_node_state;
1818 me->next_vhci = 0; /* Filled up by build_vhci_list. */
1819 me->top_phci = 0; /* Filled up by build_phci_list. */
1820 me->next_phci = 0; /* Filled up by build_phci_list. */
1821 me->multipath_component = MULTIPATH_COMPONENT_NONE; /* set default. */
1822 me->user_private_data = 0;
1823
1824 /*
1825 * Get parent's offset in snapshot from the stack
1826 * and store it in the current node
1827 */
1828 if (dsp->depth > 1) {
1829 me->parent = *(PARENT_OFFSET(dsp));
1830 }
1831
1832 /*
1833 * Save the offset of this di_node in a hash table.
1834 * This is used later to resolve references to this
1835 * dip from other parts of the tree (per-driver list,
1836 * multipathing linkages, layered usage linkages).
1837 * The key used for the hash table is derived from
1838 * information in the dip.
1839 */
1840 di_register_dip(st, (dev_info_t *)node, me->self);
1841
1842 #ifdef DEVID_COMPATIBILITY
1843 /* check for devid as property marker */
1844 if (node->devi_devid_str) {
1845 ddi_devid_t devid;
1846
1847 /*
1848 * The devid is now represented as a property. For
1849 * compatibility with di_devid() interface in libdevinfo we
1850 * must return it as a binary structure in the snapshot. When
1851 * (if) di_devid() is removed from libdevinfo then the code
1852 * related to DEVID_COMPATIBILITY can be removed.
1853 */
1854 if (ddi_devid_str_decode(node->devi_devid_str, &devid, NULL) ==
1855 DDI_SUCCESS) {
1856 size = ddi_devid_sizeof(devid);
1857 off = di_checkmem(st, off, size);
1858 me->devid = off;
1859 bcopy(devid, di_mem_addr(st, off), size);
1860 off += size;
1861 ddi_devid_free(devid);
1862 }
1863 }
1864 #endif /* DEVID_COMPATIBILITY */
1865
1866 if (node->devi_node_name) {
1867 size = strlen(node->devi_node_name) + 1;
1868 me->node_name = off = di_checkmem(st, off, size);
1869 (void) strcpy(di_mem_addr(st, off), node->devi_node_name);
1870 off += size;
1871 }
1872
1873 if (node->devi_compat_names && (node->devi_compat_length > 1)) {
1874 size = node->devi_compat_length;
1875 me->compat_names = off = di_checkmem(st, off, size);
1876 me->compat_length = (int)size;
1877 bcopy(node->devi_compat_names, di_mem_addr(st, off), size);
1878 off += size;
1879 }
1880
1881 if (node->devi_addr) {
1882 size = strlen(node->devi_addr) + 1;
1883 me->address = off = di_checkmem(st, off, size);
1884 (void) strcpy(di_mem_addr(st, off), node->devi_addr);
1885 off += size;
1886 }
1887
1888 if (node->devi_binding_name) {
1889 size = strlen(node->devi_binding_name) + 1;
1890 me->bind_name = off = di_checkmem(st, off, size);
1891 (void) strcpy(di_mem_addr(st, off), node->devi_binding_name);
1892 off += size;
1893 }
1894
1895 me->drv_major = node->devi_major;
1896
1897 /*
1898 * If the dip is BOUND, set the next pointer of the
1899 * per-instance list to -1, indicating that it is yet to be resolved.
1900 * This will be resolved later in snap_driver_list().
1901 */
1902 if (me->drv_major != -1) {
1903 me->next = -1;
1904 } else {
1905 me->next = 0;
1906 }
1907
1908 /*
1909 * An optimization to skip mutex_enter when not needed.
1910 */
1911 if (!((DINFOMINOR | DINFOPROP | DINFOPATH | DINFOHP) & st->command)) {
1912 goto priv_data;
1913 }
1914
1915 /*
1916 * LOCKING: We already have an active ndi_devi_enter to gather the
1917 * minor data, and we will take devi_lock to gather properties as
1918 * needed in di_getprop.
1919 */
1920 if (!(DINFOMINOR & st->command)) {
1921 goto path;
1922 }
1923
1924 ASSERT(DEVI_BUSY_OWNED(node));
1925 if (node->devi_minor) { /* minor data */
1926 me->minor_data = off;
1927 off = di_getmdata(node->devi_minor, &me->minor_data,
1928 me->self, st);
1929 }
1930
1931 path:
1932 if (!(DINFOPATH & st->command)) {
1933 goto property;
1934 }
1935
1936 if (MDI_VHCI(node)) {
1937 me->multipath_component = MULTIPATH_COMPONENT_VHCI;
1938 }
1939
1940 if (MDI_CLIENT(node)) {
1941 me->multipath_component = MULTIPATH_COMPONENT_CLIENT;
1942 me->multipath_client = off;
1943 off = di_getpath_data((dev_info_t *)node, &me->multipath_client,
1944 me->self, st, 1);
1945 dcmn_err((CE_WARN, "me->multipath_client = %x for node %p "
1946 "component type = %d. off=%d",
1947 me->multipath_client,
1948 (void *)node, node->devi_mdi_component, off));
1949 }
1950
1951 if (MDI_PHCI(node)) {
1952 me->multipath_component = MULTIPATH_COMPONENT_PHCI;
1953 me->multipath_phci = off;
1954 off = di_getpath_data((dev_info_t *)node, &me->multipath_phci,
1955 me->self, st, 0);
1956 dcmn_err((CE_WARN, "me->multipath_phci = %x for node %p "
1957 "component type = %d. off=%d",
1958 me->multipath_phci,
1959 (void *)node, node->devi_mdi_component, off));
1960 }
1961
1962 property:
1963 if (!(DINFOPROP & st->command)) {
1964 goto hotplug_data;
1965 }
1966
1967 if (node->devi_drv_prop_ptr) { /* driver property list */
1968 me->drv_prop = off;
1969 off = di_getprop(DI_PROP_DRV_LIST, &node->devi_drv_prop_ptr,
1970 &me->drv_prop, st, node);
1971 }
1972
1973 if (node->devi_sys_prop_ptr) { /* system property list */
1974 me->sys_prop = off;
1975 off = di_getprop(DI_PROP_SYS_LIST, &node->devi_sys_prop_ptr,
1976 &me->sys_prop, st, node);
1977 }
1978
1979 if (node->devi_hw_prop_ptr) { /* hardware property list */
1980 me->hw_prop = off;
1981 off = di_getprop(DI_PROP_HW_LIST, &node->devi_hw_prop_ptr,
1982 &me->hw_prop, st, node);
1983 }
1984
1985 if (node->devi_global_prop_list == NULL) {
1986 me->glob_prop = (di_off_t)-1; /* not global property */
1987 } else {
1988 /*
1989 * Make a copy of the global property list if this devinfo
1990 * refers to global properties different from those on the
1991 * devnames array. This can happen if there has been a
1992 * forced driver.conf update. See update_drv(8).
1993 */
1994 ASSERT(me->drv_major != -1);
1995 if (node->devi_global_prop_list !=
1996 devnamesp[me->drv_major].dn_global_prop_ptr) {
1997 me->glob_prop = off;
1998 off = di_getprop(DI_PROP_GLB_LIST,
1999 &node->devi_global_prop_list->prop_list,
2000 &me->glob_prop, st, node);
2001 }
2002 }
2003
2004 hotplug_data:
2005 if (!(DINFOHP & st->command)) {
2006 goto priv_data;
2007 }
2008
2009 if (node->devi_hp_hdlp) { /* hotplug data */
2010 me->hp_data = off;
2011 off = di_gethpdata(node->devi_hp_hdlp, &me->hp_data, st);
2012 }
2013
2014 priv_data:
2015 if (!(DINFOPRIVDATA & st->command)) {
2016 goto pm_info;
2017 }
2018
2019 if (ddi_get_parent_data((dev_info_t *)node) != NULL) {
2020 me->parent_data = off;
2021 off = di_getppdata(node, &me->parent_data, st);
2022 }
2023
2024 if (ddi_get_driver_private((dev_info_t *)node) != NULL) {
2025 me->driver_data = off;
2026 off = di_getdpdata(node, &me->driver_data, st);
2027 }
2028
2029 pm_info: /* NOT implemented */
2030
2031 subtree:
2032 /* keep the stack aligned */
2033 off = DI_ALIGN(off);
2034
2035 if (!(DINFOSUBTREE & st->command)) {
2036 POP_STACK(dsp);
2037 return (off);
2038 }
2039
2040 child:
2041 /*
2042 * If there is a visible child, push it onto the stack.
2043 * Hold the parent (me) busy while doing so.
2044 */
2045 if ((n = node->devi_child) != NULL) {
2046 /* skip hidden nodes */
2047 while (n && ndi_dev_is_hidden_node((dev_info_t *)n))
2048 n = n->devi_sibling;
2049 if (n) {
2050 me->child = off;
2051 PUSH_STACK(dsp, n, &me->child);
2052 return (me->child);
2053 }
2054 }
2055
2056 sibling:
2057 /*
2058 * Done with any child nodes, unroll the stack until a visible
2059 * sibling of a parent node is found or the root node is reached.
2060 */
2061 POP_STACK(dsp);
2062 while (!EMPTY_STACK(dsp)) {
2063 if ((n = node->devi_sibling) != NULL) {
2064 /* skip hidden nodes */
2065 while (n && ndi_dev_is_hidden_node((dev_info_t *)n))
2066 n = n->devi_sibling;
2067 if (n) {
2068 me->sibling = DI_ALIGN(off);
2069 PUSH_STACK(dsp, n, &me->sibling);
2070 return (me->sibling);
2071 }
2072 }
2073 node = TOP_NODE(dsp);
2074 me = DI_NODE(di_mem_addr(st, *(TOP_OFFSET(dsp))));
2075 POP_STACK(dsp);
2076 }
2077
2078 /*
2079 * DONE with all nodes
2080 */
2081 return (off);
2082 }
2083
2084 static i_lnode_t *
2085 i_lnode_alloc(int modid)
2086 {
2087 i_lnode_t *i_lnode;
2088
2089 i_lnode = kmem_zalloc(sizeof (i_lnode_t), KM_SLEEP);
2090
2091 ASSERT(modid != -1);
2092 i_lnode->modid = modid;
2093
2094 return (i_lnode);
2095 }
2096
2097 static void
2098 i_lnode_free(i_lnode_t *i_lnode)
2099 {
2100 kmem_free(i_lnode, sizeof (i_lnode_t));
2101 }
2102
2103 static void
2104 i_lnode_check_free(i_lnode_t *i_lnode)
2105 {
2106 /* This lnode and its dip must have been snapshotted */
2107 ASSERT(i_lnode->self > 0);
2108 ASSERT(i_lnode->di_node->self > 0);
2109
2110 /* at least 1 link (in or out) must exist for this lnode */
2111 ASSERT(i_lnode->link_in || i_lnode->link_out);
2112
2113 i_lnode_free(i_lnode);
2114 }
2115
2116 static i_link_t *
2117 i_link_alloc(int spec_type)
2118 {
2119 i_link_t *i_link;
2120
2121 i_link = kmem_zalloc(sizeof (i_link_t), KM_SLEEP);
2122 i_link->spec_type = spec_type;
2123
2124 return (i_link);
2125 }
2126
2127 static void
2128 i_link_check_free(i_link_t *i_link)
2129 {
2130 /* This link must have been snapshotted */
2131 ASSERT(i_link->self > 0);
2132
2133 /* Both endpoint lnodes must exist for this link */
2134 ASSERT(i_link->src_lnode);
2135 ASSERT(i_link->tgt_lnode);
2136
2137 kmem_free(i_link, sizeof (i_link_t));
2138 }
2139
2140 /*ARGSUSED*/
2141 static uint_t
2142 i_lnode_hashfunc(void *arg, mod_hash_key_t key)
2143 {
2144 i_lnode_t *i_lnode = (i_lnode_t *)key;
2145 struct di_node *ptr;
2146 dev_t dev;
2147
2148 dev = i_lnode->devt;
2149 if (dev != DDI_DEV_T_NONE)
2150 return (i_lnode->modid + getminor(dev) + getmajor(dev));
2151
2152 ptr = i_lnode->di_node;
2153 if (ptr) {
2154 uintptr_t k = (uintptr_t)ptr;
2155 ASSERT(ptr->self > 0);
2156 k >>= (int)highbit(sizeof (struct di_node));
2157 return ((uint_t)k);
2158 }
2159
2160 return (i_lnode->modid);
2161 }
2162
2163 static int
2164 i_lnode_cmp(void *arg1, void *arg2)
2165 {
2166 i_lnode_t *i_lnode1 = (i_lnode_t *)arg1;
2167 i_lnode_t *i_lnode2 = (i_lnode_t *)arg2;
2168
2169 if (i_lnode1->modid != i_lnode2->modid) {
2170 return ((i_lnode1->modid < i_lnode2->modid) ? -1 : 1);
2171 }
2172
2173 if (i_lnode1->di_node != i_lnode2->di_node)
2174 return ((i_lnode1->di_node < i_lnode2->di_node) ? -1 : 1);
2175
2176 if (i_lnode1->devt != i_lnode2->devt)
2177 return ((i_lnode1->devt < i_lnode2->devt) ? -1 : 1);
2178
2179 return (0);
2180 }
2181
2182 /*
2183 * An lnode represents a {dip, dev_t} tuple. A link represents a
2184 * {src_lnode, tgt_lnode, spec_type} tuple.
2185 * The following callback assumes that LDI framework ref-counts the
2186 * src_dip and tgt_dip while invoking this callback.
2187 */
2188 static int
2189 di_ldi_callback(const ldi_usage_t *ldi_usage, void *arg)
2190 {
2191 struct di_state *st = (struct di_state *)arg;
2192 i_lnode_t *src_lnode, *tgt_lnode, *i_lnode;
2193 i_link_t **i_link_next, *i_link;
2194 di_off_t soff, toff;
2195 mod_hash_val_t nodep = NULL;
2196 int res;
2197
2198 /*
2199 * If the source or target of this device usage information doesn't
2200 * correspond to a device node, then we don't report it via
2201 * libdevinfo, so just return.
2202 */
2203 if ((ldi_usage->src_dip == NULL) || (ldi_usage->tgt_dip == NULL))
2204 return (LDI_USAGE_CONTINUE);
2205
2206 ASSERT(e_ddi_devi_holdcnt(ldi_usage->src_dip));
2207 ASSERT(e_ddi_devi_holdcnt(ldi_usage->tgt_dip));
2208
2209 /*
2210 * Skip the ldi_usage if either src or tgt dip is not in the
2211 * snapshot. This saves us from pruning bad lnodes/links later.
2212 */
2213 if (di_dip_find(st, ldi_usage->src_dip, &soff) != 0)
2214 return (LDI_USAGE_CONTINUE);
2215 if (di_dip_find(st, ldi_usage->tgt_dip, &toff) != 0)
2216 return (LDI_USAGE_CONTINUE);
2217
2218 ASSERT(soff > 0);
2219 ASSERT(toff > 0);
2220
2221 /*
2222 * allocate an i_lnode and add it to the lnode hash
2223 * if it is not already present. For this particular
2224 * link the lnode is a source, but it may
2225 * participate as tgt or src in any number of layered
2226 * operations - so it may already be in the hash.
2227 */
2228 i_lnode = i_lnode_alloc(ldi_usage->src_modid);
2229 i_lnode->di_node = DI_NODE(di_mem_addr(st, soff));
2230 i_lnode->devt = ldi_usage->src_devt;
2231
2232 res = mod_hash_find(st->lnode_hash, i_lnode, &nodep);
2233 if (res == MH_ERR_NOTFOUND) {
2234 /*
2235 * new i_lnode
2236 * add it to the hash and increment the lnode count
2237 */
2238 res = mod_hash_insert(st->lnode_hash, i_lnode, i_lnode);
2239 ASSERT(res == 0);
2240 st->lnode_count++;
2241 src_lnode = i_lnode;
2242 } else {
2243 /* this i_lnode already exists in the lnode_hash */
2244 i_lnode_free(i_lnode);
2245 src_lnode = (i_lnode_t *)nodep;
2246 }
2247
2248 /*
2249 * allocate a tgt i_lnode and add it to the lnode hash
2250 */
2251 i_lnode = i_lnode_alloc(ldi_usage->tgt_modid);
2252 i_lnode->di_node = DI_NODE(di_mem_addr(st, toff));
2253 i_lnode->devt = ldi_usage->tgt_devt;
2254
2255 res = mod_hash_find(st->lnode_hash, i_lnode, &nodep);
2256 if (res == MH_ERR_NOTFOUND) {
2257 /*
2258 * new i_lnode
2259 * add it to the hash and increment the lnode count
2260 */
2261 res = mod_hash_insert(st->lnode_hash, i_lnode, i_lnode);
2262 ASSERT(res == 0);
2263 st->lnode_count++;
2264 tgt_lnode = i_lnode;
2265 } else {
2266 /* this i_lnode already exists in the lnode_hash */
2267 i_lnode_free(i_lnode);
2268 tgt_lnode = (i_lnode_t *)nodep;
2269 }
2270
2271 /*
2272 * allocate a i_link
2273 */
2274 i_link = i_link_alloc(ldi_usage->tgt_spec_type);
2275 i_link->src_lnode = src_lnode;
2276 i_link->tgt_lnode = tgt_lnode;
2277
2278 /*
2279 * add this link onto the src i_lnodes outbound i_link list
2280 */
2281 i_link_next = &(src_lnode->link_out);
2282 while (*i_link_next != NULL) {
2283 if ((i_lnode_cmp(tgt_lnode, (*i_link_next)->tgt_lnode) == 0) &&
2284 (i_link->spec_type == (*i_link_next)->spec_type)) {
2285 /* this link already exists */
2286 kmem_free(i_link, sizeof (i_link_t));
2287 return (LDI_USAGE_CONTINUE);
2288 }
2289 i_link_next = &((*i_link_next)->src_link_next);
2290 }
2291 *i_link_next = i_link;
2292
2293 /*
2294 * add this link onto the tgt i_lnodes inbound i_link list
2295 */
2296 i_link_next = &(tgt_lnode->link_in);
2297 while (*i_link_next != NULL) {
2298 ASSERT(i_lnode_cmp(src_lnode, (*i_link_next)->src_lnode) != 0);
2299 i_link_next = &((*i_link_next)->tgt_link_next);
2300 }
2301 *i_link_next = i_link;
2302
2303 /*
2304 * add this i_link to the link hash
2305 */
2306 res = mod_hash_insert(st->link_hash, i_link, i_link);
2307 ASSERT(res == 0);
2308 st->link_count++;
2309
2310 return (LDI_USAGE_CONTINUE);
2311 }
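/*
 * Illustrative walk-through (a sketch, not code in this driver): each
 * usage record reported by ldi_usage_walker() contributes at most two
 * lnodes and one link, with duplicates filtered by the two hashes:
 *
 *	src_lnode = find-or-insert {src_modid, di_node(src_dip), src_devt}
 *	tgt_lnode = find-or-insert {tgt_modid, di_node(tgt_dip), tgt_devt}
 *	link = {src_lnode, tgt_lnode, tgt_spec_type}, dropped if an
 *	    identical link is already on src_lnode->link_out
 */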
2312
2313 struct i_layer_data {
2314 struct di_state *st;
2315 int lnode_count;
2316 int link_count;
2317 di_off_t lnode_off;
2318 di_off_t link_off;
2319 };
2320
2321 /*ARGSUSED*/
2322 static uint_t
2323 i_link_walker(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
2324 {
2325 i_link_t *i_link = (i_link_t *)key;
2326 struct i_layer_data *data = arg;
2327 struct di_link *me;
2328 struct di_lnode *melnode;
2329 struct di_node *medinode;
2330
2331 ASSERT(i_link->self == 0);
2332
2333 i_link->self = data->link_off +
2334 (data->link_count * sizeof (struct di_link));
2335 data->link_count++;
2336
2337 ASSERT(data->link_off > 0 && data->link_count > 0);
2338 ASSERT(data->lnode_count == data->st->lnode_count); /* lnodes done */
2339 ASSERT(data->link_count <= data->st->link_count);
2340
2341 /* fill in fields for the di_link snapshot */
2342 me = DI_LINK(di_mem_addr(data->st, i_link->self));
2343 me->self = i_link->self;
2344 me->spec_type = i_link->spec_type;
2345
2346 /*
2347 * The src_lnode and tgt_lnode i_lnode_t for this i_link_t
2348 * are created during the LDI table walk. Since we are
2349 * walking the link hash, the lnode hash has already been
2350 * walked and the lnodes have been snapshotted. Save lnode
2351 * offsets.
2352 */
2353 me->src_lnode = i_link->src_lnode->self;
2354 me->tgt_lnode = i_link->tgt_lnode->self;
2355
2356 /*
2357 * Save this link's offset in the src_lnode snapshot's link_out
2358 * field
2359 */
2360 melnode = DI_LNODE(di_mem_addr(data->st, me->src_lnode));
2361 me->src_link_next = melnode->link_out;
2362 melnode->link_out = me->self;
2363
2364 /*
2365 * Put this link on the tgt_lnode's link_in field
2366 */
2367 melnode = DI_LNODE(di_mem_addr(data->st, me->tgt_lnode));
2368 me->tgt_link_next = melnode->link_in;
2369 melnode->link_in = me->self;
2370
2371 /*
2372 * An i_lnode_t is only created if the corresponding dip exists
2373 * in the snapshot. A pointer to the di_node is saved in the
2374 * i_lnode_t when it is allocated. For this link, get the di_node
2375 * for the source lnode. Then put the link on the di_node's list
2376 * of src links
2377 */
2378 medinode = i_link->src_lnode->di_node;
2379 me->src_node_next = medinode->src_links;
2380 medinode->src_links = me->self;
2381
2382 /*
2383 * Put this link on the tgt_links list of the target
2384 * dip.
2385 */
2386 medinode = i_link->tgt_lnode->di_node;
2387 me->tgt_node_next = medinode->tgt_links;
2388 medinode->tgt_links = me->self;
2389
2390 return (MH_WALK_CONTINUE);
2391 }
2392
2393 /*ARGSUSED*/
2394 static uint_t
2395 i_lnode_walker(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
2396 {
2397 i_lnode_t *i_lnode = (i_lnode_t *)key;
2398 struct i_layer_data *data = arg;
2399 struct di_lnode *me;
2400 struct di_node *medinode;
2401
2402 ASSERT(i_lnode->self == 0);
2403
2404 i_lnode->self = data->lnode_off +
2405 (data->lnode_count * sizeof (struct di_lnode));
2406 data->lnode_count++;
2407
2408 ASSERT(data->lnode_off > 0 && data->lnode_count > 0);
2409 ASSERT(data->link_count == 0); /* links not done yet */
2410 ASSERT(data->lnode_count <= data->st->lnode_count);
2411
2412 /* fill in fields for the di_lnode snapshot */
2413 me = DI_LNODE(di_mem_addr(data->st, i_lnode->self));
2414 me->self = i_lnode->self;
2415
2416 if (i_lnode->devt == DDI_DEV_T_NONE) {
2417 me->dev_major = DDI_MAJOR_T_NONE;
2418 me->dev_minor = DDI_MAJOR_T_NONE;
2419 } else {
2420 me->dev_major = getmajor(i_lnode->devt);
2421 me->dev_minor = getminor(i_lnode->devt);
2422 }
2423
2424 /*
2425 * The dip corresponding to this lnode must exist in
2426 * the snapshot or we wouldn't have created the i_lnode_t
2427 * during LDI walk. Save the offset of the dip.
2428 */
2429 ASSERT(i_lnode->di_node && i_lnode->di_node->self > 0);
2430 me->node = i_lnode->di_node->self;
2431
2432 /*
2433 * There must be at least one link in or out of this lnode
2434 * or we wouldn't have created it. These fields will be set
2435 * during the link hash walk.
2436 */
2437 ASSERT((i_lnode->link_in != NULL) || (i_lnode->link_out != NULL));
2438
2439 /*
2440 * Set the offset of the devinfo node associated with this
2441 * lnode. Also update the node_next pointer. This pointer
2442 * is set if there are multiple lnodes associated with the same
2443 * devinfo node (which can occur when multiple minor nodes
2444 * are open for one device, etc.).
2445 */
2446 medinode = i_lnode->di_node;
2447 me->node_next = medinode->lnodes;
2448 medinode->lnodes = me->self;
2449
2450 return (MH_WALK_CONTINUE);
2451 }
2452
2453 static di_off_t
2454 di_getlink_data(di_off_t off, struct di_state *st)
2455 {
2456 struct i_layer_data data = {0};
2457 size_t size;
2458
2459 dcmn_err2((CE_CONT, "di_getlink_data: off = %x\n", off));
2460
2461 st->lnode_hash = mod_hash_create_extended("di_lnode_hash", 32,
2462 mod_hash_null_keydtor, (void (*)(mod_hash_val_t))i_lnode_check_free,
2463 i_lnode_hashfunc, NULL, i_lnode_cmp, KM_SLEEP);
2464
2465 st->link_hash = mod_hash_create_ptrhash("di_link_hash", 32,
2466 (void (*)(mod_hash_val_t))i_link_check_free, sizeof (i_link_t));
2467
2468 /* get driver layering information */
2469 (void) ldi_usage_walker(st, di_ldi_callback);
2470
2471 /* check if there is any link data to include in the snapshot */
2472 if (st->lnode_count == 0) {
2473 ASSERT(st->link_count == 0);
2474 goto out;
2475 }
2476
2477 ASSERT(st->link_count != 0);
2478
2479 /* get a pointer to snapshot memory for all the di_lnodes */
2480 size = sizeof (struct di_lnode) * st->lnode_count;
2481 data.lnode_off = off = di_checkmem(st, off, size);
2482 off += size;
2483
2484 /* get a pointer to snapshot memory for all the di_links */
2485 size = sizeof (struct di_link) * st->link_count;
2486 data.link_off = off = di_checkmem(st, off, size);
2487 off += size;
2488
2489 data.lnode_count = data.link_count = 0;
2490 data.st = st;
2491
2492 /*
2493 * We have lnodes and links that will go into the
2494 * snapshot, so let's walk the respective hashes
2495 * and snapshot them. The various linkages are
2496 * also set up during the walk.
2497 */
2498 mod_hash_walk(st->lnode_hash, i_lnode_walker, (void *)&data);
2499 ASSERT(data.lnode_count == st->lnode_count);
2500
2501 mod_hash_walk(st->link_hash, i_link_walker, (void *)&data);
2502 ASSERT(data.link_count == st->link_count);
2503
2504 out:
2505 /* free up the i_lnodes and i_links used to create the snapshot */
2506 mod_hash_destroy_hash(st->lnode_hash);
2507 mod_hash_destroy_hash(st->link_hash);
2508 st->lnode_count = 0;
2509 st->link_count = 0;
2510
2511 return (off);
2512 }
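/*
 * A note on cleanup (sketch of what the teardown above relies on): the
 * transient i_lnode_t and i_link_t elements are owned by the hashes.
 * The value destructors registered at create time are applied by
 * mod_hash_destroy_hash() to every remaining element, so no explicit
 * per-element free loop is needed:
 *
 *	mod_hash_destroy_hash(st->lnode_hash);	invokes i_lnode_check_free()
 *	mod_hash_destroy_hash(st->link_hash);	invokes i_link_check_free()
 *
 * The *_check_free() variants also ASSERT that each element was actually
 * snapshotted (self > 0) before it is freed.
 */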
2513
2514
2515 /*
2516 * Copy all minor data nodes attached to a devinfo node into the snapshot.
2517 * It is called from di_copynode with active ndi_devi_enter to protect
2518 * the list of minor nodes.
2519 */
2520 static di_off_t
2521 di_getmdata(struct ddi_minor_data *mnode, di_off_t *off_p, di_off_t node,
2522 struct di_state *st)
2523 {
2524 di_off_t off;
2525 struct di_minor *me;
2526 size_t size;
2527
2528 dcmn_err2((CE_CONT, "di_getmdata:\n"));
2529
2530 /*
2531 * check memory first
2532 */
2533 off = di_checkmem(st, *off_p, sizeof (struct di_minor));
2534 *off_p = off;
2535
2536 do {
2537 me = DI_MINOR(di_mem_addr(st, off));
2538 me->self = off;
2539 me->type = mnode->type;
2540 me->node = node;
2541 me->user_private_data = 0;
2542
2543 off += sizeof (struct di_minor);
2544
2545 /*
2546 * Split dev_t to major/minor, so it works for
2547 * both ILP32 and LP64 model
2548 */
2549 me->dev_major = getmajor(mnode->ddm_dev);
2550 me->dev_minor = getminor(mnode->ddm_dev);
2551 me->spec_type = mnode->ddm_spec_type;
2552
2553 if (mnode->ddm_name) {
2554 size = strlen(mnode->ddm_name) + 1;
2555 me->name = off = di_checkmem(st, off, size);
2556 (void) strcpy(di_mem_addr(st, off), mnode->ddm_name);
2557 off += size;
2558 }
2559
2560 if (mnode->ddm_node_type) {
2561 size = strlen(mnode->ddm_node_type) + 1;
2562 me->node_type = off = di_checkmem(st, off, size);
2563 (void) strcpy(di_mem_addr(st, off),
2564 mnode->ddm_node_type);
2565 off += size;
2566 }
2567
2568 off = di_checkmem(st, off, sizeof (struct di_minor));
2569 me->next = off;
2570 mnode = mnode->next;
2571 } while (mnode);
2572
2573 me->next = 0;
2574
2575 return (off);
2576 }
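/*
 * Illustrative sketch of how a consumer walks the resulting chain; this
 * assumes a hypothetical reader with the snapshot mapped at `base'
 * (libdevinfo wraps the same traversal in di_minor_next(3DEVINFO)):
 *
 *	struct di_minor *mn;
 *	di_off_t moff;
 *
 *	for (moff = node->minor_data; moff != 0; moff = mn->next) {
 *		mn = (struct di_minor *)(base + moff);
 *		name = (char *)(base + mn->name);
 *		dev = makedevice(mn->dev_major, mn->dev_minor);
 *		...
 *	}
 *
 * All linkage is by di_off_t offset, never by pointer, so the snapshot
 * remains valid wherever it is mapped.
 */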
2577
2578 /*
2579 * di_register_dip(), di_find_dip(): The dip must be protected
2580 * from deallocation when using these routines - this can either
2581 * be a reference count, a busy hold or a per-driver lock.
2582 */
2583
2584 static void
2585 di_register_dip(struct di_state *st, dev_info_t *dip, di_off_t off)
2586 {
2587 struct dev_info *node = DEVI(dip);
2588 struct di_key *key = kmem_zalloc(sizeof (*key), KM_SLEEP);
2589 struct di_dkey *dk;
2590
2591 ASSERT(dip);
2592 ASSERT(off > 0);
2593
2594 key->k_type = DI_DKEY;
2595 dk = &(key->k_u.dkey);
2596
2597 dk->dk_dip = dip;
2598 dk->dk_major = node->devi_major;
2599 dk->dk_inst = node->devi_instance;
2600 dk->dk_nodeid = node->devi_nodeid;
2601
2602 if (mod_hash_insert(st->reg_dip_hash, (mod_hash_key_t)key,
2603 (mod_hash_val_t)(uintptr_t)off) != 0) {
2604 panic(
2605 "duplicate devinfo (%p) registered during device "
2606 "tree walk", (void *)dip);
2607 }
2608 }
2609
2610
2611 static int
2612 di_dip_find(struct di_state *st, dev_info_t *dip, di_off_t *off_p)
2613 {
2614 /*
2615 * uintptr_t must be used because it matches the size of void *;
2616 * mod_hash expects clients to place results into pointer-size
2617 * containers; since di_off_t is always a 32-bit offset, storing the
2618 * result directly into a di_off_t would overflow it on 64-bit kernels.
2619 */
2620 uintptr_t offset;
2621 struct di_key key = {0};
2622 struct di_dkey *dk;
2623
2624 ASSERT(st->reg_dip_hash);
2625 ASSERT(dip);
2626 ASSERT(off_p);
2627
2628
2629 key.k_type = DI_DKEY;
2630 dk = &(key.k_u.dkey);
2631
2632 dk->dk_dip = dip;
2633 dk->dk_major = DEVI(dip)->devi_major;
2634 dk->dk_inst = DEVI(dip)->devi_instance;
2635 dk->dk_nodeid = DEVI(dip)->devi_nodeid;
2636
2637 if (mod_hash_find(st->reg_dip_hash, (mod_hash_key_t)&key,
2638 (mod_hash_val_t *)&offset) == 0) {
2639 *off_p = (di_off_t)offset;
2640 return (0);
2641 } else {
2642 return (-1);
2643 }
2644 }
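/*
 * Sketch of the register/find pairing: a lookup succeeds only if every
 * field of the di_dkey (dip pointer, major, instance, nodeid) matches
 * what di_register_dip() stored during the tree walk:
 *
 *	di_register_dip(st, dip, off);		during di_copynode()
 *	...
 *	if (di_dip_find(st, dip, &off) == 0)	later, e.g. from the
 *		... off is the node's		LDI usage callback
 *		    snapshot offset ...
 *
 * A dip that was freed and reallocated would differ in one of the key
 * fields and correctly fail the lookup.
 */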
2645
2646 /*
2647 * di_register_pip(), di_find_pip(): The pip must be protected from deallocation
2648 * when using these routines. The caller must do this by protecting the
2649 * client(or phci)<->pip linkage while traversing the list and then holding the
2650 * pip when it is found in the list.
2651 */
2652
2653 static void
2654 di_register_pip(struct di_state *st, mdi_pathinfo_t *pip, di_off_t off)
2655 {
2656 struct di_key *key = kmem_zalloc(sizeof (*key), KM_SLEEP);
2657 char *path_addr;
2658 struct di_pkey *pk;
2659
2660 ASSERT(pip);
2661 ASSERT(off > 0);
2662
2663 key->k_type = DI_PKEY;
2664 pk = &(key->k_u.pkey);
2665
2666 pk->pk_pip = pip;
2667 path_addr = mdi_pi_get_addr(pip);
2668 if (path_addr)
2669 pk->pk_path_addr = i_ddi_strdup(path_addr, KM_SLEEP);
2670 pk->pk_client = mdi_pi_get_client(pip);
2671 pk->pk_phci = mdi_pi_get_phci(pip);
2672
2673 if (mod_hash_insert(st->reg_pip_hash, (mod_hash_key_t)key,
2674 (mod_hash_val_t)(uintptr_t)off) != 0) {
2675 panic(
2676 "duplicate pathinfo (%p) registered during device "
2677 "tree walk", (void *)pip);
2678 }
2679 }
2680
2681 /*
2682 * As with di_register_pip, the caller must hold or lock the pip
2683 */
2684 static int
2685 di_pip_find(struct di_state *st, mdi_pathinfo_t *pip, di_off_t *off_p)
2686 {
2687 /*
2688 * uintptr_t must be used because it matches the size of void *;
2689 * mod_hash expects clients to place results into pointer-size
2690 * containers; since di_off_t is always a 32-bit offset, storing the
2691 * result directly into a di_off_t would overflow it on 64-bit kernels.
2692 */
2693 uintptr_t offset;
2694 struct di_key key = {0};
2695 struct di_pkey *pk;
2696
2697 ASSERT(st->reg_pip_hash);
2698 ASSERT(off_p);
2699
2700 if (pip == NULL) {
2701 *off_p = 0;
2702 return (0);
2703 }
2704
2705 key.k_type = DI_PKEY;
2706 pk = &(key.k_u.pkey);
2707
2708 pk->pk_pip = pip;
2709 pk->pk_path_addr = mdi_pi_get_addr(pip);
2710 pk->pk_client = mdi_pi_get_client(pip);
2711 pk->pk_phci = mdi_pi_get_phci(pip);
2712
2713 if (mod_hash_find(st->reg_pip_hash, (mod_hash_key_t)&key,
2714 (mod_hash_val_t *)&offset) == 0) {
2715 *off_p = (di_off_t)offset;
2716 return (0);
2717 } else {
2718 return (-1);
2719 }
2720 }
2721
2722 static di_path_state_t
2723 path_state_convert(mdi_pathinfo_state_t st)
2724 {
2725 switch (st) {
2726 case MDI_PATHINFO_STATE_ONLINE:
2727 return (DI_PATH_STATE_ONLINE);
2728 case MDI_PATHINFO_STATE_STANDBY:
2729 return (DI_PATH_STATE_STANDBY);
2730 case MDI_PATHINFO_STATE_OFFLINE:
2731 return (DI_PATH_STATE_OFFLINE);
2732 case MDI_PATHINFO_STATE_FAULT:
2733 return (DI_PATH_STATE_FAULT);
2734 default:
2735 return (DI_PATH_STATE_UNKNOWN);
2736 }
2737 }
2738
2739 static uint_t
2740 path_flags_convert(uint_t pi_path_flags)
2741 {
2742 uint_t di_path_flags = 0;
2743
2744 /* MDI_PATHINFO_FLAGS_HIDDEN nodes not in snapshot */
2745
2746 if (pi_path_flags & MDI_PATHINFO_FLAGS_DEVICE_REMOVED)
2747 di_path_flags |= DI_PATH_FLAGS_DEVICE_REMOVED;
2748
2749 return (di_path_flags);
2750 }
2751
2752
2753 static di_off_t
2754 di_path_getprop(mdi_pathinfo_t *pip, di_off_t *off_p,
2755 struct di_state *st)
2756 {
2757 nvpair_t *prop = NULL;
2758 struct di_path_prop *me;
2759 int off;
2760 size_t size;
2761 char *str;
2762 uchar_t *buf;
2763 uint_t nelems;
2764
2765 off = *off_p;
2766 if (mdi_pi_get_next_prop(pip, NULL) == NULL) {
2767 *off_p = 0;
2768 return (off);
2769 }
2770
2771 off = di_checkmem(st, off, sizeof (struct di_path_prop));
2772 *off_p = off;
2773
2774 while (prop = mdi_pi_get_next_prop(pip, prop)) {
2775 me = DI_PATHPROP(di_mem_addr(st, off));
2776 me->self = off;
2777 off += sizeof (struct di_path_prop);
2778
2779 /*
2780 * property name
2781 */
2782 size = strlen(nvpair_name(prop)) + 1;
2783 me->prop_name = off = di_checkmem(st, off, size);
2784 (void) strcpy(di_mem_addr(st, off), nvpair_name(prop));
2785 off += size;
2786
2787 switch (nvpair_type(prop)) {
2788 case DATA_TYPE_BYTE:
2789 case DATA_TYPE_INT16:
2790 case DATA_TYPE_UINT16:
2791 case DATA_TYPE_INT32:
2792 case DATA_TYPE_UINT32:
2793 me->prop_type = DDI_PROP_TYPE_INT;
2794 size = sizeof (int32_t);
2795 off = di_checkmem(st, off, size);
2796 (void) nvpair_value_int32(prop,
2797 (int32_t *)di_mem_addr(st, off));
2798 break;
2799
2800 case DATA_TYPE_INT64:
2801 case DATA_TYPE_UINT64:
2802 me->prop_type = DDI_PROP_TYPE_INT64;
2803 size = sizeof (int64_t);
2804 off = di_checkmem(st, off, size);
2805 (void) nvpair_value_int64(prop,
2806 (int64_t *)di_mem_addr(st, off));
2807 break;
2808
2809 case DATA_TYPE_STRING:
2810 me->prop_type = DDI_PROP_TYPE_STRING;
2811 (void) nvpair_value_string(prop, &str);
2812 size = strlen(str) + 1;
2813 off = di_checkmem(st, off, size);
2814 (void) strcpy(di_mem_addr(st, off), str);
2815 break;
2816
2817 case DATA_TYPE_BYTE_ARRAY:
2818 case DATA_TYPE_INT16_ARRAY:
2819 case DATA_TYPE_UINT16_ARRAY:
2820 case DATA_TYPE_INT32_ARRAY:
2821 case DATA_TYPE_UINT32_ARRAY:
2822 case DATA_TYPE_INT64_ARRAY:
2823 case DATA_TYPE_UINT64_ARRAY:
2824 me->prop_type = DDI_PROP_TYPE_BYTE;
2825 (void) nvpair_value_byte_array(prop, &buf, &nelems);
2826 size = nelems;
2827 if (nelems != 0) {
2828 off = di_checkmem(st, off, size);
2829 bcopy(buf, di_mem_addr(st, off), size);
2830 }
2831 break;
2832
2833 default: /* Unknown or unhandled type; skip it */
2834 size = 0;
2835 break;
2836 }
2837
2838 if (size > 0) {
2839 me->prop_data = off;
2840 }
2841
2842 me->prop_len = (int)size;
2843 off += size;
2844
2845 off = di_checkmem(st, off, sizeof (struct di_path_prop));
2846 me->prop_next = off;
2847 }
2848
2849 me->prop_next = 0;
2850 return (off);
2851 }
2852
2853
2854 static void
2855 di_path_one_endpoint(struct di_path *me, di_off_t noff, di_off_t **off_pp,
2856 int get_client)
2857 {
2858 if (get_client) {
2859 ASSERT(me->path_client == 0);
2860 me->path_client = noff;
2861 ASSERT(me->path_c_link == 0);
2862 *off_pp = &me->path_c_link;
2863 me->path_snap_state &=
2864 ~(DI_PATH_SNAP_NOCLIENT | DI_PATH_SNAP_NOCLINK);
2865 } else {
2866 ASSERT(me->path_phci == 0);
2867 me->path_phci = noff;
2868 ASSERT(me->path_p_link == 0);
2869 *off_pp = &me->path_p_link;
2870 me->path_snap_state &=
2871 ~(DI_PATH_SNAP_NOPHCI | DI_PATH_SNAP_NOPLINK);
2872 }
2873 }
2874
2875 /*
2876 * off_p: pointer to the linkage field. This links pips along the client|phci
2877 * linkage list.
2878 * noff: Offset for the endpoint dip snapshot.
2879 */
2880 static di_off_t
2881 di_getpath_data(dev_info_t *dip, di_off_t *off_p, di_off_t noff,
2882 struct di_state *st, int get_client)
2883 {
2884 di_off_t off;
2885 mdi_pathinfo_t *pip;
2886 struct di_path *me;
2887 mdi_pathinfo_t *(*next_pip)(dev_info_t *, mdi_pathinfo_t *);
2888 size_t size;
2889
2890 dcmn_err2((CE_WARN, "di_getpath_data: client = %d", get_client));
2891
2892 /*
2893 * The naming of the following mdi_xyz() routines is unfortunately
2894 * non-intuitive. mdi_get_next_phci_path() follows the
2895 * client_link, i.e. the list of pips belonging to the
2896 * given client dip.
2897 */
2898 if (get_client)
2899 next_pip = &mdi_get_next_phci_path;
2900 else
2901 next_pip = &mdi_get_next_client_path;
2902
2903 off = *off_p;
2904
2905 pip = NULL;
2906 while (pip = (*next_pip)(dip, pip)) {
2907 di_off_t stored_offset;
2908
2909 dcmn_err((CE_WARN, "marshalling pip = %p", (void *)pip));
2910
2911 mdi_pi_lock(pip);
2912
2913 /* We don't represent hidden paths in the snapshot */
2914 if (mdi_pi_ishidden(pip)) {
2915 dcmn_err((CE_WARN, "hidden, skip"));
2916 mdi_pi_unlock(pip);
2917 continue;
2918 }
2919
2920 if (di_pip_find(st, pip, &stored_offset) != -1) {
2921 /*
2922 * We've already seen this pathinfo node, so we need to
2923 * take care not to snap it again. However, one endpoint
2924 * and its linkage will be set here. The other endpoint
2925 * and linkage were already set when the pip was
2926 * first snapshotted, i.e. when the other endpoint dip
2927 * was snapshotted.
2928 */
2929 me = DI_PATH(di_mem_addr(st, stored_offset));
2930 *off_p = stored_offset;
2931
2932 di_path_one_endpoint(me, noff, &off_p, get_client);
2933
2934 /*
2935 * The other endpoint and linkage were set when this
2936 * pip was snapshotted. So we are done with both
2937 * endpoints and linkages.
2938 */
2939 ASSERT(!(me->path_snap_state &
2940 (DI_PATH_SNAP_NOCLIENT|DI_PATH_SNAP_NOPHCI)));
2941 ASSERT(!(me->path_snap_state &
2942 (DI_PATH_SNAP_NOCLINK|DI_PATH_SNAP_NOPLINK)));
2943
2944 mdi_pi_unlock(pip);
2945 continue;
2946 }
2947
2948 /*
2949 * Now that we need to snapshot this pip, check memory
2950 */
2951 size = sizeof (struct di_path);
2952 *off_p = off = di_checkmem(st, off, size);
2953 me = DI_PATH(di_mem_addr(st, off));
2954 me->self = off;
2955 off += size;
2956
2957 me->path_snap_state =
2958 DI_PATH_SNAP_NOCLINK | DI_PATH_SNAP_NOPLINK;
2959 me->path_snap_state |=
2960 DI_PATH_SNAP_NOCLIENT | DI_PATH_SNAP_NOPHCI;
2961
2962 /*
2963 * Zero out fields as di_checkmem() doesn't guarantee
2964 * zero-filled memory
2965 */
2966 me->path_client = me->path_phci = 0;
2967 me->path_c_link = me->path_p_link = 0;
2968
2969 di_path_one_endpoint(me, noff, &off_p, get_client);
2970
2971 /*
2972 * Note the existence of this pathinfo
2973 */
2974 di_register_pip(st, pip, me->self);
2975
2976 me->path_state = path_state_convert(mdi_pi_get_state(pip));
2977 me->path_flags = path_flags_convert(mdi_pi_get_flags(pip));
2978
2979 me->path_instance = mdi_pi_get_path_instance(pip);
2980
2981 /*
2982 * Get intermediate addressing info.
2983 */
2984 size = strlen(mdi_pi_get_addr(pip)) + 1;
2985 me->path_addr = off = di_checkmem(st, off, size);
2986 (void) strcpy(di_mem_addr(st, off), mdi_pi_get_addr(pip));
2987 off += size;
2988
2989 /*
2990 * Get path properties if props are to be included in the
2991 * snapshot
2992 */
2993 if (DINFOPROP & st->command) {
2994 me->path_prop = off;
2995 off = di_path_getprop(pip, &me->path_prop, st);
2996 } else {
2997 me->path_prop = 0;
2998 }
2999
3000 mdi_pi_unlock(pip);
3001 }
3002
3003 *off_p = 0;
3004 return (off);
3005 }
3006
3007 /*
3008 * Return driver prop_op entry point for the specified devinfo node.
3009 *
3010 * To return a non-NULL value:
3011 * - driver must be attached and held:
3012 * If driver is not attached we ignore the driver property list.
3013 * No one should rely on such properties.
3014 * - driver "cb_prop_op != ddi_prop_op":
3015 * If "cb_prop_op == ddi_prop_op", framework does not need to call driver.
3016 * XXX or parent's bus_prop_op != ddi_bus_prop_op
3017 */
3018 static int
3019 (*di_getprop_prop_op(struct dev_info *dip))
3020 (dev_t, dev_info_t *, ddi_prop_op_t, int, char *, caddr_t, int *)
3021 {
3022 struct dev_ops *ops;
3023
3024 /* If driver is not attached we ignore the driver property list. */
3025 if ((dip == NULL) || !i_ddi_devi_attached((dev_info_t *)dip))
3026 return (NULL);
3027
3028 /*
3029 * Some nexus drivers incorrectly set cb_prop_op to nodev, nulldev,
3030 * or even NULL.
3031 */
3032 ops = dip->devi_ops;
3033 if (ops && ops->devo_cb_ops &&
3034 (ops->devo_cb_ops->cb_prop_op != ddi_prop_op) &&
3035 (ops->devo_cb_ops->cb_prop_op != nodev) &&
3036 (ops->devo_cb_ops->cb_prop_op != nulldev) &&
3037 (ops->devo_cb_ops->cb_prop_op != NULL))
3038 return (ops->devo_cb_ops->cb_prop_op);
3039 return (NULL);
3040 }
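/*
 * For reference, the prop_op slot checked above lives in a driver's
 * cb_ops. A minimal sketch for a hypothetical driver "xx" (the names
 * are examples, not anything this driver depends on):
 *
 *	static struct cb_ops xx_cb_ops = {
 *		xx_open,		open(9E)
 *		xx_close,		close(9E)
 *		nodev, nodev, nodev,	strategy, print, dump
 *		xx_read, xx_write,	read(9E), write(9E)
 *		xx_ioctl,		ioctl(9E)
 *		nodev, nodev, nodev,	devmap, mmap, segmap
 *		nochpoll,		chpoll(9E)
 *		xx_prop_op,		custom prop_op(9E)
 *		NULL,			streamtab
 *		D_NEW | D_MP,		cb_flag
 *		CB_REV,			cb_rev
 *		nodev, nodev		aread, awrite
 *	};
 *
 * A driver that leaves cb_prop_op as ddi_prop_op (the common default)
 * is skipped by di_getprop_prop_op(), since the framework can then
 * fetch the property list without calling into the driver.
 */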
3041
3042 static di_off_t
3043 di_getprop_add(int list, int dyn, struct di_state *st, struct dev_info *dip,
3044 int (*prop_op)(),
3045 char *name, dev_t devt, int aflags, int alen, caddr_t aval,
3046 di_off_t off, di_off_t **off_pp)
3047 {
3048 int need_free = 0;
3049 dev_t pdevt;
3050 int pflags;
3051 int rv;
3052 caddr_t val;
3053 int len;
3054 size_t size;
3055 struct di_prop *pp;
3056
3057 /* If we have prop_op function, ask driver for latest value */
3058 if (prop_op) {
3059 ASSERT(dip);
3060
3061 /* Must search DDI_DEV_T_NONE with DDI_DEV_T_ANY */
3062 pdevt = (devt == DDI_DEV_T_NONE) ? DDI_DEV_T_ANY : devt;
3063
3064 /*
3065 * We have type information in flags, but are invoking an
3066 * old non-typed prop_op(9E) interface. Since not all types are
3067 * part of DDI_PROP_TYPE_ANY (example is DDI_PROP_TYPE_INT64),
3068 * we set DDI_PROP_CONSUMER_TYPED - causing the framework to
3069 * expand type bits beyond DDI_PROP_TYPE_ANY. This allows us
3070 * to use the legacy prop_op(9E) interface to obtain updates to
3071 * non-DDI_PROP_TYPE_ANY dynamic properties.
3072 */
3073 pflags = aflags & ~DDI_PROP_TYPE_MASK;
3074 pflags |= DDI_PROP_DONTPASS | DDI_PROP_NOTPROM |
3075 DDI_PROP_CONSUMER_TYPED;
3076
3077 /*
3078 * Hold and exit across prop_op(9E) to avoid lock order
3079 * issues between
3080 * [ndi_devi_enter() ..prop_op(9E).. driver-lock]
3081 * .vs.
3082 * [..ioctl(9E).. driver-lock ..ddi_remove_minor_node(9F)..
3083 * ndi_devi_enter()]
3084 * ordering.
3085 */
3086 ndi_hold_devi((dev_info_t *)dip);
3087 ndi_devi_exit((dev_info_t *)dip);
3088 rv = (*prop_op)(pdevt, (dev_info_t *)dip,
3089 PROP_LEN_AND_VAL_ALLOC, pflags, name, &val, &len);
3090 ndi_devi_enter((dev_info_t *)dip);
3091 ndi_rele_devi((dev_info_t *)dip);
3092
3093 if (rv == DDI_PROP_SUCCESS) {
3094 need_free = 1; /* dynamic prop obtained */
3095 } else if (dyn) {
3096 /*
3097 * A dynamic property must succeed prop_op(9E) to show
3098 * up in the snapshot - that is the only source of its
3099 * value.
3100 */
3101 return (off); /* dynamic prop not supported */
3102 } else {
3103 /*
3104 * In case calling the driver's prop_op(9E) caused an
3105 * update of a non-dynamic property (code leading
3106 * to ddi_prop_change), we defer picking up the val and
3107 * len information until after prop_op(9E) to ensure
3108 * that we snapshot the latest value.
3109 */
3110 val = aval;
3111 len = alen;
3112
3113 }
3114 } else {
3115 val = aval;
3116 len = alen;
3117 }
3118
3119 dcmn_err((CE_CONT, "di_getprop_add: list %d %s len %d val %p\n",
3120 list, name ? name : "NULL", len, (void *)val));
3121
3122 size = sizeof (struct di_prop);
3123 **off_pp = off = di_checkmem(st, off, size);
3124 pp = DI_PROP(di_mem_addr(st, off));
3125 pp->self = off;
3126 off += size;
3127
3128 pp->dev_major = getmajor(devt);
3129 pp->dev_minor = getminor(devt);
3130 pp->prop_flags = aflags;
3131 pp->prop_list = list;
3132
3133 /* property name */
3134 if (name) {
3135 size = strlen(name) + 1;
3136 pp->prop_name = off = di_checkmem(st, off, size);
3137 (void) strcpy(di_mem_addr(st, off), name);
3138 off += size;
3139 } else {
3140 pp->prop_name = -1;
3141 }
3142
3143 pp->prop_len = len;
3144 if (val == NULL) {
3145 pp->prop_data = -1;
3146 } else if (len != 0) {
3147 size = len;
3148 pp->prop_data = off = di_checkmem(st, off, size);
3149 bcopy(val, di_mem_addr(st, off), size);
3150 off += size;
3151 }
3152
3153 pp->next = 0; /* assume tail for now */
3154 *off_pp = &pp->next; /* return pointer to our next */
3155
3156 if (need_free) /* free PROP_LEN_AND_VAL_ALLOC alloc */
3157 kmem_free(val, len);
3158 return (off);
3159 }
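/*
 * The off_pp argument implements the tail-pointer idiom for building
 * the singly linked list of di_prop entries; a sketch of the caller's
 * side (di_getprop() below, with the list head in the snapshot, e.g.
 * &me->drv_prop):
 *
 *	di_off_t *off_p = &me->drv_prop;
 *
 *	for (each property)
 *		off = di_getprop_add(..., off, &off_p);
 *
 * Each call stores its own offset through *off_p and then points off_p
 * at its pp->next field, so the next call appends to the tail without
 * walking the list; the caller's final *off_p = 0 terminates the chain.
 */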
3160
3161
3162 /*
3163 * Copy a list of properties attached to a devinfo node. Called from
3164 * di_copynode with active ndi_devi_enter. The major number is passed in case
3165 * we need to call driver's prop_op entry. The value of list indicates
3166 * which list we are copying. Possible values are:
3167 * DI_PROP_DRV_LIST, DI_PROP_SYS_LIST, DI_PROP_GLB_LIST, DI_PROP_HW_LIST
3168 */
3169 static di_off_t
3170 di_getprop(int list, struct ddi_prop **pprop, di_off_t *off_p,
3171 struct di_state *st, struct dev_info *dip)
3172 {
3173 struct ddi_prop *prop;
3174 int (*prop_op)();
3175 int off;
3176 struct ddi_minor_data *mn;
3177 i_ddi_prop_dyn_t *dp;
3178 struct plist {
3179 struct plist *pl_next;
3180 char *pl_name;
3181 int pl_flags;
3182 dev_t pl_dev;
3183 int pl_len;
3184 caddr_t pl_val;
3185 } *pl, *pl0, **plp;
3186
3187 ASSERT(st != NULL);
3188
3189 off = *off_p;
3190 *off_p = 0;
3191 dcmn_err((CE_CONT, "di_getprop: copy property list %d at addr %p\n",
3192 list, (void *)*pprop));
3193
3194 /* get pointer to driver's prop_op(9E) implementation if DRV_LIST */
3195 prop_op = (list == DI_PROP_DRV_LIST) ? di_getprop_prop_op(dip) : NULL;
3196
3197 /*
3198 * Form private list of properties, holding devi_lock for properties
3199 * that hang off the dip.
3200 */
3201 if (dip)
3202 mutex_enter(&(dip->devi_lock));
3203 for (pl0 = NULL, plp = &pl0, prop = *pprop;
3204 prop; plp = &pl->pl_next, prop = prop->prop_next) {
3205 pl = kmem_alloc(sizeof (*pl), KM_SLEEP);
3206 *plp = pl;
3207 pl->pl_next = NULL;
3208 if (prop->prop_name)
3209 pl->pl_name = i_ddi_strdup(prop->prop_name, KM_SLEEP);
3210 else
3211 pl->pl_name = NULL;
3212 pl->pl_flags = prop->prop_flags;
3213 pl->pl_dev = prop->prop_dev;
3214 if (prop->prop_len) {
3215 pl->pl_len = prop->prop_len;
3216 pl->pl_val = kmem_alloc(pl->pl_len, KM_SLEEP);
3217 bcopy(prop->prop_val, pl->pl_val, pl->pl_len);
3218 } else {
3219 pl->pl_len = 0;
3220 pl->pl_val = NULL;
3221 }
3222 }
3223 if (dip)
3224 mutex_exit(&(dip->devi_lock));
3225
3226 /*
3227 * Now that we have dropped devi_lock, perform a second-pass to
3228 * add properties to the snapshot. We do this as a second pass
3229 * because we may need to call prop_op(9E) and we can't hold
3230 * devi_lock across that call.
3231 */
3232 for (pl = pl0; pl; pl = pl0) {
3233 pl0 = pl->pl_next;
3234 off = di_getprop_add(list, 0, st, dip, prop_op, pl->pl_name,
3235 pl->pl_dev, pl->pl_flags, pl->pl_len, pl->pl_val,
3236 off, &off_p);
3237 if (pl->pl_val)
3238 kmem_free(pl->pl_val, pl->pl_len);
3239 if (pl->pl_name)
3240 kmem_free(pl->pl_name, strlen(pl->pl_name) + 1);
3241 kmem_free(pl, sizeof (*pl));
3242 }
3243
3244 /*
3245 * If there is no prop_op or dynamic property support has been
3246 * disabled, we are done.
3247 */
3248 if ((prop_op == NULL) || (di_prop_dyn == 0)) {
3249 *off_p = 0;
3250 return (off);
3251 }
3252
3253 /* Add dynamic driver properties to snapshot */
3254 for (dp = i_ddi_prop_dyn_driver_get((dev_info_t *)dip);
3255 dp && dp->dp_name; dp++) {
3256 if (dp->dp_spec_type) {
3257 /* if spec_type, property of matching minor */
3258 ASSERT(DEVI_BUSY_OWNED(dip));
3259 for (mn = dip->devi_minor; mn; mn = mn->next) {
3260 if (mn->ddm_spec_type != dp->dp_spec_type)
3261 continue;
3262 off = di_getprop_add(list, 1, st, dip, prop_op,
3263 dp->dp_name, mn->ddm_dev, dp->dp_type,
3264 0, NULL, off, &off_p);
3265 }
3266 } else {
3267 /* property of devinfo node */
3268 off = di_getprop_add(list, 1, st, dip, prop_op,
3269 dp->dp_name, DDI_DEV_T_NONE, dp->dp_type,
3270 0, NULL, off, &off_p);
3271 }
3272 }
3273
3274 /* Add dynamic parent properties to snapshot */
3275 for (dp = i_ddi_prop_dyn_parent_get((dev_info_t *)dip);
3276 dp && dp->dp_name; dp++) {
3277 if (dp->dp_spec_type) {
3278 /* if spec_type, property of matching minor */
3279 ASSERT(DEVI_BUSY_OWNED(dip));
3280 for (mn = dip->devi_minor; mn; mn = mn->next) {
3281 if (mn->ddm_spec_type != dp->dp_spec_type)
3282 continue;
3283 off = di_getprop_add(list, 1, st, dip, prop_op,
3284 dp->dp_name, mn->ddm_dev, dp->dp_type,
3285 0, NULL, off, &off_p);
3286 }
3287 } else {
3288 /* property of devinfo node */
3289 off = di_getprop_add(list, 1, st, dip, prop_op,
3290 dp->dp_name, DDI_DEV_T_NONE, dp->dp_type,
3291 0, NULL, off, &off_p);
3292 }
3293 }
3294
3295 *off_p = 0;
3296 return (off);
3297 }
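/*
 * For illustration, a dynamic property table retrieved by
 * i_ddi_prop_dyn_driver_get() has the shape below (a sketch; the names
 * are modeled on common block-driver usage, not required by this code):
 *
 *	static i_ddi_prop_dyn_t xx_prop_dyn[] = {
 *		{"Nblocks",	DDI_PROP_TYPE_INT64,	S_IFBLK},
 *		{"Size",	DDI_PROP_TYPE_INT64,	S_IFCHR},
 *		{"device-blksize",	DDI_PROP_TYPE_INT},
 *		{NULL}
 *	};
 *
 * Entries with a dp_spec_type are snapshotted once per matching minor
 * node; entries without one are snapshotted once against DDI_DEV_T_NONE.
 */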
3298
3299 /*
3300 * find private data format attached to a dip
3301 * match = DI_MATCH_PARENT to match driver name of parent dip (parent data)
3302 * match = DI_MATCH_DRIVER to match driver name of the dip itself (driver data)
3303 */
3304 #define DI_MATCH_DRIVER 0
3305 #define DI_MATCH_PARENT 1
3306
3307 struct di_priv_format *
3308 di_match_drv_name(struct dev_info *node, struct di_state *st, int match)
3309 {
3310 int i, count, len;
3311 char *drv_name;
3312 major_t major;
3313 struct di_all *all;
3314 struct di_priv_format *form;
3315
3316 dcmn_err2((CE_CONT, "di_match_drv_name: node = %s, match = %x\n",
3317 node->devi_node_name, match));
3318
3319 if (match == DI_MATCH_PARENT) {
3320 node = DEVI(node->devi_parent);
3321 }
3322
3323 if (node == NULL) {
3324 return (NULL);
3325 }
3326
3327 major = node->devi_major;
3328 if (major == (major_t)(-1)) {
3329 return (NULL);
3330 }
3331
3332 /*
3333 * Match the driver name.
3334 */
3335 drv_name = ddi_major_to_name(major);
3336 if ((drv_name == NULL) || *drv_name == '\0') {
3337 return (NULL);
3338 }
3339
3340 /* Now get the di_priv_format array */
3341 all = DI_ALL_PTR(st);
3342 if (match == DI_MATCH_PARENT) {
3343 count = all->n_ppdata;
3344 form = DI_PRIV_FORMAT(di_mem_addr(st, all->ppdata_format));
3345 } else {
3346 count = all->n_dpdata;
3347 form = DI_PRIV_FORMAT(di_mem_addr(st, all->dpdata_format));
3348 }
3349
3350 len = strlen(drv_name);
3351 for (i = 0; i < count; i++) {
3352 char *tmp;
3353
3354 tmp = form[i].drv_name;
3355 while (tmp && (*tmp != '\0')) {
3356 if (strncmp(drv_name, tmp, len) == 0) {
3357 return (&form[i]);
3358 }
3359 /*
3360 * Move to next driver name, skipping a white space
3361 */
3362 if (tmp = strchr(tmp, ' ')) {
3363 tmp++;
3364 }
3365 }
3366 }
3367
3368 return (NULL);
3369 }
3370
3371 /*
3372 * The following functions copy data as specified by the format passed in.
3373 * To prevent an invalid format from panicking the system, we call on_fault().
3374 * A return value of 0 indicates an error. Otherwise, the total offset
3375 * is returned.
3376 */
3377 #define DI_MAX_PRIVDATA (PAGESIZE >> 1) /* max private data size */
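/*
 * The canonical on_fault()/no_fault() pattern used by di_getprvdata()
 * below, in outline:
 *
 *	label_t ljb;
 *
 *	if (!on_fault(&ljb)) {
 *		... dereference data as described by the (untrusted)
 *		    format; a bad address vectors control back here,
 *		    with on_fault() appearing to return nonzero ...
 *	} else {
 *		... fault recovery path ...
 *	}
 *	no_fault();
 */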
3378
3379 static di_off_t
3380 di_getprvdata(struct di_priv_format *pdp, struct dev_info *node,
3381 void *data, di_off_t *off_p, struct di_state *st)
3382 {
3383 caddr_t pa;
3384 void *ptr;
3385 int i, size, repeat;
3386 di_off_t off, off0, *tmp;
3387 char *path;
3388 label_t ljb;
3389
3390 dcmn_err2((CE_CONT, "di_getprvdata:\n"));
3391
3392 /*
3393 * check memory availability. Private data size is
3394 * limited to DI_MAX_PRIVDATA.
3395 */
3396 off = di_checkmem(st, *off_p, DI_MAX_PRIVDATA);
3397 *off_p = off;
3398
3399 if ((pdp->bytes == 0) || pdp->bytes > DI_MAX_PRIVDATA) {
3400 goto failure;
3401 }
3402
3403 if (!on_fault(&ljb)) {
3404 /* copy the struct */
3405 bcopy(data, di_mem_addr(st, off), pdp->bytes);
3406 off0 = DI_ALIGN(pdp->bytes); /* XXX remove DI_ALIGN */
3407
3408 /* dereferencing pointers */
3409 for (i = 0; i < MAX_PTR_IN_PRV; i++) {
3410
3411 if (pdp->ptr[i].size == 0) {
3412 goto success; /* no more ptrs */
3413 }
3414
3415 /*
3416 * first, get the pointer content
3417 */
3418 if ((pdp->ptr[i].offset < 0) ||
3419 (pdp->ptr[i].offset > pdp->bytes - sizeof (char *)))
3420 goto failure; /* wrong offset */
3421
3422 pa = di_mem_addr(st, off + pdp->ptr[i].offset);
3423
3424 /* save a tmp ptr to store off_t later */
3425 tmp = (di_off_t *)(intptr_t)pa;
3426
3427 /* get pointer value, if NULL continue */
3428 ptr = *((void **) (intptr_t)pa);
3429 if (ptr == NULL) {
3430 continue;
3431 }
3432
3433 /*
3434 * next, find the repeat count (array dimension)
3435 */
3436 repeat = pdp->ptr[i].len_offset;
3437
3438 /*
3439 * A negative value indicates a fixed sized array, whose
3440 * dimension is the absolute value of len_offset.
3441 *
3442 * A zero or positive value indicates a variable sized array;
3443 * len_offset is then the offset of an int member of the
3444 * structure that holds the array dimension.
3445 */
3446 if (repeat > pdp->bytes - sizeof (int)) {
3447 goto failure; /* wrong offset */
3448 }
3449
3450 if (repeat >= 0) {
3451 repeat = *((int *)
3452 (intptr_t)((caddr_t)data + repeat));
3453 } else {
3454 repeat = -repeat;
3455 }
3456
3457 /*
3458 * next, get the size of the object to be copied
3459 */
3460 size = pdp->ptr[i].size * repeat;
3461
3462 /*
3463 * Arbitrarily limit the total size of the object to be
3464 * copied (1 byte up to the remaining DI_MAX_PRIVDATA space).
3465 */
3466 if ((size <= 0) || (size > (DI_MAX_PRIVDATA - off0))) {
3467 goto failure; /* wrong size or too big */
3468 }
3469
3470 /*
3471 * Now copy the data
3472 */
3473 *tmp = off0;
3474 bcopy(ptr, di_mem_addr(st, off + off0), size);
3475 off0 += DI_ALIGN(size); /* XXX remove DI_ALIGN */
3476 }
3477 } else {
3478 goto failure;
3479 }
3480
3481 success:
3482 /*
3483 * success if reached here
3484 */
3485 no_fault();
3486 return (off + off0);
3487 /*NOTREACHED*/
3488
3489 failure:
3490 /*
3491 * fault occurred
3492 */
3493 no_fault();
3494 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
3495 cmn_err(CE_WARN, "devinfo: fault on private data for '%s' at %p",
3496 ddi_pathname((dev_info_t *)node, path), data);
3497 kmem_free(path, MAXPATHLEN);
3498 *off_p = -1; /* set private data to indicate error */
3499
3500 return (off);
3501 }
3502
3503 /*
3504 * get parent private data; on error, returns original offset
3505 */
3506 static di_off_t
3507 di_getppdata(struct dev_info *node, di_off_t *off_p, struct di_state *st)
3508 {
3509 int off;
3510 struct di_priv_format *ppdp;
3511
3512 dcmn_err2((CE_CONT, "di_getppdata:\n"));
3513
3514 /* find the parent data format */
3515 if ((ppdp = di_match_drv_name(node, st, DI_MATCH_PARENT)) == NULL) {
3516 off = *off_p;
3517 *off_p = 0; /* set parent data to none */
3518 return (off);
3519 }
3520
3521 return (di_getprvdata(ppdp, node,
3522 ddi_get_parent_data((dev_info_t *)node), off_p, st));
3523 }
3524
3525 /*
3526 * get driver private data; on error, returns original offset
3527 */
3528 static di_off_t
3529 di_getdpdata(struct dev_info *node, di_off_t *off_p, struct di_state *st)
3530 {
3531 int off;
3532 struct di_priv_format *dpdp;
3533
3534 dcmn_err2((CE_CONT, "di_getdpdata:\n"));
3535
3536 /* find the driver data format */
3537 if ((dpdp = di_match_drv_name(node, st, DI_MATCH_DRIVER)) == NULL) {
3538 off = *off_p;
3539 *off_p = 0; /* set driver data to none */
3540 return (off);
3541 }
3542
3543 return (di_getprvdata(dpdp, node,
3544 ddi_get_driver_private((dev_info_t *)node), off_p, st));
3545 }
3546
3547 /*
3548 * Copy hotplug data associated with a devinfo node into the snapshot.
3549 */
3550 static di_off_t
3551 di_gethpdata(ddi_hp_cn_handle_t *hp_hdl, di_off_t *off_p,
3552 struct di_state *st)
3553 {
3554 struct i_hp *hp;
3555 struct di_hp *me;
3556 size_t size;
3557 di_off_t off;
3558
3559 dcmn_err2((CE_CONT, "di_gethpdata:\n"));
3560
3561 /*
3562 * check memory first
3563 */
3564 off = di_checkmem(st, *off_p, sizeof (struct di_hp));
3565 *off_p = off;
3566
3567 do {
3568 me = DI_HP(di_mem_addr(st, off));
3569 me->self = off;
3570 me->hp_name = 0;
3571 me->hp_connection = (int)hp_hdl->cn_info.cn_num;
3572 me->hp_depends_on = (int)hp_hdl->cn_info.cn_num_dpd_on;
3573 (void) ddihp_cn_getstate(hp_hdl);
3574 me->hp_state = (int)hp_hdl->cn_info.cn_state;
3575 me->hp_type = (int)hp_hdl->cn_info.cn_type;
3576 me->hp_type_str = 0;
3577 me->hp_last_change = (uint32_t)hp_hdl->cn_info.cn_last_change;
3578 me->hp_child = 0;
3579
3580 /*
3581 * Child links are resolved later by di_hotplug_children().
3582 * Store a reference to this di_hp_t in the list used later
3583 * by di_hotplug_children().
3584 */
3585 hp = kmem_zalloc(sizeof (i_hp_t), KM_SLEEP);
3586 hp->hp_off = off;
3587 hp->hp_child = hp_hdl->cn_info.cn_child;
3588 list_insert_tail(&st->hp_list, hp);
3589
3590 off += sizeof (struct di_hp);
3591
3592 /* Add name of this di_hp_t to the snapshot */
3593 if (hp_hdl->cn_info.cn_name) {
3594 size = strlen(hp_hdl->cn_info.cn_name) + 1;
3595 me->hp_name = off = di_checkmem(st, off, size);
3596 (void) strcpy(di_mem_addr(st, off),
3597 hp_hdl->cn_info.cn_name);
3598 off += size;
3599 }
3600
3601 /* Add type description of this di_hp_t to the snapshot */
3602 if (hp_hdl->cn_info.cn_type_str) {
3603 size = strlen(hp_hdl->cn_info.cn_type_str) + 1;
3604 me->hp_type_str = off = di_checkmem(st, off, size);
3605 (void) strcpy(di_mem_addr(st, off),
3606 hp_hdl->cn_info.cn_type_str);
3607 off += size;
3608 }
3609
3610 /*
3611 * Set link to next in the chain of di_hp_t nodes,
3612 * or terminate the chain when processing the last node.
3613 */
3614 if (hp_hdl->next != NULL) {
3615 off = di_checkmem(st, off, sizeof (struct di_hp));
3616 me->next = off;
3617 } else {
3618 me->next = 0;
3619 }
3620
3621 /* Update pointer to next in the chain */
3622 hp_hdl = hp_hdl->next;
3623
3624 } while (hp_hdl);
3625
3626 return (off);
3627 }
3628
3629 /*
3630 * The driver is stateful across DINFOCPYALL and DINFOUSRLD.
3631 * This function encapsulates the state machine:
3632 *
3633 * -> IOC_IDLE -> IOC_SNAP -> IOC_DONE -> IOC_COPY ->
3634 * |          SNAPSHOT              USRLD           |
3635 * --------------------------------------------------
3636 *
3637 * Returns 0 on success and -1 on failure
3638 */
3639 static int
3640 di_setstate(struct di_state *st, int new_state)
3641 {
3642 int ret = 0;
3643
3644 mutex_enter(&di_lock);
3645 switch (new_state) {
3646 case IOC_IDLE:
3647 case IOC_DONE:
3648 break;
3649 case IOC_SNAP:
3650 if (st->di_iocstate != IOC_IDLE)
3651 ret = -1;
3652 break;
3653 case IOC_COPY:
3654 if (st->di_iocstate != IOC_DONE)
3655 ret = -1;
3656 break;
3657 default:
3658 ret = -1;
3659 }
3660
3661 if (ret == 0)
3662 st->di_iocstate = new_state;
3663 else
3664 cmn_err(CE_NOTE, "incorrect state transition from %d to %d",
3665 st->di_iocstate, new_state);
3666 mutex_exit(&di_lock);
3667 return (ret);
3668 }
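/*
 * Illustrative caller sequence (a sketch; the exact errnos returned on
 * a failed transition are a detail of the ioctl code that calls this):
 *
 *	if (di_setstate(st, IOC_SNAP) == -1)
 *		return (EBUSY);		another snapshot in progress
 *	... build the snapshot ...
 *	(void) di_setstate(st, IOC_DONE);
 *	...
 *	if (di_setstate(st, IOC_COPY) == -1)
 *		return (EINVAL);	nothing to copy out
 *	... copy the snapshot out ...
 *	(void) di_setstate(st, IOC_IDLE);
 */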
3669
3670 /*
3671 * We cannot assume the presence of the entire
3672 * snapshot in this routine. All we are guaranteed
3673 * is the di_all struct + 1 byte (for root_path)
3674 */
3675 static int
3676 header_plus_one_ok(struct di_all *all)
3677 {
3678 /*
3679 * Refuse to read old versions
3680 */
3681 if (all->version != DI_SNAPSHOT_VERSION) {
3682 CACHE_DEBUG((DI_ERR, "bad version: 0x%x", all->version));
3683 return (0);
3684 }
3685
3686 if (all->cache_magic != DI_CACHE_MAGIC) {
3687 CACHE_DEBUG((DI_ERR, "bad magic #: 0x%x", all->cache_magic));
3688 return (0);
3689 }
3690
3691 if (all->snapshot_time == 0) {
3692 CACHE_DEBUG((DI_ERR, "bad timestamp: %ld", all->snapshot_time));
3693 return (0);
3694 }
3695
3696 if (all->top_devinfo == 0) {
3697 CACHE_DEBUG((DI_ERR, "NULL top devinfo"));
3698 return (0);
3699 }
3700
3701 if (all->map_size < sizeof (*all) + 1) {
3702 CACHE_DEBUG((DI_ERR, "bad map size: %u", all->map_size));
3703 return (0);
3704 }
3705
3706 if (all->root_path[0] != '/' || all->root_path[1] != '\0') {
3707 CACHE_DEBUG((DI_ERR, "bad rootpath: %c%c",
3708 all->root_path[0], all->root_path[1]));
3709 return (0);
3710 }
3711
3712 /*
3713 * We can't check checksum here as we just have the header
3714 */
3715
3716 return (1);
3717 }
3718
3719 static int
3720 chunk_write(struct vnode *vp, offset_t off, caddr_t buf, size_t len)
3721 {
3722 rlim64_t rlimit;
3723 ssize_t resid;
3724 int error = 0;
3725
3726
3727 rlimit = RLIM64_INFINITY;
3728
3729 while (len) {
3730 resid = 0;
3731 error = vn_rdwr(UIO_WRITE, vp, buf, len, off,
3732 UIO_SYSSPACE, FSYNC, rlimit, kcred, &resid);
3733
3734 if (error || resid < 0) {
3735 error = error ? error : EIO;
3736 CACHE_DEBUG((DI_ERR, "write error: %d", error));
3737 break;
3738 }
3739
3740 /*
3741 * Check if we are making progress
3742 */
3743 if (resid >= len) {
3744 error = ENOSPC;
3745 break;
3746 }
3747 buf += len - resid;
3748 off += len - resid;
3749 len = resid;
3750 }
3751
3752 return (error);
3753 }
3754
3755 static void
3756 di_cache_write(struct di_cache *cache)
3757 {
3758 struct di_all *all;
3759 struct vnode *vp;
3760 int oflags;
3761 size_t map_size;
3762 size_t chunk;
3763 offset_t off;
3764 int error;
3765 char *buf;
3766
3767 ASSERT(DI_CACHE_LOCKED(*cache));
3768 ASSERT(!servicing_interrupt());
3769
3770 if (cache->cache_size == 0) {
3771 ASSERT(cache->cache_data == NULL);
3772 CACHE_DEBUG((DI_ERR, "Empty cache. Skipping write"));
3773 return;
3774 }
3775
3776 ASSERT(cache->cache_size > 0);
3777 ASSERT(cache->cache_data);
3778
3779 if (!modrootloaded || rootvp == NULL || vn_is_readonly(rootvp)) {
3780 CACHE_DEBUG((DI_ERR, "Can't write to rootFS. Skipping write"));
3781 return;
3782 }
3783
3784 all = (struct di_all *)cache->cache_data;
3785
3786 if (!header_plus_one_ok(all)) {
3787 CACHE_DEBUG((DI_ERR, "Invalid header. Skipping write"));
3788 return;
3789 }
3790
3791 ASSERT(strcmp(all->root_path, "/") == 0);
3792
3793 /*
3794 * The cache_size is the total allocated memory for the cache.
3795 * The map_size is the actual size of valid data in the cache.
3796 * map_size may be smaller than cache_size but cannot exceed
3797 * cache_size.
3798 */
3799 if (all->map_size > cache->cache_size) {
3800 CACHE_DEBUG((DI_ERR, "map_size (0x%x) > cache_size (0x%x)."
3801 " Skipping write", all->map_size, cache->cache_size));
3802 return;
3803 }
3804
3805 /*
3806 * First unlink the temp file
3807 */
3808 error = vn_remove(DI_CACHE_TEMP, UIO_SYSSPACE, RMFILE);
3809 if (error && error != ENOENT) {
3810 CACHE_DEBUG((DI_ERR, "%s: unlink failed: %d",
3811 DI_CACHE_TEMP, error));
3812 }
3813
3814 if (error == EROFS) {
3815 CACHE_DEBUG((DI_ERR, "RDONLY FS. Skipping write"));
3816 return;
3817 }
3818
3819 vp = NULL;
3820 oflags = (FCREAT|FWRITE);
3821 if (error = vn_open(DI_CACHE_TEMP, UIO_SYSSPACE, oflags,
3822 DI_CACHE_PERMS, &vp, CRCREAT, 0)) {
3823 CACHE_DEBUG((DI_ERR, "%s: create failed: %d",
3824 DI_CACHE_TEMP, error));
3825 return;
3826 }
3827
3828 ASSERT(vp);
3829
3830 /*
3831 * Paranoid: Check if the file is on a read-only FS
3832 */
3833 if (vn_is_readonly(vp)) {
3834 CACHE_DEBUG((DI_ERR, "cannot write: readonly FS"));
3835 goto fail;
3836 }
3837
3838 /*
3839 * Note that we only write map_size bytes to disk - this saves
3840 * space as the actual cache size may be larger than size of
3841 * valid data in the cache.
3842 * Another advantage is that it makes verification of size
3843 * easier when the file is read later.
3844 */
3845 map_size = all->map_size;
3846 off = 0;
3847 buf = cache->cache_data;
3848
3849 while (map_size) {
3850 ASSERT(map_size > 0);
3851 /*
3852 * Write in chunks so that the VM system
3853 * is not overwhelmed.
3854 */
3855 if (map_size > di_chunk * PAGESIZE)
3856 chunk = di_chunk * PAGESIZE;
3857 else
3858 chunk = map_size;
3859
3860 error = chunk_write(vp, off, buf, chunk);
3861 if (error) {
3862 CACHE_DEBUG((DI_ERR, "write failed: off=0x%x: %d",
3863 off, error));
3864 goto fail;
3865 }
3866
3867 off += chunk;
3868 buf += chunk;
3869 map_size -= chunk;
3870
3871 /* If low on memory, give pageout a chance to run */
3872 if (freemem < desfree)
3873 delay(1);
3874 }
3875
3876 /*
3877 * Now sync the file and close it
3878 */
3879 if (error = VOP_FSYNC(vp, FSYNC, kcred, NULL)) {
3880 CACHE_DEBUG((DI_ERR, "FSYNC failed: %d", error));
3881 }
3882
3883 if (error = VOP_CLOSE(vp, oflags, 1, (offset_t)0, kcred, NULL)) {
3884 CACHE_DEBUG((DI_ERR, "close() failed: %d", error));
3885 VN_RELE(vp);
3886 return;
3887 }
3888
3889 VN_RELE(vp);
3890
3891 /*
3892 * Now do the rename
3893 */
3894 if (error = vn_rename(DI_CACHE_TEMP, DI_CACHE_FILE, UIO_SYSSPACE)) {
3895 CACHE_DEBUG((DI_ERR, "rename failed: %d", error));
3896 return;
3897 }
3898
3899 CACHE_DEBUG((DI_INFO, "Cache write successful."));
3900
3901 return;
3902
3903 fail:
3904 (void) VOP_CLOSE(vp, oflags, 1, (offset_t)0, kcred, NULL);
3905 VN_RELE(vp);
3906 }
3907
3908
3909 /*
3910 * Since we could be called early in boot,
3911 * use kobj_read_file()
3912 */
3913 static void
3914 di_cache_read(struct di_cache *cache)
3915 {
3916 struct _buf *file;
3917 struct di_all *all;
3918 int n;
3919 size_t map_size, sz, chunk;
3920 offset_t off;
3921 caddr_t buf;
3922 uint32_t saved_crc, crc;
3923
3924 ASSERT(modrootloaded);
3925 ASSERT(DI_CACHE_LOCKED(*cache));
3926 ASSERT(cache->cache_data == NULL);
3927 ASSERT(cache->cache_size == 0);
3928 ASSERT(!servicing_interrupt());
3929
3930 file = kobj_open_file(DI_CACHE_FILE);
3931 if (file == (struct _buf *)-1) {
3932 CACHE_DEBUG((DI_ERR, "%s: open failed: %d",
3933 DI_CACHE_FILE, ENOENT));
3934 return;
3935 }
3936
3937 /*
3938 * Read in the header+root_path first. The root_path must be "/"
3939 */
3940 all = kmem_zalloc(sizeof (*all) + 1, KM_SLEEP);
3941 n = kobj_read_file(file, (caddr_t)all, sizeof (*all) + 1, 0);
3942
3943 if ((n != sizeof (*all) + 1) || !header_plus_one_ok(all)) {
3944 kmem_free(all, sizeof (*all) + 1);
3945 kobj_close_file(file);
3946 CACHE_DEBUG((DI_ERR, "cache header: read error or invalid"));
3947 return;
3948 }

	map_size = all->map_size;

	kmem_free(all, sizeof (*all) + 1);

	ASSERT(map_size >= sizeof (*all) + 1);

	buf = di_cache.cache_data = kmem_alloc(map_size, KM_SLEEP);
	sz = map_size;
	off = 0;
	while (sz) {
		/* Don't overload the VM system with large reads */
		chunk = (sz > di_chunk * PAGESIZE) ? di_chunk * PAGESIZE : sz;
		n = kobj_read_file(file, buf, chunk, off);
		if (n != chunk) {
			CACHE_DEBUG((DI_ERR, "%s: read error at offset: %lld",
			    DI_CACHE_FILE, off));
			goto fail;
		}
		off += chunk;
		buf += chunk;
		sz -= chunk;
	}

	ASSERT(off == map_size);

	/*
	 * Read past the expected EOF to verify the file size.
	 */
	if (kobj_read_file(file, (caddr_t)&sz, 1, off) > 0) {
		CACHE_DEBUG((DI_ERR, "%s: file size changed", DI_CACHE_FILE));
		goto fail;
	}
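
	/*
	 * The one-byte read above returns 0 at exact EOF, so it succeeds
	 * only if the file is larger than the map_size the header claims.
	 * This rejects a trailing-garbage file cheaply, before the more
	 * expensive checksum below is computed.
	 */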

	all = (struct di_all *)di_cache.cache_data;
	if (!header_plus_one_ok(all)) {
		CACHE_DEBUG((DI_ERR, "%s: file header changed", DI_CACHE_FILE));
		goto fail;
	}

	/*
	 * Compute the CRC with the checksum field in the cache data set
	 * to 0, then restore the saved value before comparing.
	 */
	saved_crc = all->cache_checksum;
	all->cache_checksum = 0;
	CRC32(crc, di_cache.cache_data, map_size, -1U, crc32_table);
	all->cache_checksum = saved_crc;

	if (crc != all->cache_checksum) {
		CACHE_DEBUG((DI_ERR,
		    "%s: checksum error: expected=0x%x actual=0x%x",
		    DI_CACHE_FILE, all->cache_checksum, crc));
		goto fail;
	}
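
	/*
	 * This mirrors the write side in di_cache_update(), which runs
	 * CRC32() while cache_checksum is still zero and only then
	 * stores the result; zeroing the field on both sides keeps the
	 * two computations over identical bytes.
	 */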

	if (all->map_size != map_size) {
		CACHE_DEBUG((DI_ERR, "%s: map size changed", DI_CACHE_FILE));
		goto fail;
	}

	kobj_close_file(file);

	di_cache.cache_size = map_size;

	return;

fail:
	kmem_free(di_cache.cache_data, map_size);
	kobj_close_file(file);
	di_cache.cache_data = NULL;
	di_cache.cache_size = 0;
}


/*
 * Checks whether the arguments are valid for using the cache.
 */
static int
cache_args_valid(struct di_state *st, int *error)
{
	ASSERT(error);
	ASSERT(st->mem_size > 0);
	ASSERT(st->memlist != NULL);

	if (!modrootloaded || !i_ddi_io_initialized()) {
		CACHE_DEBUG((DI_ERR,
		    "cache lookup failure: I/O subsystem not initialized"));
		*error = ENOTACTIVE;
		return (0);
	}

	/*
	 * No other flags are allowed with DINFOCACHE.
	 */
	if (st->command != (DINFOCACHE & DIIOC_MASK)) {
		CACHE_DEBUG((DI_ERR,
		    "cache lookup failure: bad flags: 0x%x",
		    st->command));
		*error = EINVAL;
		return (0);
	}

	if (strcmp(DI_ALL_PTR(st)->root_path, "/") != 0) {
		CACHE_DEBUG((DI_ERR,
		    "cache lookup failure: bad root: %s",
		    DI_ALL_PTR(st)->root_path));
		*error = EINVAL;
		return (0);
	}

	CACHE_DEBUG((DI_INFO, "cache lookup args ok: 0x%x", st->command));

	*error = 0;

	return (1);
}
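
/*
 * A minimal usage sketch for cache_args_valid() (the caller shape below
 * is an assumption; the real caller sits on the snapshot ioctl path
 * elsewhere in this file):
 *
 *	int error;
 *
 *	if (!cache_args_valid(st, &error))
 *		return (error);
 *	rval = di_cache_lookup(st);
 *
 * On failure *error is set to ENOTACTIVE or EINVAL, as above.
 */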

static int
snapshot_is_cacheable(struct di_state *st)
{
	ASSERT(st->mem_size > 0);
	ASSERT(st->memlist != NULL);

	if ((st->command & DI_CACHE_SNAPSHOT_FLAGS) !=
	    (DI_CACHE_SNAPSHOT_FLAGS & DIIOC_MASK)) {
		CACHE_DEBUG((DI_INFO,
		    "not cacheable: incompatible flags: 0x%x",
		    st->command));
		return (0);
	}

	if (strcmp(DI_ALL_PTR(st)->root_path, "/") != 0) {
		CACHE_DEBUG((DI_INFO,
		    "not cacheable: incompatible root path: %s",
		    DI_ALL_PTR(st)->root_path));
		return (0);
	}

	CACHE_DEBUG((DI_INFO, "cacheable snapshot request: 0x%x", st->command));

	return (1);
}

static int
di_cache_lookup(struct di_state *st)
{
	size_t rval;
	int cache_valid;

	ASSERT(cache_args_valid(st, &cache_valid));
	ASSERT(modrootloaded);

	DI_CACHE_LOCK(di_cache);

	/*
	 * The following assignment determines the validity
	 * of the cache as far as this snapshot is concerned.
	 */
	cache_valid = di_cache.cache_valid;

	if (cache_valid && di_cache.cache_data == NULL) {
		di_cache_read(&di_cache);
		/* check for a read or file error */
		if (di_cache.cache_data == NULL)
			cache_valid = 0;
	}
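
	/*
	 * A valid cache with no in-core data means the cache file exists
	 * on disk but has not been read in yet (e.g. the first lookup
	 * after boot), so it is faulted in lazily here; if the read
	 * fails, the lookup falls through to taking a fresh snapshot.
	 */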

	if (cache_valid) {
		/*
		 * Ok, the cache was valid as of this particular
		 * snapshot. Copy the cached snapshot. This is safe
		 * to do as the cache cannot be freed (we hold the
		 * cache lock). Free the memory allocated in di_state
		 * up until this point - we will simply copy everything
		 * in the cache.
		 */

		ASSERT(di_cache.cache_data != NULL);
		ASSERT(di_cache.cache_size > 0);

		di_freemem(st);

		rval = 0;
		if (di_cache2mem(&di_cache, st) > 0) {
			/*
			 * map_size is the size of the valid data in the
			 * cached snapshot and may be less than the size
			 * of the cache.
			 */
			ASSERT(DI_ALL_PTR(st));
			rval = DI_ALL_PTR(st)->map_size;

			ASSERT(rval >= sizeof (struct di_all));
			ASSERT(rval <= di_cache.cache_size);
		}
	} else {
		/*
		 * The cache isn't valid; we need to take a snapshot.
		 * Set the command flags appropriately.
		 */
		ASSERT(st->command == (DINFOCACHE & DIIOC_MASK));
		st->command = (DI_CACHE_SNAPSHOT_FLAGS & DIIOC_MASK);
		rval = di_cache_update(st);
		st->command = (DINFOCACHE & DIIOC_MASK);
	}

	DI_CACHE_UNLOCK(di_cache);

	/*
	 * For cached snapshots, the devinfo driver always returns
	 * a snapshot rooted at "/".
	 */
	ASSERT(rval == 0 || strcmp(DI_ALL_PTR(st)->root_path, "/") == 0);

	return ((int)rval);
}

/*
 * This is a forced update of the cache - the previous state of the cache
 * may be:
 *	- unpopulated
 *	- populated and invalid
 *	- populated and valid
 */
static int
di_cache_update(struct di_state *st)
{
	int rval;
	uint32_t crc;
	struct di_all *all;

	ASSERT(DI_CACHE_LOCKED(di_cache));
	ASSERT(snapshot_is_cacheable(st));

	/*
	 * Free the in-core cache and the on-disk file (if they exist).
	 */
	i_ddi_di_cache_free(&di_cache);

	/*
	 * Set the valid flag before taking the snapshot, so that an
	 * invalidation arriving during or after the snapshot clears
	 * the flag rather than being wiped out by us setting it
	 * afterwards.
	 */
	atomic_or_32(&di_cache.cache_valid, 1);
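
	/*
	 * Sketch of the ordering argument (the invalidation path lives
	 * elsewhere; its exact shape here is an assumption):
	 *
	 *	updater:			invalidator:
	 *	cache_valid = 1
	 *	... take snapshot ...		cache_valid = 0
	 *	write cache file
	 *
	 * Were the stores reversed (snapshot first, then set valid), an
	 * invalidation landing in between would be overwritten and a
	 * stale cache could be reported as valid.
	 */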

	rval = di_snapshot_and_clean(st);

	if (rval == 0) {
		CACHE_DEBUG((DI_ERR, "can't update cache: bad snapshot"));
		return (0);
	}

	DI_ALL_PTR(st)->map_size = rval;
	if (di_mem2cache(st, &di_cache) == 0) {
		CACHE_DEBUG((DI_ERR, "can't update cache: copy failed"));
		return (0);
	}

	ASSERT(di_cache.cache_data);
	ASSERT(di_cache.cache_size > 0);

	/*
	 * Now that we have cached the snapshot, compute its checksum.
	 * The checksum is computed only over the valid data in the
	 * cache, not the entire cache.
	 * Also, set all the fields (except the checksum itself) before
	 * computing the checksum.
	 */
	all = (struct di_all *)di_cache.cache_data;
	all->cache_magic = DI_CACHE_MAGIC;
	all->map_size = rval;

	ASSERT(all->cache_checksum == 0);
	CRC32(crc, di_cache.cache_data, all->map_size, -1U, crc32_table);
	all->cache_checksum = crc;

	di_cache_write(&di_cache);

	return (rval);
}

static void
di_cache_print(di_cache_debug_t msglevel, char *fmt, ...)
{
	va_list ap;

	if (di_cache_debug <= DI_QUIET)
		return;

	if (di_cache_debug < msglevel)
		return;

	switch (msglevel) {
	case DI_ERR:
		msglevel = CE_WARN;
		break;
	case DI_INFO:
	case DI_TRACE:
	default:
		msglevel = CE_NOTE;
		break;
	}

	va_start(ap, fmt);
	vcmn_err(msglevel, fmt, ap);
	va_end(ap);
}
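
/*
 * di_cache_print() is reached through the CACHE_DEBUG() macro used
 * throughout this file. The double parentheses in calls such as
 *
 *	CACHE_DEBUG((DI_ERR, "write failed: off=0x%x: %d", off, error));
 *
 * let a full argument list pass through a single macro parameter
 * (CACHE_DEBUG is defined elsewhere; that it expands to a
 * di_cache_print() call is an inference from this file).
 */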
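
/*
 * Fix up the hp_child offsets for hotplug nodes recorded during the
 * snapshot. Each i_hp node queued on st->hp_list (the list is
 * populated elsewhere in this file) names a di_hp record and the
 * child dip that could not be resolved at packing time; if
 * di_dip_find() can now locate the child in the snapshot, its offset
 * is patched into hp_child. The list nodes are freed and the list
 * destroyed as we go.
 */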
static void
di_hotplug_children(struct di_state *st)
{
	di_off_t off;
	struct di_hp *hp;
	struct i_hp *hp_list_node;

	while (hp_list_node = (struct i_hp *)list_remove_head(&st->hp_list)) {

		if ((hp_list_node->hp_child != NULL) &&
		    (di_dip_find(st, hp_list_node->hp_child, &off) == 0)) {
			hp = DI_HP(di_mem_addr(st, hp_list_node->hp_off));
			hp->hp_child = off;
		}

		kmem_free(hp_list_node, sizeof (i_hp_t));
	}

	list_destroy(&st->hp_list);
}