/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * sun4v VIO DR Module
 */

#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/note.h>
#include <sys/sysevent/dr.h>
#include <sys/hypervisor_api.h>
#include <sys/mach_descrip.h>
#include <sys/mdesc.h>
#include <sys/mdesc_impl.h>
#include <sys/ds.h>
#include <sys/drctl.h>
#include <sys/dr_util.h>
#include <sys/dr_io.h>
#include <sys/promif.h>
#include <sys/machsystm.h>
#include <sys/ethernet.h>
#include <sys/hotplug/pci/pcicfg.h>


static struct modlmisc modlmisc = {
	&mod_miscops,
	"sun4v VIO DR"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};


/*
 * VIO DS Interface
 */

/*
 * Global DS Handle
 */
static ds_svc_hdl_t ds_vio_handle;

/*
 * Supported DS Capability Versions
 */
static ds_ver_t		dr_vio_vers[] = { { 1, 0 } };
#define	DR_VIO_NVERS	(sizeof (dr_vio_vers) / sizeof (dr_vio_vers[0]))

/*
 * DS Capability Description
 */
static ds_capability_t dr_vio_cap = {
	DR_VIO_DS_ID,		/* svc_id */
	dr_vio_vers,		/* vers */
	DR_VIO_NVERS		/* nvers */
};

/*
 * DS Callbacks
 */
static void dr_vio_reg_handler(ds_cb_arg_t, ds_ver_t *, ds_svc_hdl_t);
static void dr_vio_unreg_handler(ds_cb_arg_t arg);
static void dr_vio_data_handler(ds_cb_arg_t arg, void *buf, size_t buflen);

/*
 * DS Client Ops Vector
 */
static ds_clnt_ops_t dr_vio_ops = {
	dr_vio_reg_handler,	/* ds_reg_cb */
	dr_vio_unreg_handler,	/* ds_unreg_cb */
	dr_vio_data_handler,	/* ds_data_cb */
	NULL			/* cb_arg */
};

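/*
 * Argument passed to dr_io_check_node() via ddi_walk_devs(); 'dip' is
 * set to the matching devinfo node (held) when the walk succeeds.
 */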
typedef struct {
	char		*name;
	uint64_t	devid;
	dev_info_t	*dip;
} dr_search_arg_t;

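/*
 * ddi_walk_devs() callback: match a device tree node against the name
 * and "reg" (cfg-handle) value in the search argument.  On a match the
 * branch is held and the walk terminates.
 */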
static int
dr_io_check_node(dev_info_t *dip, void *arg)
{
	char		*name;
	uint64_t	devid;
	dr_search_arg_t	*sarg = (dr_search_arg_t *)arg;

	name = ddi_node_name(dip);

	if (strcmp(name, sarg->name) != 0)
		return (DDI_WALK_CONTINUE);

	devid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", -1);

	DR_DBG_IO("%s: found devid=%ld, looking for %ld\n",
	    __func__, devid, sarg->devid);

	if (devid == sarg->devid) {
		DR_DBG_IO("%s: matched", __func__);

		/* matching node must be returned held */
		if (!e_ddi_branch_held(dip))
			e_ddi_branch_hold(dip);

		sarg->dip = dip;
		return (DDI_WALK_TERMINATE);
	}

	return (DDI_WALK_CONTINUE);
}

/*
 * Walk the device tree to find the dip corresponding to the devid
 * passed in. If present, the dip is returned held. The caller must
 * release the hold on the dip once it is no longer required. If no
 * matching node is found, NULL is returned.
 */
static dev_info_t *
dr_io_find_node(char *name, uint64_t devid)
{
	dr_search_arg_t	arg;

	DR_DBG_IO("dr_io_find_node...\n");

	arg.name = name;
	arg.devid = devid;
	arg.dip = NULL;

	ddi_walk_devs(ddi_root_node(), dr_io_check_node, &arg);

	ASSERT((arg.dip == NULL) || (e_ddi_branch_held(arg.dip)));

	return ((arg.dip) ? arg.dip : NULL);
}

/*
 * Look up a particular IO node in the MD. Returns the mde_cookie_t
 * representing that IO node if present, and MDE_INVAL_ELEM_COOKIE otherwise.
 * It is assumed the scratch array has already been allocated so that
 * it can accommodate the worst case scenario, every node in the MD.
 */
static mde_cookie_t
dr_io_find_node_md(md_t *mdp, char *name, uint64_t id, mde_cookie_t *listp)
{
	int		i;
	int		nnodes;
	char		*devnm;
	uint64_t	devid;
	mde_cookie_t	rootnode;
	mde_cookie_t	result = MDE_INVAL_ELEM_COOKIE;

	DR_DBG_IO("%s: %s@%ld\n", __func__, name, id);

	rootnode = md_root_node(mdp);
	ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);

	/*
	 * Scan the DAG for all candidate nodes.
	 */
	nnodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "fwd"), listp);

	if (nnodes < 0) {
		DR_DBG_IO("%s: scan for "
		    "'virtual-device' nodes failed\n", __func__);
		return (result);
	}

	DR_DBG_IO("%s: found %d nodes in the MD\n", __func__, nnodes);

	/*
	 * Find the node of interest
	 */
	for (i = 0; i < nnodes; i++) {

		if (md_get_prop_str(mdp, listp[i], "name", &devnm)) {
			DR_DBG_IO("%s: missing 'name' property for"
			    " IO node %d\n", __func__, i);
			return (result);
		}

		if (strcmp(devnm, name) != 0)
			continue;

		if (md_get_prop_val(mdp, listp[i], "cfg-handle", &devid)) {
			DR_DBG_IO("%s: missing 'cfg-handle' property for"
			    " IO node %d\n", __func__, i);
			break;
		}

		if (devid == id) {
			/* found a match */
			DR_DBG_IO("%s: found IO node %s@%ld "
			    "in MD\n", __func__, name, id);
			result = listp[i];
			break;
		}
	}

	if (result == MDE_INVAL_ELEM_COOKIE)
		DR_DBG_IO("%s: IO node %ld not in MD\n", __func__, id);

	return (result);
}

typedef struct {
	md_t		*mdp;
	mde_cookie_t	node;
	dev_info_t	*dip;
} cb_arg_t;

#define	STR_ARR_LEN	5

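/*
 * sid_branch_create callback for e_ddi_branch_create(): populate the
 * new devinfo node with properties ('name', 'compatible', 'device_type',
 * 'reg', and for vnet/vsw nodes 'local-mac-address' and 'mtu') taken
 * from the MD node passed in through the cb_arg_t.
 */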
static int
new_dev_node(dev_info_t *new_node, void *arg, uint_t flags)
{
	_NOTE(ARGUNUSED(flags))

	cb_arg_t	*cba;
	char		*devnm, *devtype;
	char		*compat;
	uint64_t	devid;
	int		len = 0;
	char		*curr;
	int		i = 0;
	char		*str_arr[STR_ARR_LEN];

	cba = (cb_arg_t *)arg;

	/*
	 * Add 'name' property
	 */
	if (md_get_prop_str(cba->mdp, cba->node, "name", &devnm)) {
		DR_DBG_IO("%s: failed to read 'name' prop from MD\n", __func__);
		return (DDI_WALK_ERROR);
	}
	DR_DBG_IO("%s: device name is %s\n", __func__, devnm);

	if (ndi_prop_update_string(DDI_DEV_T_NONE, new_node,
	    "name", devnm) != DDI_SUCCESS) {
		DR_DBG_IO("%s: failed to create 'name' prop\n", __func__);
		return (DDI_WALK_ERROR);
	}

	/*
	 * Add 'compatible' property
	 */
	if (md_get_prop_data(cba->mdp, cba->node, "compatible",
	    (uint8_t **)&compat, &len)) {
		DR_DBG_IO("%s: failed to read "
		    "'compatible' prop from MD\n", __func__);
		return (DDI_WALK_ERROR);
	}

	/* parse the MD string array */
	curr = compat;
	while (curr < (compat + len)) {

		DR_DBG_IO("%s: adding '%s' to "
		    "'compatible' prop\n", __func__, curr);

		str_arr[i++] = curr;
		curr += strlen(curr) + 1;

		if (i == STR_ARR_LEN) {
			DR_DBG_IO("exceeded str_arr len (%d)\n", STR_ARR_LEN);
			break;
		}
	}


	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, new_node,
	    "compatible", str_arr, i) != DDI_SUCCESS) {
		DR_DBG_IO("%s: cannot create 'compatible' prop\n", __func__);
		return (DDI_WALK_ERROR);
	}

	/*
	 * Add 'device_type' property
	 */
	if (md_get_prop_str(cba->mdp, cba->node, "device-type", &devtype)) {
		DR_DBG_IO("%s: failed to read "
		    "'device-type' prop from MD\n", __func__);
		return (DDI_WALK_ERROR);
	}
	if (ndi_prop_update_string(DDI_DEV_T_NONE, new_node,
	    "device_type", devtype) != DDI_SUCCESS) {
		DR_DBG_IO("%s: failed to create "
		    "'device_type' prop\n", __func__);
		return (DDI_WALK_ERROR);
	}

	DR_DBG_IO("%s: device type is %s\n", __func__, devtype);

	/*
	 * Add 'reg' (cfg-handle) property
	 */
	if (md_get_prop_val(cba->mdp, cba->node, "cfg-handle", &devid)) {
		DR_DBG_IO("%s: failed to read "
		    "'cfg-handle' prop from MD\n", __func__);
		return (DDI_WALK_ERROR);
	}

	DR_DBG_IO("%s: new device is %s@%ld\n", __func__, devnm, devid);

	if (ndi_prop_update_int(DDI_DEV_T_NONE, new_node, "reg", devid)
	    != DDI_SUCCESS) {
		DR_DBG_IO("%s: failed to create 'reg' prop\n", __func__);
		return (DDI_WALK_ERROR);
	}

	/* if vnet/vswitch, probe and add mac-address and mtu properties */
	if (strcmp(devnm, "vsw") == 0 || strcmp(devnm, "network") == 0) {

		int i, j;
		uint64_t mtu, macaddr;
		uchar_t maddr_arr[ETHERADDRL];

		if (md_get_prop_val(cba->mdp, cba->node, "local-mac-address",
		    &macaddr)) {
			DR_DBG_IO("%s: failed to read "
			    "'local-mac-address' prop from MD\n", __func__);
			return (DDI_WALK_ERROR);
		}

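		/*
		 * Unpack the 64-bit MD value into a byte array, most
		 * significant byte of the 48-bit MAC address first, as
		 * used by the 'local-mac-address' property.
		 */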
		for (i = 0, j = (ETHERADDRL - 1); i < ETHERADDRL; i++, j--)
			maddr_arr[j] = (macaddr >> (i * 8)) & 0xff;

		if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, new_node,
		    "local-mac-address", maddr_arr, ETHERADDRL)
		    != DDI_SUCCESS) {
			DR_DBG_IO("%s: failed to create "
			    "'local-mac-address' prop\n", __func__);
			return (DDI_WALK_ERROR);
		}

		if (md_get_prop_val(cba->mdp, cba->node, "mtu", &mtu)) {
			DR_DBG_IO("%s: failed to read "
			    "'mtu' prop from MD\n", __func__);
			return (DDI_WALK_ERROR);
		}

		if (ndi_prop_update_int64(DDI_DEV_T_NONE, new_node, "mtu",
		    mtu) != DDI_SUCCESS) {
			DR_DBG_IO("%s: failed to "
			    "create 'mtu' prop\n", __func__);
			return (DDI_WALK_ERROR);
		}

		DR_DBG_IO("%s: Added properties for %s@%ld, "
		    "mac=%ld, mtu=%ld\n", __func__, devnm, devid, macaddr, mtu);
	}

	cba->dip = new_node;

	return (DDI_WALK_TERMINATE);
}

/*
 * Find the parent node of the argument virtual device node in
 * the MD.  For virtual devices, the parent is always
 * "channel-devices", so scan the MD using the "back" arcs
 * looking for a node with that name.
 */
static mde_cookie_t
dr_vio_find_parent_md(md_t *mdp, mde_cookie_t node)
{
	int		max_nodes;
	int		num_nodes;
	int		listsz;
	mde_cookie_t    *listp;
	mde_cookie_t	pnode = MDE_INVAL_ELEM_COOKIE;

	max_nodes = md_node_count(mdp);
	listsz = max_nodes * sizeof (mde_cookie_t);
	listp = kmem_zalloc(listsz, KM_SLEEP);
	DR_DBG_KMEM("%s: alloc addr %p size %d\n",
	    __func__, (void *)listp, listsz);

	num_nodes = md_scan_dag(mdp, node,
	    md_find_name(mdp, "channel-devices"),
	    md_find_name(mdp, "back"), listp);

	ASSERT(num_nodes == 1);

	if (num_nodes == 1)
		pnode = listp[0];

	DR_DBG_KMEM("%s: free addr %p size %d\n",
	    __func__, (void *)listp, listsz);
	kmem_free(listp, listsz);

	return (pnode);
}

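/*
 * Handle a DR_VIO_CONFIGURE request: if the device is already present
 * in the device tree, report success; otherwise locate the device and
 * its "channel-devices" parent in the MD, obtain approval from drctl
 * (DRCTL_IO_CONFIG_REQUEST), create and configure the devinfo branch
 * via e_ddi_branch_create(), report the outcome to drctl_config_fini(),
 * and generate an insert sysevent on success.
 */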
static int
dr_io_configure(dr_vio_req_t *req, dr_vio_res_t *res)
{
	int		rv = ENXIO;
	int		listsz;
	int		nnodes;
	uint64_t	devid = req->dev_id;
	uint64_t	pdevid;
	char		*name = req->name;
	char		*pname;
	md_t		*mdp = NULL;
	mde_cookie_t	*listp = NULL;
	mde_cookie_t	node;
	mde_cookie_t	pnode;
	dev_info_t	*pdip = NULL;
	dev_info_t	*dip;
	devi_branch_t	br;
	cb_arg_t	cba;
	int		drctl_cmd;
	int		drctl_flags = 0;
	drctl_rsrc_t	*drctl_req;
	size_t		drctl_req_len;
	drctl_rsrc_t	*drctl_rsrc = NULL;
	drctl_cookie_t	drctl_res_ck;
	char		*p;
	drctl_resp_t	*drctl_resp;
	size_t		drctl_resp_len = 0;

	res->result = DR_VIO_RES_FAILURE;

	if ((dip = dr_io_find_node(name, devid)) != NULL) {
		DR_DBG_IO("%s: %s@%ld already configured\n",
		    __func__, name, devid);

		/* Return success if the resource is already there. */
		res->result = DR_VIO_RES_OK;
		res->status = DR_VIO_STAT_CONFIGURED;
		e_ddi_branch_rele(dip);
		return (0);
	}

	/* Assume we fail to find the node to be added. */
	res->status = DR_VIO_STAT_NOT_PRESENT;

	if ((mdp = md_get_handle()) == NULL) {
		DR_DBG_IO("%s: unable to initialize MD\n", __func__);
		return (ENXIO);
	}

	nnodes = md_node_count(mdp);
	ASSERT(nnodes > 0);

	listsz = nnodes * sizeof (mde_cookie_t);
	listp = kmem_zalloc(listsz, KM_SLEEP);
	DR_DBG_KMEM("%s: alloc addr %p size %d\n",
	    __func__, (void *)listp, listsz);

	/*
	 * Get the MD device node.
	 */
	node = dr_io_find_node_md(mdp, name, devid, listp);

	if (node == MDE_INVAL_ELEM_COOKIE) {
		DR_DBG_IO("%s: scan for %s name node failed\n", __func__, name);
		res->result = DR_VIO_RES_NOT_IN_MD;
		goto done;
	}

	/*
	 * Get the MD parent node.
	 */
	pnode = dr_vio_find_parent_md(mdp, node);
	if (pnode == MDE_INVAL_ELEM_COOKIE) {
		DR_DBG_IO("%s: failed to find MD parent of %lx\n",
		    __func__, node);
		goto done;
	}

	if (md_get_prop_str(mdp, pnode, "name", &pname)) {
		DR_DBG_IO("%s: failed to read "
		    "'name' for pnode %lx from MD\n", __func__, pnode);
		goto done;
	}

	if (md_get_prop_val(mdp, pnode, "cfg-handle", &pdevid)) {
		DR_DBG_IO("%s: failed to read 'cfg-handle' "
		    "for pnode '%s' from MD\n", __func__, pname);
		goto done;
	}

	DR_DBG_IO("%s: parent device %s@%lx\n", __func__, pname, pdevid);

	/*
	 * Get the devinfo parent node.
	 */
	if ((pdip = dr_io_find_node(pname, pdevid)) == NULL) {
		DR_DBG_IO("%s: parent device %s@%ld not found\n",
		    __func__, pname, pdevid);
		goto done;
	}

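	/*
	 * Allocate a single drctl resource descriptor (with room for the
	 * device path) and ask drctl to approve the configure operation
	 * before any device tree changes are made.
	 */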
	drctl_req_len = sizeof (drctl_rsrc_t) + MAXPATHLEN;
	drctl_req = kmem_zalloc(drctl_req_len, KM_SLEEP);
	DR_DBG_KMEM("%s: alloc addr %p size %ld\n",
	    __func__, (void *)drctl_req, drctl_req_len);
	drctl_req->status = DRCTL_STATUS_INIT;

	drctl_cmd = DRCTL_IO_CONFIG_REQUEST;

	/*
	 * Construct the path of the device as it will be if it
	 * is successfully added.
	 */
	p = drctl_req->res_dev_path;
	(void) sprintf(p, "/devices");
	(void) ddi_pathname(pdip, p + strlen(p));
	(void) sprintf(p + strlen(p), "/%s@%ld", name, devid);
	DR_DBG_IO("%s: devpath=%s\n", __func__, drctl_req->res_dev_path);

	rv = drctl_config_init(drctl_cmd, drctl_flags, drctl_req,
	    1, &drctl_resp, &drctl_resp_len, &drctl_res_ck);

	ASSERT((drctl_resp != NULL) && (drctl_resp_len != 0));

	drctl_rsrc = drctl_resp->resp_resources;

	if (rv != 0) {
		DR_DBG_IO("%s: drctl_config_init failed: %d\n", __func__, rv);

		ASSERT(drctl_resp->resp_type == DRCTL_RESP_ERR);

		(void) strlcpy(res->reason,
		    drctl_resp->resp_err_msg, DR_VIO_MAXREASONLEN);

		DR_DBG_IO("%s: %s\n", __func__, res->reason);

		goto done;
	}

	ASSERT(drctl_resp->resp_type == DRCTL_RESP_OK);

	if (drctl_rsrc->status == DRCTL_STATUS_DENY) {

		res->result = DR_VIO_RES_BLOCKED;

		DR_DBG_IO("%s: drctl_config_init denied\n", __func__);
		p = (char *)drctl_rsrc + drctl_rsrc->offset;

		(void) strlcpy(res->reason, p, DR_VIO_MAXREASONLEN);

		DR_DBG_IO("%s: %s\n", __func__, res->reason);

		drctl_req->status = DRCTL_STATUS_CONFIG_FAILURE;

		rv = EPERM;
	} else {
		cba.mdp = mdp;
		cba.node = node;

		br.arg = (void *)&cba;
		br.type = DEVI_BRANCH_SID;
		br.create.sid_branch_create = new_dev_node;
		br.devi_branch_callback = NULL;

		rv = e_ddi_branch_create(pdip,
		    &br, NULL, DEVI_BRANCH_CONFIGURE);

		drctl_req->status = (rv == 0) ?
		    DRCTL_STATUS_CONFIG_SUCCESS : DRCTL_STATUS_CONFIG_FAILURE;

		DR_DBG_IO("%s: %s@%ld = %d\n", __func__, name, devid, rv);
	}

	if (drctl_config_fini(&drctl_res_ck, drctl_req, 1) != 0)
		DR_DBG_IO("%s: drctl_config_fini returned: %d\n", __func__, rv);

done:
	if (listp) {
		DR_DBG_KMEM("%s: free addr %p size %d\n",
		    __func__, (void *)listp, listsz);
		kmem_free(listp, listsz);
	}

	if (mdp)
		(void) md_fini_handle(mdp);

	if (pdip)
		e_ddi_branch_rele(pdip);

	DR_DBG_KMEM("%s: free addr %p size %ld\n",
	    __func__, (void *)drctl_req, drctl_req_len);
	kmem_free(drctl_req, drctl_req_len);

	if (drctl_resp) {
		DR_DBG_KMEM("%s: free addr %p size %ld\n",
		    __func__, (void *)drctl_resp, drctl_resp_len);
		kmem_free(drctl_resp, drctl_resp_len);
	}

	if (rv == 0) {
		res->result = DR_VIO_RES_OK;
		res->status = DR_VIO_STAT_CONFIGURED;

		/* notify interested parties about the operation */
		dr_generate_event(DR_TYPE_VIO, SE_HINT_INSERT);
	} else {
		res->status = DR_VIO_STAT_UNCONFIGURED;
	}

	return (rv);
}

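/*
 * Handle a DR_VIO_UNCONFIGURE (or DR_VIO_FORCE_UNCONFIG) request: if the
 * device is already absent, report success; otherwise obtain approval
 * from drctl (DRCTL_IO_UNCONFIG_REQUEST), destroy the devinfo branch via
 * e_ddi_branch_destroy(), report the outcome to drctl_config_fini(), and
 * generate a remove sysevent on success.
 */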
static int
dr_io_unconfigure(dr_vio_req_t *req, dr_vio_res_t *res)
{
	int		rv;
	char		*name = req->name;
	char		*p;
	uint64_t	devid = req->dev_id;
	dev_info_t	*dip;
	dev_info_t	*fdip = NULL;
	int		drctl_cmd;
	int		drctl_flags = 0;
	drctl_rsrc_t	*drctl_req;
	size_t		drctl_req_len;
	drctl_rsrc_t	*drctl_rsrc = NULL;
	drctl_cookie_t	drctl_res_ck;
	drctl_resp_t	*drctl_resp;
	size_t		drctl_resp_len;

	if ((dip = dr_io_find_node(name, devid)) == NULL) {
		DR_DBG_IO("%s: %s@%ld already unconfigured\n",
		    __func__, name, devid);
		res->result = DR_VIO_RES_OK;
		res->status = DR_VIO_STAT_NOT_PRESENT;
		return (0);
	}

	res->result = DR_VIO_RES_FAILURE;

	ASSERT(e_ddi_branch_held(dip));

	/* Assume we fail to unconfigure the resource. */
	res->status = DR_VIO_STAT_CONFIGURED;

	drctl_req_len = sizeof (drctl_rsrc_t) + MAXPATHLEN;
	drctl_req = kmem_zalloc(drctl_req_len, KM_SLEEP);
	DR_DBG_KMEM("%s: alloc addr %p size %ld\n",
	    __func__, (void *)drctl_req, drctl_req_len);
	drctl_req->status = DRCTL_STATUS_INIT;

	drctl_cmd = DRCTL_IO_UNCONFIG_REQUEST;

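	/* Pass a forced unconfigure through to drctl as DRCTL_FLAG_FORCE. */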
	if (req->msg_type == DR_VIO_FORCE_UNCONFIG)
		drctl_flags = DRCTL_FLAG_FORCE;

	p = drctl_req->res_dev_path;
	(void) sprintf(p, "/devices");
	(void) ddi_pathname(dip, p + strlen(p));
	DR_DBG_IO("%s: devpath=%s\n", __func__, drctl_req->res_dev_path);

	rv = drctl_config_init(drctl_cmd, drctl_flags, drctl_req,
	    1, &drctl_resp, &drctl_resp_len, &drctl_res_ck);

	ASSERT((drctl_resp != NULL) && (drctl_resp_len != 0));

	drctl_rsrc = drctl_resp->resp_resources;

	if (rv != 0) {

		DR_DBG_IO("%s: drctl_config_init failed: %d\n", __func__, rv);

		ASSERT(drctl_resp->resp_type == DRCTL_RESP_ERR);

		(void) strlcpy(res->reason,
		    drctl_resp->resp_err_msg, DR_VIO_MAXREASONLEN);

		DR_DBG_IO("%s: %s\n", __func__, res->reason);

		goto done;
	}

	if (drctl_rsrc->status == DRCTL_STATUS_DENY) {
		res->result = DR_VIO_RES_BLOCKED;

		DR_DBG_IO("%s: drctl_config_init denied\n", __func__);
		p = (char *)drctl_rsrc + drctl_rsrc->offset;

		(void) strlcpy(res->reason, p, DR_VIO_MAXREASONLEN);

		DR_DBG_IO("%s: %s\n", __func__, res->reason);

		drctl_req->status = DRCTL_STATUS_CONFIG_FAILURE;

		rv = EPERM;
	} else if (rv = e_ddi_branch_destroy(dip, &fdip, 0)) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		DR_DBG_KMEM("%s: alloc addr %p size %d\n",
		    __func__, (void *)path, MAXPATHLEN);
		/*
		 * If non-NULL, fdip is held and must be released.
		 */
		if (fdip != NULL) {
			(void) ddi_pathname(fdip, path);
			ddi_release_devi(fdip);
		} else {
			(void) ddi_pathname(dip, path);
		}

		DR_DBG_IO("%s: node removal failed: %s (%p)",
		    __func__, path, (fdip) ? (void *)fdip : (void *)dip);

		drctl_req->status = DRCTL_STATUS_CONFIG_FAILURE;

		DR_DBG_KMEM("%s: free addr %p size %d\n",
		    __func__, (void *)path, MAXPATHLEN);
		kmem_free(path, MAXPATHLEN);
	} else {
		drctl_req->status = DRCTL_STATUS_CONFIG_SUCCESS;
	}

	if (drctl_config_fini(&drctl_res_ck, drctl_req, 1) != 0)
		DR_DBG_IO("%s: drctl_config_fini returned: %d\n", __func__, rv);

	DR_DBG_IO("%s: (%s@%ld) = %d\n", __func__, name, devid, rv);

	if (rv == 0) {
		res->result = DR_VIO_RES_OK;
		res->status = DR_VIO_STAT_UNCONFIGURED;

		/* Notify interested parties about the operation. */
		dr_generate_event(DR_TYPE_VIO, SE_HINT_REMOVE);
	}
done:
	DR_DBG_KMEM("%s: free addr %p size %ld\n",
	    __func__, (void *)drctl_req, drctl_req_len);
	kmem_free(drctl_req, drctl_req_len);

	if (drctl_resp) {
		DR_DBG_KMEM("%s: free addr %p size %ld\n",
		    __func__, (void *)drctl_resp, drctl_resp_len);
		kmem_free(drctl_resp, drctl_resp_len);
	}

	return (rv);
}

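/*
 * DS data callback for the VIO DR capability.  Each inbound message is
 * expected to be a dr_vio_req_t; a dr_vio_res_t (with optional reason
 * text) is always sent back on the registered service handle.  For
 * example, a request to add vnet instance 3 would arrive roughly as
 * { msg_type = DR_VIO_CONFIGURE, dev_id = 3, name = "network" } and,
 * on success, be answered with { result = DR_VIO_RES_OK,
 * status = DR_VIO_STAT_CONFIGURED }.  (Illustrative values only; the
 * message layout is defined in <sys/dr_io.h>.)
 */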
static void
dr_vio_data_handler(ds_cb_arg_t arg, void *buf, size_t buflen)
{
	_NOTE(ARGUNUSED(arg))

	size_t		res_len;
	dr_vio_res_t	*res;
	dr_vio_req_t	*req = NULL;

	/*
	 * Allocate a response buffer, because we always want to
	 * send back a response message.
	 */
	res_len = sizeof (dr_vio_res_t) + DR_VIO_MAXREASONLEN;
	res = kmem_zalloc(res_len, KM_SLEEP);
	DR_DBG_KMEM("%s: alloc addr %p size %ld\n",
	    __func__, (void *)res, res_len);
	res->result = DR_VIO_RES_FAILURE;

	/*
	 * Sanity check the message
	 */
	if (buf == NULL) {
		DR_DBG_IO("empty message: expected at least %ld bytes\n",
		    sizeof (dr_vio_req_t));
		goto done;
	}
	if (buflen < sizeof (dr_vio_req_t)) {
		DR_DBG_IO("incoming message short: expected at least %ld "
		    "bytes, received %ld\n", sizeof (dr_vio_req_t), buflen);
		goto done;
	}

	DR_DBG_TRANS("incoming request:\n");
	DR_DBG_DUMP_MSG(buf, buflen);

	req = buf;
	switch (req->msg_type) {
	case DR_VIO_CONFIGURE:
		(void) dr_io_configure(req, res);
		break;
	case DR_VIO_FORCE_UNCONFIG:
	case DR_VIO_UNCONFIGURE:
		(void) dr_io_unconfigure(req, res);
		break;
	default:
		cmn_err(CE_NOTE, "bad msg_type %d\n", req->msg_type);
		break;
	}
done:
	res->req_num = (req) ? req->req_num : 0;

	DR_DBG_TRANS("outgoing response:\n");
	DR_DBG_DUMP_MSG(res, res_len);

	/* send back the response */
	if (ds_cap_send(ds_vio_handle, res, res_len) != 0)
		DR_DBG_IO("ds_send failed\n");

	if (res) {
		DR_DBG_KMEM("%s: free addr %p size %ld\n",
		    __func__, (void *)res, res_len);
		kmem_free(res, res_len);
	}
}

static void
dr_vio_reg_handler(ds_cb_arg_t arg, ds_ver_t *ver, ds_svc_hdl_t hdl)
{
	DR_DBG_IO("vio_reg_handler: arg=0x%p, ver=%d.%d, hdl=0x%lx\n",
	    arg, ver->major, ver->minor, hdl);

	ds_vio_handle = hdl;
}

static void
dr_vio_unreg_handler(ds_cb_arg_t arg)
{
	DR_DBG_IO("vio_unreg_handler: arg=0x%p\n", arg);

	ds_vio_handle = DS_INVALID_HDL;
}

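/*
 * Register the VIO DR capability with the Domain Services framework.
 * Once the capability is negotiated, dr_vio_reg_handler() records the
 * service handle that dr_vio_data_handler() uses for its responses.
 */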
static int
dr_io_init(void)
{
	int	rv;

	if ((rv = ds_cap_init(&dr_vio_cap, &dr_vio_ops)) != 0) {
		cmn_err(CE_NOTE, "ds_cap_init vio failed: %d", rv);
		return (-1);
	}

	return (0);
}

static int
dr_io_fini(void)
{
	int	rv;

	if ((rv = ds_cap_fini(&dr_vio_cap)) != 0) {
		cmn_err(CE_NOTE, "ds_cap_fini vio failed: %d", rv);
		return (-1);
	}

	return (0);
}

int
_init(void)
{
	int	status;

	/* check that IO DR is enabled */
	if (dr_is_disabled(DR_TYPE_VIO)) {
		cmn_err(CE_CONT, "!VIO DR is disabled\n");
		return (-1);
	}

	if ((status = dr_io_init()) != 0) {
		cmn_err(CE_NOTE, "VIO DR initialization failed");
		return (status);
	}

	if ((status = mod_install(&modlinkage)) != 0) {
		(void) dr_io_fini();
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

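/*
 * The module is not normally unloaded: _fini() returns EBUSY unless
 * dr_io_allow_unload has been set to a non-zero value (for example via
 * /etc/system or a debugger).
 */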
int dr_io_allow_unload = 0;

int
_fini(void)
{
	int	status;

	if (dr_io_allow_unload == 0)
		return (EBUSY);

	if ((status = mod_remove(&modlinkage)) == 0) {
		(void) dr_io_fini();
	}

	return (status);
}