/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strlog.h>
#include <sys/strsubr.h>
#include <sys/cmn_err.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ksynch.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/strsun.h>
#include <sys/dlpi.h>
#include <sys/ethernet.h>
#include <net/if.h>
#include <sys/varargs.h>
#include <sys/machsystm.h>
#include <sys/modctl.h>
#include <sys/modhash.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/taskq.h>
#include <sys/note.h>
#include <sys/mach_descrip.h>
#include <sys/mdeg.h>
#include <sys/ldc.h>
#include <sys/vsw_fdb.h>
#include <sys/vsw.h>
#include <sys/vio_mailbox.h>
#include <sys/vnet_mailbox.h>
#include <sys/vnet_common.h>
#include <sys/vio_util.h>
#include <sys/sdt.h>
#include <sys/atomic.h>
#include <sys/callb.h>


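/*
 * Generate the next request ID for a DDS exchange on a share; replies
 * from the guest are matched against vs_req_id in vsw_process_dds_msg().
 */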
#define	VSW_DDS_NEXT_REQID(vsharep)	(++((vsharep)->vs_req_id))

extern boolean_t vsw_hio_enabled;		/* HybridIO enabled? */
extern int vsw_hio_max_cleanup_retries;
extern int vsw_hio_cleanup_delay;

/* Functions imported from other files */
extern int vsw_send_msg(vsw_ldc_t *, void *, int, boolean_t);
extern int vsw_set_hw(vsw_t *, vsw_port_t *, int);
extern int vsw_unset_hw(vsw_t *, vsw_port_t *, int);
extern void vsw_hio_port_reset(vsw_port_t *portp, boolean_t immediate);

/* Functions exported to other files */
void vsw_hio_init(vsw_t *vswp);
void vsw_hio_cleanup(vsw_t *vswp);
void vsw_hio_start(vsw_t *vswp, vsw_ldc_t *ldcp);
void vsw_hio_stop(vsw_t *vswp, vsw_ldc_t *ldcp);
void vsw_process_dds_msg(vsw_t *vswp, vsw_ldc_t *ldcp, void *msg);
void vsw_hio_start_ports(vsw_t *vswp);
void vsw_hio_stop_port(vsw_port_t *portp);

/* Support functions */
static void vsw_hio_free_all_shares(vsw_t *vswp, boolean_t reboot);
static vsw_share_t *vsw_hio_alloc_share(vsw_t *vswp, vsw_ldc_t *ldcp);
static void vsw_hio_free_share(vsw_share_t *vsharep);
static vsw_share_t *vsw_hio_find_free_share(vsw_t *vswp);
static vsw_share_t *vsw_hio_find_vshare_ldcid(vsw_t *vswp, uint64_t ldc_id);
static vsw_share_t *vsw_hio_find_vshare_port(vsw_t *vswp, vsw_port_t *portp);
static int vsw_send_dds_msg(vsw_ldc_t *ldcp, uint8_t dds_subclass,
    uint64_t cookie, uint64_t macaddr, uint32_t req_id);
static int vsw_send_dds_resp_msg(vsw_ldc_t *ldcp, vio_dds_msg_t *dmsg, int ack);
static int vsw_hio_send_delshare_msg(vsw_share_t *vsharep);
static int vsw_hio_bind_macaddr(vsw_share_t *vsharep);
static void vsw_hio_unbind_macaddr(vsw_share_t *vsharep);
static boolean_t vsw_hio_reboot_callb(void *arg, int code);
static boolean_t vsw_hio_panic_callb(void *arg, int code);

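/*
 * Share lifecycle, as tracked by the vs_state flags: a share starts
 * out VSW_SHARE_FREE, gains VSW_SHARE_ASSIGNED once it is bound to a
 * port/LDC, has VSW_SHARE_DDS_SENT set while an ADD_SHARE/DEL_SHARE
 * request is outstanding and VSW_SHARE_DDS_ACKD once the guest has
 * acknowledged one, and returns to VSW_SHARE_FREE when it is freed.
 */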

/*
 * vsw_hio_init -- Initialize the HybridIO-related info.
 *	- Query SHARES and RINGS capability. Both capabilities
 *	  need to be supported by the physical device.
 */
void
vsw_hio_init(vsw_t *vswp)
{
	vsw_hio_t	*hiop = &vswp->vhio;
	int		i;
	int		rv;

	D1(vswp, "%s:enter\n", __func__);
	mutex_enter(&vswp->hw_lock);
	if (vsw_hio_enabled == B_FALSE) {
		mutex_exit(&vswp->hw_lock);
		return;
	}

	vswp->hio_capable = B_FALSE;
	rv = mac_capab_get(vswp->mh, MAC_CAPAB_SHARES, &hiop->vh_scapab);
	if (rv == B_FALSE) {
		D2(vswp, "%s: %s is not HybridIO capable\n", __func__,
		    vswp->physname);
		mutex_exit(&vswp->hw_lock);
		return;
	}
	rv = mac_capab_get(vswp->mh, MAC_CAPAB_RINGS, &hiop->vh_rcapab);
	if (rv == B_FALSE) {
		DWARN(vswp, "%s: %s has no RINGS capability\n", __func__,
		    vswp->physname);
		mutex_exit(&vswp->hw_lock);
		return;
	}
	hiop->vh_num_shares = hiop->vh_scapab.ms_snum;
	hiop->vh_shares = kmem_zalloc((sizeof (vsw_share_t) *
	    hiop->vh_num_shares), KM_SLEEP);
	for (i = 0; i < hiop->vh_num_shares; i++) {
		hiop->vh_shares[i].vs_state = VSW_SHARE_FREE;
		hiop->vh_shares[i].vs_index = i;
		hiop->vh_shares[i].vs_vswp = vswp;
	}
	vswp->hio_capable = B_TRUE;

	/*
	 * Register to get reboot and panic events so that
	 * we can clean up HybridIO resources gracefully.
	 */
	vswp->hio_reboot_cb_id = callb_add(vsw_hio_reboot_callb,
	    (void *)vswp, CB_CL_MDBOOT, "vsw_hio");

	vswp->hio_panic_cb_id = callb_add(vsw_hio_panic_callb,
	    (void *)vswp, CB_CL_PANIC, "vsw_hio");

	D2(vswp, "%s: %s is HybridIO capable num_shares=%d\n", __func__,
	    vswp->physname, hiop->vh_num_shares);
	D1(vswp, "%s:exit\n", __func__);
	mutex_exit(&vswp->hw_lock);
}

/*
 * vsw_hio_alloc_share -- Allocate and set up the share for a guest domain.
 *	- Allocate a free share.
 *	- Bind the Guest's MAC address.
 */
static vsw_share_t *
vsw_hio_alloc_share(vsw_t *vswp, vsw_ldc_t *ldcp)
{
	vsw_hio_t	*hiop = &vswp->vhio;
	mac_capab_share_t *hcapab = &hiop->vh_scapab;
	vsw_share_t	*vsharep;
	vsw_port_t	*portp = ldcp->ldc_port;
	uint64_t	ldc_id = ldcp->ldc_id;
	uint32_t	rmin, rmax;
	uint64_t	rmap;
	int		rv;

	D1(vswp, "%s:enter\n", __func__);
	vsharep = vsw_hio_find_free_share(vswp);
	if (vsharep == NULL) {
		/* No free shares available */
		return (NULL);
	}
	/*
	 * Allocate a Share - it will come with rings/groups
	 * already assigned to it.
	 */
	rv = hcapab->ms_salloc(hcapab->ms_handle, ldc_id,
	    &vsharep->vs_cookie, &vsharep->vs_shdl);
	if (rv != 0) {
		D2(vswp, "Share allocation failed for ldc=0x%lx rv=%d",
		    ldc_id, rv);
		return (NULL);
	}

	/*
	 * Query the RX group number so that the port's
	 * MAC address can be bound to it.
	 */
	hcapab->ms_squery(vsharep->vs_shdl, MAC_RING_TYPE_RX,
	    &rmin, &rmax, &rmap, &vsharep->vs_gnum);
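	/*
	 * Only the RX group number is used below; ms_squery() also
	 * reports the ring range (rmin/rmax) and ring map, which are
	 * not needed here.
	 */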

	/* Cache some useful info */
	vsharep->vs_ldcid = ldcp->ldc_id;
	vsharep->vs_macaddr = vnet_macaddr_strtoul(
	    portp->p_macaddr.ether_addr_octet);
	vsharep->vs_portp = ldcp->ldc_port;

	/* Bind the Guest's MAC address */
	rv = vsw_hio_bind_macaddr(vsharep);
	if (rv != 0) {
		/* something went wrong, cleanup */
		hcapab->ms_sfree(vsharep->vs_shdl);
		return (NULL);
	}

	vsharep->vs_state |= VSW_SHARE_ASSIGNED;

	D1(vswp, "%s:exit\n", __func__);
	return (vsharep);
}

/*
 * vsw_hio_bind_macaddr -- Remove the port's MAC address from the
 *	physdev and bind it to the Share's RX group.
 */
static int
vsw_hio_bind_macaddr(vsw_share_t *vsharep)
{
	vsw_t		*vswp = vsharep->vs_vswp;
	vsw_port_t	*portp = vsharep->vs_portp;
	mac_capab_rings_t *rcapab = &vswp->vhio.vh_rcapab;
	mac_group_info_t *ginfop = &vsharep->vs_rxginfo;
	int		rv;

	/* Get the RX groupinfo */
	rcapab->mr_gget(rcapab->mr_handle, MAC_RING_TYPE_RX,
	    vsharep->vs_gnum, &vsharep->vs_rxginfo, NULL);

	/* Unset the MAC address first */
	if (portp->addr_set != VSW_ADDR_UNSET) {
		(void) vsw_unset_hw(vswp, portp, VSW_VNETPORT);
	}
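
	/*
	 * The address is removed from the physical device first,
	 * presumably so that it is never programmed into the default
	 * group and the share's RX group at the same time; on failure
	 * below it is restored to its previous state.
	 */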

	/* Bind the MAC address to the RX group */
	rv = ginfop->mrg_addmac(ginfop->mrg_driver,
	    (uint8_t *)&portp->p_macaddr.ether_addr_octet);
	if (rv != 0) {
		/* Restore the address back as it was */
		(void) vsw_set_hw(vswp, portp, VSW_VNETPORT);
		return (rv);
	}
	return (0);
}

/*
 * vsw_hio_unbind_macaddr -- Unbind the port's MAC address and restore
 *	it as it was before.
 */
static void
vsw_hio_unbind_macaddr(vsw_share_t *vsharep)
{
	vsw_t		*vswp = vsharep->vs_vswp;
	vsw_port_t	*portp = vsharep->vs_portp;
	mac_group_info_t *ginfop = &vsharep->vs_rxginfo;

	if (portp == NULL) {
		return;
	}
	/* Unbind the MAC address from the RX group */
	(void) ginfop->mrg_remmac(ginfop->mrg_driver,
	    (uint8_t *)&portp->p_macaddr.ether_addr_octet);

	/* Program the MAC address back */
	(void) vsw_set_hw(vswp, portp, VSW_VNETPORT);
}

/*
 * vsw_hio_find_free_share -- Find a free Share.
 */
static vsw_share_t *
vsw_hio_find_free_share(vsw_t *vswp)
{
	vsw_hio_t *hiop = &vswp->vhio;
	vsw_share_t *vsharep;
	int i;

	D1(vswp, "%s:enter\n", __func__);
	for (i = 0; i < hiop->vh_num_shares; i++) {
		vsharep = &hiop->vh_shares[i];
		if (vsharep->vs_state == VSW_SHARE_FREE) {
			D1(vswp, "%s:Returning free share(%d)\n",
			    __func__, vsharep->vs_index);
			return (vsharep);
		}
	}
	D1(vswp, "%s:no free share\n", __func__);
	return (NULL);
}

/*
 * vsw_hio_find_vshare_ldcid -- Given ldc_id, find the corresponding
 *	share structure.
 */
static vsw_share_t *
vsw_hio_find_vshare_ldcid(vsw_t *vswp, uint64_t ldc_id)
{
	vsw_hio_t *hiop = &vswp->vhio;
	vsw_share_t *vsharep;
	int i;

	D1(vswp, "%s:enter, ldc=0x%lx", __func__, ldc_id);
	for (i = 0; i < hiop->vh_num_shares; i++) {
		vsharep = &hiop->vh_shares[i];
		if (vsharep->vs_state == VSW_SHARE_FREE) {
			continue;
		}
		if (vsharep->vs_ldcid == ldc_id) {
			D1(vswp, "%s:returning share(%d)",
			    __func__, vsharep->vs_index);
			return (vsharep);
		}
	}
	D1(vswp, "%s:returning NULL", __func__);
	return (NULL);
}

/*
 * vsw_hio_find_vshare_port -- Given portp, find the corresponding
 *	share structure.
 */
static vsw_share_t *
vsw_hio_find_vshare_port(vsw_t *vswp, vsw_port_t *portp)
{
	vsw_hio_t *hiop = &vswp->vhio;
	vsw_share_t *vsharep;
	int i;

	D1(vswp, "%s:enter, portp=0x%p", __func__, portp);
	for (i = 0; i < hiop->vh_num_shares; i++) {
		vsharep = &hiop->vh_shares[i];
		if (vsharep->vs_state == VSW_SHARE_FREE) {
			continue;
		}
		if (vsharep->vs_portp == portp) {
			D1(vswp, "%s:returning share(%d)",
			    __func__, vsharep->vs_index);
			return (vsharep);
		}
	}
	D1(vswp, "%s:returning NULL", __func__);
	return (NULL);
}

/*
 * vsw_hio_free_share -- Unbind the MAC address and free the share.
 */
static void
vsw_hio_free_share(vsw_share_t *vsharep)
{
	vsw_t		*vswp = vsharep->vs_vswp;
	vsw_hio_t	*hiop = &vswp->vhio;
	mac_capab_share_t *hcapab = &hiop->vh_scapab;

	D1(vswp, "%s:enter\n", __func__);

	/* First unbind the MAC address and restore it back */
	vsw_hio_unbind_macaddr(vsharep);

	/* Free the share */
	hcapab->ms_sfree(vsharep->vs_shdl);
	vsharep->vs_state = VSW_SHARE_FREE;

	/* DERR is used here only so that the message prints by default */
	DERR(vswp, "Share freed for ldc_id=0x%lx Cookie=0x%lX",
	    vsharep->vs_ldcid, vsharep->vs_cookie);
	D1(vswp, "%s:exit\n", __func__);
}


/*
 * vsw_hio_cleanup -- Clean up HybridIO. It unregisters the reboot and
 *	panic callbacks and frees all shares.
 */
void
vsw_hio_cleanup(vsw_t *vswp)
{
	D1(vswp, "%s:enter\n", __func__);

	/* Unregister reboot and panic callbacks. */
	if (vswp->hio_reboot_cb_id) {
		(void) callb_delete(vswp->hio_reboot_cb_id);
		vswp->hio_reboot_cb_id = 0;
	}
	if (vswp->hio_panic_cb_id) {
		(void) callb_delete(vswp->hio_panic_cb_id);
		vswp->hio_panic_cb_id = 0;
	}
	vsw_hio_free_all_shares(vswp, B_FALSE);
	D1(vswp, "%s:exit\n", __func__);
}

/*
 * vsw_hio_free_all_shares -- A routine to free all shares gracefully.
 *	The following steps are followed to accomplish this:
 *
 *	- First clear 'hio_capable' to avoid further share allocations.
 *	- If a share is in the accepted (ACKD) state, the guest has
 *	  HybridIO set up, so send a DEL_SHARE message and give some
 *	  time (delay) for the guest to ACK.
 *	- If the share is in another state, give it some time to
 *	  transition to the ACKD state, then try the above.
 *	- After the maximum number of retries, reset the ports to force
 *	  the shares to be freed. Give a little delay for the LDC reset
 *	  code to free the share.
 */
static void
vsw_hio_free_all_shares(vsw_t *vswp, boolean_t reboot)
{
	vsw_hio_t	*hiop = &vswp->vhio;
	vsw_port_list_t	*plist = &vswp->plist;
	vsw_share_t	*vsharep;
	int		free_shares = 0;
	int		max_retries = vsw_hio_max_cleanup_retries;
	int		i;

	D1(vswp, "%s:enter\n", __func__);

	/*
	 * Acquire plist->lockrw to make the locking a bit easier
	 * and to keep the ports in a stable state while we are
	 * cleaning up HybridIO.
	 */
	READ_ENTER(&plist->lockrw);
	mutex_enter(&vswp->hw_lock);
	/*
	 * First clear the hio_capable flag so that no more
	 * HybridIO operations are initiated.
	 */
	vswp->hio_capable = B_FALSE;

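	/*
	 * Each pass of this loop counts the free shares, sends a
	 * DEL_SHARE for any share the guest has already ACKed, and on
	 * the last retry resets the owning port. The lock is dropped
	 * between passes so that DEL_SHARE replies can be processed
	 * and shares freed.
	 */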
	do {
		free_shares = 0;
		for (i = 0; i < hiop->vh_num_shares; i++) {
			vsharep = &hiop->vh_shares[i];
			if (vsharep->vs_state == VSW_SHARE_FREE) {
				free_shares++;
				continue;
			}
			/*
			 * If the share is in DDS_ACKD state, then
			 * send a DEL_SHARE message so that the guest
			 * can release its Hybrid resource.
			 */
			if (vsharep->vs_state & VSW_SHARE_DDS_ACKD) {
				int rv;

				/* send DDS_DEL_SHARE */
				D1(vswp, "%s:sending DEL_SHARE msg for "
				    "share(%d)", __func__, vsharep->vs_index);
				rv = vsw_hio_send_delshare_msg(vsharep);
				if (rv != 0) {
					/*
					 * No alternative, reset the port
					 * to force the release of Hybrid
					 * resources.
					 */
					vsw_hio_port_reset(vsharep->vs_portp,
					    B_FALSE);
				}
			}
			if (max_retries == 1) {
				/*
				 * Last retry, reset the port.
				 * If it is the reboot case, issue an
				 * immediate reset.
				 */
				DWARN(vswp, "%s:All retries failed, "
				    "cause a reset to trigger cleanup for "
				    "share(%d)", __func__, vsharep->vs_index);
				vsw_hio_port_reset(vsharep->vs_portp, reboot);
			}
		}
		if (free_shares == hiop->vh_num_shares) {
			/* Cleanup is done */
			break;
		}
		/*
		 * Release the lock so that replies to DEL_SHARE
		 * messages arrive and get processed, that is, shares
		 * get freed.
		 * This delay is also needed for the port reset to
		 * release the Hybrid resource.
		 */
		mutex_exit(&vswp->hw_lock);
		delay(drv_usectohz(vsw_hio_cleanup_delay));
		mutex_enter(&vswp->hw_lock);
		max_retries--;
	} while ((free_shares < hiop->vh_num_shares) && (max_retries > 0));

	/* By now, all shares should be freed */
	if (free_shares != hiop->vh_num_shares) {
		if (reboot == B_FALSE) {
			cmn_err(CE_NOTE, "vsw%d: All physical resources "
			    "could not be freed", vswp->instance);
		}
	}

	kmem_free(hiop->vh_shares, sizeof (vsw_share_t) * hiop->vh_num_shares);
	hiop->vh_shares = NULL;
	hiop->vh_num_shares = 0;
	mutex_exit(&vswp->hw_lock);
	RW_EXIT(&plist->lockrw);
	D1(vswp, "%s:exit\n", __func__);
}

/*
 * vsw_hio_start_ports -- Start HybridIO for ports that had
 *	already established a connection before HybridIO was initialized.
 */
void
vsw_hio_start_ports(vsw_t *vswp)
{
	vsw_port_list_t	*plist = &vswp->plist;
	vsw_port_t	*portp;
	vsw_share_t	*vsharep;
	boolean_t	reset;

	if (vswp->hio_capable == B_FALSE) {
		return;
	}
	READ_ENTER(&plist->lockrw);
	for (portp = plist->head; portp != NULL; portp = portp->p_next) {
		if ((portp->p_hio_enabled == B_FALSE) ||
		    (portp->p_hio_capable == B_FALSE)) {
			continue;
		}

		reset = B_FALSE;
		mutex_enter(&vswp->hw_lock);
		vsharep = vsw_hio_find_vshare_port(vswp, portp);
		if (vsharep == NULL) {
			reset = B_TRUE;
		}
		mutex_exit(&vswp->hw_lock);

		if (reset == B_TRUE) {
			/* Cause a reset to trigger HybridIO setup */
			vsw_hio_port_reset(portp, B_FALSE);
		}
	}
	RW_EXIT(&plist->lockrw);
}

/*
 * vsw_hio_start -- Start HybridIO for a guest (given LDC).
 */
void
vsw_hio_start(vsw_t *vswp, vsw_ldc_t *ldcp)
{
	vsw_share_t	*vsharep;
	uint32_t	req_id;
	int		rv;

	D1(vswp, "%s:enter ldc=0x%lx", __func__, ldcp->ldc_id);
	mutex_enter(&vswp->hw_lock);
	if (vswp->hio_capable == B_FALSE) {
		mutex_exit(&vswp->hw_lock);
		D2(vswp, "%s:not HIO capable", __func__);
		return;
	}

	/* Verify whether a share was already allocated */
	vsharep = vsw_hio_find_vshare_ldcid(vswp, ldcp->ldc_id);
	if (vsharep != NULL) {
		mutex_exit(&vswp->hw_lock);
		D2(vswp, "%s:Share already allocated to ldc=0x%lx",
		    __func__, ldcp->ldc_id);
		return;
	}
	vsharep = vsw_hio_alloc_share(vswp, ldcp);
	if (vsharep == NULL) {
		mutex_exit(&vswp->hw_lock);
		D2(vswp, "%s: no Share available for ldc=0x%lx",
		    __func__, ldcp->ldc_id);
		return;
	}
	req_id = VSW_DDS_NEXT_REQID(vsharep);
	rv = vsw_send_dds_msg(ldcp, DDS_VNET_ADD_SHARE, vsharep->vs_cookie,
	    vsharep->vs_macaddr, req_id);
	if (rv != 0) {
		/*
		 * Failed to send the DDS message, so clean up now.
		 */
		vsw_hio_free_share(vsharep);
		mutex_exit(&vswp->hw_lock);
		return;
	}
	vsharep->vs_state &= ~VSW_SHARE_DDS_ACKD;
	vsharep->vs_state |= VSW_SHARE_DDS_SENT;
	mutex_exit(&vswp->hw_lock);

	/* DERR is used here only so that the message prints by default */
	DERR(vswp, "Share allocated for ldc_id=0x%lx Cookie=0x%lX",
	    ldcp->ldc_id, vsharep->vs_cookie);

	D1(vswp, "%s:exit ldc=0x%lx", __func__, ldcp->ldc_id);
}

/*
 * vsw_hio_stop -- Stop/clean up the HybridIO config for a guest (given LDC).
 */
void
vsw_hio_stop(vsw_t *vswp, vsw_ldc_t *ldcp)
{
	vsw_share_t *vsharep;

	D1(vswp, "%s:enter ldc=0x%lx", __func__, ldcp->ldc_id);

	mutex_enter(&vswp->hw_lock);
	vsharep = vsw_hio_find_vshare_ldcid(vswp, ldcp->ldc_id);
	if (vsharep == NULL) {
		D1(vswp, "%s:no share found for ldc=0x%lx",
		    __func__, ldcp->ldc_id);
		mutex_exit(&vswp->hw_lock);
		return;
	}
	vsw_hio_free_share(vsharep);
	mutex_exit(&vswp->hw_lock);

	D1(vswp, "%s:exit ldc=0x%lx", __func__, ldcp->ldc_id);
}

/*
 * vsw_hio_send_delshare_msg -- Send a DEL_SHARE message to the guest.
 */
static int
vsw_hio_send_delshare_msg(vsw_share_t *vsharep)
{
	vsw_t *vswp = vsharep->vs_vswp;
	vsw_port_t *portp;
	vsw_ldc_list_t	*ldcl;
	vsw_ldc_t	*ldcp;
	uint32_t	req_id;
	uint64_t	cookie = vsharep->vs_cookie;
	uint64_t	macaddr = vsharep->vs_macaddr;
	int		rv;

	ASSERT(MUTEX_HELD(&vswp->hw_lock));
	mutex_exit(&vswp->hw_lock);
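
	/*
	 * hw_lock is dropped while the LDC list lock is taken and the
	 * message is sent, presumably to preserve lock ordering; the
	 * share state is updated only after hw_lock is re-acquired.
	 */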

	portp = vsharep->vs_portp;
	if (portp == NULL) {
		mutex_enter(&vswp->hw_lock);
		return (0);
	}

	ldcl = &portp->p_ldclist;
	READ_ENTER(&ldcl->lockrw);
	ldcp = ldcl->head;
	if ((ldcp == NULL) || (ldcp->ldc_id != vsharep->vs_ldcid)) {
		RW_EXIT(&ldcl->lockrw);
		mutex_enter(&vswp->hw_lock);
		return (0);
	}
	req_id = VSW_DDS_NEXT_REQID(vsharep);
	rv = vsw_send_dds_msg(ldcp, DDS_VNET_DEL_SHARE,
	    cookie, macaddr, req_id);

	RW_EXIT(&ldcl->lockrw);
	mutex_enter(&vswp->hw_lock);
	if (rv == 0) {
		vsharep->vs_state &= ~VSW_SHARE_DDS_ACKD;
		vsharep->vs_state |= VSW_SHARE_DDS_SENT;
	}
	return (rv);
}

/*
 * vsw_send_dds_msg -- Send a DDS message.
 */
static int
vsw_send_dds_msg(vsw_ldc_t *ldcp, uint8_t dds_subclass, uint64_t cookie,
    uint64_t macaddr, uint32_t req_id)
{
	vsw_t *vswp = ldcp->ldc_port->p_vswp;
	vio_dds_msg_t	vmsg;
	dds_share_msg_t	*smsg = &vmsg.msg.share_msg;
	int rv;

	D1(vswp, "%s:enter\n", __func__);
	vmsg.tag.vio_msgtype = VIO_TYPE_CTRL;
	vmsg.tag.vio_subtype = VIO_SUBTYPE_INFO;
	vmsg.tag.vio_subtype_env = VIO_DDS_INFO;
	vmsg.tag.vio_sid = ldcp->local_session;
	vmsg.dds_class = DDS_VNET_NIU;
	vmsg.dds_subclass = dds_subclass;
	vmsg.dds_req_id = req_id;
	smsg->macaddr = macaddr;
	smsg->cookie = cookie;
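	/*
	 * The final B_FALSE is assumed to be vsw_send_msg()'s
	 * "handle reset on failure" flag, leaving any send failure
	 * for the caller to deal with.
	 */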
	rv = vsw_send_msg(ldcp, &vmsg, sizeof (vmsg), B_FALSE);
	D1(vswp, "%s:exit rv=%d\n", __func__, rv);
	return (rv);
}

/*
 * vsw_process_dds_msg -- Process a DDS message received from a guest.
 */
void
vsw_process_dds_msg(vsw_t *vswp, vsw_ldc_t *ldcp, void *msg)
{
	vsw_share_t	*vsharep;
	vio_dds_msg_t	*dmsg = msg;

	D1(vswp, "%s:enter ldc=0x%lx\n", __func__, ldcp->ldc_id);
	if (dmsg->dds_class != DDS_VNET_NIU) {
		/* discard */
		return;
	}
	mutex_enter(&vswp->hw_lock);
	/*
	 * We expect to receive DDS messages only from guests that
	 * have HybridIO started.
	 */
	vsharep = vsw_hio_find_vshare_ldcid(vswp, ldcp->ldc_id);
	if (vsharep == NULL) {
		mutex_exit(&vswp->hw_lock);
		return;
	}

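	/*
	 * ADD_SHARE and DEL_SHARE arrive here as responses to requests
	 * sent earlier, so they are validated against the DDS_SENT
	 * state and the outstanding request ID; REL_SHARE is initiated
	 * by the guest and is acknowledged and acted on directly.
	 */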
	switch (dmsg->dds_subclass) {
	case DDS_VNET_ADD_SHARE:
		/* A response to an ADD_SHARE message. */
		D1(vswp, "%s:DDS_VNET_ADD_SHARE\n", __func__);
		if (!(vsharep->vs_state & VSW_SHARE_DDS_SENT)) {
			DWARN(vswp, "%s: invalid ADD_SHARE response message "
			    "share state=0x%X", __func__, vsharep->vs_state);
			break;
		}

		if (dmsg->dds_req_id != vsharep->vs_req_id) {
			DWARN(vswp, "%s: invalid req_id in ADD_SHARE response"
			    " message req_id=0x%X share's req_id=0x%X",
			    __func__, dmsg->dds_req_id, vsharep->vs_req_id);
			break;
		}

		if (dmsg->tag.vio_subtype == VIO_SUBTYPE_NACK) {
			DWARN(vswp, "%s: NACK received for ADD_SHARE"
			    " message ldcid=0x%lx", __func__, ldcp->ldc_id);
			/* cleanup for NACK */
			vsw_hio_free_share(vsharep);
		} else {
			D2(vswp, "%s: ACK received for ADD_SHARE", __func__);
			vsharep->vs_state &= ~VSW_SHARE_DDS_SENT;
			vsharep->vs_state |= VSW_SHARE_DDS_ACKD;
		}
		break;

	case DDS_VNET_DEL_SHARE:
		/* A response to a DEL_SHARE message */
		D1(vswp, "%s:DDS_VNET_DEL_SHARE\n", __func__);
		if (!(vsharep->vs_state & VSW_SHARE_DDS_SENT)) {
			DWARN(vswp, "%s: invalid DEL_SHARE response message "
			    "share state=0x%X", __func__, vsharep->vs_state);
			break;
		}

		if (dmsg->dds_req_id != vsharep->vs_req_id) {
			DWARN(vswp, "%s: invalid req_id in DEL_SHARE response"
			    " message req_id=0x%X share's req_id=0x%X",
			    __func__, dmsg->dds_req_id, vsharep->vs_req_id);
			break;
		}
		if (dmsg->tag.vio_subtype == VIO_SUBTYPE_NACK) {
			DWARN(vswp, "%s: NACK received for DEL_SHARE",
			    __func__);
		}

		/* There is nothing more we can do, so free the share now */
		vsw_hio_free_share(vsharep);
		break;

	case DDS_VNET_REL_SHARE:
		/* The guest has released the Share voluntarily; free it now */
		D1(vswp, "%s:DDS_VNET_REL_SHARE\n", __func__);
		/* send ACK */
		(void) vsw_send_dds_resp_msg(ldcp, dmsg, B_TRUE);
		vsw_hio_free_share(vsharep);
		break;
	default:
		DERR(vswp, "%s: Invalid DDS message type=0x%X",
		    __func__, dmsg->dds_subclass);
		break;
	}
	mutex_exit(&vswp->hw_lock);
	D1(vswp, "%s:exit ldc=0x%lx\n", __func__, ldcp->ldc_id);
}

/*
 * vsw_send_dds_resp_msg -- Send a DDS response message.
 */
static int
vsw_send_dds_resp_msg(vsw_ldc_t *ldcp, vio_dds_msg_t *dmsg, int ack)
{
	vsw_t	*vswp = ldcp->ldc_port->p_vswp;
	int	rv;

	D1(vswp, "%s:enter\n", __func__);
	if (ack == B_TRUE) {
		dmsg->tag.vio_subtype = VIO_SUBTYPE_ACK;
		dmsg->msg.share_resp_msg.status = DDS_VNET_SUCCESS;
	} else {
		dmsg->tag.vio_subtype = VIO_SUBTYPE_NACK;
		dmsg->msg.share_resp_msg.status = DDS_VNET_FAIL;
	}
	rv = vsw_send_msg(ldcp, dmsg, sizeof (vio_dds_msg_t), B_FALSE);
	D1(vswp, "%s:exit rv=%d\n", __func__, rv);
	return (rv);
}

/*
 * vsw_hio_port_update -- Update the Hybrid mode for a port.
 */
void
vsw_hio_port_update(vsw_port_t *portp, boolean_t hio_enabled)
{
	/* Verify whether the mode really changed */
	if (portp->p_hio_enabled == hio_enabled) {
		return;
	}

	if (hio_enabled == B_FALSE) {
		/* Hybrid mode is disabled, so stop HybridIO */
		vsw_hio_stop_port(portp);
		portp->p_hio_enabled = B_FALSE;
	} else {
		portp->p_hio_enabled = B_TRUE;
		/* Reset the port to initiate HybridIO setup */
		vsw_hio_port_reset(portp, B_FALSE);
	}
}

/*
 * vsw_hio_stop_port -- Stop HybridIO for a given port. The sequence
 *	followed is similar to vsw_hio_free_all_shares().
 */
void
vsw_hio_stop_port(vsw_port_t *portp)
{
	vsw_t *vswp = portp->p_vswp;
	vsw_share_t *vsharep;
	int max_retries = vsw_hio_max_cleanup_retries;

	D1(vswp, "%s:enter\n", __func__);
	mutex_enter(&vswp->hw_lock);

	if (vswp->hio_capable == B_FALSE) {
		mutex_exit(&vswp->hw_lock);
		return;
	}

	vsharep = vsw_hio_find_vshare_port(vswp, portp);
	if (vsharep == NULL) {
		mutex_exit(&vswp->hw_lock);
		return;
	}

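	/*
	 * This mirrors the retry loop in vsw_hio_free_all_shares(),
	 * but for a single share. The share can be freed or moved to
	 * another port while hw_lock is dropped, hence the re-checks
	 * of vs_portp and vs_state.
	 */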
	do {
		if (vsharep->vs_state & VSW_SHARE_DDS_ACKD) {
			int rv;

			/* send DDS_DEL_SHARE */
			D1(vswp, "%s:sending DEL_SHARE msg for "
			    "share(%d)", __func__, vsharep->vs_index);
			rv = vsw_hio_send_delshare_msg(vsharep);
			if (rv != 0) {
				/*
				 * Cause a port reset to trigger
				 * cleanup.
				 */
				vsw_hio_port_reset(vsharep->vs_portp, B_FALSE);
			}
		}
		if (max_retries == 1) {
			/* last retry */
			DWARN(vswp, "%s:All retries failed, "
			    "cause a reset to trigger cleanup for "
			    "share(%d)", __func__, vsharep->vs_index);
			vsw_hio_port_reset(vsharep->vs_portp, B_FALSE);
		}

		/* Check whether the share is still assigned to this port */
		if ((vsharep->vs_portp != portp) ||
		    (vsharep->vs_state == VSW_SHARE_FREE)) {
			break;
		}

		/*
		 * Release the lock so that replies to DEL_SHARE
		 * messages arrive and get processed, that is, shares
		 * get freed.
		 */
		mutex_exit(&vswp->hw_lock);
		delay(drv_usectohz(vsw_hio_cleanup_delay));
		mutex_enter(&vswp->hw_lock);

		/* Check whether the share is still assigned to this port */
		if ((vsharep->vs_portp != portp) ||
		    (vsharep->vs_state == VSW_SHARE_FREE)) {
			break;
		}
		max_retries--;
	} while ((vsharep->vs_state != VSW_SHARE_FREE) && (max_retries > 0));

	mutex_exit(&vswp->hw_lock);
	D1(vswp, "%s:exit\n", __func__);
}

/*
 * vsw_hio_reset_all -- Resets all ports that have shares allocated.
 *	It is called only in the panic code path, so the LDC channels
 *	are reset immediately.
 */
static void
vsw_hio_reset_all(vsw_t *vswp)
{
	vsw_hio_t	*hiop = &vswp->vhio;
	vsw_share_t	*vsharep;
	int		i;

	D1(vswp, "%s:enter\n", __func__);

	if (vswp->hio_capable != B_TRUE)
		return;

	for (i = 0; i < hiop->vh_num_shares; i++) {
		vsharep = &hiop->vh_shares[i];
		if (vsharep->vs_state == VSW_SHARE_FREE) {
			continue;
		}
		/*
		 * Reset the port with the immediate flag enabled,
		 * to cause an LDC reset immediately.
		 */
		vsw_hio_port_reset(vsharep->vs_portp, B_TRUE);
	}
	D1(vswp, "%s:exit\n", __func__);
}

/*
 * vsw_hio_reboot_callb -- Called on a reboot event. It tries to
 *	free all currently allocated shares.
 */
/* ARGSUSED */
static boolean_t
vsw_hio_reboot_callb(void *arg, int code)
{
	vsw_t *vswp = arg;

	D1(vswp, "%s:enter\n", __func__);
	vsw_hio_free_all_shares(vswp, B_TRUE);
	D1(vswp, "%s:exit\n", __func__);
	return (B_TRUE);
}

/*
 * vsw_hio_panic_callb -- Called on a panic event. It resets all
 *	the ports that have shares allocated. This is done to
 *	trigger the cleanup in the guest ahead of the HV reset.
 */
/* ARGSUSED */
static boolean_t
vsw_hio_panic_callb(void *arg, int code)
{
	vsw_t *vswp = arg;

	D1(vswp, "%s:enter\n", __func__);
	vsw_hio_reset_all(vswp);
	D1(vswp, "%s:exit\n", __func__);
	return (B_TRUE);
}