xref: /titanic_41/usr/src/uts/common/io/nxge/nxge_hio.c (revision 635216b673cf196ac523ff2a7ab715717e553292)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio.c
29  *
30  * This file manages the virtualization resources for Neptune
31  * devices.  That is, it implements a hybrid I/O (HIO) approach in the
32  * Solaris kernel, whereby a guest domain on an LDOMs server may
33  * request & use hardware resources from the service domain.
34  *
35  */
36 
37 #include <sys/mac_provider.h>
38 #include <sys/nxge/nxge_impl.h>
39 #include <sys/nxge/nxge_fzc.h>
40 #include <sys/nxge/nxge_rxdma.h>
41 #include <sys/nxge/nxge_txdma.h>
42 #include <sys/nxge/nxge_hio.h>
43 
44 /*
45  * External prototypes
46  */
47 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
48 
49 /* The following functions may be found in nxge_main.c */
50 extern int nxge_m_mmac_remove(void *arg, int slot);
51 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
52 	boolean_t usetbl);
53 
54 /* The following functions may be found in nxge_[t|r]xdma.c */
55 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
56 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
57 
58 /*
59  * Local prototypes
60  */
61 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
62 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
63 static void nxge_grp_dc_map(nxge_grp_t *group);
64 
65 /*
66  * These functions are used by both service & guest domains to
67  * decide whether they're running in an LDOMs/XEN environment
68  * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
69  */
70 
71 /*
72  * nxge_get_environs
73  *
74  *	Figure out if we are in a guest domain or not.
75  *
76  * Arguments:
77  * 	nxge
78  *
79  * Notes:
80  *
81  * Context:
82  *	Any domain
83  */
84 void
85 nxge_get_environs(
86 	nxge_t *nxge)
87 {
88 	char *string;
89 
90 	/*
91 	 * In the beginning, assume that we are running sans LDOMs/XEN.
92 	 */
93 	nxge->environs = SOLARIS_DOMAIN;
94 
95 	/*
96 	 * Are we a hybrid I/O (HIO) guest domain driver?
97 	 */
98 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
99 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
100 	    "niutype", &string)) == DDI_PROP_SUCCESS) {
101 		if (strcmp(string, "n2niu") == 0) {
102 			nxge->environs = SOLARIS_GUEST_DOMAIN;
103 			/* So we can allocate properly-aligned memory. */
104 			nxge->niu_type = N2_NIU;
105 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
106 			    "Hybrid IO-capable guest domain"));
107 		}
108 		ddi_prop_free(string);
109 	}
110 }
111 
112 #if !defined(sun4v)
113 
114 /*
115  * nxge_hio_init
116  *
117  *	Initialize the HIO module of the NXGE driver.
118  *
119  * Arguments:
120  * 	nxge
121  *
122  * Notes:
123  *	This is the non-hybrid I/O version of this function.
124  *
125  * Context:
126  *	Any domain
127  */
128 int
129 nxge_hio_init(nxge_t *nxge)
130 {
131 	nxge_hio_data_t *nhd;
132 	int i;
133 
134 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
135 	if (nhd == NULL) {
136 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
137 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
138 		nhd->type = NXGE_HIO_TYPE_SERVICE;
139 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
140 	}
141 
142 	/*
143 	 * Initialize share and ring group structures.
144 	 */
145 	for (i = 0; i < NXGE_MAX_TDCS; i++)
146 		nxge->tdc_is_shared[i] = B_FALSE;
147 
148 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
149 		nxge->tx_hio_groups[i].ghandle = NULL;
150 		nxge->tx_hio_groups[i].nxgep = nxge;
151 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
152 		nxge->tx_hio_groups[i].gindex = 0;
153 		nxge->tx_hio_groups[i].sindex = 0;
154 	}
155 
156 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
157 		nxge->rx_hio_groups[i].ghandle = NULL;
158 		nxge->rx_hio_groups[i].nxgep = nxge;
159 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
160 		nxge->rx_hio_groups[i].gindex = 0;
161 		nxge->rx_hio_groups[i].sindex = 0;
162 		nxge->rx_hio_groups[i].started = B_FALSE;
163 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
164 		nxge->rx_hio_groups[i].rdctbl = -1;
165 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
166 	}
167 
168 	nhd->hio.ldoms = B_FALSE;
169 
170 	return (NXGE_OK);
171 }
172 
173 #endif
174 
175 void
176 nxge_hio_uninit(nxge_t *nxge)
177 {
178 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
179 
180 	ASSERT(nxge->nxge_hw_p->ndevs == 0);
181 
182 	if (nhd != NULL) {
183 		MUTEX_DESTROY(&nhd->lock);
184 		KMEM_FREE(nhd, sizeof (*nhd));
185 		nxge->nxge_hw_p->hio = 0;
186 	}
187 }
188 
189 /*
190  * nxge_dci_map
191  *
192  *	Map a DMA channel index to a channel number.
193  *
194  * Arguments:
195  * 	instance	The instance number of the driver.
196  * 	type		The type of channel this is: Tx or Rx.
197  * 	index		The index to convert to a channel number
198  *
199  * Notes:
200  *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
201  *
202  * Context:
203  *	Any domain
204  */
205 int
206 nxge_dci_map(
207 	nxge_t *nxge,
208 	vpc_type_t type,
209 	int index)
210 {
211 	nxge_grp_set_t *set;
212 	int dc;
213 
214 	switch (type) {
215 	case VP_BOUND_TX:
216 		set = &nxge->tx_set;
217 		break;
218 	case VP_BOUND_RX:
219 		set = &nxge->rx_set;
220 		break;
221 	}
222 
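	/*
	 * Walk the ownership bitmap and return the channel number of the
	 * index-th DMA channel owned by this instance; e.g., with
	 * owned.map == 0x0c, index 0 maps to channel 2 and index 1 to
	 * channel 3.  Note that the scan bound is NXGE_MAX_TDCS for both
	 * Tx and Rx.
	 */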
223 	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
224 		if ((1 << dc) & set->owned.map) {
225 			if (index == 0)
226 				return (dc);
227 			else
228 				index--;
229 		}
230 	}
231 
232 	return (-1);
233 }
234 
235 /*
236  * ---------------------------------------------------------------------
237  * These are the general-purpose DMA channel group functions.  That is,
238  * these functions are used to manage groups of TDCs or RDCs in an HIO
239  * environment.
240  *
241  * But it is also expected that in the future they will be able to manage
242  * Crossbow groups.
243  * ---------------------------------------------------------------------
244  */
245 
246 /*
247  * nxge_grp_cleanup(p_nxge_t nxge)
248  *
249  *	Remove all outstanding groups.
250  *
251  * Arguments:
252  *	nxge
253  */
254 void
255 nxge_grp_cleanup(p_nxge_t nxge)
256 {
257 	nxge_grp_set_t *set;
258 	int i;
259 
260 	MUTEX_ENTER(&nxge->group_lock);
261 
262 	/*
263 	 * Find RX groups that need to be cleaned up.
264 	 */
265 	set = &nxge->rx_set;
266 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
267 		if (set->group[i] != NULL) {
268 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
269 			set->group[i] = NULL;
270 		}
271 	}
272 
273 	/*
274 	 * Find TX groups that need to be cleaned up.
275 	 */
276 	set = &nxge->tx_set;
277 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
278 		if (set->group[i] != NULL) {
279 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
280 			set->group[i] = NULL;
281 		}
282 	}
283 	MUTEX_EXIT(&nxge->group_lock);
284 }
285 
286 
287 /*
288  * nxge_grp_add
289  *
290  *	Add a group to an instance of NXGE.
291  *
292  * Arguments:
293  * 	nxge
294  * 	type	Tx or Rx
295  *
296  * Notes:
297  *
298  * Context:
299  *	Any domain
300  */
301 nxge_grp_t *
302 nxge_grp_add(
303 	nxge_t *nxge,
304 	nxge_grp_type_t type)
305 {
306 	nxge_grp_set_t *set;
307 	nxge_grp_t *group;
308 	int i;
309 
310 	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
311 	group->nxge = nxge;
312 
313 	MUTEX_ENTER(&nxge->group_lock);
314 	switch (type) {
315 	case NXGE_TRANSMIT_GROUP:
316 	case EXT_TRANSMIT_GROUP:
317 		set = &nxge->tx_set;
318 		break;
319 	default:
320 		set = &nxge->rx_set;
321 		break;
322 	}
323 
324 	group->type = type;
325 	group->active = B_TRUE;
326 	group->sequence = set->sequence++;
327 
328 	/* Find an empty slot for this logical group. */
329 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
330 		if (set->group[i] == 0) {
331 			group->index = i;
332 			set->group[i] = group;
333 			NXGE_DC_SET(set->lg.map, i);
334 			set->lg.count++;
335 			break;
336 		}
337 	}
338 	MUTEX_EXIT(&nxge->group_lock);
339 
340 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
341 	    "nxge_grp_add: %cgroup = %d.%d",
342 	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
343 	    nxge->mac.portnum, group->sequence));
344 
345 	return (group);
346 }
347 
348 void
349 nxge_grp_remove(
350 	nxge_t *nxge,
351 	nxge_grp_t *group)	/* The group to remove. */
352 {
353 	nxge_grp_set_t *set;
354 	vpc_type_t type;
355 
356 	MUTEX_ENTER(&nxge->group_lock);
357 	switch (group->type) {
358 	case NXGE_TRANSMIT_GROUP:
359 	case EXT_TRANSMIT_GROUP:
360 		set = &nxge->tx_set;
361 		break;
362 	default:
363 		set = &nxge->rx_set;
364 		break;
365 	}
366 
367 	if (set->group[group->index] != group) {
368 		MUTEX_EXIT(&nxge->group_lock);
369 		return;
370 	}
371 
372 	set->group[group->index] = 0;
373 	NXGE_DC_RESET(set->lg.map, group->index);
374 	set->lg.count--;
375 
376 	/* While inside the mutex, deactivate <group>. */
377 	group->active = B_FALSE;
378 
379 	MUTEX_EXIT(&nxge->group_lock);
380 
381 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
382 	    "nxge_grp_remove(%c.%d.%d) called",
383 	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
384 	    nxge->mac.portnum, group->sequence));
385 
386 	/* Now, remove any DCs which are still active. */
387 	switch (group->type) {
388 	default:
389 		type = VP_BOUND_TX;
390 		break;
391 	case NXGE_RECEIVE_GROUP:
392 	case EXT_RECEIVE_GROUP:
393 		type = VP_BOUND_RX;
394 	}
395 
396 	while (group->dc) {
397 		nxge_grp_dc_remove(nxge, type, group->dc->channel);
398 	}
399 
400 	KMEM_FREE(group, sizeof (*group));
401 }
402 
403 /*
404  * nxge_grp_dc_add
405  *
406  *	Add a DMA channel to a VR/Group.
407  *
408  * Arguments:
409  * 	nxge
410  * 	group	The group to add <channel> to.
 * 	type	Rx or Tx.
 * 	channel	The channel to add.
411  * Notes:
412  *
413  * Context:
414  *	Any domain
415  */
416 /* ARGSUSED */
417 int
418 nxge_grp_dc_add(
419 	nxge_t *nxge,
420 	nxge_grp_t *group,	/* The group to add <channel> to. */
421 	vpc_type_t type,	/* Rx or Tx */
422 	int channel)		/* A physical/logical channel number */
423 {
424 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
425 	nxge_hio_dc_t *dc;
426 	nxge_grp_set_t *set;
427 	nxge_status_t status = NXGE_OK;
428 
429 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
430 
431 	if (group == 0)
432 		return (0);
433 
434 	switch (type) {
435 	case VP_BOUND_TX:
436 		set = &nxge->tx_set;
437 		if (channel >= NXGE_MAX_TDCS) {
438 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
439 			    "nxge_grp_dc_add: TDC = %d", channel));
440 			return (NXGE_ERROR);
441 		}
442 		break;
443 	case VP_BOUND_RX:
444 		set = &nxge->rx_set;
445 		if (channel >= NXGE_MAX_RDCS) {
446 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
447 			    "nxge_grp_dc_add: RDC = %d", channel));
448 			return (NXGE_ERROR);
449 		}
450 		break;
451 
452 	default:
453 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
454 		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
455 	}
456 
457 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
458 	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
459 	    type == VP_BOUND_TX ? 't' : 'r',
460 	    nxge->mac.portnum, group->sequence, group->count, channel));
461 
462 	MUTEX_ENTER(&nxge->group_lock);
463 	if (group->active != B_TRUE) {
464 		/* We may be in the process of removing this group. */
465 		MUTEX_EXIT(&nxge->group_lock);
466 		return (NXGE_ERROR);
467 	}
468 	MUTEX_EXIT(&nxge->group_lock);
469 
470 	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
471 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
472 		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
473 		return (NXGE_ERROR);
474 	}
475 
476 	MUTEX_ENTER(&nhd->lock);
477 
478 	if (dc->group) {
479 		MUTEX_EXIT(&nhd->lock);
480 		/* This channel is already in use! */
481 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
482 		    "nxge_grp_dc_add(%d): channel already in group", channel));
483 		return (NXGE_ERROR);
484 	}
485 
486 	dc->next = 0;
487 	dc->page = channel;
488 	dc->channel = (nxge_channel_t)channel;
489 
490 	dc->type = type;
491 	if (type == VP_BOUND_RX) {
492 		dc->init = nxge_init_rxdma_channel;
493 		dc->uninit = nxge_uninit_rxdma_channel;
494 	} else {
495 		dc->init = nxge_init_txdma_channel;
496 		dc->uninit = nxge_uninit_txdma_channel;
497 	}
498 
499 	dc->group = group;
500 
501 	if (isLDOMguest(nxge))
502 		(void) nxge_hio_ldsv_add(nxge, dc);
503 
504 	NXGE_DC_SET(set->owned.map, channel);
505 	set->owned.count++;
506 
507 	MUTEX_EXIT(&nhd->lock);
508 
509 	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
510 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
511 		    "nxge_grp_dc_add(%d): channel init failed", channel));
512 		MUTEX_ENTER(&nhd->lock);
513 		(void) memset(dc, 0, sizeof (*dc));
514 		NXGE_DC_RESET(set->owned.map, channel);
515 		set->owned.count--;
516 		MUTEX_EXIT(&nhd->lock);
517 		return (NXGE_ERROR);
518 	}
519 
520 	nxge_grp_dc_append(nxge, group, dc);
521 
522 	if (type == VP_BOUND_TX) {
523 		MUTEX_ENTER(&nhd->lock);
524 		nxge->tdc_is_shared[channel] = B_FALSE;
525 		MUTEX_EXIT(&nhd->lock);
526 	}
527 
528 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
529 
530 	return ((int)status);
531 }
532 
533 void
534 nxge_grp_dc_remove(
535 	nxge_t *nxge,
536 	vpc_type_t type,
537 	int channel)
538 {
539 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
540 	nxge_hio_dc_t *dc;
541 	nxge_grp_set_t *set;
542 	nxge_grp_t *group;
543 
544 	dc_uninit_t uninit;
545 
546 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
547 
548 	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
549 		goto nxge_grp_dc_remove_exit;
550 
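	/*
	 * A descriptor whose fields are all zero has already been
	 * unlinked and cleared, so there is nothing left to tear down.
	 */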
551 	if ((dc->group == NULL) && (dc->next == 0) &&
552 	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
553 		goto nxge_grp_dc_remove_exit;
554 	}
555 
556 	group = (nxge_grp_t *)dc->group;
557 
558 	if (isLDOMguest(nxge)) {
559 		(void) nxge_hio_intr_remove(nxge, type, channel);
560 	}
561 
562 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
563 	    "DC remove: group = %d.%d.%d, %cdc %d",
564 	    nxge->mac.portnum, group->sequence, group->count,
565 	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));
566 
567 	MUTEX_ENTER(&nhd->lock);
568 
569 	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
570 
571 	/* Remove the DC from its group. */
572 	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
573 		MUTEX_EXIT(&nhd->lock);
574 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
575 		    "nxge_grp_dc_remove(%d) failed", channel));
576 		goto nxge_grp_dc_remove_exit;
577 	}
578 
579 	uninit = dc->uninit;
580 	channel = dc->channel;
581 
582 	NXGE_DC_RESET(set->owned.map, channel);
583 	set->owned.count--;
584 
585 	(void) memset(dc, 0, sizeof (*dc));
586 
587 	MUTEX_EXIT(&nhd->lock);
588 
589 	(*uninit)(nxge, channel);
590 
591 nxge_grp_dc_remove_exit:
592 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
593 }
594 
595 nxge_hio_dc_t *
596 nxge_grp_dc_find(
597 	nxge_t *nxge,
598 	vpc_type_t type,	/* Rx or Tx */
599 	int channel)
600 {
601 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
602 	nxge_hio_dc_t *current;
603 
604 	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
605 
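	/*
	 * In the service domain the global TDC/RDC tables are indexed
	 * directly by channel number.  A guest domain must instead scan
	 * for the entry whose VR refers back to this nxge instance.
	 */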
606 	if (!isLDOMguest(nxge)) {
607 		return (&current[channel]);
608 	} else {
609 		/* We're in a guest domain. */
610 		int i, limit = (type == VP_BOUND_TX) ?
611 		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;
612 
613 		MUTEX_ENTER(&nhd->lock);
614 		for (i = 0; i < limit; i++, current++) {
615 			if (current->channel == channel) {
616 				if (current->vr && current->vr->nxge ==
617 				    (uintptr_t)nxge) {
618 					MUTEX_EXIT(&nhd->lock);
619 					return (current);
620 				}
621 			}
622 		}
623 		MUTEX_EXIT(&nhd->lock);
624 	}
625 
626 	return (0);
627 }
628 
629 /*
630  * nxge_grp_dc_append
631  *
632  *	Append a DMA channel to a group.
633  *
634  * Arguments:
635  * 	nxge
636  * 	group	The group to append to
637  * 	dc	The DMA channel to append
638  *
639  * Notes:
640  *
641  * Context:
642  *	Any domain
643  */
644 static
645 void
646 nxge_grp_dc_append(
647 	nxge_t *nxge,
648 	nxge_grp_t *group,
649 	nxge_hio_dc_t *dc)
650 {
651 	MUTEX_ENTER(&nxge->group_lock);
652 
653 	if (group->dc == 0) {
654 		group->dc = dc;
655 	} else {
656 		nxge_hio_dc_t *current = group->dc;
657 		do {
658 			if (current->next == 0) {
659 				current->next = dc;
660 				break;
661 			}
662 			current = current->next;
663 		} while (current);
664 	}
665 
666 	NXGE_DC_SET(group->map, dc->channel);
667 
668 	nxge_grp_dc_map(group);
669 	group->count++;
670 
671 	MUTEX_EXIT(&nxge->group_lock);
672 }
673 
674 /*
675  * nxge_grp_dc_unlink
676  *
677  *	Unlink a DMA channel from its linked list (group).
678  *
679  * Arguments:
680  * 	nxge
681  * 	group	The group (linked list) to unlink from
682  * 	channel	The channel number of the DMA channel to unlink
683  *
684  * Notes:
685  *
686  * Context:
687  *	Any domain
688  */
689 nxge_hio_dc_t *
690 nxge_grp_dc_unlink(
691 	nxge_t *nxge,
692 	nxge_grp_t *group,
693 	int channel)
694 {
695 	nxge_hio_dc_t *current, *previous;
696 
697 	MUTEX_ENTER(&nxge->group_lock);
698 
699 	if (group == NULL) {
700 		MUTEX_EXIT(&nxge->group_lock);
701 		return (0);
702 	}
703 
704 	if ((current = group->dc) == 0) {
705 		MUTEX_EXIT(&nxge->group_lock);
706 		return (0);
707 	}
708 
709 	previous = 0;
710 	do {
711 		if (current->channel == channel) {
712 			if (previous)
713 				previous->next = current->next;
714 			else
715 				group->dc = current->next;
716 			break;
717 		}
718 		previous = current;
719 		current = current->next;
720 	} while (current);
721 
722 	if (current == 0) {
723 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
724 		    "DC unlink: DC %d not found", channel));
725 	} else {
726 		current->next = 0;
727 		current->group = 0;
728 
729 		NXGE_DC_RESET(group->map, channel);
730 		group->count--;
731 	}
732 
733 	nxge_grp_dc_map(group);
734 
735 	MUTEX_EXIT(&nxge->group_lock);
736 
737 	return (current);
738 }
739 
740 /*
741  * nxge_grp_dc_map
742  *
743  *	Map a linked list to an array of channel numbers.
744  *
745  * Arguments:
746  * 	nxge
747  * 	group	The group to remap.
748  *
749  * Notes:
750  *	It is expected that the caller will hold the correct mutex.
751  *
752  * Context:
753  *	Service domain
754  */
755 void
756 nxge_grp_dc_map(
757 	nxge_grp_t *group)
758 {
759 	nxge_channel_t *legend;
760 	nxge_hio_dc_t *dc;
761 
762 	(void) memset(group->legend, 0, sizeof (group->legend));
763 
764 	legend = group->legend;
765 	dc = group->dc;
766 	while (dc) {
767 		*legend = dc->channel;
768 		legend++;
769 		dc = dc->next;
770 	}
771 }
772 
773 /*
774  * ---------------------------------------------------------------------
775  * These are HIO debugging functions.
776  * ---------------------------------------------------------------------
777  */
778 
779 /*
780  * nxge_delay
781  *
782  *	Delay <seconds> number of seconds.
783  *
784  * Arguments:
785  * 	seconds	The number of seconds to delay.
788  *
789  * Notes:
790  *	This is a developer-only function.
791  *
792  * Context:
793  *	Any domain
794  */
795 void
796 nxge_delay(
797 	int seconds)
798 {
799 	delay(drv_usectohz(seconds * 1000000));
800 }
801 
802 static dmc_reg_name_t rx_names[] = {
803 	{ "RXDMA_CFIG1",	0 },
804 	{ "RXDMA_CFIG2",	8 },
805 	{ "RBR_CFIG_A",		0x10 },
806 	{ "RBR_CFIG_B",		0x18 },
807 	{ "RBR_KICK",		0x20 },
808 	{ "RBR_STAT",		0x28 },
809 	{ "RBR_HDH",		0x30 },
810 	{ "RBR_HDL",		0x38 },
811 	{ "RCRCFIG_A",		0x40 },
812 	{ "RCRCFIG_B",		0x48 },
813 	{ "RCRSTAT_A",		0x50 },
814 	{ "RCRSTAT_B",		0x58 },
815 	{ "RCRSTAT_C",		0x60 },
816 	{ "RX_DMA_ENT_MSK",	0x68 },
817 	{ "RX_DMA_CTL_STAT",	0x70 },
818 	{ "RCR_FLSH",		0x78 },
819 	{ "RXMISC",		0x90 },
820 	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
821 	{ 0, -1 }
822 };
823 
824 static dmc_reg_name_t tx_names[] = {
825 	{ "Tx_RNG_CFIG",	0 },
826 	{ "Tx_RNG_HDL",		0x10 },
827 	{ "Tx_RNG_KICK",	0x18 },
828 	{ "Tx_ENT_MASK",	0x20 },
829 	{ "Tx_CS",		0x28 },
830 	{ "TxDMA_MBH",		0x30 },
831 	{ "TxDMA_MBL",		0x38 },
832 	{ "TxDMA_PRE_ST",	0x40 },
833 	{ "Tx_RNG_ERR_LOGH",	0x48 },
834 	{ "Tx_RNG_ERR_LOGL",	0x50 },
835 	{ "TDMC_INTR_DBG",	0x60 },
836 	{ "Tx_CS_DBG",		0x68 },
837 	{ 0, -1 }
838 };
839 
840 /*
841  * nxge_xx2str
842  *
843  *	Translate a register address into a string.
844  *
845  * Arguments:
846  * 	offset	The address of the register to translate.
847  *
848  * Notes:
849  *	These are developer-only functions.
850  *
851  * Context:
852  *	Any domain
853  */
854 const char *
855 nxge_rx2str(
856 	int offset)
857 {
858 	dmc_reg_name_t *reg = &rx_names[0];
859 
860 	offset &= DMA_CSR_MASK;
861 
862 	while (reg->name) {
863 		if (offset == reg->offset)
864 			return (reg->name);
865 		reg++;
866 	}
867 
868 	return (0);
869 }
870 
871 const char *
872 nxge_tx2str(
873 	int offset)
874 {
875 	dmc_reg_name_t *reg = &tx_names[0];
876 
877 	offset &= DMA_CSR_MASK;
878 
879 	while (reg->name) {
880 		if (offset == reg->offset)
881 			return (reg->name);
882 		reg++;
883 	}
884 
885 	return (0);
886 }
887 
888 /*
889  * nxge_ddi_perror
890  *
891  *	Map a DDI error number to a string.
892  *
893  * Arguments:
894  * 	ddi_error	The DDI error number to map.
895  *
896  * Notes:
897  *
898  * Context:
899  *	Any domain
900  */
901 const char *
902 nxge_ddi_perror(
903 	int ddi_error)
904 {
905 	switch (ddi_error) {
906 	case DDI_SUCCESS:
907 		return ("DDI_SUCCESS");
908 	case DDI_FAILURE:
909 		return ("DDI_FAILURE");
910 	case DDI_NOT_WELL_FORMED:
911 		return ("DDI_NOT_WELL_FORMED");
912 	case DDI_EAGAIN:
913 		return ("DDI_EAGAIN");
914 	case DDI_EINVAL:
915 		return ("DDI_EINVAL");
916 	case DDI_ENOTSUP:
917 		return ("DDI_ENOTSUP");
918 	case DDI_EPENDING:
919 		return ("DDI_EPENDING");
920 	case DDI_ENOMEM:
921 		return ("DDI_ENOMEM");
922 	case DDI_EBUSY:
923 		return ("DDI_EBUSY");
924 	case DDI_ETRANSPORT:
925 		return ("DDI_ETRANSPORT");
926 	case DDI_ECONTEXT:
927 		return ("DDI_ECONTEXT");
928 	default:
929 		return ("Unknown error");
930 	}
931 }
932 
933 /*
934  * ---------------------------------------------------------------------
935  * These are Sun4v HIO function definitions
936  * ---------------------------------------------------------------------
937  */
938 
939 #if defined(sun4v)
940 
941 /*
942  * Local prototypes
943  */
944 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
945 static void nxge_hio_unshare(nxge_hio_vr_t *);
946 
947 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
948 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
949 
950 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
951 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
952 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
953 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
954     mac_ring_type_t, int);
955 
956 /*
957  * nxge_hio_init
958  *
959  *	Initialize the HIO module of the NXGE driver.
960  *
961  * Arguments:
962  * 	nxge
963  *
964  * Notes:
965  *
966  * Context:
967  *	Any domain
968  */
969 int
970 nxge_hio_init(nxge_t *nxge)
971 {
972 	nxge_hio_data_t *nhd;
973 	int i, region;
974 
975 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
976 	if (nhd == 0) {
977 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
978 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
979 		if (isLDOMguest(nxge))
980 			nhd->type = NXGE_HIO_TYPE_GUEST;
981 		else
982 			nhd->type = NXGE_HIO_TYPE_SERVICE;
983 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
984 	}
985 
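	/*
	 * A Solaris domain driving an on-chip N2/NIU device is treated
	 * as a service domain only if it has negotiated version 1.1 of
	 * the NIU hypervisor service group.
	 */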
986 	if ((nxge->environs == SOLARIS_DOMAIN) &&
987 	    (nxge->niu_type == N2_NIU)) {
988 		if (nxge->niu_hsvc_available == B_TRUE) {
989 			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
990 			if (niu_hsvc->hsvc_major == 1 &&
991 			    niu_hsvc->hsvc_minor == 1)
992 				nxge->environs = SOLARIS_SERVICE_DOMAIN;
993 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
994 			    "nxge_hio_init: hypervisor services "
995 			    "version %d.%d",
996 			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
997 		}
998 	}
999 
1000 	/*
1001 	 * Initialize share and ring group structures.
1002 	 */
1003 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1004 		nxge->tx_hio_groups[i].ghandle = NULL;
1005 		nxge->tx_hio_groups[i].nxgep = nxge;
1006 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1007 		nxge->tx_hio_groups[i].gindex = 0;
1008 		nxge->tx_hio_groups[i].sindex = 0;
1009 	}
1010 
1011 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1012 		nxge->rx_hio_groups[i].ghandle = NULL;
1013 		nxge->rx_hio_groups[i].nxgep = nxge;
1014 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1015 		nxge->rx_hio_groups[i].gindex = 0;
1016 		nxge->rx_hio_groups[i].sindex = 0;
1017 		nxge->rx_hio_groups[i].started = B_FALSE;
1018 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
1019 		nxge->rx_hio_groups[i].rdctbl = -1;
1020 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
1021 	}
1022 
1023 	if (!isLDOMs(nxge)) {
1024 		nhd->hio.ldoms = B_FALSE;
1025 		return (NXGE_OK);
1026 	}
1027 
1028 	nhd->hio.ldoms = B_TRUE;
1029 
1030 	/*
1031 	 * Fill in what we can.
1032 	 */
1033 	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
1034 		nhd->vr[region].region = region;
1035 	}
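	/*
	 * Two regions (FUNC0_VIR0 and FUNC2_VIR0) belong to the NIU
	 * ports themselves, so only NXGE_VR_SR_MAX - 2 of them are
	 * available for sharing.
	 */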
1036 	nhd->vrs = NXGE_VR_SR_MAX - 2;
1037 
1038 	/*
1039 	 * Initialize the share structures.
1040 	 */
1041 	for (i = 0; i < NXGE_MAX_TDCS; i++)
1042 		nxge->tdc_is_shared[i] = B_FALSE;
1043 
1044 	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
1045 		nxge->shares[i].nxgep = nxge;
1046 		nxge->shares[i].index = 0;
1047 		nxge->shares[i].vrp = NULL;
1048 		nxge->shares[i].tmap = 0;
1049 		nxge->shares[i].rmap = 0;
1050 		nxge->shares[i].rxgroup = 0;
1051 		nxge->shares[i].active = B_FALSE;
1052 	}
1053 
1054 	/* Fill in the HV HIO function pointers. */
1055 	nxge_hio_hv_init(nxge);
1056 
1057 	if (isLDOMservice(nxge)) {
1058 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
1059 		    "Hybrid IO-capable service domain"));
1060 		return (NXGE_OK);
1061 	}
1062 
1063 	return (0);
1064 }
1065 #endif /* defined(sun4v) */
1066 
1067 static int
1068 nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
1069     const uint8_t *macaddr)
1070 {
1071 	int rv;
1072 	nxge_rdc_grp_t *group;
1073 
1074 	mutex_enter(nxge->genlock);
1075 
1076 	/*
1077 	 * Initialize the NXGE RDC table data structure.
1078 	 */
1079 	group = &nxge->pt_config.rdc_grps[g->rdctbl];
1080 	if (!group->flag) {
1081 		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
1082 		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
1083 		group->flag = B_TRUE;	/* This group has been configured. */
1084 	}
1085 
1086 	mutex_exit(nxge->genlock);
1087 
1088 	/*
1089 	 * Add the MAC address.
1090 	 */
1091 	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
1092 	    g->rdctbl, B_TRUE)) != 0) {
1093 		return (rv);
1094 	}
1095 
1096 	mutex_enter(nxge->genlock);
1097 	g->n_mac_addrs++;
1098 	mutex_exit(nxge->genlock);
1099 	return (0);
1100 }
1101 
1102 static int
1103 nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
1104 {
1105 	p_nxge_t		nxgep = (p_nxge_t)arg;
1106 	struct ether_addr	addrp;
1107 
1108 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
1109 	if (nxge_set_mac_addr(nxgep, &addrp)) {
1110 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1111 		    "<== nxge_m_unicst: set unitcast failed"));
1112 		return (EINVAL);
1113 	}
1114 
1115 	nxgep->primary = B_TRUE;
1116 
1117 	return (0);
1118 }
1119 
1120 /*ARGSUSED*/
1121 static int
1122 nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
1123 {
1124 	nxgep->primary = B_FALSE;
1125 	return (0);
1126 }
1127 
1128 static int
1129 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
1130 {
1131 	nxge_ring_group_t	*group = (nxge_ring_group_t *)arg;
1132 	p_nxge_t		nxge = group->nxgep;
1133 	int			rv;
1134 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1135 
1136 	ASSERT(group->type == MAC_RING_TYPE_RX);
1137 	ASSERT(group->nxgep != NULL);
1138 
1139 	if (isLDOMguest(group->nxgep))
1140 		return (0);
1141 
1142 	mutex_enter(nxge->genlock);
1143 
1144 	if (!nxge->primary && group->port_default_grp) {
1145 		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
1146 		mutex_exit(nxge->genlock);
1147 		return (rv);
1148 	}
1149 
1150 	/*
1151 	 * If the group is associated with a VR, then only one
1152 	 * address may be assigned to the group.
1153 	 */
1154 	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
1155 	if ((vr != NULL) && (group->n_mac_addrs)) {
1156 		mutex_exit(nxge->genlock);
1157 		return (ENOSPC);
1158 	}
1159 
1160 	mutex_exit(nxge->genlock);
1161 
1162 	/*
1163 	 * Program the mac address for the group.
1164 	 */
1165 	if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
1166 		return (rv);
1167 	}
1168 
1169 	return (0);
1170 }
1171 
1172 static int
1173 find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
1174 {
1175 	int i;
1176 	for (i = 0; i <= mmac_info->num_mmac; i++) {
1177 		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
1178 		    ETHERADDRL) == 0) {
1179 			return (i);
1180 		}
1181 	}
1182 	return (-1);
1183 }
1184 
1185 /* ARGSUSED */
1186 static int
1187 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
1188 {
1189 	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1190 	struct ether_addr addrp;
1191 	p_nxge_t nxge = group->nxgep;
1192 	nxge_mmac_t *mmac_info;
1193 	int rv, slot;
1194 
1195 	ASSERT(group->type == MAC_RING_TYPE_RX);
1196 	ASSERT(group->nxgep != NULL);
1197 
1198 	if (isLDOMguest(group->nxgep))
1199 		return (0);
1200 
1201 	mutex_enter(nxge->genlock);
1202 
1203 	mmac_info = &nxge->nxge_mmac_info;
1204 	slot = find_mac_slot(mmac_info, mac_addr);
1205 	if (slot < 0) {
1206 		if (group->port_default_grp && nxge->primary) {
1207 			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
1208 			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
1209 				rv = nxge_hio_clear_unicst(nxge, mac_addr);
1210 				mutex_exit(nxge->genlock);
1211 				return (rv);
1212 			} else {
1213 				mutex_exit(nxge->genlock);
1214 				return (EINVAL);
1215 			}
1216 		} else {
1217 			mutex_exit(nxge->genlock);
1218 			return (EINVAL);
1219 		}
1220 	}
1221 
1222 	mutex_exit(nxge->genlock);
1223 
1224 	/*
1225 	 * Remove the mac address for the group
1226 	 */
1227 	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
1228 		return (rv);
1229 	}
1230 
1231 	mutex_enter(nxge->genlock);
1232 	group->n_mac_addrs--;
1233 	mutex_exit(nxge->genlock);
1234 
1235 	return (0);
1236 }
1237 
1238 static int
1239 nxge_hio_group_start(mac_group_driver_t gdriver)
1240 {
1241 	nxge_ring_group_t	*group = (nxge_ring_group_t *)gdriver;
1242 	nxge_rdc_grp_t		*rdc_grp_p;
1243 	int			rdctbl;
1244 	int			dev_gindex;
1245 
1246 	ASSERT(group->type == MAC_RING_TYPE_RX);
1247 	ASSERT(group->nxgep != NULL);
1248 
1249 	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
1250 	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
1251 		return (ENXIO);
1252 
1253 	mutex_enter(group->nxgep->genlock);
1254 	if (isLDOMguest(group->nxgep))
1255 		goto nxge_hio_group_start_exit;
1256 
1257 	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1258 	    group->gindex;
1259 	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];
1260 
1261 	/*
1262 	 * Get an rdc table for this group.
1263 	 * Group ID is given by the caller, and that's the group it needs
1264 	 * to bind to.  The default group is already bound when the driver
1265 	 * was attached.
1266 	 *
1267 	 * For group 0, its RDC table was allocated at attach time,
1268 	 * so there is no need to allocate a new table.
1269 	 */
1270 	if (group->gindex != 0) {
1271 		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
1272 		    dev_gindex, B_TRUE);
1273 		if (rdctbl < 0) {
1274 			mutex_exit(group->nxgep->genlock);
1275 			return (rdctbl);
1276 		}
1277 	} else {
1278 		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
1279 	}
1280 
1281 	group->rdctbl = rdctbl;
1282 
1283 	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);
1284 
1285 nxge_hio_group_start_exit:
1286 	group->started = B_TRUE;
1287 	mutex_exit(group->nxgep->genlock);
1288 	return (0);
1289 }
1290 
1291 static void
1292 nxge_hio_group_stop(mac_group_driver_t gdriver)
1293 {
1294 	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1295 
1296 	ASSERT(group->type == MAC_RING_TYPE_RX);
1297 
1298 	mutex_enter(group->nxgep->genlock);
1299 	group->started = B_FALSE;
1300 
1301 	if (isLDOMguest(group->nxgep))
1302 		goto nxge_hio_group_stop_exit;
1303 
1304 	/*
1305 	 * Unbind the RDC table previously bound for this group.
1306 	 *
1307 	 * Since the RDC table for group 0 was allocated at attach
1308 	 * time, no need to unbind the table here.
1309 	 */
1310 	if (group->gindex != 0)
1311 		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1312 
1313 nxge_hio_group_stop_exit:
1314 	mutex_exit(group->nxgep->genlock);
1315 }
1316 
1317 /* ARGSUSED */
1318 void
1319 nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
1320 	mac_group_info_t *infop, mac_group_handle_t ghdl)
1321 {
1322 	p_nxge_t		nxgep = (p_nxge_t)arg;
1323 	nxge_ring_group_t	*group;
1324 	int			dev_gindex;
1325 
1326 	switch (type) {
1327 	case MAC_RING_TYPE_RX:
1328 		group = &nxgep->rx_hio_groups[groupid];
1329 		group->nxgep = nxgep;
1330 		group->ghandle = ghdl;
1331 		group->gindex = groupid;
1332 		group->sindex = 0;	/* not yet bound to a share */
1333 
1334 		if (!isLDOMguest(nxgep)) {
1335 			dev_gindex =
1336 			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1337 			    groupid;
1338 
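			/*
			 * groupid 0 corresponds to the port's default RDC
			 * group; flag it so the unicast MAC address paths
			 * can treat it specially.
			 */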
1339 			if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
1340 			    dev_gindex)
1341 				group->port_default_grp = B_TRUE;
1342 
1343 			infop->mgi_count =
1344 			    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
1345 		} else {
1346 			infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
1347 		}
1348 
1349 		infop->mgi_driver = (mac_group_driver_t)group;
1350 		infop->mgi_start = nxge_hio_group_start;
1351 		infop->mgi_stop = nxge_hio_group_stop;
1352 		infop->mgi_addmac = nxge_hio_add_mac;
1353 		infop->mgi_remmac = nxge_hio_rem_mac;
1354 		break;
1355 
1356 	case MAC_RING_TYPE_TX:
1357 		/*
1358 		 * 'groupid' for TX should be incremented by one since
1359 		 * the default group (groupid 0) is not known by the MAC layer
1360 		 */
1361 		group = &nxgep->tx_hio_groups[groupid + 1];
1362 		group->nxgep = nxgep;
1363 		group->ghandle = ghdl;
1364 		group->gindex = groupid + 1;
1365 		group->sindex = 0;	/* not yet bound to a share */
1366 
1367 		infop->mgi_driver = (mac_group_driver_t)group;
1368 		infop->mgi_start = NULL;
1369 		infop->mgi_stop = NULL;
1370 		infop->mgi_addmac = NULL;	/* not needed */
1371 		infop->mgi_remmac = NULL;	/* not needed */
1372 		/* no rings associated with group initially */
1373 		infop->mgi_count = 0;
1374 		break;
1375 	}
1376 }
1377 
1378 #if defined(sun4v)
1379 
1380 int
1381 nxge_hio_share_assign(
1382 	nxge_t *nxge,
1383 	uint64_t cookie,
1384 	res_map_t *tmap,
1385 	res_map_t *rmap,
1386 	nxge_hio_vr_t *vr)
1387 {
1388 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1389 	uint64_t slot, hv_rv;
1390 	nxge_hio_dc_t *dc;
1391 	nxhv_vr_fp_t *fp;
1392 	int i;
1393 
1394 	/*
1395 	 * Ask the Hypervisor to set up the VR for us
1396 	 */
1397 	fp = &nhd->hio.vr;
1398 	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1399 		NXGE_ERROR_MSG((nxge, HIO_CTL,
1400 		    "nxge_hio_share_assign: "
1401 		    "vr->assign() returned %d", hv_rv));
1402 		return (-EIO);
1403 	}
1404 
1405 	/*
1406 	 * For each shared TDC, ask the HV to find us an empty slot.
1407 	 * -----------------------------------------------------
1408 	 */
1409 	dc = vr->tx_group.dc;
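	/*
	 * The inner loop walks the VR's entire TDC list on the first
	 * pass, so the outer loop is effectively a single iteration.
	 */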
1410 	for (i = 0; i < NXGE_MAX_TDCS; i++) {
1411 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1412 		while (dc) {
1413 			hv_rv = (*tx->assign)
1414 			    (vr->cookie, dc->channel, &slot);
1415 			if (hv_rv != 0) {
1416 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1417 				    "nxge_hio_share_assign: "
1418 				    "tx->assign(%x, %d) failed: %ld",
1419 				    vr->cookie, dc->channel, hv_rv));
1420 				return (-EIO);
1421 			}
1422 
1423 			dc->cookie = vr->cookie;
1424 			dc->page = (vp_channel_t)slot;
1425 
1426 			/* Inform the caller about the slot chosen. */
1427 			(*tmap) |= 1 << slot;
1428 
1429 			dc = dc->next;
1430 		}
1431 	}
1432 
1433 	/*
1434 	 * For each shared RDC, ask the HV to find us an empty slot.
1435 	 * -----------------------------------------------------
1436 	 */
1437 	dc = vr->rx_group.dc;
1438 	for (i = 0; i < NXGE_MAX_RDCS; i++) {
1439 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1440 		while (dc) {
1441 			hv_rv = (*rx->assign)
1442 			    (vr->cookie, dc->channel, &slot);
1443 			if (hv_rv != 0) {
1444 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1445 				    "nxge_hio_share_assign: "
1446 				    "rx->assign(%x, %d) failed: %ld",
1447 				    vr->cookie, dc->channel, hv_rv));
1448 				return (-EIO);
1449 			}
1450 
1451 			dc->cookie = vr->cookie;
1452 			dc->page = (vp_channel_t)slot;
1453 
1454 			/* Inform the caller about the slot chosen. */
1455 			(*rmap) |= 1 << slot;
1456 
1457 			dc = dc->next;
1458 		}
1459 	}
1460 
1461 	return (0);
1462 }
1463 
1464 void
1465 nxge_hio_share_unassign(
1466 	nxge_hio_vr_t *vr)
1467 {
1468 	nxge_t *nxge = (nxge_t *)vr->nxge;
1469 	nxge_hio_data_t *nhd;
1470 	nxge_hio_dc_t *dc;
1471 	nxhv_vr_fp_t *fp;
1472 	uint64_t hv_rv;
1473 
1474 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1475 
1476 	dc = vr->tx_group.dc;
1477 	while (dc) {
1478 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1479 		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1480 		if (hv_rv != 0) {
1481 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1482 			    "nxge_hio_share_unassign: "
1483 			    "tx->unassign(%x, %d) failed: %ld",
1484 			    vr->cookie, dc->page, hv_rv));
1485 		}
1486 		dc = dc->next;
1487 	}
1488 
1489 	dc = vr->rx_group.dc;
1490 	while (dc) {
1491 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1492 		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1493 		if (hv_rv != 0) {
1494 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1495 			    "nxge_hio_share_unassign: "
1496 			    "rx->unassign(%x, %d) failed: %ld",
1497 			    vr->cookie, dc->page, hv_rv));
1498 		}
1499 		dc = dc->next;
1500 	}
1501 
1502 	fp = &nhd->hio.vr;
1503 	if (fp->unassign) {
1504 		hv_rv = (*fp->unassign)(vr->cookie);
1505 		if (hv_rv != 0) {
1506 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1507 			    "nxge_hio_share_unassign: "
1508 			    "vr->assign(%x) failed: %ld",
1509 			    vr->cookie, hv_rv));
1510 		}
1511 	}
1512 }
1513 
1514 int
1515 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1516 {
1517 	p_nxge_t		nxge = (p_nxge_t)arg;
1518 	nxge_share_handle_t	*shp;
1519 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1520 	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1521 
1522 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1523 
1524 	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1525 	    nhd->hio.rx.assign == 0) {
1526 		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1527 		return (EIO);
1528 	}
1529 
1530 	/*
1531 	 * Get a VR.
1532 	 */
1533 	if ((vr = nxge_hio_vr_share(nxge)) == 0)
1534 		return (EAGAIN);
1535 
1536 	shp = &nxge->shares[vr->region];
1537 	shp->nxgep = nxge;
1538 	shp->index = vr->region;
1539 	shp->vrp = (void *)vr;
1540 	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
1541 	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
1542 	shp->active = B_FALSE;		/* not bound yet */
1543 
1544 	*shandle = (mac_share_handle_t)shp;
1545 
1546 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1547 	return (0);
1548 }
1549 
1550 
1551 void
1552 nxge_hio_share_free(mac_share_handle_t shandle)
1553 {
1554 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1555 	nxge_hio_vr_t		*vr;
1556 
1557 	/*
1558 	 * Clear internal handle state.
1559 	 */
1560 	vr = shp->vrp;
1561 	shp->vrp = (void *)NULL;
1562 	shp->index = 0;
1563 	shp->tmap = 0;
1564 	shp->rmap = 0;
1565 	shp->rxgroup = 0;
1566 	shp->active = B_FALSE;
1567 
1568 	/*
1569 	 * Free VR resource.
1570 	 */
1571 	nxge_hio_unshare(vr);
1572 }
1573 
1574 
1575 void
1576 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1577     mac_ring_handle_t *rings, uint_t *n_rings)
1578 {
1579 	nxge_t			*nxge;
1580 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1581 	nxge_ring_handle_t	*rh;
1582 	uint32_t		offset;
1583 
1584 	nxge = shp->nxgep;
1585 
1586 	switch (type) {
1587 	case MAC_RING_TYPE_RX:
1588 		rh = nxge->rx_ring_handles;
1589 		offset = nxge->pt_config.hw_config.start_rdc;
1590 		break;
1591 
1592 	case MAC_RING_TYPE_TX:
1593 		rh = nxge->tx_ring_handles;
1594 		offset = nxge->pt_config.hw_config.tdc.start;
1595 		break;
1596 	}
1597 
1598 	/*
1599 	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
1600 	 * but the HV has statically assigned the channels like so:
1601 	 * VR0: RDC0 & RDC1
1602 	 * VR1: RDC2 & RDC3, etc.
1603 	 * The TDCs are assigned in exactly the same way.
1604 	 */
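	/*
	 * Share (VR) N therefore owns channels 2N and 2N+1; subtracting
	 * the port's starting channel converts those channel numbers
	 * into indices into this port's ring handle array.
	 */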
1605 	if (rings != NULL) {
1606 		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1607 		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1608 	}
1609 	if (n_rings != NULL) {
1610 		*n_rings = 2;
1611 	}
1612 }
1613 
1614 int
1615 nxge_hio_share_add_group(mac_share_handle_t shandle,
1616     mac_group_driver_t ghandle)
1617 {
1618 	nxge_t			*nxge;
1619 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1620 	nxge_ring_group_t	*rg = (nxge_ring_group_t *)ghandle;
1621 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1622 	nxge_grp_t		*group;
1623 	int			i;
1624 
1625 	if (rg->sindex != 0) {
1626 		/* the group is already bound to a share */
1627 		return (EALREADY);
1628 	}
1629 
1630 	/*
1631 	 * Adding group 0 (the default group) to a share is never
1632 	 * correct, so assert against it.
1633 	 */
1634 	ASSERT(rg->gindex != 0);
1635 
1636 	nxge = rg->nxgep;
1637 	vr = shp->vrp;
1638 
1639 	switch (rg->type) {
1640 	case MAC_RING_TYPE_RX:
1641 		/*
1642 		 * Make sure that the group has the right rings associated
1643 		 * for the share. In version 1.0, we may only give a VR
1644 		 * 2 RDCs.  Not only that, but the HV has statically
1645 		 * assigned the channels like so:
1646 		 * VR0: RDC0 & RDC1
1647 		 * VR1: RDC2 & RDC3, etc.
1648 		 */
1649 		group = nxge->rx_set.group[rg->gindex];
1650 
1651 		if (group->count > 2) {
1652 			/* a share can have at most 2 rings */
1653 			return (EINVAL);
1654 		}
1655 
1656 		for (i = 0; i < NXGE_MAX_RDCS; i++) {
1657 			if (group->map & (1 << i)) {
1658 				if ((i != shp->index * 2) &&
1659 				    (i != (shp->index * 2 + 1))) {
1660 					/*
1661 					 * An attempt was made to bind a group
1662 					 * with invalid rings to this share.
1663 					 */
1664 					return (EINVAL);
1665 				}
1666 			}
1667 		}
1668 
1669 		rg->sindex = vr->region;
1670 		vr->rdc_tbl = rg->rdctbl;
1671 		shp->rxgroup = vr->rdc_tbl;
1672 		break;
1673 
1674 	case MAC_RING_TYPE_TX:
1675 		/*
1676 		 * Make sure that the group has the right rings associated
1677 		 * for the share. In version 1.0, we may only give a VR
1678 		 * 2 TDCs.  Not only that, but the HV has statically
1679 		 * assigned the channels like so:
1680 		 * VR0: TDC0 & TDC1
1681 		 * VR1: TDC2 & TDC3, etc.
1682 		 */
1683 		group = nxge->tx_set.group[rg->gindex];
1684 
1685 		if (group->count > 2) {
1686 			/* a share can have at most 2 rings */
1687 			return (EINVAL);
1688 		}
1689 
1690 		for (i = 0; i < NXGE_MAX_TDCS; i++) {
1691 			if (group->map & (1 << i)) {
1692 				if ((i != shp->index * 2) &&
1693 				    (i != (shp->index * 2 + 1))) {
1694 					/*
1695 					 * An attempt was made to bind a group
1696 					 * with invalid rings to this share.
1697 					 */
1698 					return (EINVAL);
1699 				}
1700 			}
1701 		}
1702 
1703 		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1704 		    rg->gindex;
1705 		rg->sindex = vr->region;
1706 		break;
1707 	}
1708 	return (0);
1709 }
1710 
1711 int
1712 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1713     mac_group_driver_t ghandle)
1714 {
1715 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1716 	nxge_ring_group_t	*group = (nxge_ring_group_t *)ghandle;
1717 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1718 	int			rv = 0;
1719 
1720 	vr = shp->vrp;
1721 
1722 	switch (group->type) {
1723 	case MAC_RING_TYPE_RX:
1724 		group->sindex = 0;
1725 		vr->rdc_tbl = 0;
1726 		shp->rxgroup = 0;
1727 		break;
1728 
1729 	case MAC_RING_TYPE_TX:
1730 		group->sindex = 0;
1731 		vr->tdc_tbl = 0;
1732 		break;
1733 	}
1734 
1735 	return (rv);
1736 }
1737 
1738 int
1739 nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1740     uint64_t *rcookie)
1741 {
1742 	nxge_t			*nxge;
1743 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1744 	nxge_hio_vr_t		*vr;
1745 	uint64_t		rmap, tmap, hv_rmap, hv_tmap;
1746 	int			rv;
1747 
1748 	nxge = shp->nxgep;
1749 	vr = (nxge_hio_vr_t *)shp->vrp;
1750 
1751 	/*
1752 	 * Add resources to the share.
1753 	 * For each DMA channel associated with the VR, bind its resources
1754 	 * to the VR.
1755 	 */
1756 	tmap = 0;
1757 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1758 	if (rv != 0) {
1759 		return (rv);
1760 	}
1761 
1762 	rmap = 0;
1763 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1764 	if (rv != 0) {
1765 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1766 		return (rv);
1767 	}
1768 
1769 	/*
1770 	 * Ask the Hypervisor to set up the VR and allocate slots for
1771 	 * each ring associated with the VR.
1772 	 */
1773 	hv_tmap = hv_rmap = 0;
1774 	if ((rv = nxge_hio_share_assign(nxge, cookie,
1775 	    &hv_tmap, &hv_rmap, vr))) {
1776 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1777 		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1778 		return (rv);
1779 	}
1780 
1781 	shp->active = B_TRUE;
1782 	shp->tmap = hv_tmap;
1783 	shp->rmap = hv_rmap;
1784 
1785 	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1786 	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1787 
1788 	return (0);
1789 }
1790 
1791 void
1792 nxge_hio_share_unbind(mac_share_handle_t shandle)
1793 {
1794 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1795 
1796 	/*
1797 	 * First, unassign the VR (take it back),
1798 	 * so we can enable interrupts again.
1799 	 */
1800 	nxge_hio_share_unassign(shp->vrp);
1801 
1802 	/*
1803 	 * Free Ring Resources for TX and RX
1804 	 */
1805 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1806 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1807 }
1808 
1809 
1810 /*
1811  * nxge_hio_vr_share
1812  *
1813  *	Find an unused Virtualization Region (VR).
1814  *
1815  * Arguments:
1816  * 	nxge
1817  *
1818  * Notes:
1819  *
1820  * Context:
1821  *	Service domain
1822  */
1823 nxge_hio_vr_t *
1824 nxge_hio_vr_share(
1825 	nxge_t *nxge)
1826 {
1827 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1828 	nxge_hio_vr_t *vr;
1829 
1830 	int first, limit, region;
1831 
1832 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1833 
1834 	MUTEX_ENTER(&nhd->lock);
1835 
1836 	if (nhd->vrs == 0) {
1837 		MUTEX_EXIT(&nhd->lock);
1838 		return (0);
1839 	}
1840 
1841 	/* Find an empty virtual region (VR). */
1842 	if (nxge->function_num == 0) {
1843 		// FUNC0_VIR0 'belongs' to NIU port 0.
1844 		first = FUNC0_VIR1;
1845 		limit = FUNC2_VIR0;
1846 	} else if (nxge->function_num == 1) {
1847 		// FUNC2_VIR0 'belongs' to NIU port 1.
1848 		first = FUNC2_VIR1;
1849 		limit = FUNC_VIR_MAX;
1850 	} else {
1851 		cmn_err(CE_WARN,
1852 		    "Shares not supported on function(%d) at this time.\n",
1853 		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
1854 	}
1855 
1856 	for (region = first; region < limit; region++) {
1857 		if (nhd->vr[region].nxge == 0)
1858 			break;
1859 	}
1860 
1861 	if (region == limit) {
1862 		MUTEX_EXIT(&nhd->lock);
1863 		return (0);
1864 	}
1865 
1866 	vr = &nhd->vr[region];
1867 	vr->nxge = (uintptr_t)nxge;
1868 	vr->region = (uintptr_t)region;
1869 
1870 	nhd->vrs--;
1871 
1872 	MUTEX_EXIT(&nhd->lock);
1873 
1874 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1875 
1876 	return (vr);
1877 }
1878 
1879 void
1880 nxge_hio_unshare(
1881 	nxge_hio_vr_t *vr)
1882 {
1883 	nxge_t *nxge = (nxge_t *)vr->nxge;
1884 	nxge_hio_data_t *nhd;
1885 
1886 	vr_region_t region;
1887 
1888 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1889 
1890 	if (!nxge) {
1891 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1892 		    "vr->nxge is NULL"));
1893 		return;
1894 	}
1895 
1896 	/*
1897 	 * This function is no longer called, but I will keep it
1898 	 * here in case we want to revisit this topic in the future.
1899 	 *
1900 	 * nxge_hio_hostinfo_uninit(nxge, vr);
1901 	 */
1902 
1903 	/*
1904 	 * XXX: This is done by ms_sremove?
1905 	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1906 	 */
1907 
1908 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1909 
1910 	MUTEX_ENTER(&nhd->lock);
1911 
1912 	region = vr->region;
1913 	(void) memset(vr, 0, sizeof (*vr));
1914 	vr->region = region;
1915 
1916 	nhd->vrs++;
1917 
1918 	MUTEX_EXIT(&nhd->lock);
1919 
1920 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1921 }
1922 
1923 int
1924 nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
1925 {
1926 	nxge_t		*nxge = (nxge_t *)vr->nxge;
1927 	nxge_grp_t	*group;
1928 	int		groupid;
1929 	int		i, rv = 0;
1930 	int		max_dcs;
1931 
1932 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1933 
1934 	if (!nxge)
1935 		return (EINVAL);
1936 
1937 	/*
1938 	 * For each ring associated with the group, add the resources
1939 	 * to the group and bind.
1940 	 */
1941 	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1942 	if (type == MAC_RING_TYPE_TX) {
1943 		/* set->group is an array of groups indexed by port group id. */
1944 		groupid = vr->tdc_tbl -
1945 		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
1946 		group = nxge->tx_set.group[groupid];
1947 	} else {
1948 		/* set->group is an array of groups indexed by port group id. */
1949 		groupid = vr->rdc_tbl -
1950 		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1951 		group = nxge->rx_set.group[groupid];
1952 	}
1953 
1954 	if (group->map == 0) {
1955 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
1956 		    "with this VR"));
1957 		return (EINVAL);
1958 	}
1959 
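	/*
	 * Try to share every DMA channel in the group's map.  Bail out
	 * at once if not even one channel can be shared; otherwise stop
	 * at the first failure and report it below.
	 */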
1960 	for (i = 0; i < max_dcs; i++) {
1961 		if (group->map & (1 << i)) {
1962 			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
1963 				if (*map == 0) /* Couldn't get even one DC. */
1964 					return (-rv);
1965 				else
1966 					break;
1967 			}
1968 			*map |= (1 << i);
1969 		}
1970 	}
1971 
1972 	if ((*map == 0) || (rv != 0)) {
1973 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
1974 		    "<== nxge_hio_addres: rv(%x)", rv));
1975 		return (EIO);
1976 	}
1977 
1978 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
1979 	return (0);
1980 }
1981 
1982 /* ARGSUSED */
1983 void
1984 nxge_hio_remres(
1985 	nxge_hio_vr_t *vr,
1986 	mac_ring_type_t type,
1987 	res_map_t res_map)
1988 {
1989 	nxge_t *nxge = (nxge_t *)vr->nxge;
1990 	nxge_grp_t *group;
1991 
1992 	if (!nxge) {
1993 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
1994 		    "vr->nxge is NULL"));
1995 		return;
1996 	}
1997 
1998 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
1999 
2000 	/*
2001 	 * For each ring bound to the group, remove the DMA resources
2002 	 * from the group and unbind.
2003 	 */
2004 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2005 	while (group->dc) {
2006 		nxge_hio_dc_t *dc = group->dc;
2007 		NXGE_DC_RESET(res_map, dc->page);
2008 		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
2009 	}
2010 
2011 	if (res_map) {
2012 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2013 		    "res_map %lx", res_map));
2014 	}
2015 
2016 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
2017 }
2018 
2019 /*
2020  * nxge_hio_tdc_share
2021  *
2022  *	Share an unused TDC channel.
2023  *
2024  * Arguments:
2025  * 	nxge
2026  *
2027  * Notes:
2028  *
2029  * A.7.3 Reconfigure Tx DMA channel
2030  *	Disable TxDMA			A.9.6.10
2031  *     [Rebind TxDMA channel to Port	A.9.6.7]
2032  *
2033  * We don't have to rebind the TDC to the port; it is already bound.
2034  *
2035  *	Soft Reset TxDMA		A.9.6.2
2036  *
2037  * This procedure will be executed by nxge_init_txdma_channel() in the
2038  * guest domain:
2039  *
2040  *	Re-initialize TxDMA		A.9.6.8
2041  *	Reconfigure TxDMA
2042  *	Enable TxDMA			A.9.6.9
2043  *
2044  * Context:
2045  *	Service domain
2046  */
2047 int
2048 nxge_hio_tdc_share(
2049 	nxge_t *nxge,
2050 	int channel)
2051 {
2052 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2053 	nxge_grp_set_t *set = &nxge->tx_set;
2054 	tx_ring_t *ring;
2055 	int count;
2056 
2057 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));
2058 
2059 	/*
2060 	 * Wait until this channel is idle.
2061 	 */
2062 	ring = nxge->tx_rings->rings[channel];
2063 	ASSERT(ring != NULL);
2064 
2065 	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
2066 	if (ring->tx_ring_busy) {
2067 		/*
2068 		 * Wait for 30 seconds.
2069 		 */
2070 		for (count = 30 * 1000; count; count--) {
2071 			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
2072 				break;
2073 			}
2074 
2075 			drv_usecwait(1000);
2076 		}
2077 
2078 		if (count == 0) {
2079 			(void) atomic_swap_32(&ring->tx_ring_offline,
2080 			    NXGE_TX_RING_ONLINE);
2081 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2082 			    "nxge_hio_tdc_share: "
2083 			    "Tx ring %d was always BUSY", channel));
2084 			return (-EIO);
2085 		}
2086 	} else {
2087 		(void) atomic_swap_32(&ring->tx_ring_offline,
2088 		    NXGE_TX_RING_OFFLINED);
2089 	}
2090 
2091 	MUTEX_ENTER(&nhd->lock);
2092 	nxge->tdc_is_shared[channel] = B_TRUE;
2093 	MUTEX_EXIT(&nhd->lock);
2094 
2095 	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2096 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
2097 		    "Failed to remove interrupt for TxDMA channel %d",
2098 		    channel));
2099 		return (-EINVAL);
2100 	}
2101 
2102 	/* Disable TxDMA A.9.6.10 */
2103 	(void) nxge_txdma_channel_disable(nxge, channel);
2104 
2105 	/* The SD is sharing this channel. */
2106 	NXGE_DC_SET(set->shared.map, channel);
2107 	set->shared.count++;
2108 
2109 	/* Soft Reset TxDMA A.9.6.2 */
2110 	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
2111 
2112 	/*
2113 	 * Initialize the DC-specific FZC control registers.
2114 	 * -----------------------------------------------------
2115 	 */
2116 	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
2117 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2118 		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
2119 		return (-EIO);
2120 	}
2121 
2122 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));
2123 
2124 	return (0);
2125 }
2126 
2127 /*
2128  * nxge_hio_rdc_share
2129  *
2130  *	Share an unused RDC channel.
2131  *
2132  * Arguments:
2133  * 	nxge
2134  *
2135  * Notes:
2136  *
2137  * This is the latest version of the procedure to
2138  * Reconfigure an Rx DMA channel:
2139  *
2140  * A.6.3 Reconfigure Rx DMA channel
2141  *	Stop RxMAC		A.9.2.6
2142  *	Drain IPP Port		A.9.3.6
2143  *	Stop and reset RxDMA	A.9.5.3
2144  *
2145  * This procedure will be executed by nxge_init_rxdma_channel() in the
2146  * guest domain:
2147  *
2148  *	Initialize RxDMA	A.9.5.4
2149  *	Reconfigure RxDMA
2150  *	Enable RxDMA		A.9.5.5
2151  *
2152  * We will do this here, since the RDC is a canalis non grata:
2153  *	Enable RxMAC		A.9.2.10
2154  *
2155  * Context:
2156  *	Service domain
2157  */
2158 int
2159 nxge_hio_rdc_share(
2160 	nxge_t *nxge,
2161 	nxge_hio_vr_t *vr,
2162 	int channel)
2163 {
2164 	nxge_grp_set_t *set = &nxge->rx_set;
2165 	nxge_rdc_grp_t *rdc_grp;
2166 
2167 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2168 
2169 	/* Disable interrupts. */
2170 	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2171 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2172 		    "Failed to remove interrupt for RxDMA channel %d",
2173 		    channel));
2174 		return (NXGE_ERROR);
2175 	}
2176 
2177 	/* Stop RxMAC = A.9.2.6 */
2178 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2179 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2180 		    "Failed to disable RxMAC"));
2181 	}
2182 
2183 	/* Drain IPP Port = A.9.3.6 */
2184 	(void) nxge_ipp_drain(nxge);
2185 
2186 	/* Stop and reset RxDMA = A.9.5.3 */
2187 	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2188 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2189 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2190 		    "Failed to disable RxDMA channel %d", channel));
2191 	}
2192 
2193 	/* The SD is sharing this channel. */
2194 	NXGE_DC_SET(set->shared.map, channel);
2195 	set->shared.count++;
2196 
2197 	/* Assert RST: RXDMA_CFIG1[30] = 1 */
2198 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
2199 
2200 	/*
2201 	 * The guest domain will reconfigure the RDC later.
2202 	 *
2203 	 * But in the meantime, we must re-enable the Rx MAC so
2204 	 * that we can start receiving packets again on the
2205 	 * remaining RDCs:
2206 	 *
2207 	 * Enable RxMAC = A.9.2.10
2208 	 */
2209 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2210 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2211 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2212 	}
2213 
2214 	/*
2215 	 * Initialize the DC-specific FZC control registers.
2216 	 * -----------------------------------------------------
2217 	 */
2218 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2219 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2220 		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
2221 		return (-EIO);
2222 	}
2223 
2224 	/*
2225 	 * Update the RDC group.
2226 	 */
2227 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
2228 	NXGE_DC_SET(rdc_grp->map, channel);
2229 
2230 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2231 
2232 	return (0);
2233 }
2234 
2235 /*
2236  * nxge_hio_dc_share
2237  *
2238  *	Share a DMA channel with a guest domain.
2239  *
2240  * Arguments:
2241  * 	nxge
2242  * 	vr	The VR that <channel> will belong to.
2243  * 	type	Tx or Rx.
2244  * 	channel	Channel to share
2245  *
2246  * Notes:
2247  *
2248  * Context:
2249  *	Service domain
2250  */
2251 int
2252 nxge_hio_dc_share(
2253 	nxge_t *nxge,
2254 	nxge_hio_vr_t *vr,
2255 	mac_ring_type_t type,
2256 	int channel)
2257 {
2258 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2259 	nxge_hio_dc_t *dc;
2260 	nxge_grp_t *group;
2261 	int slot;
2262 
2263 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
2264 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2265 
2267 	/* -------------------------------------------------- */
2268 	slot = (type == MAC_RING_TYPE_TX) ?
2269 	    nxge_hio_tdc_share(nxge, channel) :
2270 	    nxge_hio_rdc_share(nxge, vr, channel);
2271 
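	/*
	 * On failure, undo any partial sharing so the channel is
	 * returned to the service domain.
	 */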
2272 	if (slot < 0) {
2273 		if (type == MAC_RING_TYPE_RX) {
2274 			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2275 		} else {
2276 			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2277 		}
2278 		return (slot);
2279 	}
2280 
2281 	MUTEX_ENTER(&nhd->lock);
2282 
2283 	/*
2284 	 * Tag this channel.
2285 	 * --------------------------------------------------
2286 	 */
2287 	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];
2288 
2289 	dc->vr = vr;
2290 	dc->channel = (nxge_channel_t)channel;
2291 
2292 	MUTEX_EXIT(&nhd->lock);
2293 
2294 	/*
2295 	 * vr->[t|r]x_group is used by the service domain to
2296 	 * keep track of its shared DMA channels.
2297 	 */
2298 	MUTEX_ENTER(&nxge->group_lock);
2299 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2300 
2301 	dc->group = group;
2302 	/* Initialize <group>, if necessary */
2303 	if (group->count == 0) {
2304 		group->nxge = nxge;
2305 		group->type = (type == MAC_RING_TYPE_TX) ?
2306 		    VP_BOUND_TX : VP_BOUND_RX;
2307 		group->sequence	= nhd->sequence++;
2308 		group->active = B_TRUE;
2309 	}
2310 
2311 	MUTEX_EXIT(&nxge->group_lock);
2312 
2313 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
2314 	    "DC share: %cDC %d was assigned to slot %d",
2315 	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));
2316 
2317 	nxge_grp_dc_append(nxge, group, dc);
2318 
2319 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));
2320 
2321 	return (0);
2322 }
2323 
2324 /*
2325  * nxge_hio_tdc_unshare
2326  *
2327  *	Unshare a TDC.
2328  *
2329  * Arguments:
2330  * 	nxge
 * 	dev_grpid	The device-level id of the Tx group to which
 *			<channel> is returned.
2331  * 	channel	The channel to unshare (add again).
2332  *
2333  * Notes:
2334  *
2335  * Context:
2336  *	Service domain
2337  */
2338 void
2339 nxge_hio_tdc_unshare(
2340 	nxge_t *nxge,
2341 	int dev_grpid,
2342 	int channel)
2343 {
2344 	nxge_grp_set_t *set = &nxge->tx_set;
2345 	nxge_grp_t *group;
2346 	int grpid;
2347 
2348 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));
2349 
2350 	NXGE_DC_RESET(set->shared.map, channel);
2351 	set->shared.count--;
2352 
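	/*
	 * Convert the device-level group id into an index into this
	 * port's Tx group set, and return the channel to that group.
	 */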
2353 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
2354 	group = set->group[grpid];
2355 
2356 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
2357 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2358 		    "Failed to initialize TxDMA channel %d", channel));
2359 		return;
2360 	}
2361 
2362 	/* Re-add this interrupt. */
2363 	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2364 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2365 		    "Failed to add interrupt for TxDMA channel %d", channel));
2366 	}
2367 
2368 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
2369 }
2370 
2371 /*
2372  * nxge_hio_rdc_unshare
2373  *
2374  *	Unshare an RDC: add it to the SD's RDC groups (tables).
2375  *
2376  * Arguments:
2377  * 	nxge
 * 	dev_grpid	The device-level id of the Rx group (RDC table)
 *			to which <channel> is returned.
2378  * 	channel	The channel to unshare (add again).
2379  *
2380  * Notes:
2381  *
2382  * Context:
2383  *	Service domain
2384  */
2385 void
2386 nxge_hio_rdc_unshare(
2387 	nxge_t *nxge,
2388 	int dev_grpid,
2389 	int channel)
2390 {
2391 	nxge_grp_set_t		*set = &nxge->rx_set;
2392 	nxge_grp_t		*group;
2393 	int			grpid;
2394 
2395 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2396 
2397 	/* Stop RxMAC = A.9.2.6 */
2398 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2399 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2400 		    "Failed to disable RxMAC"));
2401 	}
2402 
2403 	/* Drain IPP Port = A.9.3.6 */
2404 	(void) nxge_ipp_drain(nxge);
2405 
2406 	/* Stop and reset RxDMA = A.9.5.3 */
2407 	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2408 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2409 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2410 		    "Failed to disable RxDMA channel %d", channel));
2411 	}
2412 
2413 	NXGE_DC_RESET(set->shared.map, channel);
2414 	set->shared.count--;
2415 
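	/*
	 * Convert the device-level group id into an index into this
	 * port's Rx group set; the channel is re-added to that group
	 * below.
	 */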
2416 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2417 	group = set->group[grpid];
2418 
2419 	/*
2420 	 * Assert RST: RXDMA_CFIG1[30] = 1
2421 	 *
2422 	 * Initialize RxDMA	A.9.5.4
2423 	 * Reconfigure RxDMA
2424 	 * Enable RxDMA		A.9.5.5
2425 	 */
2426 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2427 		/* Be sure to re-enable the RX MAC. */
2428 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2429 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2430 			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2431 		}
2432 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2433 		    "Failed to initialize RxDMA channel %d", channel));
2434 		return;
2435 	}
2436 
2437 	/*
2438 	 * Enable RxMAC = A.9.2.10
2439 	 */
2440 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2441 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2442 		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2443 		return;
2444 	}
2445 
2446 	/* Re-add this interrupt. */
2447 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2448 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2449 		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
2450 		    "RxDMA channel %d", channel));
2451 	}
2452 
2453 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
2454 }
2455 
2456 /*
2457  * nxge_hio_dc_unshare
2458  *
2459  *	Unshare (reuse) a DMA channel.
2460  *
2461  * Arguments:
2462  * 	nxge
2463  * 	vr	The VR that <channel> belongs to.
2464  * 	type	Tx or Rx.
2465  * 	channel	The DMA channel to reuse.
2466  *
2467  * Notes:
2468  *
2469  * Context:
2470  *	Service domain
2471  */
2472 void
2473 nxge_hio_dc_unshare(
2474 	nxge_t *nxge,
2475 	nxge_hio_vr_t *vr,
2476 	mac_ring_type_t type,
2477 	int channel)
2478 {
2479 	nxge_grp_t *group;
2480 	nxge_hio_dc_t *dc;
2481 
2482 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
2483 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2484 
2485 	/* Unlink the channel from its group. */
2486 	/* -------------------------------------------------- */
2487 	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
2488 	NXGE_DC_RESET(group->map, channel);
2489 	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
2490 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2491 		    "nxge_hio_dc_unshare(%d) failed", channel));
2492 		return;
2493 	}
2494 
2495 	dc->vr = 0;
2496 	dc->cookie = 0;
2497 
2498 	if (type == MAC_RING_TYPE_RX) {
2499 		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2500 	} else {
2501 		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2502 	}
2503 
2504 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
2505 }
2506 
2507 
2508 /*
2509  * nxge_hio_rxdma_bind_intr():
2510  *
2511  *	In the guest domain driver, bind the interrupt group (LDG) and
2512  *	logical device (LDV) state for <channel> to the given rx_rcr_ring_t.
2513  */
2514 
2515 int
2516 nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
2517 {
2518 	nxge_hio_dc_t	*dc;
2519 	nxge_ldgv_t	*control;
2520 	nxge_ldg_t	*group;
2521 	nxge_ldv_t	*device;
2522 
2523 	/*
2524 	 * Find the DMA channel.
2525 	 */
2526 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
2527 		return (NXGE_ERROR);
2528 	}
2529 
2530 	/*
2531 	 * Get the control structure.
2532 	 */
2533 	control = nxge->ldgvp;
2534 	if (control == NULL) {
2535 		return (NXGE_ERROR);
2536 	}
2537 
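	/*
	 * Look up the logical device group (LDG) and logical device
	 * (LDV) that this RDC's interrupt is bound to, and record them
	 * in the RCR ring for the Rx path to reference.
	 */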
2538 	group = &control->ldgp[dc->ldg.vector];
2539 	device = &control->ldvp[dc->ldg.ldsv];
2540 
2541 	MUTEX_ENTER(&ring->lock);
2542 	ring->ldgp = group;
2543 	ring->ldvp = device;
2544 	MUTEX_EXIT(&ring->lock);
2545 
2546 	return (NXGE_OK);
2547 }
2548 #endif	/* if defined(sun4v) */
2549