xref: /titanic_41/usr/src/uts/common/io/nxge/nxge_hio.c (revision 03494a9880d80f834bec10a1e8f0a2f8f7c97bf4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio.c
29  *
30  * This file manages the virtualization resources for Neptune
31  * devices.  That is, it implements a hybrid I/O (HIO) approach in the
32  * Solaris kernel, whereby a guest domain on an LDOMs server may
33  * request & use hardware resources from the service domain.
34  *
35  */
36 
37 #include <sys/mac_provider.h>
38 #include <sys/nxge/nxge_impl.h>
39 #include <sys/nxge/nxge_fzc.h>
40 #include <sys/nxge/nxge_rxdma.h>
41 #include <sys/nxge/nxge_txdma.h>
42 #include <sys/nxge/nxge_hio.h>
43 
44 #define	NXGE_HIO_SHARE_MIN_CHANNELS 2
45 #define	NXGE_HIO_SHARE_MAX_CHANNELS 2
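/*
 * In version 1.0 of Hybrid I/O, each share (VR) is given exactly two
 * RDCs and two TDCs; nxge_hio_share_query() below describes how the
 * hypervisor statically assigns those channels to each VR.
 */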
46 
47 /*
48  * External prototypes
49  */
50 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
51 
52 /* The following functions may be found in nxge_main.c */
53 extern int nxge_m_mmac_remove(void *arg, int slot);
54 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
55 	boolean_t usetbl);
56 
57 /* The following functions may be found in nxge_[t|r]xdma.c */
58 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
59 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
60 
61 /*
62  * Local prototypes
63  */
64 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
65 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
66 static void nxge_grp_dc_map(nxge_grp_t *group);
67 
68 /*
69  * These functions are used by both service & guest domains to
70  * decide whether they're running in an LDOMs/XEN environment
71  * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
72  */
73 
74 /*
75  * nxge_get_environs
76  *
77  *	Figure out if we are in a guest domain or not.
78  *
79  * Arguments:
80  * 	nxge
81  *
82  * Notes:
83  *
84  * Context:
85  *	Any domain
86  */
87 void
88 nxge_get_environs(
89 	nxge_t *nxge)
90 {
91 	char *string;
92 
93 	/*
94 	 * In the beginning, assume that we are running sans LDOMs/XEN.
95 	 */
96 	nxge->environs = SOLARIS_DOMAIN;
97 
98 	/*
99 	 * Are we a hybrid I/O (HIO) guest domain driver?
100 	 */
101 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
102 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
103 	    "niutype", &string)) == DDI_PROP_SUCCESS) {
104 		if (strcmp(string, "n2niu") == 0) {
105 			nxge->environs = SOLARIS_GUEST_DOMAIN;
106 			/* So we can allocate properly-aligned memory. */
107 			nxge->niu_type = N2_NIU;
108 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
109 			    "Hybrid IO-capable guest domain"));
110 		}
111 		ddi_prop_free(string);
112 	}
113 }
114 
115 #if !defined(sun4v)
116 
117 /*
118  * nxge_hio_init
119  *
120  *	Initialize the HIO module of the NXGE driver.
121  *
122  * Arguments:
123  * 	nxge
124  *
125  * Notes:
126  *	This is the non-hybrid I/O version of this function.
127  *
128  * Context:
129  *	Any domain
130  */
131 int
132 nxge_hio_init(nxge_t *nxge)
133 {
134 	nxge_hio_data_t *nhd;
135 	int i;
136 
137 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
138 	if (nhd == 0) {
139 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
140 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
141 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
142 	}
143 
144 	/*
145 	 * Initialize share and ring group structures.
146 	 */
147 	for (i = 0; i < NXGE_MAX_TDCS; i++)
148 		nxge->tdc_is_shared[i] = B_FALSE;
149 
150 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
151 		nxge->tx_hio_groups[i].ghandle = NULL;
152 		nxge->tx_hio_groups[i].nxgep = nxge;
153 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
154 		nxge->tx_hio_groups[i].gindex = 0;
155 		nxge->tx_hio_groups[i].sindex = 0;
156 	}
157 
158 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
159 		nxge->rx_hio_groups[i].ghandle = NULL;
160 		nxge->rx_hio_groups[i].nxgep = nxge;
161 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
162 		nxge->rx_hio_groups[i].gindex = 0;
163 		nxge->rx_hio_groups[i].sindex = 0;
164 		nxge->rx_hio_groups[i].started = B_FALSE;
165 		nxge->rx_hio_groups[i].rdctbl = -1;
166 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
167 	}
168 
169 	nhd->hio.ldoms = B_FALSE;
170 
171 	return (NXGE_OK);
172 }
173 
174 #endif
175 
176 void
177 nxge_hio_uninit(nxge_t *nxge)
178 {
179 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
180 
181 	ASSERT(nxge->nxge_hw_p->ndevs == 0);
182 
183 	if (nhd != NULL) {
184 		MUTEX_DESTROY(&nhd->lock);
185 		KMEM_FREE(nhd, sizeof (*nhd));
186 		nxge->nxge_hw_p->hio = 0;
187 	}
188 }
189 
190 /*
191  * nxge_dci_map
192  *
193  *	Map a DMA channel index to a channel number.
194  *
195  * Arguments:
196  * 	nxge
197  * 	type		The type of channel this is: Tx or Rx.
198  * 	index		The index to convert to a channel number.
199  *
200  * Notes:
201  *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
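 *
 *	For example, if owned.map is 0x34 (channels 2, 4 & 5 owned),
 *	index 0 maps to channel 2, index 1 to channel 4 and index 2 to
 *	channel 5; any larger index returns -1.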
202  *
203  * Context:
204  *	Any domain
205  */
206 int
207 nxge_dci_map(
208 	nxge_t *nxge,
209 	vpc_type_t type,
210 	int index)
211 {
212 	nxge_grp_set_t *set;
213 	int dc;
214 
215 	switch (type) {
216 	case VP_BOUND_TX:
217 		set = &nxge->tx_set;
218 		break;
219 	case VP_BOUND_RX:
220 		set = &nxge->rx_set;
221 		break;
222 	}
223 
224 	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
225 		if ((1 << dc) & set->owned.map) {
226 			if (index == 0)
227 				return (dc);
228 			else
229 				index--;
230 		}
231 	}
232 
233 	return (-1);
234 }
235 
236 /*
237  * ---------------------------------------------------------------------
238  * These are the general-purpose DMA channel group functions.  That is,
239  * these functions are used to manage groups of TDCs or RDCs in an HIO
240  * environment.
241  *
242  * It is also expected that in the future they will be able to manage
243  * Crossbow groups.
244  * ---------------------------------------------------------------------
245  */
246 
247 /*
248  * nxge_grp_cleanup(p_nxge_t nxge)
249  *
250  *	Remove all outstanding groups.
251  *
252  * Arguments:
253  *	nxge
254  */
255 void
256 nxge_grp_cleanup(p_nxge_t nxge)
257 {
258 	nxge_grp_set_t *set;
259 	int i;
260 
261 	MUTEX_ENTER(&nxge->group_lock);
262 
263 	/*
264 	 * Find RX groups that need to be cleaned up.
265 	 */
266 	set = &nxge->rx_set;
267 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
268 		if (set->group[i] != NULL) {
269 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
270 			set->group[i] = NULL;
271 		}
272 	}
273 
274 	/*
275 	 * Find TX groups that need to be cleaned up.
276 	 */
277 	set = &nxge->tx_set;
278 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
279 		if (set->group[i] != NULL) {
280 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
281 			set->group[i] = NULL;
282 		}
283 	}
284 	MUTEX_EXIT(&nxge->group_lock);
285 }
286 
287 
288 /*
289  * nxge_grp_add
290  *
291  *	Add a group to an instance of NXGE.
292  *
293  * Arguments:
294  * 	nxge
295  * 	type	Tx or Rx
296  *
297  * Notes:
298  *
299  * Context:
300  *	Any domain
301  */
302 nxge_grp_t *
303 nxge_grp_add(
304 	nxge_t *nxge,
305 	nxge_grp_type_t type)
306 {
307 	nxge_grp_set_t *set;
308 	nxge_grp_t *group;
309 	int i;
310 
311 	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
312 	group->nxge = nxge;
313 
314 	MUTEX_ENTER(&nxge->group_lock);
315 	switch (type) {
316 	case NXGE_TRANSMIT_GROUP:
317 	case EXT_TRANSMIT_GROUP:
318 		set = &nxge->tx_set;
319 		break;
320 	default:
321 		set = &nxge->rx_set;
322 		break;
323 	}
324 
325 	group->type = type;
326 	group->active = B_TRUE;
327 	group->sequence = set->sequence++;
328 
329 	/* Find an empty slot for this logical group. */
330 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
331 		if (set->group[i] == 0) {
332 			group->index = i;
333 			set->group[i] = group;
334 			NXGE_DC_SET(set->lg.map, i);
335 			set->lg.count++;
336 			break;
337 		}
338 	}
339 	MUTEX_EXIT(&nxge->group_lock);
340 
341 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
342 	    "nxge_grp_add: %cgroup = %d.%d",
343 	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
344 	    nxge->mac.portnum, group->sequence));
345 
346 	return (group);
347 }
348 
349 void
350 nxge_grp_remove(
351 	nxge_t *nxge,
352 	nxge_grp_t *group)	/* The group to remove. */
353 {
354 	nxge_grp_set_t *set;
355 	vpc_type_t type;
356 
357 	MUTEX_ENTER(&nxge->group_lock);
358 	switch (group->type) {
359 	case NXGE_TRANSMIT_GROUP:
360 	case EXT_TRANSMIT_GROUP:
361 		set = &nxge->tx_set;
362 		break;
363 	default:
364 		set = &nxge->rx_set;
365 		break;
366 	}
367 
368 	if (set->group[group->index] != group) {
369 		MUTEX_EXIT(&nxge->group_lock);
370 		return;
371 	}
372 
373 	set->group[group->index] = 0;
374 	NXGE_DC_RESET(set->lg.map, group->index);
375 	set->lg.count--;
376 
377 	/* While inside the mutex, deactivate <group>. */
378 	group->active = B_FALSE;
379 
380 	MUTEX_EXIT(&nxge->group_lock);
381 
382 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
383 	    "nxge_grp_remove(%c.%d.%d) called",
384 	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
385 	    nxge->mac.portnum, group->sequence));
386 
387 	/* Now, remove any DCs which are still active. */
388 	switch (group->type) {
389 	default:
390 		type = VP_BOUND_TX;
391 		break;
392 	case NXGE_RECEIVE_GROUP:
393 	case EXT_RECEIVE_GROUP:
394 		type = VP_BOUND_RX;
395 	}
396 
397 	while (group->dc) {
398 		nxge_grp_dc_remove(nxge, type, group->dc->channel);
399 	}
400 
401 	KMEM_FREE(group, sizeof (*group));
402 }
403 
404 /*
405  * nxge_grp_dc_add
406  *
407  *	Add a DMA channel to a VR/Group.
408  *
409  * Arguments:
410  * 	nxge
 * 	group	The group to add <channel> to.
 * 	type	Tx or Rx
411  * 	channel	The channel to add.
412  * Notes:
413  *
414  * Context:
415  *	Any domain
416  */
417 /* ARGSUSED */
418 int
419 nxge_grp_dc_add(
420 	nxge_t *nxge,
421 	nxge_grp_t *group,	/* The group to add <channel> to. */
422 	vpc_type_t type,	/* Rx or Tx */
423 	int channel)		/* A physical/logical channel number */
424 {
425 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
426 	nxge_hio_dc_t *dc;
427 	nxge_grp_set_t *set;
428 	nxge_status_t status = NXGE_OK;
429 
430 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
431 
432 	if (group == 0)
433 		return (0);
434 
435 	switch (type) {
436 	case VP_BOUND_TX:
437 		set = &nxge->tx_set;
438 		if (channel > NXGE_MAX_TDCS) {
439 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
440 			    "nxge_grp_dc_add: TDC = %d", channel));
441 			return (NXGE_ERROR);
442 		}
443 		break;
444 	case VP_BOUND_RX:
445 		set = &nxge->rx_set;
446 		if (channel > NXGE_MAX_RDCS) {
447 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
448 			    "nxge_grp_dc_add: RDC = %d", channel));
449 			return (NXGE_ERROR);
450 		}
451 		break;
452 
453 	default:
454 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
455 		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
		return (NXGE_ERROR);
456 	}
457 
458 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
459 	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
460 	    type == VP_BOUND_TX ? 't' : 'r',
461 	    nxge->mac.portnum, group->sequence, group->count, channel));
462 
463 	MUTEX_ENTER(&nxge->group_lock);
464 	if (group->active != B_TRUE) {
465 		/* We may be in the process of removing this group. */
466 		MUTEX_EXIT(&nxge->group_lock);
467 		return (NXGE_ERROR);
468 	}
469 	MUTEX_EXIT(&nxge->group_lock);
470 
471 	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
472 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
473 		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
474 		return (NXGE_ERROR);
475 	}
476 
477 	MUTEX_ENTER(&nhd->lock);
478 
479 	if (dc->group) {
480 		MUTEX_EXIT(&nhd->lock);
481 		/* This channel is already in use! */
482 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
483 		    "nxge_grp_dc_add(%d): channel already in group", channel));
484 		return (NXGE_ERROR);
485 	}
486 
487 	dc->next = 0;
488 	dc->page = channel;
489 	dc->channel = (nxge_channel_t)channel;
490 
491 	dc->type = type;
492 	if (type == VP_BOUND_RX) {
493 		dc->init = nxge_init_rxdma_channel;
494 		dc->uninit = nxge_uninit_rxdma_channel;
495 	} else {
496 		dc->init = nxge_init_txdma_channel;
497 		dc->uninit = nxge_uninit_txdma_channel;
498 	}
499 
500 	dc->group = group;
501 
502 	if (isLDOMguest(nxge))
503 		(void) nxge_hio_ldsv_add(nxge, dc);
504 
505 	NXGE_DC_SET(set->owned.map, channel);
506 	set->owned.count++;
507 
508 	MUTEX_EXIT(&nhd->lock);
509 
510 	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
511 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
512 		    "nxge_grp_dc_add(%d): channel init failed", channel));
513 		MUTEX_ENTER(&nhd->lock);
514 		(void) memset(dc, 0, sizeof (*dc));
515 		NXGE_DC_RESET(set->owned.map, channel);
516 		set->owned.count--;
517 		MUTEX_EXIT(&nhd->lock);
518 		return (NXGE_ERROR);
519 	}
520 
521 	nxge_grp_dc_append(nxge, group, dc);
522 
523 	if (type == VP_BOUND_TX) {
524 		MUTEX_ENTER(&nhd->lock);
525 		nxge->tdc_is_shared[channel] = B_FALSE;
526 		MUTEX_EXIT(&nhd->lock);
527 	}
528 
529 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
530 
531 	return ((int)status);
532 }
533 
534 void
535 nxge_grp_dc_remove(
536 	nxge_t *nxge,
537 	vpc_type_t type,
538 	int channel)
539 {
540 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
541 	nxge_hio_dc_t *dc;
542 	nxge_grp_set_t *set;
543 	nxge_grp_t *group;
544 
545 	dc_uninit_t uninit;
546 
547 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
548 
549 	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
550 		goto nxge_grp_dc_remove_exit;
551 
552 	if ((dc->group == NULL) && (dc->next == 0) &&
553 	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
554 		goto nxge_grp_dc_remove_exit;
555 	}
556 
557 	group = (nxge_grp_t *)dc->group;
558 
559 	if (isLDOMguest(nxge)) {
560 		(void) nxge_hio_intr_remove(nxge, type, channel);
561 	}
562 
563 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
564 	    "DC remove: group = %d.%d.%d, %cdc %d",
565 	    nxge->mac.portnum, group->sequence, group->count,
566 	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));
567 
568 	MUTEX_ENTER(&nhd->lock);
569 
570 	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
571 
572 	/* Remove the DC from its group. */
573 	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
574 		MUTEX_EXIT(&nhd->lock);
575 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
576 		    "nxge_grp_dc_remove(%d) failed", channel));
577 		goto nxge_grp_dc_remove_exit;
578 	}
579 
580 	uninit = dc->uninit;
581 	channel = dc->channel;
582 
583 	NXGE_DC_RESET(set->owned.map, channel);
584 	set->owned.count--;
585 
586 	(void) memset(dc, 0, sizeof (*dc));
587 
588 	MUTEX_EXIT(&nhd->lock);
589 
590 	(*uninit)(nxge, channel);
591 
592 nxge_grp_dc_remove_exit:
593 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
594 }
595 
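/*
 * nxge_grp_dc_find
 *
 *	Find the data structure for a DMA channel.
 *
 * Arguments:
 * 	nxge
 * 	type	Rx or Tx
 * 	channel	The channel number to look up
 *
 * Notes:
 *	In the service domain, the TDC/RDC arrays are indexed directly
 *	by channel number.  In a guest domain, we search for a channel
 *	that is bound to one of our own VRs.
 *
 * Context:
 *	Any domain
 */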
596 nxge_hio_dc_t *
597 nxge_grp_dc_find(
598 	nxge_t *nxge,
599 	vpc_type_t type,	/* Rx or Tx */
600 	int channel)
601 {
602 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
603 	nxge_hio_dc_t *current;
604 
605 	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
606 
607 	if (!isLDOMguest(nxge)) {
608 		return (&current[channel]);
609 	} else {
610 		/* We're in a guest domain. */
611 		int i, limit = (type == VP_BOUND_TX) ?
612 		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;
613 
614 		MUTEX_ENTER(&nhd->lock);
615 		for (i = 0; i < limit; i++, current++) {
616 			if (current->channel == channel) {
617 				if (current->vr && current->vr->nxge ==
618 				    (uintptr_t)nxge) {
619 					MUTEX_EXIT(&nhd->lock);
620 					return (current);
621 				}
622 			}
623 		}
624 		MUTEX_EXIT(&nhd->lock);
625 	}
626 
627 	return (0);
628 }
629 
630 /*
631  * nxge_grp_dc_append
632  *
633  *	Append a DMA channel to a group.
634  *
635  * Arguments:
636  * 	nxge
637  * 	group	The group to append to
638  * 	dc	The DMA channel to append
639  *
640  * Notes:
641  *
642  * Context:
643  *	Any domain
644  */
645 static
646 void
647 nxge_grp_dc_append(
648 	nxge_t *nxge,
649 	nxge_grp_t *group,
650 	nxge_hio_dc_t *dc)
651 {
652 	MUTEX_ENTER(&nxge->group_lock);
653 
654 	if (group->dc == 0) {
655 		group->dc = dc;
656 	} else {
657 		nxge_hio_dc_t *current = group->dc;
658 		do {
659 			if (current->next == 0) {
660 				current->next = dc;
661 				break;
662 			}
663 			current = current->next;
664 		} while (current);
665 	}
666 
667 	NXGE_DC_SET(group->map, dc->channel);
668 
669 	nxge_grp_dc_map(group);
670 	group->count++;
671 
672 	MUTEX_EXIT(&nxge->group_lock);
673 }
674 
675 /*
676  * nxge_grp_dc_unlink
677  *
678  *	Unlink a DMA channel from its linked list (group).
679  *
680  * Arguments:
681  * 	nxge
682  * 	group	The group (linked list) to unlink from
683  * 	channel	The DMA channel to unlink
684  *
685  * Notes:
686  *
687  * Context:
688  *	Any domain
689  */
690 nxge_hio_dc_t *
691 nxge_grp_dc_unlink(
692 	nxge_t *nxge,
693 	nxge_grp_t *group,
694 	int channel)
695 {
696 	nxge_hio_dc_t *current, *previous;
697 
698 	MUTEX_ENTER(&nxge->group_lock);
699 
700 	if (group == NULL) {
701 		MUTEX_EXIT(&nxge->group_lock);
702 		return (0);
703 	}
704 
705 	if ((current = group->dc) == 0) {
706 		MUTEX_EXIT(&nxge->group_lock);
707 		return (0);
708 	}
709 
710 	previous = 0;
711 	do {
712 		if (current->channel == channel) {
713 			if (previous)
714 				previous->next = current->next;
715 			else
716 				group->dc = current->next;
717 			break;
718 		}
719 		previous = current;
720 		current = current->next;
721 	} while (current);
722 
723 	if (current == 0) {
724 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
725 		    "DC unlink: DC %d not found", channel));
726 	} else {
727 		current->next = 0;
728 		current->group = 0;
729 
730 		NXGE_DC_RESET(group->map, channel);
731 		group->count--;
732 	}
733 
734 	nxge_grp_dc_map(group);
735 
736 	MUTEX_EXIT(&nxge->group_lock);
737 
738 	return (current);
739 }
740 
741 /*
742  * nxge_grp_dc_map
743  *
744  *	Map a linked list to an array of channel numbers.
745  *
746  * Arguments:
747  * 	nxge
748  * 	group	The group to remap.
749  *
750  * Notes:
751  *	It is expected that the caller will hold the correct mutex.
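 *	The legend array ends up listing the channel numbers in the same
 *	order as the group's linked list of DCs.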
752  *
753  * Context:
754  *	Service domain
755  */
756 void
757 nxge_grp_dc_map(
758 	nxge_grp_t *group)
759 {
760 	nxge_channel_t *legend;
761 	nxge_hio_dc_t *dc;
762 
763 	(void) memset(group->legend, 0, sizeof (group->legend));
764 
765 	legend = group->legend;
766 	dc = group->dc;
767 	while (dc) {
768 		*legend = dc->channel;
769 		legend++;
770 		dc = dc->next;
771 	}
772 }
773 
774 /*
775  * ---------------------------------------------------------------------
776  * These are HIO debugging functions.
777  * ---------------------------------------------------------------------
778  */
779 
780 /*
781  * nxge_delay
782  *
783  *	Delay <seconds> number of seconds.
784  *
785  * Arguments:
786  * 	seconds	The number of seconds to delay.
789  *
790  * Notes:
791  *	This is a developer-only function.
792  *
793  * Context:
794  *	Any domain
795  */
796 void
797 nxge_delay(
798 	int seconds)
799 {
800 	delay(drv_usectohz(seconds * 1000000));
801 }
802 
803 static dmc_reg_name_t rx_names[] = {
804 	{ "RXDMA_CFIG1",	0 },
805 	{ "RXDMA_CFIG2",	8 },
806 	{ "RBR_CFIG_A",		0x10 },
807 	{ "RBR_CFIG_B",		0x18 },
808 	{ "RBR_KICK",		0x20 },
809 	{ "RBR_STAT",		0x28 },
810 	{ "RBR_HDH",		0x30 },
811 	{ "RBR_HDL",		0x38 },
812 	{ "RCRCFIG_A",		0x40 },
813 	{ "RCRCFIG_B",		0x48 },
814 	{ "RCRSTAT_A",		0x50 },
815 	{ "RCRSTAT_B",		0x58 },
816 	{ "RCRSTAT_C",		0x60 },
817 	{ "RX_DMA_ENT_MSK",	0x68 },
818 	{ "RX_DMA_CTL_STAT",	0x70 },
819 	{ "RCR_FLSH",		0x78 },
820 	{ "RXMISC",		0x90 },
821 	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
822 	{ 0, -1 }
823 };
824 
825 static dmc_reg_name_t tx_names[] = {
826 	{ "Tx_RNG_CFIG",	0 },
827 	{ "Tx_RNG_HDL",		0x10 },
828 	{ "Tx_RNG_KICK",	0x18 },
829 	{ "Tx_ENT_MASK",	0x20 },
830 	{ "Tx_CS",		0x28 },
831 	{ "TxDMA_MBH",		0x30 },
832 	{ "TxDMA_MBL",		0x38 },
833 	{ "TxDMA_PRE_ST",	0x40 },
834 	{ "Tx_RNG_ERR_LOGH",	0x48 },
835 	{ "Tx_RNG_ERR_LOGL",	0x50 },
836 	{ "TDMC_INTR_DBG",	0x60 },
837 	{ "Tx_CS_DBG",		0x68 },
838 	{ 0, -1 }
839 };
840 
841 /*
842  * nxge_xx2str
843  *
844  *	Translate a register address into a string.
845  *
846  * Arguments:
847  * 	offset	The address of the register to translate.
848  *
849  * Notes:
850  *	These are developer-only functions.
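 *
 *	For example, an Rx offset that masks down to 0x70 translates to
 *	"RX_DMA_CTL_STAT"; an offset not found in the table returns 0.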
851  *
852  * Context:
853  *	Any domain
854  */
855 const char *
856 nxge_rx2str(
857 	int offset)
858 {
859 	dmc_reg_name_t *reg = &rx_names[0];
860 
861 	offset &= DMA_CSR_MASK;
862 
863 	while (reg->name) {
864 		if (offset == reg->offset)
865 			return (reg->name);
866 		reg++;
867 	}
868 
869 	return (0);
870 }
871 
872 const char *
873 nxge_tx2str(
874 	int offset)
875 {
876 	dmc_reg_name_t *reg = &tx_names[0];
877 
878 	offset &= DMA_CSR_MASK;
879 
880 	while (reg->name) {
881 		if (offset == reg->offset)
882 			return (reg->name);
883 		reg++;
884 	}
885 
886 	return (0);
887 }
888 
889 /*
890  * nxge_ddi_perror
891  *
892  *	Map a DDI error number to a string.
893  *
894  * Arguments:
895  * 	ddi_error	The DDI error number to map.
896  *
897  * Notes:
898  *
899  * Context:
900  *	Any domain
901  */
902 const char *
903 nxge_ddi_perror(
904 	int ddi_error)
905 {
906 	switch (ddi_error) {
907 	case DDI_SUCCESS:
908 		return ("DDI_SUCCESS");
909 	case DDI_FAILURE:
910 		return ("DDI_FAILURE");
911 	case DDI_NOT_WELL_FORMED:
912 		return ("DDI_NOT_WELL_FORMED");
913 	case DDI_EAGAIN:
914 		return ("DDI_EAGAIN");
915 	case DDI_EINVAL:
916 		return ("DDI_EINVAL");
917 	case DDI_ENOTSUP:
918 		return ("DDI_ENOTSUP");
919 	case DDI_EPENDING:
920 		return ("DDI_EPENDING");
921 	case DDI_ENOMEM:
922 		return ("DDI_ENOMEM");
923 	case DDI_EBUSY:
924 		return ("DDI_EBUSY");
925 	case DDI_ETRANSPORT:
926 		return ("DDI_ETRANSPORT");
927 	case DDI_ECONTEXT:
928 		return ("DDI_ECONTEXT");
929 	default:
930 		return ("Unknown error");
931 	}
932 }
933 
934 /*
935  * ---------------------------------------------------------------------
936  * These are Sun4v HIO function definitions
937  * ---------------------------------------------------------------------
938  */
939 
940 #if defined(sun4v)
941 
942 /*
943  * Local prototypes
944  */
945 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
946 static void nxge_hio_unshare(nxge_hio_vr_t *);
947 
948 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
949 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
950 
951 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
952 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
953 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
954 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
955     mac_ring_type_t, int);
956 
957 /*
958  * nxge_hio_init
959  *
960  *	Initialize the HIO module of the NXGE driver.
961  *
962  * Arguments:
963  * 	nxge
964  *
965  * Notes:
966  *
967  * Context:
968  *	Any domain
969  */
970 int
971 nxge_hio_init(
972 	nxge_t *nxge)
973 {
974 	nxge_hio_data_t *nhd;
975 	int i, region;
976 
977 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
978 	if (nhd == 0) {
979 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
980 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
981 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
982 	}
983 
984 	if ((nxge->environs == SOLARIS_DOMAIN) &&
985 	    (nxge->niu_type == N2_NIU)) {
986 		if (nxge->niu_hsvc_available == B_TRUE) {
987 			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
988 			if (niu_hsvc->hsvc_major == 1 &&
989 			    niu_hsvc->hsvc_minor == 1)
990 				nxge->environs = SOLARIS_SERVICE_DOMAIN;
991 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
992 			    "nxge_hio_init: hypervisor services "
993 			    "version %d.%d",
994 			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
995 		}
996 	}
997 
998 	/*
999 	 * Initialize share and ring group structures.
1000 	 */
1001 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1002 		nxge->tx_hio_groups[i].ghandle = NULL;
1003 		nxge->tx_hio_groups[i].nxgep = nxge;
1004 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1005 		nxge->tx_hio_groups[i].gindex = 0;
1006 		nxge->tx_hio_groups[i].sindex = 0;
1007 	}
1008 
1009 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1010 		nxge->rx_hio_groups[i].ghandle = NULL;
1011 		nxge->rx_hio_groups[i].nxgep = nxge;
1012 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1013 		nxge->rx_hio_groups[i].gindex = 0;
1014 		nxge->rx_hio_groups[i].sindex = 0;
1015 		nxge->rx_hio_groups[i].started = B_FALSE;
1016 		nxge->rx_hio_groups[i].rdctbl = -1;
1017 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
1018 	}
1019 
1020 	if (!isLDOMs(nxge)) {
1021 		nhd->hio.ldoms = B_FALSE;
1022 		return (NXGE_OK);
1023 	}
1024 
1025 	nhd->hio.ldoms = B_TRUE;
1026 
1027 	/*
1028 	 * Fill in what we can.
1029 	 */
1030 	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
1031 		nhd->vr[region].region = region;
1032 	}
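	/*
	 * Two of the regions (FUNC0_VIR0 & FUNC2_VIR0) belong to the NIU
	 * ports themselves (see nxge_hio_vr_share()), so only
	 * NXGE_VR_SR_MAX - 2 regions are available to be shared.
	 */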
1033 	nhd->vrs = NXGE_VR_SR_MAX - 2;
1034 
1035 	/*
1036 	 * Initialize the share structures.
1037 	 */
1038 	for (i = 0; i < NXGE_MAX_TDCS; i++)
1039 		nxge->tdc_is_shared[i] = B_FALSE;
1040 
1041 	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
1042 		nxge->shares[i].nxgep = nxge;
1043 		nxge->shares[i].index = 0;
1044 		nxge->shares[i].vrp = NULL;
1045 		nxge->shares[i].tmap = 0;
1046 		nxge->shares[i].rmap = 0;
1047 		nxge->shares[i].rxgroup = 0;
1048 		nxge->shares[i].active = B_FALSE;
1049 	}
1050 
1051 	/* Fill in the HV HIO function pointers. */
1052 	nxge_hio_hv_init(nxge);
1053 
1054 	if (isLDOMservice(nxge)) {
1055 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
1056 		    "Hybrid IO-capable service domain"));
1057 		return (NXGE_OK);
1058 	} else {
1059 		/*
1060 		 * isLDOMguest(nxge) == B_TRUE
1061 		 */
1062 		nx_vio_fp_t *vio;
1063 		nhd->type = NXGE_HIO_TYPE_GUEST;
1064 
1065 		vio = &nhd->hio.vio;
1066 		vio->__register = (vio_net_resource_reg_t)
1067 		    modgetsymvalue("vio_net_resource_reg", 0);
1068 		vio->unregister = (vio_net_resource_unreg_t)
1069 		    modgetsymvalue("vio_net_resource_unreg", 0);
1070 
1071 		if (vio->__register == 0 || vio->unregister == 0) {
1072 			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
1073 			return (NXGE_ERROR);
1074 		}
1075 	}
1076 
1077 	return (0);
1078 }
1079 #endif /* defined(sun4v) */
1080 
1081 static int
1082 nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
1083     const uint8_t *macaddr)
1084 {
1085 	int rv;
1086 	nxge_rdc_grp_t *group;
1087 
1088 	mutex_enter(nxge->genlock);
1089 
1090 	/*
1091 	 * Initialize the NXGE RDC table data structure.
1092 	 */
1093 	group = &nxge->pt_config.rdc_grps[g->rdctbl];
1094 	if (!group->flag) {
1095 		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
1096 		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
1097 		group->flag = B_TRUE;	/* This group has been configured. */
1098 	}
1099 
1100 	mutex_exit(nxge->genlock);
1101 
1102 	/*
1103 	 * Add the MAC address.
1104 	 */
1105 	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
1106 	    g->rdctbl, B_TRUE)) != 0) {
1107 		return (rv);
1108 	}
1109 
1110 	mutex_enter(nxge->genlock);
1111 	g->n_mac_addrs++;
1112 	mutex_exit(nxge->genlock);
1113 	return (0);
1114 }
1115 
1116 static int
1117 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
1118 {
1119 	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1120 	p_nxge_t nxge = group->nxgep;
1121 	int rv;
1122 	nxge_hio_vr_t *vr;	/* The Virtualization Region */
1123 
1124 	ASSERT(group->type == MAC_RING_TYPE_RX);
1125 
1126 	mutex_enter(nxge->genlock);
1127 
1128 	/*
1129 	 * If the group is associated with a VR, then only one
1130 	 * address may be assigned to the group.
1131 	 */
1132 	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
1133 	if ((vr != NULL) && (group->n_mac_addrs)) {
1134 		mutex_exit(nxge->genlock);
1135 		return (ENOSPC);
1136 	}
1137 
1138 	mutex_exit(nxge->genlock);
1139 
1140 	/*
1141 	 * Program the mac address for the group.
1142 	 */
1143 	if ((rv = nxge_hio_group_mac_add(nxge, group,
1144 	    mac_addr)) != 0) {
1145 		return (rv);
1146 	}
1147 
1148 	return (0);
1149 }
1150 
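/*
 * find_mac_slot
 *
 *	Find the slot in the MMAC pool that holds <mac_addr>.
 *	Returns -1 if the address is not currently programmed.
 */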
1151 static int
1152 find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
1153 {
1154 	int i;
1155 	for (i = 0; i <= mmac_info->num_mmac; i++) {
1156 		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
1157 		    ETHERADDRL) == 0) {
1158 			return (i);
1159 		}
1160 	}
1161 	return (-1);
1162 }
1163 
1164 /* ARGSUSED */
1165 static int
1166 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
1167 {
1168 	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1169 	p_nxge_t nxge = group->nxgep;
1170 	nxge_mmac_t *mmac_info;
1171 	int rv, slot;
1172 
1173 	ASSERT(group->type == MAC_RING_TYPE_RX);
1174 
1175 	mutex_enter(nxge->genlock);
1176 
1177 	mmac_info = &nxge->nxge_mmac_info;
1178 	slot = find_mac_slot(mmac_info, mac_addr);
1179 	if (slot < 0) {
1180 		mutex_exit(nxge->genlock);
1181 		return (EINVAL);
1182 	}
1183 
1184 	mutex_exit(nxge->genlock);
1185 
1186 	/*
1187 	 * Remove the mac address for the group
1188 	 */
1189 	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
1190 		return (rv);
1191 	}
1192 
1193 	mutex_enter(nxge->genlock);
1194 	group->n_mac_addrs--;
1195 	mutex_exit(nxge->genlock);
1196 
1197 	return (0);
1198 }
1199 
1200 static int
1201 nxge_hio_group_start(mac_group_driver_t gdriver)
1202 {
1203 	nxge_ring_group_t	*group = (nxge_ring_group_t *)gdriver;
1204 	int			rdctbl;
1205 	int			dev_gindex;
1206 
1207 	ASSERT(group->type == MAC_RING_TYPE_RX);
1208 
1209 #ifdef later
1210 	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
1211 #endif
1212 	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
1213 		return (ENXIO);
1214 
1215 	mutex_enter(group->nxgep->genlock);
1216 	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1217 	    group->gindex;
1218 
1219 	/*
1220 	 * Get an rdc table for this group.
1221 	 * Group ID is given by the caller, and that's the group it needs
1222 	 * to bind to.  The default group is already bound when the driver
1223 	 * was attached.
1224 	 *
1225 	 * For Group 0, it's RDC table was allocated at attach time
1226 	 * For group 0, its RDC table was already allocated at attach time,
1227 	 * so there is no need to allocate a new one.
1228 	if (group->gindex != 0) {
1229 		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
1230 		    dev_gindex, B_TRUE);
1231 		if (rdctbl < 0) {
1232 			mutex_exit(group->nxgep->genlock);
1233 			return (rdctbl);
1234 		}
1235 	} else {
1236 		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
1237 	}
1238 
1239 	group->rdctbl = rdctbl;
1240 
1241 	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdctbl);
1242 
1243 	group->started = B_TRUE;
1244 	mutex_exit(group->nxgep->genlock);
1245 
1246 	return (0);
1247 }
1248 
1249 static void
1250 nxge_hio_group_stop(mac_group_driver_t gdriver)
1251 {
1252 	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1253 
1254 	ASSERT(group->type == MAC_RING_TYPE_RX);
1255 
1256 	mutex_enter(group->nxgep->genlock);
1257 	group->started = B_FALSE;
1258 
1259 	/*
1260 	 * Unbind the RDC table previously bound for this group.
1261 	 *
1262 	 * Since RDC table for group 0 was allocated at attach
1263 	 * time, no need to unbind the table here.
1264 	 */
1265 	if (group->gindex != 0)
1266 		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1267 
1268 	mutex_exit(group->nxgep->genlock);
1269 }
1270 
1271 /* ARGSUSED */
1272 void
1273 nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
1274 	mac_group_info_t *infop, mac_group_handle_t ghdl)
1275 {
1276 	p_nxge_t		nxgep = (p_nxge_t)arg;
1277 	nxge_ring_group_t	*group;
1278 	int			dev_gindex;
1279 
1280 	switch (type) {
1281 	case MAC_RING_TYPE_RX:
1282 		group = &nxgep->rx_hio_groups[groupid];
1283 		group->nxgep = nxgep;
1284 		group->ghandle = ghdl;
1285 		group->gindex = groupid;
1286 		group->sindex = 0;	/* not yet bound to a share */
1287 
1288 		dev_gindex = nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1289 		    groupid;
1290 
1291 		infop->mgi_driver = (mac_group_driver_t)group;
1292 		infop->mgi_start = nxge_hio_group_start;
1293 		infop->mgi_stop = nxge_hio_group_stop;
1294 		infop->mgi_addmac = nxge_hio_add_mac;
1295 		infop->mgi_remmac = nxge_hio_rem_mac;
1296 		infop->mgi_count =
1297 		    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
1298 		break;
1299 
1300 	case MAC_RING_TYPE_TX:
1301 		/*
1302 		 * 'groupid' for TX should be incremented by one since
1303 		 * the default group (groupid 0) is not known by the MAC layer
1304 		 */
1305 		group = &nxgep->tx_hio_groups[groupid + 1];
1306 		group->nxgep = nxgep;
1307 		group->ghandle = ghdl;
1308 		group->gindex = groupid + 1;
1309 		group->sindex = 0;	/* not yet bound to a share */
1310 
1311 		infop->mgi_driver = (mac_group_driver_t)group;
1312 		infop->mgi_start = NULL;
1313 		infop->mgi_stop = NULL;
1314 		infop->mgi_addmac = NULL;	/* not needed */
1315 		infop->mgi_remmac = NULL;	/* not needed */
1316 		/* no rings associated with group initially */
1317 		infop->mgi_count = 0;
1318 		break;
1319 	}
1320 }
1321 
1322 #if defined(sun4v)
1323 
1324 int
1325 nxge_hio_share_assign(
1326 	nxge_t *nxge,
1327 	uint64_t cookie,
1328 	res_map_t *tmap,
1329 	res_map_t *rmap,
1330 	nxge_hio_vr_t *vr)
1331 {
1332 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1333 	uint64_t slot, hv_rv;
1334 	nxge_hio_dc_t *dc;
1335 	nxhv_vr_fp_t *fp;
1336 	int i;
1337 
1338 	/*
1339 	 * Ask the Hypervisor to set up the VR for us
1340 	 */
1341 	fp = &nhd->hio.vr;
1342 	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1343 		NXGE_ERROR_MSG((nxge, HIO_CTL,
1344 		    "nxge_hio_share_assign: "
1345 		    "vr->assign() returned %d", hv_rv));
1346 		return (-EIO);
1347 	}
1348 
1349 	/*
1350 	 * For each shared TDC, ask the HV to find us an empty slot.
1351 	 * -----------------------------------------------------
1352 	 */
1353 	dc = vr->tx_group.dc;
1354 	for (i = 0; i < NXGE_MAX_TDCS; i++) {
1355 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1356 		while (dc) {
1357 			hv_rv = (*tx->assign)
1358 			    (vr->cookie, dc->channel, &slot);
1359 			if (hv_rv != 0) {
1360 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1361 				    "nxge_hio_share_assign: "
1362 				    "tx->assign(%x, %d) failed: %ld",
1363 				    vr->cookie, dc->channel, hv_rv));
1364 				return (-EIO);
1365 			}
1366 
1367 			dc->cookie = vr->cookie;
1368 			dc->page = (vp_channel_t)slot;
1369 
1370 			/* Inform the caller about the slot chosen. */
1371 			(*tmap) |= 1 << slot;
1372 
1373 			dc = dc->next;
1374 		}
1375 	}
1376 
1377 	/*
1378 	 * For each shared RDC, ask the HV to find us an empty slot.
1379 	 * -----------------------------------------------------
1380 	 */
1381 	dc = vr->rx_group.dc;
1382 	for (i = 0; i < NXGE_MAX_RDCS; i++) {
1383 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1384 		while (dc) {
1385 			hv_rv = (*rx->assign)
1386 			    (vr->cookie, dc->channel, &slot);
1387 			if (hv_rv != 0) {
1388 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1389 				    "nxge_hio_share_assign: "
1390 				    "rx->assign(%x, %d) failed: %ld",
1391 				    vr->cookie, dc->channel, hv_rv));
1392 				return (-EIO);
1393 			}
1394 
1395 			dc->cookie = vr->cookie;
1396 			dc->page = (vp_channel_t)slot;
1397 
1398 			/* Inform the caller about the slot chosen. */
1399 			(*rmap) |= 1 << slot;
1400 
1401 			dc = dc->next;
1402 		}
1403 	}
1404 
1405 	return (0);
1406 }
1407 
1408 void
1409 nxge_hio_share_unassign(
1410 	nxge_hio_vr_t *vr)
1411 {
1412 	nxge_t *nxge = (nxge_t *)vr->nxge;
1413 	nxge_hio_data_t *nhd;
1414 	nxge_hio_dc_t *dc;
1415 	nxhv_vr_fp_t *fp;
1416 	uint64_t hv_rv;
1417 
1418 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1419 
1420 	dc = vr->tx_group.dc;
1421 	while (dc) {
1422 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1423 		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1424 		if (hv_rv != 0) {
1425 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1426 			    "nxge_hio_share_unassign: "
1427 			    "tx->unassign(%x, %d) failed: %ld",
1428 			    vr->cookie, dc->page, hv_rv));
1429 		}
1430 		dc = dc->next;
1431 	}
1432 
1433 	dc = vr->rx_group.dc;
1434 	while (dc) {
1435 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1436 		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1437 		if (hv_rv != 0) {
1438 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1439 			    "nxge_hio_share_unassign: "
1440 			    "rx->unassign(%x, %d) failed: %ld",
1441 			    vr->cookie, dc->page, hv_rv));
1442 		}
1443 		dc = dc->next;
1444 	}
1445 
1446 	fp = &nhd->hio.vr;
1447 	if (fp->unassign) {
1448 		hv_rv = (*fp->unassign)(vr->cookie);
1449 		if (hv_rv != 0) {
1450 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1451 			    "nxge_hio_share_unassign: "
1452 			    "vr->unassign(%x) failed: %ld",
1453 			    vr->cookie, hv_rv));
1454 		}
1455 	}
1456 }
1457 
1458 int
1459 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1460 {
1461 	p_nxge_t		nxge = (p_nxge_t)arg;
1462 	nxge_share_handle_t	*shp;
1463 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1464 	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1465 
1466 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1467 
1468 	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1469 	    nhd->hio.rx.assign == 0) {
1470 		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1471 		return (EIO);
1472 	}
1473 
1474 	/*
1475 	 * Get a VR.
1476 	 */
1477 	if ((vr = nxge_hio_vr_share(nxge)) == 0)
1478 		return (EAGAIN);
1479 
1480 	shp = &nxge->shares[vr->region];
1481 	shp->nxgep = nxge;
1482 	shp->index = vr->region;
1483 	shp->vrp = (void *)vr;
1484 	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
1485 	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
1486 	shp->active = B_FALSE;		/* not bound yet */
1487 
1488 	*shandle = (mac_share_handle_t)shp;
1489 
1490 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1491 	return (0);
1492 }
1493 
1494 
1495 void
1496 nxge_hio_share_free(mac_share_handle_t shandle)
1497 {
1498 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1499 	nxge_hio_vr_t		*vr;
1500 
1501 	/*
1502 	 * Clear internal handle state.
1503 	 */
1504 	vr = shp->vrp;
1505 	shp->vrp = (void *)NULL;
1506 	shp->index = 0;
1507 	shp->tmap = 0;
1508 	shp->rmap = 0;
1509 	shp->rxgroup = 0;
1510 	shp->active = B_FALSE;
1511 
1512 	/*
1513 	 * Free VR resource.
1514 	 */
1515 	nxge_hio_unshare(vr);
1516 }
1517 
1518 
1519 void
1520 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1521     mac_ring_handle_t *rings, uint_t *n_rings)
1522 {
1523 	nxge_t			*nxge;
1524 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1525 	nxge_ring_handle_t	*rh;
1526 	uint32_t		offset;
1527 
1528 	nxge = shp->nxgep;
1529 
1530 	switch (type) {
1531 	case MAC_RING_TYPE_RX:
1532 		rh = nxge->rx_ring_handles;
1533 		offset = nxge->pt_config.hw_config.start_rdc;
1534 		break;
1535 
1536 	case MAC_RING_TYPE_TX:
1537 		rh = nxge->tx_ring_handles;
1538 		offset = nxge->pt_config.hw_config.tdc.start;
1539 		break;
1540 	}
1541 
1542 	/*
1543 	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
1544 	 * but the HV has statically assigned the channels like so:
1545 	 * VR0: RDC0 & RDC1
1546 	 * VR1: RDC2 & RDC3, etc.
1547 	 * The TDCs are assigned in exactly the same way.
1548 	 */
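	/*
	 * For example, share (VR) index 2 with a start offset of 0 maps
	 * to ring handles 4 and 5.
	 */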
1549 	if (rings != NULL) {
1550 		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1551 		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1552 	}
1553 	if (n_rings != NULL) {
1554 		*n_rings = 2;
1555 	}
1556 }
1557 
1558 int
1559 nxge_hio_share_add_group(mac_share_handle_t shandle,
1560     mac_group_driver_t ghandle)
1561 {
1562 	nxge_t			*nxge;
1563 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1564 	nxge_ring_group_t	*rg = (nxge_ring_group_t *)ghandle;
1565 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1566 	nxge_grp_t		*group;
1567 	int			i;
1568 
1569 	if (rg->sindex != 0) {
1570 		/* the group is already bound to a share */
1571 		return (EALREADY);
1572 	}
1573 
1574 	/*
1575 	 * If we are adding a group 0 to a share, this
1576 	 * is not correct.
1577 	 */
1578 	ASSERT(rg->gindex != 0);
1579 
1580 	nxge = rg->nxgep;
1581 	vr = shp->vrp;
1582 
1583 	switch (rg->type) {
1584 	case MAC_RING_TYPE_RX:
1585 		/*
1586 		 * Make sure that the group has the right rings associated
1587 		 * for the share. In version 1.0, we may only give a VR
1588 		 * 2 RDCs.  Not only that, but the HV has statically
1589 		 * assigned the channels like so:
1590 		 * VR0: RDC0 & RDC1
1591 		 * VR1: RDC2 & RDC3, etc.
1592 		 */
1593 		group = nxge->rx_set.group[rg->gindex];
1594 
1595 		if (group->count > 2) {
1596 			/* a share can have at most 2 rings */
1597 			return (EINVAL);
1598 		}
1599 
1600 		for (i = 0; i < NXGE_MAX_RDCS; i++) {
1601 			if (group->map & (1 << i)) {
1602 				if ((i != shp->index * 2) &&
1603 				    (i != (shp->index * 2 + 1))) {
1604 					/*
1605 					 * An attempt was made to bind a
1606 					 * group with invalid rings to this share.
1607 					 */
1608 					return (EINVAL);
1609 				}
1610 			}
1611 		}
1612 
1613 		rg->sindex = vr->region;
1614 		vr->rdc_tbl = rg->rdctbl;
1615 		shp->rxgroup = vr->rdc_tbl;
1616 		break;
1617 
1618 	case MAC_RING_TYPE_TX:
1619 		/*
1620 		 * Make sure that the group has the right rings associated
1621 		 * for the share. In version 1.0, we may only give a VR
1622 		 * 2 TDCs.  Not only that, but the HV has statically
1623 		 * assigned the channels like so:
1624 		 * VR0: TDC0 & TDC1
1625 		 * VR1: TDC2 & TDC3, etc.
1626 		 */
1627 		group = nxge->tx_set.group[rg->gindex];
1628 
1629 		if (group->count > 2) {
1630 			/* a share can have at most 2 rings */
1631 			return (EINVAL);
1632 		}
1633 
1634 		for (i = 0; i < NXGE_MAX_TDCS; i++) {
1635 			if (group->map & (1 << i)) {
1636 				if ((i != shp->index * 2) &&
1637 				    (i != (shp->index * 2 + 1))) {
1638 					/*
1639 					 * An attempt was made to bind a
1640 					 * group with invalid rings to this share.
1641 					 */
1642 					return (EINVAL);
1643 				}
1644 			}
1645 		}
1646 
1647 		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1648 		    rg->gindex;
1649 		rg->sindex = vr->region;
1650 		break;
1651 	}
1652 	return (0);
1653 }
1654 
1655 int
1656 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1657     mac_group_driver_t ghandle)
1658 {
1659 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1660 	nxge_ring_group_t	*group = (nxge_ring_group_t *)ghandle;
1661 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1662 	int			rv = 0;
1663 
1664 	vr = shp->vrp;
1665 
1666 	switch (group->type) {
1667 	case MAC_RING_TYPE_RX:
1668 		group->sindex = 0;
1669 		vr->rdc_tbl = 0;
1670 		shp->rxgroup = 0;
1671 		break;
1672 
1673 	case MAC_RING_TYPE_TX:
1674 		group->sindex = 0;
1675 		vr->tdc_tbl = 0;
1676 		break;
1677 	}
1678 
1679 	return (rv);
1680 }
1681 
1682 int
1683 nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1684     uint64_t *rcookie)
1685 {
1686 	nxge_t			*nxge;
1687 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1688 	nxge_hio_vr_t		*vr;
1689 	uint64_t		rmap, tmap, hv_rmap, hv_tmap;
1690 	int			rv;
1691 
1692 	nxge = shp->nxgep;
1693 	vr = (nxge_hio_vr_t *)shp->vrp;
1694 
1695 	/*
1696 	 * Add resources to the share.
1697 	 * For each DMA channel associated with the VR, bind its resources
1698 	 * to the VR.
1699 	 */
1700 	tmap = 0;
1701 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1702 	if (rv != 0) {
1703 		return (rv);
1704 	}
1705 
1706 	rmap = 0;
1707 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1708 	if (rv != 0) {
1709 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1710 		return (rv);
1711 	}
1712 
1713 	/*
1714 	 * Ask the Hypervisor to set up the VR and allocate slots for
1715 	 * each ring associated with the VR.
1716 	 */
1717 	hv_tmap = hv_rmap = 0;
1718 	if ((rv = nxge_hio_share_assign(nxge, cookie,
1719 	    &hv_tmap, &hv_rmap, vr))) {
1720 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1721 		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1722 		return (rv);
1723 	}
1724 
1725 	shp->active = B_TRUE;
1726 	shp->tmap = hv_tmap;
1727 	shp->rmap = hv_rmap;
1728 
1729 	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1730 	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1731 
1732 	return (0);
1733 }
1734 
1735 void
1736 nxge_hio_share_unbind(mac_share_handle_t shandle)
1737 {
1738 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1739 
1740 	/*
1741 	 * First, unassign the VR (take it back),
1742 	 * so we can enable interrupts again.
1743 	 */
1744 	nxge_hio_share_unassign(shp->vrp);
1745 
1746 	/*
1747 	 * Free Ring Resources for TX and RX
1748 	 */
1749 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1750 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1751 }
1752 
1753 
1754 /*
1755  * nxge_hio_vr_share
1756  *
1757  *	Find an unused Virtualization Region (VR).
1758  *
1759  * Arguments:
1760  * 	nxge
1761  *
1762  * Notes:
1763  *
1764  * Context:
1765  *	Service domain
1766  */
1767 nxge_hio_vr_t *
1768 nxge_hio_vr_share(
1769 	nxge_t *nxge)
1770 {
1771 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1772 	nxge_hio_vr_t *vr;
1773 
1774 	int first, limit, region;
1775 
1776 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1777 
1778 	MUTEX_ENTER(&nhd->lock);
1779 
1780 	if (nhd->vrs == 0) {
1781 		MUTEX_EXIT(&nhd->lock);
1782 		return (0);
1783 	}
1784 
1785 	/* Find an empty virtual region (VR). */
1786 	if (nxge->function_num == 0) {
1787 		// FUNC0_VIR0 'belongs' to NIU port 0.
1788 		first = FUNC0_VIR1;
1789 		limit = FUNC2_VIR0;
1790 	} else if (nxge->function_num == 1) {
1791 		// FUNC2_VIR0 'belongs' to NIU port 1.
1792 		first = FUNC2_VIR1;
1793 		limit = FUNC_VIR_MAX;
1794 	} else {
1795 		cmn_err(CE_WARN,
1796 		    "Shares not supported on function(%d) at this time.\n",
1797 		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
1798 	}
1799 
1800 	for (region = first; region < limit; region++) {
1801 		if (nhd->vr[region].nxge == 0)
1802 			break;
1803 	}
1804 
1805 	if (region == limit) {
1806 		MUTEX_EXIT(&nhd->lock);
1807 		return (0);
1808 	}
1809 
1810 	vr = &nhd->vr[region];
1811 	vr->nxge = (uintptr_t)nxge;
1812 	vr->region = (uintptr_t)region;
1813 
1814 	nhd->vrs--;
1815 
1816 	MUTEX_EXIT(&nhd->lock);
1817 
1818 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1819 
1820 	return (vr);
1821 }
1822 
1823 void
1824 nxge_hio_unshare(
1825 	nxge_hio_vr_t *vr)
1826 {
1827 	nxge_t *nxge = (nxge_t *)vr->nxge;
1828 	nxge_hio_data_t *nhd;
1829 
1830 	vr_region_t region;
1831 
1832 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1833 
1834 	if (!nxge) {
1835 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1836 		    "vr->nxge is NULL"));
1837 		return;
1838 	}
1839 
1840 	/*
1841 	 * This function is no longer called, but I will keep it
1842 	 * here in case we want to revisit this topic in the future.
1843 	 *
1844 	 * nxge_hio_hostinfo_uninit(nxge, vr);
1845 	 */
1846 
1847 	/*
1848 	 * XXX: This is done by ms_sremove?
1849 	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1850 	 */
1851 
1852 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1853 
1854 	MUTEX_ENTER(&nhd->lock);
1855 
1856 	region = vr->region;
1857 	(void) memset(vr, 0, sizeof (*vr));
1858 	vr->region = region;
1859 
1860 	nhd->vrs++;
1861 
1862 	MUTEX_EXIT(&nhd->lock);
1863 
1864 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1865 }
1866 
1867 int
1868 nxge_hio_addres(
1869 	nxge_hio_vr_t *vr,
1870 	mac_ring_type_t type,
1871 	uint64_t *map)
1872 {
1873 	nxge_t		*nxge = (nxge_t *)vr->nxge;
1874 	nxge_grp_t	*group;
1875 	int		groupid;
1876 	int		i;
1877 	int		max_dcs;
1878 
1879 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1880 
1881 	if (!nxge)
1882 		return (EINVAL);
1883 
1884 	/*
1885 	 * For each ring associated with the group, add the resources
1886 	 * to the group and bind.
1887 	 */
1888 	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1889 	if (type == MAC_RING_TYPE_TX) {
1890 		/* set->group is an array of groups indexed by port group id */
1891 		groupid = vr->tdc_tbl -
1892 		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
1893 		group = nxge->tx_set.group[groupid];
1894 	} else {
1895 		/* set->group is an array of groups indexed by port group id */
1896 		groupid = vr->rdc_tbl -
1897 		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1898 		group = nxge->rx_set.group[groupid];
1899 	}
1900 
1901 	if (group->map == 0) {
1902 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
1903 		    "with this VR"));
1904 		return (EINVAL);
1905 	}
1906 
1907 	for (i = 0; i < max_dcs; i++) {
1908 		if (group->map & (1 << i)) {
1909 			int rv;
1910 
1911 			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
1912 				if (*map == 0) /* Couldn't get even one DC. */
1913 					return (-rv);
1914 				else
1915 					break;
1916 			}
1917 			*map |= (1 << i);
1918 		}
1919 	}
1920 
1921 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
1922 
1923 	return (0);
1924 }
1925 
1926 /* ARGSUSED */
1927 void
1928 nxge_hio_remres(
1929 	nxge_hio_vr_t *vr,
1930 	mac_ring_type_t type,
1931 	res_map_t res_map)
1932 {
1933 	nxge_t *nxge = (nxge_t *)vr->nxge;
1934 	nxge_grp_t *group;
1935 
1936 	if (!nxge) {
1937 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
1938 		    "vr->nxge is NULL"));
1939 		return;
1940 	}
1941 
1942 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
1943 
1944 	/*
1945 	 * For each ring bound to the group, remove the DMA resources
1946 	 * from the group and unbind.
1947 	 */
1948 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
1949 	while (group->dc) {
1950 		nxge_hio_dc_t *dc = group->dc;
1951 		NXGE_DC_RESET(res_map, dc->page);
1952 		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
1953 	}
1954 
1955 	if (res_map) {
1956 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
1957 		    "res_map %lx", res_map));
1958 	}
1959 
1960 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
1961 }
1962 
1963 /*
1964  * nxge_hio_tdc_share
1965  *
1966  *	Share an unused TDC channel.
1967  *
1968  * Arguments:
1969  * 	nxge
1970  *
1971  * Notes:
1972  *
1973  * A.7.3 Reconfigure Tx DMA channel
1974  *	Disable TxDMA			A.9.6.10
1975  *     [Rebind TxDMA channel to Port	A.9.6.7]
1976  *
1977  * We don't have to rebind the TDC to the port - it is always already bound.
1978  *
1979  *	Soft Reset TxDMA		A.9.6.2
1980  *
1981  * This procedure will be executed by nxge_init_txdma_channel() in the
1982  * guest domain:
1983  *
1984  *	Re-initialize TxDMA		A.9.6.8
1985  *	Reconfigure TxDMA
1986  *	Enable TxDMA			A.9.6.9
1987  *
1988  * Context:
1989  *	Service domain
1990  */
1991 int
1992 nxge_hio_tdc_share(
1993 	nxge_t *nxge,
1994 	int channel)
1995 {
1996 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1997 	nxge_grp_set_t *set = &nxge->tx_set;
1998 	tx_ring_t *ring;
1999 	int count;
2000 
2001 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));
2002 
2003 	/*
2004 	 * Wait until this channel is idle.
2005 	 */
2006 	ring = nxge->tx_rings->rings[channel];
2007 
2008 	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
2009 	if (ring->tx_ring_busy) {
2010 		/*
2011 		 * Wait for 30 seconds.
2012 		 */
2013 		for (count = 30 * 1000; count; count--) {
2014 			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
2015 				break;
2016 			}
2017 
2018 			drv_usecwait(1000);
2019 		}
2020 
2021 		if (count == 0) {
2022 			(void) atomic_swap_32(&ring->tx_ring_offline,
2023 			    NXGE_TX_RING_ONLINE);
2024 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2025 			    "nxge_hio_tdc_share: "
2026 			    "Tx ring %d was always BUSY", channel));
2027 			return (-EIO);
2028 		}
2029 	} else {
2030 		(void) atomic_swap_32(&ring->tx_ring_offline,
2031 		    NXGE_TX_RING_OFFLINED);
2032 	}
2033 
2034 	MUTEX_ENTER(&nhd->lock);
2035 	nxge->tdc_is_shared[channel] = B_TRUE;
2036 	MUTEX_EXIT(&nhd->lock);
2037 
2038 	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2039 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
2040 		    "Failed to remove interrupt for TxDMA channel %d",
2041 		    channel));
2042 		return (-EINVAL);
2043 	}
2044 
2045 	/* Disable TxDMA A.9.6.10 */
2046 	(void) nxge_txdma_channel_disable(nxge, channel);
2047 
2048 	/* The SD is sharing this channel. */
2049 	NXGE_DC_SET(set->shared.map, channel);
2050 	set->shared.count++;
2051 
2052 	/* Soft Reset TxDMA A.9.6.2 */
2053 	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
2054 
2055 	/*
2056 	 * Initialize the DC-specific FZC control registers.
2057 	 * -----------------------------------------------------
2058 	 */
2059 	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
2060 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2061 		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
2062 		return (-EIO);
2063 	}
2064 
2065 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));
2066 
2067 	return (0);
2068 }
2069 
2070 /*
2071  * nxge_hio_rdc_share
2072  *
2073  *	Share an unused RDC channel.
2074  *
2075  * Arguments:
2076  * 	nxge
2077  *
2078  * Notes:
2079  *
2080  * This is the latest version of the procedure to
2081  * Reconfigure an Rx DMA channel:
2082  *
2083  * A.6.3 Reconfigure Rx DMA channel
2084  *	Stop RxMAC		A.9.2.6
2085  *	Drain IPP Port		A.9.3.6
2086  *	Stop and reset RxDMA	A.9.5.3
2087  *
2088  * This procedure will be executed by nxge_init_rxdma_channel() in the
2089  * guest domain:
2090  *
2091  *	Initialize RxDMA	A.9.5.4
2092  *	Reconfigure RxDMA
2093  *	Enable RxDMA		A.9.5.5
2094  *
2095  * We will do this here, since the RDC is a canalis non grata:
2096  *	Enable RxMAC		A.9.2.10
2097  *
2098  * Context:
2099  *	Service domain
2100  */
2101 int
2102 nxge_hio_rdc_share(
2103 	nxge_t *nxge,
2104 	nxge_hio_vr_t *vr,
2105 	int channel)
2106 {
2107 	nxge_grp_set_t *set = &nxge->rx_set;
2108 	nxge_rdc_grp_t *rdc_grp;
2109 
2110 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2111 
2112 	/* Disable interrupts. */
2113 	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2114 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2115 		    "Failed to remove interrupt for RxDMA channel %d",
2116 		    channel));
2117 		return (NXGE_ERROR);
2118 	}
2119 
2120 	/* Stop RxMAC = A.9.2.6 */
2121 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2122 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2123 		    "Failed to disable RxMAC"));
2124 	}
2125 
2126 	/* Drain IPP Port = A.9.3.6 */
2127 	(void) nxge_ipp_drain(nxge);
2128 
2129 	/* Stop and reset RxDMA = A.9.5.3 */
2130 	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
2131 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2132 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2133 		    "Failed to disable RxDMA channel %d", channel));
2134 	}
2135 
2136 	/* The SD is sharing this channel. */
2137 	NXGE_DC_SET(set->shared.map, channel);
2138 	set->shared.count++;
2139 
2140 	// Assert RST: RXDMA_CFIG1[30] = 1
2141 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
2142 
2143 	/*
2144 	 * The guest domain will reconfigure the RDC later.
2145 	 *
2146 	 * But in the meantime, we must re-enable the Rx MAC so
2147 	 * that we can start receiving packets again on the
2148 	 * remaining RDCs:
2149 	 *
2150 	 * Enable RxMAC = A.9.2.10
2151 	 */
2152 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2153 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2154 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2155 	}
2156 
2157 	/*
2158 	 * Initialize the DC-specific FZC control registers.
2159 	 * -----------------------------------------------------
2160 	 */
2161 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2162 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2163 		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
2164 		return (-EIO);
2165 	}
2166 
2167 	/*
2168 	 * We have to initialize the guest's RDC table, too.
2169 	 * -----------------------------------------------------
2170 	 */
2171 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
2172 	if (rdc_grp->max_rdcs == 0) {
2173 		rdc_grp->start_rdc = (uint8_t)channel;
2174 		rdc_grp->def_rdc = (uint8_t)channel;
2175 		rdc_grp->max_rdcs = 1;
2176 	} else {
2177 		rdc_grp->max_rdcs++;
2178 	}
2179 	NXGE_DC_SET(rdc_grp->map, channel);
2180 
2181 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2182 
2183 	return (0);
2184 }
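
/*
 * Illustrative sketch (not compiled): the effect of sharing two RDCs,
 * here arbitrarily 6 and 7, with the same VR.  This assumes the
 * guest's RDC group started out empty (max_rdcs == 0) and that
 * NXGE_DC_SET() sets bit <channel> in the channel map.
 */
#if 0
	(void) nxge_hio_rdc_share(nxge, vr, 6);
	(void) nxge_hio_rdc_share(nxge, vr, 7);
	/*
	 * nxge->pt_config.rdc_grps[vr->rdc_tbl] now reads:
	 *	start_rdc = 6, def_rdc = 6, max_rdcs = 2,
	 *	map = (1 << 6) | (1 << 7)
	 */
#endif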
2185 
2186 /*
2187  * nxge_hio_dc_share
2188  *
2189  *	Share a DMA channel with a guest domain.
2190  *
2191  * Arguments:
2192  * 	nxge
2193  * 	vr	The VR that <channel> will belong to.
2194  * 	type	Tx or Rx.
2195  * 	channel	Channel to share
2196  *
2197  * Notes:
2198  *
2199  * Context:
2200  *	Service domain
2201  */
2202 int
2203 nxge_hio_dc_share(
2204 	nxge_t *nxge,
2205 	nxge_hio_vr_t *vr,
2206 	mac_ring_type_t type,
2207 	int channel)
2208 {
2209 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2210 	nxge_hio_dc_t *dc;
2211 	nxge_grp_t *group;
2212 	int slot;
2213 
2214 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
2215 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2216 
2218 	/* -------------------------------------------------- */
2219 	slot = (type == MAC_RING_TYPE_TX) ?
2220 	    nxge_hio_tdc_share(nxge, channel) :
2221 	    nxge_hio_rdc_share(nxge, vr, channel);
2222 
2223 	if (slot < 0) {
2224 		if (type == MAC_RING_TYPE_RX) {
2225 			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2226 		} else {
2227 			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2228 		}
2229 		return (slot);
2230 	}
2231 
2232 	MUTEX_ENTER(&nhd->lock);
2233 
2234 	/*
2235 	 * Tag this channel.
2236 	 * --------------------------------------------------
2237 	 */
2238 	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];
2239 
2240 	dc->vr = vr;
2241 	dc->channel = (nxge_channel_t)channel;
2242 
2243 	MUTEX_EXIT(&nhd->lock);
2244 
2245 	/*
2246 	 * vr->[t|r]x_group is used by the service domain to
2247 	 * keep track of its shared DMA channels.
2248 	 */
2249 	MUTEX_ENTER(&nxge->group_lock);
2250 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2251 
2252 	dc->group = group;
2253 	/* Initialize <group>, if necessary */
2254 	if (group->count == 0) {
2255 		group->nxge = nxge;
2256 		group->type = (type == MAC_RING_TYPE_TX) ?
2257 		    VP_BOUND_TX : VP_BOUND_RX;
2258 		group->sequence	= nhd->sequence++;
2259 		group->active = B_TRUE;
2260 	}
2261 
2262 	MUTEX_EXIT(&nxge->group_lock);
2263 
2264 	NXGE_ERROR_MSG((nxge, HIO_CTL,
2265 	    "DC share: %cDC %d was assigned to slot %d",
2266 	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));
2267 
2268 	nxge_grp_dc_append(nxge, group, dc);
2269 
2270 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));
2271 
2272 	return (0);
2273 }
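
/*
 * Usage sketch (illustrative only, not compiled): a hypothetical
 * service-domain caller handing one TDC and one RDC to a VR via
 * nxge_hio_dc_share().  The function name below is an assumption;
 * the real callers live elsewhere in this driver.
 */
#if 0
static int
example_share_pair(nxge_t *nxge, nxge_hio_vr_t *vr, int tdc, int rdc)
{
	int rv;

	/* Tx first: quiesce, tag, and append the TDC to vr->tx_group. */
	if ((rv = nxge_hio_dc_share(nxge, vr, MAC_RING_TYPE_TX, tdc)) != 0)
		return (rv);

	/* Then Rx; nxge_hio_rdc_share() stops & restarts the RxMAC. */
	if ((rv = nxge_hio_dc_share(nxge, vr, MAC_RING_TYPE_RX, rdc)) != 0) {
		/* Give the TDC back to the service domain. */
		nxge_hio_dc_unshare(nxge, vr, MAC_RING_TYPE_TX, tdc);
		return (rv);
	}

	return (0);
}
#endif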
2274 
2275 /*
2276  * nxge_hio_tdc_unshare
2277  *
2278  *	Unshare a TDC.
2279  *
2280  * Arguments:
2281  * 	nxge
 * 	dev_grpid	The device group id (TDC table) to return <channel> to.
2282  * 	channel	The channel to unshare (add again).
2283  *
2284  * Notes:
2285  *
2286  * Context:
2287  *	Service domain
2288  */
2289 void
2290 nxge_hio_tdc_unshare(
2291 	nxge_t *nxge,
2292 	int dev_grpid,
2293 	int channel)
2294 {
2295 	nxge_grp_set_t *set = &nxge->tx_set;
2296 	nxge_grp_t *group;
2297 	int grpid;
2298 
2299 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));
2300 
2301 	NXGE_DC_RESET(set->shared.map, channel);
2302 	set->shared.count--;
2303 
2304 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
2305 	group = set->group[grpid];
2306 
2307 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
2308 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2309 		    "Failed to initialize TxDMA channel %d", channel));
2310 		return;
2311 	}
2312 
2313 	/* Re-add this interrupt. */
2314 	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2315 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2316 		    "Failed to add interrupt for TxDMA channel %d", channel));
2317 	}
2318 
2319 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
2320 }
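
/*
 * Illustrative sketch (not compiled): <dev_grpid> above is the
 * device-wide group id (the VR's TDC table); it becomes an index
 * into the port's local group array by subtracting the default
 * TxDMA group id.  The value 4 below is an assumed example.
 */
#if 0
	/* With def_mac_txdma_grpid assumed to be 4: */
	int dev_grpid = 5;
	int grpid = dev_grpid -
	    nxge->pt_config.hw_config.def_mac_txdma_grpid;	/* == 1 */
	nxge_grp_t *group = nxge->tx_set.group[grpid];
#endif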
2321 
2322 /*
2323  * nxge_hio_rdc_unshare
2324  *
2325  *	Unshare an RDC: add it to the SD's RDC groups (tables).
2326  *
2327  * Arguments:
2328  * 	nxge
 * 	dev_grpid	The device group id (RDC table) to return <channel> to.
2329  * 	channel	The channel to unshare (add again).
2330  *
2331  * Notes:
2332  *
2333  * Context:
2334  *	Service domain
2335  */
2336 void
2337 nxge_hio_rdc_unshare(
2338 	nxge_t *nxge,
2339 	int dev_grpid,
2340 	int channel)
2341 {
2342 	nxge_grp_set_t		*set = &nxge->rx_set;
2343 	nxge_grp_t		*group;
2344 	int			grpid;
2345 
2346 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2347 
2348 	/* Stop RxMAC = A.9.2.6 */
2349 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2350 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2351 		    "Failed to disable RxMAC"));
2352 	}
2353 
2354 	/* Drain IPP Port = A.9.3.6 */
2355 	(void) nxge_ipp_drain(nxge);
2356 
2357 	/* Stop and reset RxDMA = A.9.5.3 */
2358 	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
2359 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2360 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2361 		    "Failed to disable RxDMA channel %d", channel));
2362 	}
2363 
2364 	NXGE_DC_RESET(set->shared.map, channel);
2365 	set->shared.count--;
2366 
2367 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2368 	group = set->group[grpid];
2369 
2370 	/*
2371 	 * Assert RST: RXDMA_CFIG1[30] = 1
2372 	 *
2373 	 * Initialize RxDMA	A.9.5.4
2374 	 * Reconfigure RxDMA
2375 	 * Enable RxDMA		A.9.5.5
2376 	 */
2377 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2378 		/* Be sure to re-enable the RX MAC. */
2379 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2380 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2381 			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2382 		}
2383 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2384 		    "Failed to initialize RxDMA channel %d", channel));
2385 		return;
2386 	}
2387 
2388 	/*
2389 	 * Enable RxMAC = A.9.2.10
2390 	 */
2391 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2392 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2393 		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2394 		return;
2395 	}
2396 
2397 	/* Re-add this interrupt. */
2398 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2399 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2400 		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
2401 		    "RxDMA channel %d", channel));
2402 	}
2403 
2404 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
2405 }
2406 
2407 /*
2408  * nxge_hio_dc_unshare
2409  *
2410  *	Unshare (reuse) a DMA channel.
2411  *
2412  * Arguments:
2413  * 	nxge
2414  * 	vr	The VR that <channel> belongs to.
2415  * 	type	Tx or Rx.
2416  * 	channel	The DMA channel to reuse.
2417  *
2418  * Notes:
2419  *
2420  * Context:
2421  *	Service domain
2422  */
2423 void
2424 nxge_hio_dc_unshare(
2425 	nxge_t *nxge,
2426 	nxge_hio_vr_t *vr,
2427 	mac_ring_type_t type,
2428 	int channel)
2429 {
2430 	nxge_grp_t *group;
2431 	nxge_hio_dc_t *dc;
2432 
2433 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
2434 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2435 
2436 	/* Unlink the channel from its group. */
2437 	/* -------------------------------------------------- */
2438 	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
2439 	NXGE_DC_RESET(group->map, channel);
2440 	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == NULL) {
2441 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2442 		    "nxge_hio_dc_unshare(%d) failed", channel));
2443 		return;
2444 	}
2445 
2446 	dc->vr = NULL;
2447 	dc->cookie = 0;
2448 
2449 	if (type == MAC_RING_TYPE_RX) {
2450 		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2451 	} else {
2452 		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2453 	}
2454 
2455 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
2456 }
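
/*
 * Usage sketch (illustrative only, not compiled): a hypothetical
 * teardown path returning every RDC in a VR's Rx group to the
 * service domain.  It assumes group->map is a bitmap of channel
 * numbers and that NXGE_MAX_RDCS bounds the channel space.
 */
#if 0
static void
example_unshare_rx(nxge_t *nxge, nxge_hio_vr_t *vr)
{
	int channel;

	for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
		if (vr->rx_group.map & (1 << channel)) {
			nxge_hio_dc_unshare(nxge, vr, MAC_RING_TYPE_RX,
			    channel);
		}
	}
}
#endif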
2457 
2458 
2459 /*
2460  * nxge_hio_rxdma_bind_intr():
2461  *
2462  *	For the guest domain driver, bind the interrupt group and
2463  *	state to the rx_rcr_ring_t.
2464  */
2465 
2466 int
2467 nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
2468 {
2469 	nxge_hio_dc_t	*dc;
2470 	nxge_ldgv_t	*control;
2471 	nxge_ldg_t	*group;
2472 	nxge_ldv_t	*device;
2473 
2474 	/*
2475 	 * Find the DMA channel.
2476 	 */
2477 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
2478 		return (NXGE_ERROR);
2479 	}
2480 
2481 	/*
2482 	 * Get the control structure.
2483 	 */
2484 	control = nxge->ldgvp;
2485 	if (control == NULL) {
2486 		return (NXGE_ERROR);
2487 	}
2488 
2489 	group = &control->ldgp[dc->ldg.vector];
2490 	device = &control->ldvp[dc->ldg.ldsv];
2491 
2492 	MUTEX_ENTER(&ring->lock);
2493 	ring->ldgp = group;
2494 	ring->ldvp = device;
2495 	MUTEX_EXIT(&ring->lock);
2496 
2497 	return (NXGE_OK);
2498 }
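
/*
 * Usage sketch (illustrative only, not compiled): a guest-domain
 * caller binding a freshly initialized RCR ring to the logical
 * device group (LDG) that services its RDC.  The caller name is an
 * assumption.
 */
#if 0
static int
example_guest_bind(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
{
	if (nxge_hio_rxdma_bind_intr(nxge, ring, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "example_guest_bind: no LDG for RDC %d", channel));
		return (NXGE_ERROR);
	}

	/* ring->ldgp and ring->ldvp now reference the channel's LDG/LDV. */
	return (NXGE_OK);
}
#endif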
2499 #endif	/* if defined(sun4v) */
2500