/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 *
 */

#include <sys/mac_provider.h>
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following functions may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
	boolean_t usetbl);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 *	Figure out if we are in a guest domain or not.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
void
nxge_get_environs(
	nxge_t *nxge)
{
	char *string;

	/*
	 * In the beginning, assume that we are running sans LDOMs/XEN.
	 */
	nxge->environs = SOLARIS_DOMAIN;

	/*
	 * Are we a hybrid I/O (HIO) guest domain driver?
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "niutype", &string)) == DDI_PROP_SUCCESS) {
		if (strcmp(string, "n2niu") == 0) {
			nxge->environs = SOLARIS_GUEST_DOMAIN;
			/* So we can allocate properly-aligned memory. */
			nxge->niu_type = N2_NIU;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "Hybrid IO-capable guest domain"));
		}
		ddi_prop_free(string);
	}
}
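
/*
 * Illustrative sketch (not part of the original source): code elsewhere
 * in the driver keys off the environment determined above, e.g.
 *
 *	nxge_get_environs(nxge);
 *	if (isLDOMguest(nxge)) {
 *		... borrow DMA channels from the service domain ...
 *	}
 *
 * where isLDOMguest() is, in effect, a test of nxge->environs against
 * SOLARIS_GUEST_DOMAIN.
 */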

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 *	Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		nxge->rx_hio_groups[i].rdctbl = -1;
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	ASSERT(nxge->nxge_hw_p->ndevs == 0);

	if (nhd != NULL) {
		MUTEX_DESTROY(&nhd->lock);
		KMEM_FREE(nhd, sizeof (*nhd));
		nxge->nxge_hw_p->hio = 0;
	}
}

/*
 * nxge_dci_map
 *
 *	Map a DMA channel index to a channel number.
 *
 * Arguments:
 * 	nxge
 * 	type		The type of channel this is: Tx or Rx.
 * 	index		The index to convert to a channel number.
 *
 * Notes:
 *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *	Any domain
 */
int
nxge_dci_map(
	nxge_t *nxge,
	vpc_type_t type,
	int index)
{
	nxge_grp_set_t *set;
	int dc;

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		break;
	default:
		/* Unknown channel type; nothing to map. */
		return (-1);
	}

	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
		if ((1 << dc) & set->owned.map) {
			if (index == 0)
				return (dc);
			else
				index--;
		}
	}

	return (-1);
}
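
/*
 * Worked example (illustrative only): if set->owned.map were 0x16
 * (binary 10110, i.e. channels 1, 2 and 4 owned), nxge_dci_map()
 * would return:
 *
 *	index 0 --> channel 1
 *	index 1 --> channel 2
 *	index 2 --> channel 4
 *	index 3 --> -1 (no fourth channel owned)
 */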

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions.  That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to
 * manage Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_cleanup(p_nxge_t nxge)
 *
 *	Remove all outstanding groups.
 *
 * Arguments:
 *	nxge
 */
void
nxge_grp_cleanup(p_nxge_t nxge)
{
	nxge_grp_set_t *set;
	int i;

	MUTEX_ENTER(&nxge->group_lock);

	/*
	 * Find RX groups that need to be cleaned up.
	 */
	set = &nxge->rx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}

	/*
	 * Find TX groups that need to be cleaned up.
	 */
	set = &nxge->tx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);
}


/*
 * nxge_grp_add
 *
 *	Add a group to an instance of NXGE.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_grp_t *
nxge_grp_add(
	nxge_t *nxge,
	nxge_grp_type_t type)
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	int i;

	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
	group->nxge = nxge;

	MUTEX_ENTER(&nxge->group_lock);
	switch (type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	group->type = type;
	group->active = B_TRUE;
	group->sequence = set->sequence++;

	/* Find an empty slot for this logical group. */
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] == 0) {
			group->index = i;
			set->group[i] = group;
			NXGE_DC_SET(set->lg.map, i);
			set->lg.count++;
			break;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_add: %cgroup = %d.%d",
	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	return (group);
}
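
/*
 * Minimal usage sketch (hypothetical caller, not part of the driver):
 * a transmit group is created and then populated one DMA channel at a
 * time:
 *
 *	nxge_grp_t *group = nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
 *	if (nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel) != NXGE_OK)
 *		nxge_grp_remove(nxge, group);
 */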

void
nxge_grp_remove(
	nxge_t *nxge,
	nxge_grp_t *group)	/* The group to remove. */
{
	nxge_grp_set_t *set;
	vpc_type_t type;

	MUTEX_ENTER(&nxge->group_lock);
	switch (group->type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	if (set->group[group->index] != group) {
		MUTEX_EXIT(&nxge->group_lock);
		return;
	}

	set->group[group->index] = 0;
	NXGE_DC_RESET(set->lg.map, group->index);
	set->lg.count--;

	/* While inside the mutex, deactivate <group>. */
	group->active = B_FALSE;

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_remove(%c.%d.%d) called",
	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	/* Now, remove any DCs which are still active. */
	switch (group->type) {
	default:
		type = VP_BOUND_TX;
		break;
	case NXGE_RECEIVE_GROUP:
	case EXT_RECEIVE_GROUP:
		type = VP_BOUND_RX;
	}

	while (group->dc) {
		nxge_grp_dc_remove(nxge, type, group->dc->channel);
	}

	KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 *	Add a DMA channel to a VR/Group.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to add.
 * Notes:
 *
 * Context:
 *	Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
	nxge_t *nxge,
	nxge_grp_t *group,	/* The group to add <channel> to. */
	vpc_type_t type,	/* Rx or Tx */
	int channel)		/* A physical/logical channel number */
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

	if (group == 0)
		return (0);

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		if (channel >= NXGE_MAX_TDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: TDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		if (channel >= NXGE_MAX_RDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: RDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;

	default:
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
	    type == VP_BOUND_TX ? 't' : 'r',
	    nxge->mac.portnum, group->sequence, group->count, channel));

	MUTEX_ENTER(&nxge->group_lock);
	if (group->active != B_TRUE) {
		/* We may be in the process of removing this group. */
		MUTEX_EXIT(&nxge->group_lock);
		return (NXGE_ERROR);
	}
	MUTEX_EXIT(&nxge->group_lock);

	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
		return (NXGE_ERROR);
	}

	MUTEX_ENTER(&nhd->lock);

	if (dc->group) {
		MUTEX_EXIT(&nhd->lock);
		/* This channel is already in use! */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = group;

	if (isLDOMguest(nxge))
		(void) nxge_hio_ldsv_add(nxge, dc);

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		MUTEX_ENTER(&nhd->lock);
		(void) memset(dc, 0, sizeof (*dc));
		NXGE_DC_RESET(set->owned.map, channel);
		set->owned.count--;
		MUTEX_EXIT(&nhd->lock);
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	if (type == VP_BOUND_TX) {
		MUTEX_ENTER(&nhd->lock);
		nxge->tdc_is_shared[channel] = B_FALSE;
		MUTEX_EXIT(&nhd->lock);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
		goto nxge_grp_dc_remove_exit;

	if ((dc->group == NULL) && (dc->next == 0) &&
	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
		goto nxge_grp_dc_remove_exit;
	}

	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_remove(%d) failed", channel));
		goto nxge_grp_dc_remove_exit;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

nxge_grp_dc_remove_exit:
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 *	Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 *	Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	channel	The number of the DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
	nxge_t *nxge,
	nxge_grp_t *group,
	int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if (group == NULL) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		NXGE_DC_RESET(group->map, channel);
		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 *	Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}
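
/*
 * Example (illustrative): if a group's DC list is 3 -> 5 -> 7, then
 * after nxge_grp_dc_map() the legend array begins { 3, 5, 7 }; that
 * is, the linked list is flattened into an array of channel numbers.
 */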

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 *	Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1",	0 },
	{ "RXDMA_CFIG2",	8 },
	{ "RBR_CFIG_A",		0x10 },
	{ "RBR_CFIG_B",		0x18 },
	{ "RBR_KICK",		0x20 },
	{ "RBR_STAT",		0x28 },
	{ "RBR_HDH",		0x30 },
	{ "RBR_HDL",		0x38 },
	{ "RCRCFIG_A",		0x40 },
	{ "RCRCFIG_B",		0x48 },
	{ "RCRSTAT_A",		0x50 },
	{ "RCRSTAT_B",		0x58 },
	{ "RCRSTAT_C",		0x60 },
	{ "RX_DMA_ENT_MSK",	0x68 },
	{ "RX_DMA_CTL_STAT",	0x70 },
	{ "RCR_FLSH",		0x78 },
	{ "RXMISC",		0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG",	0 },
	{ "Tx_RNG_HDL",		0x10 },
	{ "Tx_RNG_KICK",	0x18 },
	{ "Tx_ENT_MASK",	0x20 },
	{ "Tx_CS",		0x28 },
	{ "TxDMA_MBH",		0x30 },
	{ "TxDMA_MBL",		0x38 },
	{ "TxDMA_PRE_ST",	0x40 },
	{ "Tx_RNG_ERR_LOGH",	0x48 },
	{ "Tx_RNG_ERR_LOGL",	0x50 },
	{ "TDMC_INTR_DBG",	0x60 },
	{ "Tx_CS_DBG",		0x68 },
	{ 0, -1 }
};

/*
 * nxge_xx2str
 *
 *	Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
 *
 * Context:
 *	Any domain
 */
const char *
nxge_rx2str(
	int offset)
{
	dmc_reg_name_t *reg = &rx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

const char *
nxge_tx2str(
	int offset)
{
	dmc_reg_name_t *reg = &tx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}
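
/*
 * Hypothetical debugging use of the translators above:
 *
 *	const char *name = nxge_rx2str(0x70);
 *
 * name is now "RX_DMA_CTL_STAT".  Because the offset is masked with
 * DMA_CSR_MASK first, any channel's alias of a CSR maps back to the
 * same register name.
 */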

/*
 * nxge_ddi_perror
 *
 *	Map a DDI error number to a string.
 *
 * Arguments:
 * 	ddi_error	The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
const char *
nxge_ddi_perror(
	int ddi_error)
{
	switch (ddi_error) {
	case DDI_SUCCESS:
		return ("DDI_SUCCESS");
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("Unknown error");
	}
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 *	Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
			if (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "nxge_hio_init: hypervisor services "
			    "version %d.%d",
			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
		}
	}

	/*
	 * Initialize share and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		nxge->rx_hio_groups[i].rdctbl = -1;
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize the share structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	}

	return (0);
}
#endif /* defined(sun4v) */

static int
nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
    const uint8_t *macaddr)
{
	int rv;
	nxge_rdc_grp_t *group;

	mutex_enter(nxge->genlock);

	/*
	 * Initialize the NXGE RDC table data structure.
	 */
	group = &nxge->pt_config.rdc_grps[g->rdctbl];
	if (!group->flag) {
		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
		group->flag = B_TRUE;	/* This group has been configured. */
	}

	mutex_exit(nxge->genlock);

	/*
	 * Add the MAC address.
	 */
	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
	    g->rdctbl, B_TRUE)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	g->n_mac_addrs++;
	mutex_exit(nxge->genlock);
	return (0);
}

static int
nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t		nxgep = (p_nxge_t)arg;
	struct ether_addr	addrp;

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unicast failed"));
		return (EINVAL);
	}

	nxgep->primary = B_TRUE;

	return (0);
}

/*ARGSUSED*/
static int
nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
{
	nxgep->primary = B_FALSE;
	return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t	*group = (nxge_ring_group_t *)arg;
	p_nxge_t		nxge = group->nxgep;
	int			rv;
	nxge_hio_vr_t		*vr;	/* The Virtualization Region */

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	if (!nxge->primary && group->port_default_grp) {
		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
		mutex_exit(nxge->genlock);
		return (rv);
	}

	/*
	 * If the group is associated with a VR, then only one
	 * address may be assigned to the group.
	 */
	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
	if ((vr != NULL) && (group->n_mac_addrs)) {
		mutex_exit(nxge->genlock);
		return (ENOSPC);
	}

	mutex_exit(nxge->genlock);

	/*
	 * Program the mac address for the group.
	 */
	if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

static int
find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
{
	int i;
	for (i = 0; i <= mmac_info->num_mmac; i++) {
		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
		    ETHERADDRL) == 0) {
			return (i);
		}
	}
	return (-1);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	struct ether_addr addrp;
	p_nxge_t nxge = group->nxgep;
	nxge_mmac_t *mmac_info;
	int rv, slot;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	mmac_info = &nxge->nxge_mmac_info;
	slot = find_mac_slot(mmac_info, mac_addr);
	if (slot < 0) {
		if (group->port_default_grp && nxge->primary) {
			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
				rv = nxge_hio_clear_unicst(nxge, mac_addr);
				mutex_exit(nxge->genlock);
				return (rv);
			} else {
				mutex_exit(nxge->genlock);
				return (EINVAL);
			}
		} else {
			mutex_exit(nxge->genlock);
			return (EINVAL);
		}
	}

	mutex_exit(nxge->genlock);

	/*
	 * Remove the mac address for the group
	 */
	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	group->n_mac_addrs--;
	mutex_exit(nxge->genlock);

	return (0);
}

static int
nxge_hio_group_start(mac_group_driver_t gdriver)
{
	nxge_ring_group_t	*group = (nxge_ring_group_t *)gdriver;
	nxge_rdc_grp_t		*rdc_grp_p;
	int			rdctbl;
	int			dev_gindex;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
		return (ENXIO);

	mutex_enter(group->nxgep->genlock);
	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_start_exit;

	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
	    group->gindex;
	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];

	/*
	 * Get an rdc table for this group.
	 * Group ID is given by the caller, and that's the group it needs
	 * to bind to.  The default group is already bound when the driver
	 * was attached.
	 *
	 * For group 0, its RDC table was allocated at attach time;
	 * there is no need to allocate a new table.
	 */
	if (group->gindex != 0) {
		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
		    dev_gindex, B_TRUE);
		if (rdctbl < 0) {
			mutex_exit(group->nxgep->genlock);
			return (rdctbl);
		}
	} else {
		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
	}

	group->rdctbl = rdctbl;

	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);

nxge_hio_group_start_exit:
	group->started = B_TRUE;
	mutex_exit(group->nxgep->genlock);
	return (0);
}

static void
nxge_hio_group_stop(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;

	ASSERT(group->type == MAC_RING_TYPE_RX);

	mutex_enter(group->nxgep->genlock);
	group->started = B_FALSE;

	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_stop_exit;

	/*
	 * Unbind the RDC table previously bound for this group.
	 *
	 * Since the RDC table for group 0 was allocated at attach
	 * time, there is no need to unbind the table here.
	 */
	if (group->gindex != 0)
		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);

nxge_hio_group_stop_exit:
	mutex_exit(group->nxgep->genlock);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
	mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t		nxgep = (p_nxge_t)arg;
	nxge_ring_group_t	*group;
	int			dev_gindex;

	switch (type) {
	case MAC_RING_TYPE_RX:
		group = &nxgep->rx_hio_groups[groupid];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid;
		group->sindex = 0;	/* not yet bound to a share */

		if (!isLDOMguest(nxgep)) {
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
			    groupid;

			if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
			    dev_gindex)
				group->port_default_grp = B_TRUE;

			infop->mgi_count =
			    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
		} else {
			infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		}

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = nxge_hio_group_start;
		infop->mgi_stop = nxge_hio_group_stop;
		infop->mgi_addmac = nxge_hio_add_mac;
		infop->mgi_remmac = nxge_hio_rem_mac;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * 'groupid' for TX should be incremented by one since
		 * the default group (groupid 0) is not known by the MAC layer
		 */
		group = &nxgep->tx_hio_groups[groupid + 1];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid + 1;
		group->sindex = 0;	/* not yet bound to a share */

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;	/* not needed */
		infop->mgi_remmac = NULL;	/* not needed */
		/* no rings associated with group initially */
		infop->mgi_count = 0;
		break;
	}
}
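
/*
 * Example (illustrative): for MAC_RING_TYPE_TX above, MAC-layer group
 * 0 lands in tx_hio_groups[1]; the driver's own default TX group
 * occupies tx_hio_groups[0] and is never exposed to the MAC layer,
 * which is why 'groupid' is incremented by one.
 */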

#if defined(sun4v)

int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
		NXGE_ERROR_MSG((nxge, HIO_CTL,
		    "nxge_hio_share_assign: "
		    "vr->assign() returned %d", hv_rv));
		return (-EIO);
	}

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	/*
	 * For each shared RDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->rx_group.dc;
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		while (dc) {
			hv_rv = (*rx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "rx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*rmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	return (0);
}

void
nxge_hio_share_unassign(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	uint64_t hv_rv;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	dc = vr->tx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "tx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	dc = vr->rx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "rx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	fp = &nhd->hio.vr;
	if (fp->unassign) {
		hv_rv = (*fp->unassign)(vr->cookie);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "vr->unassign(%x) failed: %ld",
			    vr->cookie, hv_rv));
		}
	}
}

int
nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
{
	p_nxge_t		nxge = (p_nxge_t)arg;
	nxge_share_handle_t	*shp;
	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
	    nhd->hio.rx.assign == 0) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
		return (EIO);
	}

	/*
	 * Get a VR.
	 */
	if ((vr = nxge_hio_vr_share(nxge)) == 0)
		return (EAGAIN);

	shp = &nxge->shares[vr->region];
	shp->nxgep = nxge;
	shp->index = vr->region;
	shp->vrp = (void *)vr;
	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
	shp->active = B_FALSE;		/* not bound yet */

	*shandle = (mac_share_handle_t)shp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
	return (0);
}


void
nxge_hio_share_free(mac_share_handle_t shandle)
{
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t		*vr;

	/*
	 * Clear internal handle state.
	 */
	vr = shp->vrp;
	shp->vrp = (void *)NULL;
	shp->index = 0;
	shp->tmap = 0;
	shp->rmap = 0;
	shp->rxgroup = 0;
	shp->active = B_FALSE;

	/*
	 * Free VR resource.
	 */
	nxge_hio_unshare(vr);
}


void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    mac_ring_handle_t *rings, uint_t *n_rings)
{
	nxge_t			*nxge;
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_ring_handle_t	*rh;
	uint32_t		offset;

	nxge = shp->nxgep;

	switch (type) {
	case MAC_RING_TYPE_RX:
		rh = nxge->rx_ring_handles;
		offset = nxge->pt_config.hw_config.start_rdc;
		break;

	case MAC_RING_TYPE_TX:
		rh = nxge->tx_ring_handles;
		offset = nxge->pt_config.hw_config.tdc.start;
		break;
	}

	/*
	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
	 * but the HV has statically assigned the channels like so:
	 * VR0: RDC0 & RDC1
	 * VR1: RDC2 & RDC3, etc.
	 * The TDCs are assigned in exactly the same way.
	 */
	if (rings != NULL) {
		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
	}
	if (n_rings != NULL) {
		*n_rings = 2;
	}
}
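
/*
 * Worked example (illustrative): under the static assignment above, a
 * share in VR 2 on a port whose first RDC is 4 (offset = 4) reports
 * rings rh[2 * 2 - 4] = rh[0] and rh[1], which correspond to hardware
 * channels RDC4 and RDC5.
 */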

int
nxge_hio_share_add_group(mac_share_handle_t shandle,
    mac_group_driver_t ghandle)
{
	nxge_t			*nxge;
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_ring_group_t	*rg = (nxge_ring_group_t *)ghandle;
	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
	nxge_grp_t		*group;
	int			i;

	if (rg->sindex != 0) {
		/* the group is already bound to a share */
		return (EALREADY);
	}

	/*
	 * If we are adding a group 0 to a share, this
	 * is not correct.
	 */
	ASSERT(rg->gindex != 0);

	nxge = rg->nxgep;
	vr = shp->vrp;

	switch (rg->type) {
	case MAC_RING_TYPE_RX:
		/*
		 * Make sure that the group has the right rings associated
		 * for the share. In version 1.0, we may only give a VR
		 * 2 RDCs.  Not only that, but the HV has statically
		 * assigned the channels like so:
		 * VR0: RDC0 & RDC1
		 * VR1: RDC2 & RDC3, etc.
		 */
		group = nxge->rx_set.group[rg->gindex];

		if (group->count > 2) {
			/* a share can have at most 2 rings */
			return (EINVAL);
		}

		for (i = 0; i < NXGE_MAX_RDCS; i++) {
			if (group->map & (1 << i)) {
				if ((i != shp->index * 2) &&
				    (i != (shp->index * 2 + 1))) {
					/*
					 * A group with invalid rings was
					 * attempted to bind to this share
					 */
					return (EINVAL);
				}
			}
		}

		rg->sindex = vr->region;
		vr->rdc_tbl = rg->rdctbl;
		shp->rxgroup = vr->rdc_tbl;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * Make sure that the group has the right rings associated
		 * for the share. In version 1.0, we may only give a VR
		 * 2 TDCs.  Not only that, but the HV has statically
		 * assigned the channels like so:
		 * VR0: TDC0 & TDC1
		 * VR1: TDC2 & TDC3, etc.
		 */
		group = nxge->tx_set.group[rg->gindex];

		if (group->count > 2) {
			/* a share can have at most 2 rings */
			return (EINVAL);
		}

		for (i = 0; i < NXGE_MAX_TDCS; i++) {
			if (group->map & (1 << i)) {
				if ((i != shp->index * 2) &&
				    (i != (shp->index * 2 + 1))) {
					/*
					 * A group with invalid rings was
					 * attempted to bind to this share
					 */
					return (EINVAL);
				}
			}
		}

		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
		    rg->gindex;
		rg->sindex = vr->region;
		break;
	}
	return (0);
}

int
nxge_hio_share_rem_group(mac_share_handle_t shandle,
    mac_group_driver_t ghandle)
{
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_ring_group_t	*group = (nxge_ring_group_t *)ghandle;
	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
	int			rv = 0;

	vr = shp->vrp;

	switch (group->type) {
	case MAC_RING_TYPE_RX:
		group->sindex = 0;
		vr->rdc_tbl = 0;
		shp->rxgroup = 0;
		break;

	case MAC_RING_TYPE_TX:
		group->sindex = 0;
		vr->tdc_tbl = 0;
		break;
	}

	return (rv);
}

int
nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
    uint64_t *rcookie)
{
	nxge_t			*nxge;
	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
	nxge_hio_vr_t		*vr;
	uint64_t		rmap, tmap, hv_rmap, hv_tmap;
	int			rv;

	nxge = shp->nxgep;
	vr = (nxge_hio_vr_t *)shp->vrp;

	/*
	 * Add resources to the share.
	 * For each DMA channel associated with the VR, bind its resources
	 * to the VR.
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
	if (rv != 0) {
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
	if (rv != 0) {
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		return (rv);
	}

	/*
	 * Ask the Hypervisor to set up the VR and allocate slots for
	 * each ring associated with the VR.
	 */
	hv_tmap = hv_rmap = 0;
	if ((rv = nxge_hio_share_assign(nxge, cookie,
	    &hv_tmap, &hv_rmap, vr))) {
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
		return (rv);
	}

	shp->active = B_TRUE;
	shp->tmap = hv_tmap;
	shp->rmap = hv_rmap;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	return (0);
}
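
/*
 * Example (illustrative): if niu_cfg_hdl were 0x100 and the HV cookie
 * 0x2, the caller would receive
 *
 *	*rcookie = (0x100ULL << 32) | 0x2 = 0x0000010000000002
 *
 * i.e. the two values packed into one 64-bit word as described above.
 */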

void
nxge_hio_share_unbind(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	/*
	 * First, unassign the VR (take it back),
	 * so we can enable interrupts again.
	 */
	nxge_hio_share_unassign(shp->vrp);

	/*
	 * Free Ring Resources for TX and RX
	 */
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
}


/*
 * nxge_hio_vr_share
 *
 *	Find an unused Virtualization Region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
nxge_hio_vr_t *
nxge_hio_vr_share(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;

	int first, limit, region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

	MUTEX_ENTER(&nhd->lock);

	if (nhd->vrs == 0) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	/* Find an empty virtual region (VR). */
	if (nxge->function_num == 0) {
		// FUNC0_VIR0 'belongs' to NIU port 0.
		first = FUNC0_VIR1;
		limit = FUNC2_VIR0;
	} else if (nxge->function_num == 1) {
		// FUNC2_VIR0 'belongs' to NIU port 1.
		first = FUNC2_VIR1;
		limit = FUNC_VIR_MAX;
	} else {
		cmn_err(CE_WARN,
		    "Shares not supported on function(%d) at this time.\n",
		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	for (region = first; region < limit; region++) {
		if (nhd->vr[region].nxge == 0)
			break;
	}

	if (region == limit) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	vr = &nhd->vr[region];
	vr->nxge = (uintptr_t)nxge;
	vr->region = (uintptr_t)region;

	nhd->vrs--;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

	return (vr);
}
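
/*
 * Illustrative note: on function 0 the search above covers regions
 * FUNC0_VIR1 through FUNC2_VIR0 - 1, and on function 1 it covers
 * FUNC2_VIR1 through FUNC_VIR_MAX - 1.  FUNC0_VIR0 and FUNC2_VIR0
 * belong to the NIU ports themselves and are never handed out, which
 * is why nxge_hio_init() set nhd->vrs to NXGE_VR_SR_MAX - 2.
 */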

void
nxge_hio_unshare(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;

	vr_region_t region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
		    "vr->nxge is NULL"));
		return;
	}

	/*
	 * This function is no longer called, but I will keep it
	 * here in case we want to revisit this topic in the future.
	 *
	 * nxge_hio_hostinfo_uninit(nxge, vr);
	 */

	/*
	 * XXX: This is done by ms_sremove?
	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
	 */

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	uint64_t *map)
{
	nxge_t		*nxge = (nxge_t *)vr->nxge;
	nxge_grp_t	*group;
	int		groupid;
	int		i;
	int		max_dcs;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	/*
	 * For each ring associated with the group, add the resources
	 * to the group and bind.
	 */
	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
	if (type == MAC_RING_TYPE_TX) {
		/* set->group is an array of groups indexed by a port group id */
		groupid = vr->tdc_tbl -
		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
		group = nxge->tx_set.group[groupid];
	} else {
		/* set->group is an array of groups indexed by a port group id */
		groupid = vr->rdc_tbl -
		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
		group = nxge->rx_set.group[groupid];
	}

	if (group->map == 0) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
		    "with this VR"));
		return (EINVAL);
	}

	for (i = 0; i < max_dcs; i++) {
		if (group->map & (1 << i)) {
			int rv;

			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
				if (*map == 0) /* Couldn't get even one DC. */
					return (-rv);
				else
					break;
			}
			*map |= (1 << i);
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	/*
	 * For each ring bound to the group, remove the DMA resources
	 * from the group and unbind.
	 */
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 *	Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA			A.9.6.10
 *     [Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always
 * already bound.
 *
 *	Soft Reset TxDMA		A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *	Re-initialize TxDMA		A.9.6.8
 *	Reconfigure TxDMA
 *	Enable TxDMA			A.9.6.9
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];
	ASSERT(ring != NULL);

	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	MUTEX_ENTER(&nhd->lock);
	nxge->tdc_is_shared[channel] = B_TRUE;
	MUTEX_EXIT(&nhd->lock);

	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (-EINVAL);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}
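
/*
 * Note (illustrative): the busy-wait in nxge_hio_tdc_share() polls
 * once per millisecond (drv_usecwait(1000)) for up to 30 * 1000
 * iterations, i.e. roughly 30 seconds of wall-clock time, before
 * giving up and putting the ring back online.
 */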
2122 
2123 /*
2124  * nxge_hio_rdc_share
2125  *
2126  *	Share an unused RDC channel.
2127  *
2128  * Arguments:
2129  * 	nxge
2130  *
2131  * Notes:
2132  *
2133  * This is the latest version of the procedure to
2134  * Reconfigure an Rx DMA channel:
2135  *
2136  * A.6.3 Reconfigure Rx DMA channel
2137  *	Stop RxMAC		A.9.2.6
2138  *	Drain IPP Port		A.9.3.6
2139  *	Stop and reset RxDMA	A.9.5.3
2140  *
2141  * This procedure will be executed by nxge_init_rxdma_channel() in the
2142  * guest domain:
2143  *
2144  *	Initialize RxDMA	A.9.5.4
2145  *	Reconfigure RxDMA
2146  *	Enable RxDMA		A.9.5.5
2147  *
2148  * We will do this here, since the RDC is a canalis non grata:
2149  *	Enable RxMAC		A.9.2.10
2150  *
2151  * Context:
2152  *	Service domain
2153  */
2154 int
2155 nxge_hio_rdc_share(
2156 	nxge_t *nxge,
2157 	nxge_hio_vr_t *vr,
2158 	int channel)
2159 {
2160 	nxge_grp_set_t *set = &nxge->rx_set;
2161 	nxge_rdc_grp_t *rdc_grp;
2162 
2163 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2164 
2165 	/* Disable interrupts. */
2166 	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2167 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2168 		    "Failed to remove interrupt for RxDMA channel %d",
2169 		    channel));
2170 		return (NXGE_ERROR);
2171 	}
2172 
2173 	/* Stop RxMAC = A.9.2.6 */
2174 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2175 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2176 		    "Failed to disable RxMAC"));
2177 	}
2178 
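	/*
	 * Draining the IPP pushes any packets it has already accepted
	 * from the (stopped) RxMAC down to the RxDMA engines, so that
	 * nothing is left stranded in the IPP while the channel
	 * changes hands.
	 */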
2179 	/* Drain IPP Port = A.9.3.6 */
2180 	(void) nxge_ipp_drain(nxge);
2181 
2182 	/* Stop and reset RxDMA = A.9.5.3 */
	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2184 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2185 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2186 		    "Failed to disable RxDMA channel %d", channel));
2187 	}
2188 
2189 	/* The SD is sharing this channel. */
2190 	NXGE_DC_SET(set->shared.map, channel);
2191 	set->shared.count++;
2192 
	/* Assert RST: RXDMA_CFIG1[30] = 1 */
2194 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
2195 
2196 	/*
2197 	 * The guest domain will reconfigure the RDC later.
2198 	 *
2199 	 * But in the meantime, we must re-enable the Rx MAC so
2200 	 * that we can start receiving packets again on the
2201 	 * remaining RDCs:
2202 	 *
2203 	 * Enable RxMAC = A.9.2.10
2204 	 */
2205 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2206 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2207 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2208 	}
2209 
2210 	/*
2211 	 * Initialize the DC-specific FZC control registers.
2212 	 * -----------------------------------------------------
2213 	 */
2214 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2215 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
2217 		return (-EIO);
2218 	}
2219 
2220 	/*
2221 	 * Update the RDC group.
2222 	 */
2223 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
2224 	NXGE_DC_SET(rdc_grp->map, channel);
2225 
2226 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2227 
2228 	return (0);
2229 }
2230 
2231 /*
2232  * nxge_hio_dc_share
2233  *
2234  *	Share a DMA channel with a guest domain.
2235  *
2236  * Arguments:
2237  * 	nxge
2238  * 	vr	The VR that <channel> will belong to.
2239  * 	type	Tx or Rx.
 * 	channel	The channel to share.
2241  *
2242  * Notes:
2243  *
2244  * Context:
2245  *	Service domain
2246  */
2247 int
2248 nxge_hio_dc_share(
2249 	nxge_t *nxge,
2250 	nxge_hio_vr_t *vr,
2251 	mac_ring_type_t type,
2252 	int channel)
2253 {
2254 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2255 	nxge_hio_dc_t *dc;
2256 	nxge_grp_t *group;
2257 	int slot;
2258 
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

2263 	/* -------------------------------------------------- */
2264 	slot = (type == MAC_RING_TYPE_TX) ?
2265 	    nxge_hio_tdc_share(nxge, channel) :
2266 	    nxge_hio_rdc_share(nxge, vr, channel);
2267 
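	/*
	 * If sharing failed, the channel may be partially torn down
	 * (e.g. its interrupt already removed), so run the full
	 * unshare path to return it to service-domain use.
	 */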
2268 	if (slot < 0) {
2269 		if (type == MAC_RING_TYPE_RX) {
2270 			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2271 		} else {
2272 			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2273 		}
2274 		return (slot);
2275 	}
2276 
2277 	MUTEX_ENTER(&nhd->lock);
2278 
2279 	/*
2280 	 * Tag this channel.
2281 	 * --------------------------------------------------
2282 	 */
	dc = (type == MAC_RING_TYPE_TX) ?
	    &nhd->tdc[channel] : &nhd->rdc[channel];
2284 
2285 	dc->vr = vr;
2286 	dc->channel = (nxge_channel_t)channel;
2287 
2288 	MUTEX_EXIT(&nhd->lock);
2289 
2290 	/*
2291 	 * vr->[t|r]x_group is used by the service domain to
2292 	 * keep track of its shared DMA channels.
2293 	 */
2294 	MUTEX_ENTER(&nxge->group_lock);
2295 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2296 
2297 	dc->group = group;
2298 	/* Initialize <group>, if necessary */
2299 	if (group->count == 0) {
2300 		group->nxge = nxge;
2301 		group->type = (type == MAC_RING_TYPE_TX) ?
2302 		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence = nhd->sequence++;
2304 		group->active = B_TRUE;
2305 	}
2306 
2307 	MUTEX_EXIT(&nxge->group_lock);
2308 
2309 	NXGE_ERROR_MSG((nxge, HIO_CTL,
2310 	    "DC share: %cDC %d was assigned to slot %d",
2311 	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));
2312 
2313 	nxge_grp_dc_append(nxge, group, dc);
2314 
2315 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));
2316 
2317 	return (0);
2318 }
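
/*
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * a real caller would be the code that populates a VR with DMA
 * channels, and <channel> must currently belong to the service
 * domain):
 *
 *	if (nxge_hio_dc_share(nxge, vr, MAC_RING_TYPE_RX, channel) != 0)
 *		return;	(the error path above has already unshared
 *			the channel, so there is nothing to undo)
 */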
2319 
2320 /*
2321  * nxge_hio_tdc_unshare
2322  *
2323  *	Unshare a TDC.
2324  *
2325  * Arguments:
2326  * 	nxge
 * 	dev_grpid	The device TxDMA group id the channel returns to.
 * 	channel	The channel to unshare (add again).
2328  *
2329  * Notes:
2330  *
2331  * Context:
2332  *	Service domain
2333  */
2334 void
2335 nxge_hio_tdc_unshare(
2336 	nxge_t *nxge,
2337 	int dev_grpid,
2338 	int channel)
2339 {
2340 	nxge_grp_set_t *set = &nxge->tx_set;
2341 	nxge_grp_t *group;
2342 	int grpid;
2343 
2344 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));
2345 
2346 	NXGE_DC_RESET(set->shared.map, channel);
2347 	set->shared.count--;
2348 
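	/*
	 * <dev_grpid> is the device-wide TxDMA group id; subtracting
	 * the port's default TxDMA group id yields the index of the
	 * group in this instance's group array.
	 */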
2349 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
2350 	group = set->group[grpid];
2351 
2352 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
2353 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2354 		    "Failed to initialize TxDMA channel %d", channel));
2355 		return;
2356 	}
2357 
2358 	/* Re-add this interrupt. */
2359 	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2360 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2361 		    "Failed to add interrupt for TxDMA channel %d", channel));
2362 	}
2363 
2364 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
2365 }
2366 
2367 /*
2368  * nxge_hio_rdc_unshare
2369  *
2370  *	Unshare an RDC: add it to the SD's RDC groups (tables).
2371  *
2372  * Arguments:
2373  * 	nxge
 * 	dev_grpid	The device RxDMA group id (RDC table) the
 * 			channel returns to.
 * 	channel	The channel to unshare (add again).
2375  *
2376  * Notes:
2377  *
2378  * Context:
2379  *	Service domain
2380  */
2381 void
2382 nxge_hio_rdc_unshare(
2383 	nxge_t *nxge,
2384 	int dev_grpid,
2385 	int channel)
2386 {
2387 	nxge_grp_set_t		*set = &nxge->rx_set;
2388 	nxge_grp_t		*group;
2389 	int			grpid;
2390 
2391 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2392 
2393 	/* Stop RxMAC = A.9.2.6 */
2394 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2395 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2396 		    "Failed to disable RxMAC"));
2397 	}
2398 
2399 	/* Drain IPP Port = A.9.3.6 */
2400 	(void) nxge_ipp_drain(nxge);
2401 
2402 	/* Stop and reset RxDMA = A.9.5.3 */
	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2404 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2405 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2406 		    "Failed to disable RxDMA channel %d", channel));
2407 	}
2408 
2409 	NXGE_DC_RESET(set->shared.map, channel);
2410 	set->shared.count--;
2411 
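	/*
	 * As in the TDC case, convert the device-wide RxDMA group id
	 * into an index into the local group array.
	 */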
2412 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2413 	group = set->group[grpid];
2414 
2415 	/*
2416 	 * Assert RST: RXDMA_CFIG1[30] = 1
2417 	 *
2418 	 * Initialize RxDMA	A.9.5.4
2419 	 * Reconfigure RxDMA
2420 	 * Enable RxDMA		A.9.5.5
2421 	 */
2422 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2423 		/* Be sure to re-enable the RX MAC. */
2424 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2425 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2427 		}
2428 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2429 		    "Failed to initialize RxDMA channel %d", channel));
2430 		return;
2431 	}
2432 
2433 	/*
2434 	 * Enable RxMAC = A.9.2.10
2435 	 */
2436 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2437 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2439 		return;
2440 	}
2441 
2442 	/* Re-add this interrupt. */
2443 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2444 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2445 		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
		    "RxDMA channel %d", channel));
2447 	}
2448 
2449 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
2450 }
2451 
2452 /*
2453  * nxge_hio_dc_unshare
2454  *
2455  *	Unshare (reuse) a DMA channel.
2456  *
2457  * Arguments:
2458  * 	nxge
2459  * 	vr	The VR that <channel> belongs to.
2460  * 	type	Tx or Rx.
2461  * 	channel	The DMA channel to reuse.
2462  *
2463  * Notes:
2464  *
2465  * Context:
2466  *	Service domain
2467  */
2468 void
2469 nxge_hio_dc_unshare(
2470 	nxge_t *nxge,
2471 	nxge_hio_vr_t *vr,
2472 	mac_ring_type_t type,
2473 	int channel)
2474 {
2475 	nxge_grp_t *group;
2476 	nxge_hio_dc_t *dc;
2477 
2478 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
2479 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2480 
2481 	/* Unlink the channel from its group. */
2482 	/* -------------------------------------------------- */
2483 	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
2484 	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == NULL) {
2486 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2487 		    "nxge_hio_dc_unshare(%d) failed", channel));
2488 		return;
2489 	}
2490 
	dc->vr = NULL;
2492 	dc->cookie = 0;
2493 
2494 	if (type == MAC_RING_TYPE_RX) {
2495 		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2496 	} else {
2497 		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2498 	}
2499 
2500 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
2501 }
2504 /*
2505  * nxge_hio_rxdma_bind_intr():
2506  *
 *	In the guest domain driver, bind the interrupt group and
 *	device state to the given rx_rcr_ring_t.
2509  */
2510 
2511 int
2512 nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
2513 {
2514 	nxge_hio_dc_t	*dc;
2515 	nxge_ldgv_t	*control;
2516 	nxge_ldg_t	*group;
2517 	nxge_ldv_t	*device;
2518 
2519 	/*
2520 	 * Find the DMA channel.
2521 	 */
	if ((dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel)) == NULL) {
2523 		return (NXGE_ERROR);
2524 	}
2525 
2526 	/*
2527 	 * Get the control structure.
2528 	 */
2529 	control = nxge->ldgvp;
2530 	if (control == NULL) {
2531 		return (NXGE_ERROR);
2532 	}
2533 
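	/*
	 * dc->ldg records the logical device group (interrupt vector)
	 * and logical device assigned to this channel when its
	 * interrupt was set up; use them to find the corresponding
	 * control structures.
	 */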
2534 	group = &control->ldgp[dc->ldg.vector];
2535 	device = &control->ldvp[dc->ldg.ldsv];
2536 
2537 	MUTEX_ENTER(&ring->lock);
2538 	ring->ldgp = group;
2539 	ring->ldvp = device;
2540 	MUTEX_EXIT(&ring->lock);
2541 
2542 	return (NXGE_OK);
2543 }
2544 #endif	/* if defined(sun4v) */
2545