xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_hio.c (revision 2cf06b0d760c09adfc3af47b08e250f9d56736e6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio.c
29  *
30  * This file manages the virtualization resources for Neptune
31  * devices.  That is, it implements a hybrid I/O (HIO) approach in the
32  * Solaris kernel, whereby a guest domain on an LDOMs server may
33  * request & use hardware resources from the service domain.
34  *
35  */
36 
37 #include <sys/mac_provider.h>
38 #include <sys/nxge/nxge_impl.h>
39 #include <sys/nxge/nxge_fzc.h>
40 #include <sys/nxge/nxge_rxdma.h>
41 #include <sys/nxge/nxge_txdma.h>
42 #include <sys/nxge/nxge_hio.h>
43 
44 #define	NXGE_HIO_SHARE_MIN_CHANNELS 2
45 #define	NXGE_HIO_SHARE_MAX_CHANNELS 2
46 
47 /*
48  * External prototypes
49  */
50 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
51 
52 /* The following functions may be found in nxge_main.c */
53 extern int nxge_m_mmac_remove(void *arg, int slot);
54 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
55 	boolean_t usetbl);
56 
57 /* The following functions may be found in nxge_[t|r]xdma.c */
58 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
59 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
60 
61 /*
62  * Local prototypes
63  */
64 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
65 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
66 static void nxge_grp_dc_map(nxge_grp_t *group);
67 
68 /*
69  * These functions are used by both service & guest domains to
70  * decide whether they're running in an LDOMs/XEN environment
71  * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
72  */
73 
74 /*
75  * nxge_get_environs
76  *
77  *	Figure out if we are in a guest domain or not.
78  *
79  * Arguments:
80  * 	nxge
81  *
82  * Notes:
83  *
84  * Context:
85  *	Any domain
86  */
87 void
88 nxge_get_environs(
89 	nxge_t *nxge)
90 {
91 	char *string;
92 
93 	/*
94 	 * In the beginning, assume that we are running sans LDOMs/XEN.
95 	 */
96 	nxge->environs = SOLARIS_DOMAIN;
97 
98 	/*
99 	 * Are we a hybrid I/O (HIO) guest domain driver?
100 	 */
101 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
102 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
103 	    "niutype", &string)) == DDI_PROP_SUCCESS) {
104 		if (strcmp(string, "n2niu") == 0) {
105 			nxge->environs = SOLARIS_GUEST_DOMAIN;
106 			/* So we can allocate properly-aligned memory. */
107 			nxge->niu_type = N2_NIU;
108 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
109 			    "Hybrid IO-capable guest domain"));
110 		}
111 		ddi_prop_free(string);
112 	}
113 }
114 
115 #if !defined(sun4v)
116 
117 /*
118  * nxge_hio_init
119  *
120  *	Initialize the HIO module of the NXGE driver.
121  *
122  * Arguments:
123  * 	nxge
124  *
125  * Notes:
126  *	This is the non-hybrid I/O version of this function.
127  *
128  * Context:
129  *	Any domain
130  */
131 int
132 nxge_hio_init(nxge_t *nxge)
133 {
134 	nxge_hio_data_t *nhd;
135 	int i;
136 
137 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
138 	if (nhd == 0) {
139 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
140 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
141 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
142 	}
143 
144 	/*
145 	 * Initialize share and ring group structures.
146 	 */
147 	for (i = 0; i < NXGE_MAX_TDCS; i++)
148 		nxge->tdc_is_shared[i] = B_FALSE;
149 
150 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
151 		nxge->tx_hio_groups[i].ghandle = NULL;
152 		nxge->tx_hio_groups[i].nxgep = nxge;
153 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
154 		nxge->tx_hio_groups[i].gindex = 0;
155 		nxge->tx_hio_groups[i].sindex = 0;
156 	}
157 
158 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
159 		nxge->rx_hio_groups[i].ghandle = NULL;
160 		nxge->rx_hio_groups[i].nxgep = nxge;
161 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
162 		nxge->rx_hio_groups[i].gindex = 0;
163 		nxge->rx_hio_groups[i].sindex = 0;
164 		nxge->rx_hio_groups[i].started = B_FALSE;
165 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
166 		nxge->rx_hio_groups[i].rdctbl = -1;
167 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
168 	}
169 
170 	nhd->hio.ldoms = B_FALSE;
171 
172 	return (NXGE_OK);
173 }
174 
175 #endif
176 
177 void
178 nxge_hio_uninit(nxge_t *nxge)
179 {
180 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
181 
182 	ASSERT(nxge->nxge_hw_p->ndevs == 0);
183 
184 	if (nhd != NULL) {
185 		MUTEX_DESTROY(&nhd->lock);
186 		KMEM_FREE(nhd, sizeof (*nhd));
187 		nxge->nxge_hw_p->hio = 0;
188 	}
189 }
190 
191 /*
192  * nxge_dci_map
193  *
194  *	Map a DMA channel index to a channel number.
195  *
196  * Arguments:
197  * 	instance	The instance number of the driver.
198  * 	type		The type of channel this is: Tx or Rx.
199  * 	index		The index to convert to a channel number
200  *
201  * Notes:
202  *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
203  *
204  * Context:
205  *	Any domain
206  */
207 int
208 nxge_dci_map(
209 	nxge_t *nxge,
210 	vpc_type_t type,
211 	int index)
212 {
213 	nxge_grp_set_t *set;
214 	int dc;
215 
216 	switch (type) {
217 	case VP_BOUND_TX:
218 		set = &nxge->tx_set;
219 		break;
220 	case VP_BOUND_RX:
221 		set = &nxge->rx_set;
222 		break;
223 	}
224 
225 	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
226 		if ((1 << dc) & set->owned.map) {
227 			if (index == 0)
228 				return (dc);
229 			else
230 				index--;
231 		}
232 	}
233 
234 	return (-1);
235 }
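
/*
 * An illustrative sketch of the mapping above (the channel numbers are
 * hypothetical): if an Rx set owns channels 1, 4 and 6, then
 * owned.map == 0x52 and
 *
 *	nxge_dci_map(nxge, VP_BOUND_RX, 0) == 1
 *	nxge_dci_map(nxge, VP_BOUND_RX, 1) == 4
 *	nxge_dci_map(nxge, VP_BOUND_RX, 2) == 6
 *	nxge_dci_map(nxge, VP_BOUND_RX, 3) == -1	(index out of range)
 */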
236 
237 /*
238  * ---------------------------------------------------------------------
239  * These are the general-purpose DMA channel group functions.  That is,
240  * these functions are used to manage groups of TDCs or RDCs in an HIO
241  * environment.
242  *
243  * But it is also expected that in the future they will be able to manage
244  * Crossbow groups.
245  * ---------------------------------------------------------------------
246  */
247 
248 /*
249  * nxge_grp_cleanup(p_nxge_t nxge)
250  *
251  *	Remove all outstanding groups.
252  *
253  * Arguments:
254  *	nxge
255  */
256 void
257 nxge_grp_cleanup(p_nxge_t nxge)
258 {
259 	nxge_grp_set_t *set;
260 	int i;
261 
262 	MUTEX_ENTER(&nxge->group_lock);
263 
264 	/*
265 	 * Find RX groups that need to be cleaned up.
266 	 */
267 	set = &nxge->rx_set;
268 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
269 		if (set->group[i] != NULL) {
270 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
271 			set->group[i] = NULL;
272 		}
273 	}
274 
275 	/*
276 	 * Find TX groups that need to be cleaned up.
277 	 */
278 	set = &nxge->tx_set;
279 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
280 		if (set->group[i] != NULL) {
281 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
282 			set->group[i] = NULL;
283 		}
284 	}
285 	MUTEX_EXIT(&nxge->group_lock);
286 }
287 
288 
289 /*
290  * nxge_grp_add
291  *
292  *	Add a group to an instance of NXGE.
293  *
294  * Arguments:
295  * 	nxge
296  * 	type	Tx or Rx
297  *
298  * Notes:
299  *
300  * Context:
301  *	Any domain
302  */
303 nxge_grp_t *
304 nxge_grp_add(
305 	nxge_t *nxge,
306 	nxge_grp_type_t type)
307 {
308 	nxge_grp_set_t *set;
309 	nxge_grp_t *group;
310 	int i;
311 
312 	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
313 	group->nxge = nxge;
314 
315 	MUTEX_ENTER(&nxge->group_lock);
316 	switch (type) {
317 	case NXGE_TRANSMIT_GROUP:
318 	case EXT_TRANSMIT_GROUP:
319 		set = &nxge->tx_set;
320 		break;
321 	default:
322 		set = &nxge->rx_set;
323 		break;
324 	}
325 
326 	group->type = type;
327 	group->active = B_TRUE;
328 	group->sequence = set->sequence++;
329 
330 	/* Find an empty slot for this logical group. */
331 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
332 		if (set->group[i] == 0) {
333 			group->index = i;
334 			set->group[i] = group;
335 			NXGE_DC_SET(set->lg.map, i);
336 			set->lg.count++;
337 			break;
338 		}
339 	}
340 	MUTEX_EXIT(&nxge->group_lock);
341 
342 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
343 	    "nxge_grp_add: %cgroup = %d.%d",
344 	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
345 	    nxge->mac.portnum, group->sequence));
346 
347 	return (group);
348 }
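
/*
 * A minimal usage sketch (hypothetical caller, not taken verbatim from
 * this driver): a logical group is typically allocated first and then
 * populated with DMA channels, e.g.
 *
 *	nxge_grp_t *grp = nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
 *
 *	if (nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, channel) != NXGE_OK)
 *		nxge_grp_remove(nxge, grp);
 */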
349 
350 void
351 nxge_grp_remove(
352 	nxge_t *nxge,
353 	nxge_grp_t *group)	/* The group to remove. */
354 {
355 	nxge_grp_set_t *set;
356 	vpc_type_t type;
357 
358 	MUTEX_ENTER(&nxge->group_lock);
359 	switch (group->type) {
360 	case NXGE_TRANSMIT_GROUP:
361 	case EXT_TRANSMIT_GROUP:
362 		set = &nxge->tx_set;
363 		break;
364 	default:
365 		set = &nxge->rx_set;
366 		break;
367 	}
368 
369 	if (set->group[group->index] != group) {
370 		MUTEX_EXIT(&nxge->group_lock);
371 		return;
372 	}
373 
374 	set->group[group->index] = 0;
375 	NXGE_DC_RESET(set->lg.map, group->index);
376 	set->lg.count--;
377 
378 	/* While inside the mutex, deactivate <group>. */
379 	group->active = B_FALSE;
380 
381 	MUTEX_EXIT(&nxge->group_lock);
382 
383 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
384 	    "nxge_grp_remove(%c.%d.%d) called",
385 	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
386 	    nxge->mac.portnum, group->sequence));
387 
388 	/* Now, remove any DCs which are still active. */
389 	switch (group->type) {
390 	default:
391 		type = VP_BOUND_TX;
392 		break;
393 	case NXGE_RECEIVE_GROUP:
394 	case EXT_RECEIVE_GROUP:
395 		type = VP_BOUND_RX;
396 	}
397 
398 	while (group->dc) {
399 		nxge_grp_dc_remove(nxge, type, group->dc->channel);
400 	}
401 
402 	KMEM_FREE(group, sizeof (*group));
403 }
404 
405 /*
406  * nxge_grp_dc_add
407  *
408  *	Add a DMA channel to a VR/Group.
409  *
410  * Arguments:
411  * 	nxge
412  * 	channel	The channel to add.
413  * Notes:
414  *
415  * Context:
416  *	Any domain
417  */
418 /* ARGSUSED */
419 int
420 nxge_grp_dc_add(
421 	nxge_t *nxge,
422 	nxge_grp_t *group,	/* The group to add <channel> to. */
423 	vpc_type_t type,	/* Rx or Tx */
424 	int channel)		/* A physical/logical channel number */
425 {
426 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
427 	nxge_hio_dc_t *dc;
428 	nxge_grp_set_t *set;
429 	nxge_status_t status = NXGE_OK;
430 
431 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
432 
433 	if (group == 0)
434 		return (0);
435 
436 	switch (type) {
437 	case VP_BOUND_TX:
438 		set = &nxge->tx_set;
439 		if (channel > NXGE_MAX_TDCS) {
440 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
441 			    "nxge_grp_dc_add: TDC = %d", channel));
442 			return (NXGE_ERROR);
443 		}
444 		break;
445 	case VP_BOUND_RX:
446 		set = &nxge->rx_set;
447 		if (channel > NXGE_MAX_RDCS) {
448 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
449 			    "nxge_grp_dc_add: RDC = %d", channel));
450 			return (NXGE_ERROR);
451 		}
452 		break;
453 
454 	default:
455 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
456 		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
457 	}
458 
459 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
460 	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
461 	    type == VP_BOUND_TX ? 't' : 'r',
462 	    nxge->mac.portnum, group->sequence, group->count, channel));
463 
464 	MUTEX_ENTER(&nxge->group_lock);
465 	if (group->active != B_TRUE) {
466 		/* We may be in the process of removing this group. */
467 		MUTEX_EXIT(&nxge->group_lock);
468 		return (NXGE_ERROR);
469 	}
470 	MUTEX_EXIT(&nxge->group_lock);
471 
472 	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
473 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
474 		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
475 		return (NXGE_ERROR);
476 	}
477 
478 	MUTEX_ENTER(&nhd->lock);
479 
480 	if (dc->group) {
481 		MUTEX_EXIT(&nhd->lock);
482 		/* This channel is already in use! */
483 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
484 		    "nxge_grp_dc_add(%d): channel already in group", channel));
485 		return (NXGE_ERROR);
486 	}
487 
488 	dc->next = 0;
489 	dc->page = channel;
490 	dc->channel = (nxge_channel_t)channel;
491 
492 	dc->type = type;
493 	if (type == VP_BOUND_RX) {
494 		dc->init = nxge_init_rxdma_channel;
495 		dc->uninit = nxge_uninit_rxdma_channel;
496 	} else {
497 		dc->init = nxge_init_txdma_channel;
498 		dc->uninit = nxge_uninit_txdma_channel;
499 	}
500 
501 	dc->group = group;
502 
503 	if (isLDOMguest(nxge))
504 		(void) nxge_hio_ldsv_add(nxge, dc);
505 
506 	NXGE_DC_SET(set->owned.map, channel);
507 	set->owned.count++;
508 
509 	MUTEX_EXIT(&nhd->lock);
510 
511 	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
512 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
513 		    "nxge_grp_dc_add(%d): channel init failed", channel));
514 		MUTEX_ENTER(&nhd->lock);
515 		(void) memset(dc, 0, sizeof (*dc));
516 		NXGE_DC_RESET(set->owned.map, channel);
517 		set->owned.count--;
518 		MUTEX_EXIT(&nhd->lock);
519 		return (NXGE_ERROR);
520 	}
521 
522 	nxge_grp_dc_append(nxge, group, dc);
523 
524 	if (type == VP_BOUND_TX) {
525 		MUTEX_ENTER(&nhd->lock);
526 		nxge->tdc_is_shared[channel] = B_FALSE;
527 		MUTEX_EXIT(&nhd->lock);
528 	}
529 
530 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
531 
532 	return ((int)status);
533 }
534 
535 void
536 nxge_grp_dc_remove(
537 	nxge_t *nxge,
538 	vpc_type_t type,
539 	int channel)
540 {
541 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
542 	nxge_hio_dc_t *dc;
543 	nxge_grp_set_t *set;
544 	nxge_grp_t *group;
545 
546 	dc_uninit_t uninit;
547 
548 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
549 
550 	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
551 		goto nxge_grp_dc_remove_exit;
552 
553 	if ((dc->group == NULL) && (dc->next == 0) &&
554 	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
555 		goto nxge_grp_dc_remove_exit;
556 	}
557 
558 	group = (nxge_grp_t *)dc->group;
559 
560 	if (isLDOMguest(nxge)) {
561 		(void) nxge_hio_intr_remove(nxge, type, channel);
562 	}
563 
564 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
565 	    "DC remove: group = %d.%d.%d, %cdc %d",
566 	    nxge->mac.portnum, group->sequence, group->count,
567 	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));
568 
569 	MUTEX_ENTER(&nhd->lock);
570 
571 	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
572 
573 	/* Remove the DC from its group. */
574 	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
575 		MUTEX_EXIT(&nhd->lock);
576 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
577 		    "nxge_grp_dc_remove(%d) failed", channel));
578 		goto nxge_grp_dc_remove_exit;
579 	}
580 
581 	uninit = dc->uninit;
582 	channel = dc->channel;
583 
584 	NXGE_DC_RESET(set->owned.map, channel);
585 	set->owned.count--;
586 
587 	(void) memset(dc, 0, sizeof (*dc));
588 
589 	MUTEX_EXIT(&nhd->lock);
590 
591 	(*uninit)(nxge, channel);
592 
593 nxge_grp_dc_remove_exit:
594 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
595 }
596 
597 nxge_hio_dc_t *
598 nxge_grp_dc_find(
599 	nxge_t *nxge,
600 	vpc_type_t type,	/* Rx or Tx */
601 	int channel)
602 {
603 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
604 	nxge_hio_dc_t *current;
605 
606 	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
607 
608 	if (!isLDOMguest(nxge)) {
609 		return (&current[channel]);
610 	} else {
611 		/* We're in a guest domain. */
612 		int i, limit = (type == VP_BOUND_TX) ?
613 		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;
614 
615 		MUTEX_ENTER(&nhd->lock);
616 		for (i = 0; i < limit; i++, current++) {
617 			if (current->channel == channel) {
618 				if (current->vr && current->vr->nxge ==
619 				    (uintptr_t)nxge) {
620 					MUTEX_EXIT(&nhd->lock);
621 					return (current);
622 				}
623 			}
624 		}
625 		MUTEX_EXIT(&nhd->lock);
626 	}
627 
628 	return (0);
629 }
630 
631 /*
632  * nxge_grp_dc_append
633  *
634  *	Append a DMA channel to a group.
635  *
636  * Arguments:
637  * 	nxge
638  * 	group	The group to append to
639  * 	dc	The DMA channel to append
640  *
641  * Notes:
642  *
643  * Context:
644  *	Any domain
645  */
646 static
647 void
648 nxge_grp_dc_append(
649 	nxge_t *nxge,
650 	nxge_grp_t *group,
651 	nxge_hio_dc_t *dc)
652 {
653 	MUTEX_ENTER(&nxge->group_lock);
654 
655 	if (group->dc == 0) {
656 		group->dc = dc;
657 	} else {
658 		nxge_hio_dc_t *current = group->dc;
659 		do {
660 			if (current->next == 0) {
661 				current->next = dc;
662 				break;
663 			}
664 			current = current->next;
665 		} while (current);
666 	}
667 
668 	NXGE_DC_SET(group->map, dc->channel);
669 
670 	nxge_grp_dc_map(group);
671 	group->count++;
672 
673 	MUTEX_EXIT(&nxge->group_lock);
674 }
675 
676 /*
677  * nxge_grp_dc_unlink
678  *
679  *	Unlink a DMA channel fromits linked list (group).
680  *
681  * Arguments:
682  * 	nxge
683  * 	group	The group (linked list) to unlink from
684  * 	dc	The DMA channel to append
685  *
686  * Notes:
687  *
688  * Context:
689  *	Any domain
690  */
691 nxge_hio_dc_t *
692 nxge_grp_dc_unlink(
693 	nxge_t *nxge,
694 	nxge_grp_t *group,
695 	int channel)
696 {
697 	nxge_hio_dc_t *current, *previous;
698 
699 	MUTEX_ENTER(&nxge->group_lock);
700 
701 	if (group == NULL) {
702 		MUTEX_EXIT(&nxge->group_lock);
703 		return (0);
704 	}
705 
706 	if ((current = group->dc) == 0) {
707 		MUTEX_EXIT(&nxge->group_lock);
708 		return (0);
709 	}
710 
711 	previous = 0;
712 	do {
713 		if (current->channel == channel) {
714 			if (previous)
715 				previous->next = current->next;
716 			else
717 				group->dc = current->next;
718 			break;
719 		}
720 		previous = current;
721 		current = current->next;
722 	} while (current);
723 
724 	if (current == 0) {
725 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
726 		    "DC unlink: DC %d not found", channel));
727 	} else {
728 		current->next = 0;
729 		current->group = 0;
730 
731 		NXGE_DC_RESET(group->map, channel);
732 		group->count--;
733 	}
734 
735 	nxge_grp_dc_map(group);
736 
737 	MUTEX_EXIT(&nxge->group_lock);
738 
739 	return (current);
740 }
741 
742 /*
743  * nxge_grp_dc_map
744  *
745  *	Map a linked list to an array of channel numbers.
746  *
747  * Arguments:
748  * 	nxge
749  * 	group	The group to remap.
750  *
751  * Notes:
752  *	It is expected that the caller will hold the correct mutex.
753  *
754  * Context:
755  *	Service domain
756  */
757 void
758 nxge_grp_dc_map(
759 	nxge_grp_t *group)
760 {
761 	nxge_channel_t *legend;
762 	nxge_hio_dc_t *dc;
763 
764 	(void) memset(group->legend, 0, sizeof (group->legend));
765 
766 	legend = group->legend;
767 	dc = group->dc;
768 	while (dc) {
769 		*legend = dc->channel;
770 		legend++;
771 		dc = dc->next;
772 	}
773 }
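
/*
 * For example (the channel numbers are illustrative), a group to which
 * channels 2 and 3 have been appended has group->map == 0x0c and
 * group->count == 2, and after nxge_grp_dc_map() its legend reads:
 *
 *	group->legend[0] == 2
 *	group->legend[1] == 3
 *	group->legend[2] == 0	(remaining entries are cleared)
 */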
774 
775 /*
776  * ---------------------------------------------------------------------
777  * These are HIO debugging functions.
778  * ---------------------------------------------------------------------
779  */
780 
781 /*
782  * nxge_delay
783  *
784  *	Delay <seconds> number of seconds.
785  *
786  * Arguments:
787  * 	nxge
788  * 	group	The group to append to
789  * 	dc	The DMA channel to append
790  *
791  * Notes:
792  *	This is a developer-only function.
793  *
794  * Context:
795  *	Any domain
796  */
797 void
798 nxge_delay(
799 	int seconds)
800 {
801 	delay(drv_usectohz(seconds * 1000000));
802 }
803 
804 static dmc_reg_name_t rx_names[] = {
805 	{ "RXDMA_CFIG1",	0 },
806 	{ "RXDMA_CFIG2",	8 },
807 	{ "RBR_CFIG_A",		0x10 },
808 	{ "RBR_CFIG_B",		0x18 },
809 	{ "RBR_KICK",		0x20 },
810 	{ "RBR_STAT",		0x28 },
811 	{ "RBR_HDH",		0x30 },
812 	{ "RBR_HDL",		0x38 },
813 	{ "RCRCFIG_A",		0x40 },
814 	{ "RCRCFIG_B",		0x48 },
815 	{ "RCRSTAT_A",		0x50 },
816 	{ "RCRSTAT_B",		0x58 },
817 	{ "RCRSTAT_C",		0x60 },
818 	{ "RX_DMA_ENT_MSK",	0x68 },
819 	{ "RX_DMA_CTL_STAT",	0x70 },
820 	{ "RCR_FLSH",		0x78 },
821 	{ "RXMISC",		0x90 },
822 	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
823 	{ 0, -1 }
824 };
825 
826 static dmc_reg_name_t tx_names[] = {
827 	{ "Tx_RNG_CFIG",	0 },
828 	{ "Tx_RNG_HDL",		0x10 },
829 	{ "Tx_RNG_KICK",	0x18 },
830 	{ "Tx_ENT_MASK",	0x20 },
831 	{ "Tx_CS",		0x28 },
832 	{ "TxDMA_MBH",		0x30 },
833 	{ "TxDMA_MBL",		0x38 },
834 	{ "TxDMA_PRE_ST",	0x40 },
835 	{ "Tx_RNG_ERR_LOGH",	0x48 },
836 	{ "Tx_RNG_ERR_LOGL",	0x50 },
837 	{ "TDMC_INTR_DBG",	0x60 },
838 	{ "Tx_CS_DBG",		0x68 },
839 	{ 0, -1 }
840 };
841 
842 /*
843  * nxge_xx2str
844  *
845  *	Translate a register address into a string.
846  *
847  * Arguments:
848  * 	offset	The address of the register to translate.
849  *
850  * Notes:
851  *	These are developer-only function.
852  *
853  * Context:
854  *	Any domain
855  */
856 const char *
857 nxge_rx2str(
858 	int offset)
859 {
860 	dmc_reg_name_t *reg = &rx_names[0];
861 
862 	offset &= DMA_CSR_MASK;
863 
864 	while (reg->name) {
865 		if (offset == reg->offset)
866 			return (reg->name);
867 		reg++;
868 	}
869 
870 	return (0);
871 }
872 
873 const char *
874 nxge_tx2str(
875 	int offset)
876 {
877 	dmc_reg_name_t *reg = &tx_names[0];
878 
879 	offset &= DMA_CSR_MASK;
880 
881 	while (reg->name) {
882 		if (offset == reg->offset)
883 			return (reg->name);
884 		reg++;
885 	}
886 
887 	return (0);
888 }
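
/*
 * For example, given the tables above, nxge_rx2str(0x70) returns
 * "RX_DMA_CTL_STAT" and nxge_tx2str(0x28) returns "Tx_CS".  Because the
 * offset is masked with DMA_CSR_MASK first, higher-order address bits
 * do not affect the lookup.
 */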
889 
890 /*
891  * nxge_ddi_perror
892  *
893  *	Map a DDI error number to a string.
894  *
895  * Arguments:
896  * 	ddi_error	The DDI error number to map.
897  *
898  * Notes:
899  *
900  * Context:
901  *	Any domain
902  */
903 const char *
904 nxge_ddi_perror(
905 	int ddi_error)
906 {
907 	switch (ddi_error) {
908 	case DDI_SUCCESS:
909 		return ("DDI_SUCCESS");
910 	case DDI_FAILURE:
911 		return ("DDI_FAILURE");
912 	case DDI_NOT_WELL_FORMED:
913 		return ("DDI_NOT_WELL_FORMED");
914 	case DDI_EAGAIN:
915 		return ("DDI_EAGAIN");
916 	case DDI_EINVAL:
917 		return ("DDI_EINVAL");
918 	case DDI_ENOTSUP:
919 		return ("DDI_ENOTSUP");
920 	case DDI_EPENDING:
921 		return ("DDI_EPENDING");
922 	case DDI_ENOMEM:
923 		return ("DDI_ENOMEM");
924 	case DDI_EBUSY:
925 		return ("DDI_EBUSY");
926 	case DDI_ETRANSPORT:
927 		return ("DDI_ETRANSPORT");
928 	case DDI_ECONTEXT:
929 		return ("DDI_ECONTEXT");
930 	default:
931 		return ("Unknown error");
932 	}
933 }
934 
935 /*
936  * ---------------------------------------------------------------------
937  * These are Sun4v HIO function definitions
938  * ---------------------------------------------------------------------
939  */
940 
941 #if defined(sun4v)
942 
943 /*
944  * Local prototypes
945  */
946 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
947 static void nxge_hio_unshare(nxge_hio_vr_t *);
948 
949 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
950 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
951 
952 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
953 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
954 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
955 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
956     mac_ring_type_t, int);
957 
958 /*
959  * nxge_hio_init
960  *
961  *	Initialize the HIO module of the NXGE driver.
962  *
963  * Arguments:
964  * 	nxge
965  *
966  * Notes:
967  *
968  * Context:
969  *	Any domain
970  */
971 int
972 nxge_hio_init(
973 	nxge_t *nxge)
974 {
975 	nxge_hio_data_t *nhd;
976 	int i, region;
977 
978 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
979 	if (nhd == 0) {
980 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
981 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
982 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
983 	}
984 
985 	if ((nxge->environs == SOLARIS_DOMAIN) &&
986 	    (nxge->niu_type == N2_NIU)) {
987 		if (nxge->niu_hsvc_available == B_TRUE) {
988 			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
989 			if (niu_hsvc->hsvc_major == 1 &&
990 			    niu_hsvc->hsvc_minor == 1)
991 				nxge->environs = SOLARIS_SERVICE_DOMAIN;
992 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
993 			    "nxge_hio_init: hypervisor services "
994 			    "version %d.%d",
995 			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
996 		}
997 	}
998 
999 	/*
1000 	 * Initialize share and ring group structures.
1001 	 */
1002 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1003 		nxge->tx_hio_groups[i].ghandle = NULL;
1004 		nxge->tx_hio_groups[i].nxgep = nxge;
1005 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1006 		nxge->tx_hio_groups[i].gindex = 0;
1007 		nxge->tx_hio_groups[i].sindex = 0;
1008 	}
1009 
1010 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1011 		nxge->rx_hio_groups[i].ghandle = NULL;
1012 		nxge->rx_hio_groups[i].nxgep = nxge;
1013 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1014 		nxge->rx_hio_groups[i].gindex = 0;
1015 		nxge->rx_hio_groups[i].sindex = 0;
1016 		nxge->rx_hio_groups[i].started = B_FALSE;
1017 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
1018 		nxge->rx_hio_groups[i].rdctbl = -1;
1019 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
1020 	}
1021 
1022 	if (!isLDOMs(nxge)) {
1023 		nhd->hio.ldoms = B_FALSE;
1024 		return (NXGE_OK);
1025 	}
1026 
1027 	nhd->hio.ldoms = B_TRUE;
1028 
1029 	/*
1030 	 * Fill in what we can.
1031 	 */
1032 	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
1033 		nhd->vr[region].region = region;
1034 	}
1035 	nhd->vrs = NXGE_VR_SR_MAX - 2;
1036 
1037 	/*
1038 	 * Initialize the share structures.
1039 	 */
1040 	for (i = 0; i < NXGE_MAX_TDCS; i++)
1041 		nxge->tdc_is_shared[i] = B_FALSE;
1042 
1043 	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
1044 		nxge->shares[i].nxgep = nxge;
1045 		nxge->shares[i].index = 0;
1046 		nxge->shares[i].vrp = NULL;
1047 		nxge->shares[i].tmap = 0;
1048 		nxge->shares[i].rmap = 0;
1049 		nxge->shares[i].rxgroup = 0;
1050 		nxge->shares[i].active = B_FALSE;
1051 	}
1052 
1053 	/* Fill in the HV HIO function pointers. */
1054 	nxge_hio_hv_init(nxge);
1055 
1056 	if (isLDOMservice(nxge)) {
1057 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
1058 		    "Hybrid IO-capable service domain"));
1059 		return (NXGE_OK);
1060 	} else {
1061 		/*
1062 		 * isLDOMguest(nxge) == B_TRUE
1063 		 */
1064 		nx_vio_fp_t *vio;
1065 		nhd->type = NXGE_HIO_TYPE_GUEST;
1066 
1067 		vio = &nhd->hio.vio;
1068 		vio->__register = (vio_net_resource_reg_t)
1069 		    modgetsymvalue("vio_net_resource_reg", 0);
1070 		vio->unregister = (vio_net_resource_unreg_t)
1071 		    modgetsymvalue("vio_net_resource_unreg", 0);
1072 
1073 		if (vio->__register == 0 || vio->unregister == 0) {
1074 			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
1075 			return (NXGE_ERROR);
1076 		}
1077 	}
1078 
1079 	return (0);
1080 }
1081 #endif /* defined(sun4v) */
1082 
1083 static int
1084 nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
1085     const uint8_t *macaddr)
1086 {
1087 	int rv;
1088 	nxge_rdc_grp_t *group;
1089 
1090 	mutex_enter(nxge->genlock);
1091 
1092 	/*
1093 	 * Initialize the NXGE RDC table data structure.
1094 	 */
1095 	group = &nxge->pt_config.rdc_grps[g->rdctbl];
1096 	if (!group->flag) {
1097 		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
1098 		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
1099 		group->flag = B_TRUE;	/* This group has been configured. */
1100 	}
1101 
1102 	mutex_exit(nxge->genlock);
1103 
1104 	/*
1105 	 * Add the MAC address.
1106 	 */
1107 	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
1108 	    g->rdctbl, B_TRUE)) != 0) {
1109 		return (rv);
1110 	}
1111 
1112 	mutex_enter(nxge->genlock);
1113 	g->n_mac_addrs++;
1114 	mutex_exit(nxge->genlock);
1115 	return (0);
1116 }
1117 
1118 static int
1119 nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
1120 {
1121 	p_nxge_t		nxgep = (p_nxge_t)arg;
1122 	struct ether_addr	addrp;
1123 
1124 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
1125 	if (nxge_set_mac_addr(nxgep, &addrp)) {
1126 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1127 		    "<== nxge_m_unicst: set unitcast failed"));
1128 		return (EINVAL);
1129 	}
1130 
1131 	nxgep->primary = B_TRUE;
1132 
1133 	return (0);
1134 }
1135 
1136 /*ARGSUSED*/
1137 static int
1138 nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
1139 {
1140 	nxgep->primary = B_FALSE;
1141 	return (0);
1142 }
1143 
1144 static int
1145 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
1146 {
1147 	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1148 	p_nxge_t nxge = group->nxgep;
1149 	int rv;
1150 	nxge_hio_vr_t *vr;	/* The Virtualization Region */
1151 
1152 	ASSERT(group->type == MAC_RING_TYPE_RX);
1153 
1154 	mutex_enter(nxge->genlock);
1155 
1156 	if (!nxge->primary && group->port_default_grp) {
1157 		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
1158 		mutex_exit(nxge->genlock);
1159 		return (rv);
1160 	}
1161 
1162 	/*
1163 	 * If the group is associated with a VR, then only one
1164 	 * address may be assigned to the group.
1165 	 */
1166 	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
1167 	if ((vr != NULL) && (group->n_mac_addrs)) {
1168 		mutex_exit(nxge->genlock);
1169 		return (ENOSPC);
1170 	}
1171 
1172 	mutex_exit(nxge->genlock);
1173 
1174 	/*
1175 	 * Program the mac address for the group.
1176 	 */
1177 	if ((rv = nxge_hio_group_mac_add(nxge, group,
1178 	    mac_addr)) != 0) {
1179 		return (rv);
1180 	}
1181 
1182 	return (0);
1183 }
1184 
1185 static int
1186 find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
1187 {
1188 	int i;
1189 	for (i = 0; i <= mmac_info->num_mmac; i++) {
1190 		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
1191 		    ETHERADDRL) == 0) {
1192 			return (i);
1193 		}
1194 	}
1195 	return (-1);
1196 }
1197 
1198 /* ARGSUSED */
1199 static int
1200 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
1201 {
1202 	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1203 	struct ether_addr addrp;
1204 	p_nxge_t nxge = group->nxgep;
1205 	nxge_mmac_t *mmac_info;
1206 	int rv, slot;
1207 
1208 	ASSERT(group->type == MAC_RING_TYPE_RX);
1209 
1210 	mutex_enter(nxge->genlock);
1211 
1212 	mmac_info = &nxge->nxge_mmac_info;
1213 	slot = find_mac_slot(mmac_info, mac_addr);
1214 	if (slot < 0) {
1215 		if (group->port_default_grp && nxge->primary) {
1216 			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
1217 			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
1218 				rv = nxge_hio_clear_unicst(nxge, mac_addr);
1219 				mutex_exit(nxge->genlock);
1220 				return (rv);
1221 			} else {
1222 				mutex_exit(nxge->genlock);
1223 				return (EINVAL);
1224 			}
1225 		} else {
1226 			mutex_exit(nxge->genlock);
1227 			return (EINVAL);
1228 		}
1229 	}
1230 
1231 	mutex_exit(nxge->genlock);
1232 
1233 	/*
1234 	 * Remove the mac address for the group
1235 	 */
1236 	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
1237 		return (rv);
1238 	}
1239 
1240 	mutex_enter(nxge->genlock);
1241 	group->n_mac_addrs--;
1242 	mutex_exit(nxge->genlock);
1243 
1244 	return (0);
1245 }
1246 
1247 static int
1248 nxge_hio_group_start(mac_group_driver_t gdriver)
1249 {
1250 	nxge_ring_group_t	*group = (nxge_ring_group_t *)gdriver;
1251 	int			rdctbl;
1252 	int			dev_gindex;
1253 
1254 	ASSERT(group->type == MAC_RING_TYPE_RX);
1255 
1256 #ifdef later
1257 	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
1258 #endif
1259 	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
1260 		return (ENXIO);
1261 
1262 	mutex_enter(group->nxgep->genlock);
1263 	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1264 	    group->gindex;
1265 
1266 	/*
1267 	 * Get an rdc table for this group.
1268 	 * Group ID is given by the caller, and that's the group it needs
1269 	 * to bind to.  The default group is already bound when the driver
1270 	 * was attached.
1271 	 *
1272 	 * For group 0, its RDC table was allocated at attach time, so
1273 	 * there is no need to allocate a new table.
1274 	 */
1275 	if (group->gindex != 0) {
1276 		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
1277 		    dev_gindex, B_TRUE);
1278 		if (rdctbl < 0) {
1279 			mutex_exit(group->nxgep->genlock);
1280 			return (rdctbl);
1281 		}
1282 	} else {
1283 		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
1284 	}
1285 
1286 	group->rdctbl = rdctbl;
1287 
1288 	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdctbl);
1289 
1290 	group->started = B_TRUE;
1291 	mutex_exit(group->nxgep->genlock);
1292 
1293 	return (0);
1294 }
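
/*
 * To make the arithmetic above concrete (the numbers are hypothetical):
 * if this port's default RX DMA group ID is 8 and the MAC layer starts
 * group index 2, then dev_gindex == 10 and, since gindex != 0, a fresh
 * RDC table is bound for device group 10.  Group index 0 simply reuses
 * the table that was bound at attach time.
 */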
1295 
1296 static void
1297 nxge_hio_group_stop(mac_group_driver_t gdriver)
1298 {
1299 	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1300 
1301 	ASSERT(group->type == MAC_RING_TYPE_RX);
1302 
1303 	mutex_enter(group->nxgep->genlock);
1304 	group->started = B_FALSE;
1305 
1306 	/*
1307 	 * Unbind the RDC table previously bound for this group.
1308 	 *
1309 	 * Since RDC table for group 0 was allocated at attach
1310 	 * time, no need to unbind the table here.
1311 	 */
1312 	if (group->gindex != 0)
1313 		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1314 
1315 	mutex_exit(group->nxgep->genlock);
1316 }
1317 
1318 /* ARGSUSED */
1319 void
1320 nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
1321 	mac_group_info_t *infop, mac_group_handle_t ghdl)
1322 {
1323 	p_nxge_t		nxgep = (p_nxge_t)arg;
1324 	nxge_ring_group_t	*group;
1325 	int			dev_gindex;
1326 
1327 	switch (type) {
1328 	case MAC_RING_TYPE_RX:
1329 		group = &nxgep->rx_hio_groups[groupid];
1330 		group->nxgep = nxgep;
1331 		group->ghandle = ghdl;
1332 		group->gindex = groupid;
1333 		group->sindex = 0;	/* not yet bound to a share */
1334 
1335 		dev_gindex = nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1336 		    groupid;
1337 
1338 		if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
1339 		    dev_gindex)
1340 			group->port_default_grp = B_TRUE;
1341 
1342 		infop->mgi_driver = (mac_group_driver_t)group;
1343 		infop->mgi_start = nxge_hio_group_start;
1344 		infop->mgi_stop = nxge_hio_group_stop;
1345 		infop->mgi_addmac = nxge_hio_add_mac;
1346 		infop->mgi_remmac = nxge_hio_rem_mac;
1347 		infop->mgi_count =
1348 		    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
1349 		break;
1350 
1351 	case MAC_RING_TYPE_TX:
1352 		/*
1353 		 * 'groupid' for TX should be incremented by one since
1354 		 * the default group (groupid 0) is not known by the MAC layer
1355 		 */
1356 		group = &nxgep->tx_hio_groups[groupid + 1];
1357 		group->nxgep = nxgep;
1358 		group->ghandle = ghdl;
1359 		group->gindex = groupid + 1;
1360 		group->sindex = 0;	/* not yet bound to a share */
1361 
1362 		infop->mgi_driver = (mac_group_driver_t)group;
1363 		infop->mgi_start = NULL;
1364 		infop->mgi_stop = NULL;
1365 		infop->mgi_addmac = NULL;	/* not needed */
1366 		infop->mgi_remmac = NULL;	/* not needed */
1367 		/* no rings associated with group initially */
1368 		infop->mgi_count = 0;
1369 		break;
1370 	}
1371 }
1372 
1373 #if defined(sun4v)
1374 
1375 int
1376 nxge_hio_share_assign(
1377 	nxge_t *nxge,
1378 	uint64_t cookie,
1379 	res_map_t *tmap,
1380 	res_map_t *rmap,
1381 	nxge_hio_vr_t *vr)
1382 {
1383 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1384 	uint64_t slot, hv_rv;
1385 	nxge_hio_dc_t *dc;
1386 	nxhv_vr_fp_t *fp;
1387 	int i;
1388 
1389 	/*
1390 	 * Ask the Hypervisor to set up the VR for us
1391 	 */
1392 	fp = &nhd->hio.vr;
1393 	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1394 		NXGE_ERROR_MSG((nxge, HIO_CTL,
1395 		    "nxge_hio_share_assign: "
1396 		    "vr->assign() returned %d", hv_rv));
1397 		return (-EIO);
1398 	}
1399 
1400 	/*
1401 	 * For each shared TDC, ask the HV to find us an empty slot.
1402 	 * -----------------------------------------------------
1403 	 */
1404 	dc = vr->tx_group.dc;
1405 	for (i = 0; i < NXGE_MAX_TDCS; i++) {
1406 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1407 		while (dc) {
1408 			hv_rv = (*tx->assign)
1409 			    (vr->cookie, dc->channel, &slot);
1410 			if (hv_rv != 0) {
1411 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1412 				    "nxge_hio_share_assign: "
1413 				    "tx->assign(%x, %d) failed: %ld",
1414 				    vr->cookie, dc->channel, hv_rv));
1415 				return (-EIO);
1416 			}
1417 
1418 			dc->cookie = vr->cookie;
1419 			dc->page = (vp_channel_t)slot;
1420 
1421 			/* Inform the caller about the slot chosen. */
1422 			(*tmap) |= 1 << slot;
1423 
1424 			dc = dc->next;
1425 		}
1426 	}
1427 
1428 	/*
1429 	 * For each shared RDC, ask the HV to find us an empty slot.
1430 	 * -----------------------------------------------------
1431 	 */
1432 	dc = vr->rx_group.dc;
1433 	for (i = 0; i < NXGE_MAX_RDCS; i++) {
1434 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1435 		while (dc) {
1436 			hv_rv = (*rx->assign)
1437 			    (vr->cookie, dc->channel, &slot);
1438 			if (hv_rv != 0) {
1439 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1440 				    "nxge_hio_share_assign: "
1441 				    "rx->assign(%x, %d) failed: %ld",
1442 				    vr->cookie, dc->channel, hv_rv));
1443 				return (-EIO);
1444 			}
1445 
1446 			dc->cookie = vr->cookie;
1447 			dc->page = (vp_channel_t)slot;
1448 
1449 			/* Inform the caller about the slot chosen. */
1450 			(*rmap) |= 1 << slot;
1451 
1452 			dc = dc->next;
1453 		}
1454 	}
1455 
1456 	return (0);
1457 }
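
/*
 * A small illustration of the slot maps built above (the slots are
 * chosen by the hypervisor; these values are hypothetical): if the HV
 * places a VR's two TDCs in slots 0 and 1, and its two RDCs in slots
 * 0 and 1 as well, the caller gets back
 *
 *	*tmap == 0x3	((1 << 0) | (1 << 1))
 *	*rmap == 0x3
 */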
1458 
1459 void
1460 nxge_hio_share_unassign(
1461 	nxge_hio_vr_t *vr)
1462 {
1463 	nxge_t *nxge = (nxge_t *)vr->nxge;
1464 	nxge_hio_data_t *nhd;
1465 	nxge_hio_dc_t *dc;
1466 	nxhv_vr_fp_t *fp;
1467 	uint64_t hv_rv;
1468 
1469 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1470 
1471 	dc = vr->tx_group.dc;
1472 	while (dc) {
1473 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1474 		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1475 		if (hv_rv != 0) {
1476 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1477 			    "nxge_hio_share_unassign: "
1478 			    "tx->unassign(%x, %d) failed: %ld",
1479 			    vr->cookie, dc->page, hv_rv));
1480 		}
1481 		dc = dc->next;
1482 	}
1483 
1484 	dc = vr->rx_group.dc;
1485 	while (dc) {
1486 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1487 		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1488 		if (hv_rv != 0) {
1489 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1490 			    "nxge_hio_share_unassign: "
1491 			    "rx->unassign(%x, %d) failed: %ld",
1492 			    vr->cookie, dc->page, hv_rv));
1493 		}
1494 		dc = dc->next;
1495 	}
1496 
1497 	fp = &nhd->hio.vr;
1498 	if (fp->unassign) {
1499 		hv_rv = (*fp->unassign)(vr->cookie);
1500 		if (hv_rv != 0) {
1501 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1502 			    "nxge_hio_share_unassign: "
1503 			    "vr->assign(%x) failed: %ld",
1504 			    vr->cookie, hv_rv));
1505 		}
1506 	}
1507 }
1508 
1509 int
1510 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1511 {
1512 	p_nxge_t		nxge = (p_nxge_t)arg;
1513 	nxge_share_handle_t	*shp;
1514 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1515 	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1516 
1517 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1518 
1519 	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1520 	    nhd->hio.rx.assign == 0) {
1521 		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1522 		return (EIO);
1523 	}
1524 
1525 	/*
1526 	 * Get a VR.
1527 	 */
1528 	if ((vr = nxge_hio_vr_share(nxge)) == 0)
1529 		return (EAGAIN);
1530 
1531 	shp = &nxge->shares[vr->region];
1532 	shp->nxgep = nxge;
1533 	shp->index = vr->region;
1534 	shp->vrp = (void *)vr;
1535 	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
1536 	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
1537 	shp->active = B_FALSE;		/* not bound yet */
1538 
1539 	*shandle = (mac_share_handle_t)shp;
1540 
1541 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1542 	return (0);
1543 }
1544 
1545 
1546 void
1547 nxge_hio_share_free(mac_share_handle_t shandle)
1548 {
1549 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1550 	nxge_hio_vr_t		*vr;
1551 
1552 	/*
1553 	 * Clear internal handle state.
1554 	 */
1555 	vr = shp->vrp;
1556 	shp->vrp = (void *)NULL;
1557 	shp->index = 0;
1558 	shp->tmap = 0;
1559 	shp->rmap = 0;
1560 	shp->rxgroup = 0;
1561 	shp->active = B_FALSE;
1562 
1563 	/*
1564 	 * Free VR resource.
1565 	 */
1566 	nxge_hio_unshare(vr);
1567 }
1568 
1569 
1570 void
1571 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1572     mac_ring_handle_t *rings, uint_t *n_rings)
1573 {
1574 	nxge_t			*nxge;
1575 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1576 	nxge_ring_handle_t	*rh;
1577 	uint32_t		offset;
1578 
1579 	nxge = shp->nxgep;
1580 
1581 	switch (type) {
1582 	case MAC_RING_TYPE_RX:
1583 		rh = nxge->rx_ring_handles;
1584 		offset = nxge->pt_config.hw_config.start_rdc;
1585 		break;
1586 
1587 	case MAC_RING_TYPE_TX:
1588 		rh = nxge->tx_ring_handles;
1589 		offset = nxge->pt_config.hw_config.tdc.start;
1590 		break;
1591 	}
1592 
1593 	/*
1594 	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
1595 	 * but the HV has statically assigned the channels like so:
1596 	 * VR0: RDC0 & RDC1
1597 	 * VR1: RDC2 & RDC3, etc.
1598 	 * The TDCs are assigned in exactly the same way.
1599 	 */
1600 	if (rings != NULL) {
1601 		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1602 		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1603 	}
1604 	if (n_rings != NULL) {
1605 		*n_rings = 2;
1606 	}
1607 }
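
/*
 * An illustrative example of the index arithmetic above (the region and
 * offset are hypothetical): a share in VR region 3 owns RDC6 & RDC7.
 * If this port's first RDC is 4 (start_rdc == 4), those channels live
 * in rx_ring_handles[2] and rx_ring_handles[3]:
 *
 *	rings[0] = rh[(3 * 2) - 4].ring_handle;		rh[2]
 *	rings[1] = rh[(3 * 2 + 1) - 4].ring_handle;	rh[3]
 */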
1608 
1609 int
1610 nxge_hio_share_add_group(mac_share_handle_t shandle,
1611     mac_group_driver_t ghandle)
1612 {
1613 	nxge_t			*nxge;
1614 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1615 	nxge_ring_group_t	*rg = (nxge_ring_group_t *)ghandle;
1616 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1617 	nxge_grp_t		*group;
1618 	int			i;
1619 
1620 	if (rg->sindex != 0) {
1621 		/* the group is already bound to a share */
1622 		return (EALREADY);
1623 	}
1624 
1625 	/*
1626 	 * If we are adding a group 0 to a share, this
1627 	 * is not correct.
1628 	 */
1629 	ASSERT(rg->gindex != 0);
1630 
1631 	nxge = rg->nxgep;
1632 	vr = shp->vrp;
1633 
1634 	switch (rg->type) {
1635 	case MAC_RING_TYPE_RX:
1636 		/*
1637 		 * Make sure that the group has the right rings associated
1638 		 * for the share. In version 1.0, we may only give a VR
1639 		 * 2 RDCs.  Not only that, but the HV has statically
1640 		 * assigned the channels like so:
1641 		 * VR0: RDC0 & RDC1
1642 		 * VR1: RDC2 & RDC3, etc.
1643 		 */
1644 		group = nxge->rx_set.group[rg->gindex];
1645 
1646 		if (group->count > 2) {
1647 			/* a share can have at most 2 rings */
1648 			return (EINVAL);
1649 		}
1650 
1651 		for (i = 0; i < NXGE_MAX_RDCS; i++) {
1652 			if (group->map & (1 << i)) {
1653 				if ((i != shp->index * 2) &&
1654 				    (i != (shp->index * 2 + 1))) {
1655 					/*
1656 					 * A group with invalid rings was
1657 					 * attempted to bind to this share
1658 					 */
1659 					return (EINVAL);
1660 				}
1661 			}
1662 		}
1663 
1664 		rg->sindex = vr->region;
1665 		vr->rdc_tbl = rg->rdctbl;
1666 		shp->rxgroup = vr->rdc_tbl;
1667 		break;
1668 
1669 	case MAC_RING_TYPE_TX:
1670 		/*
1671 		 * Make sure that the group has the right rings associated
1672 		 * for the share. In version 1.0, we may only give a VR
1673 		 * 2 TDCs.  Not only that, but the HV has statically
1674 		 * assigned the channels like so:
1675 		 * VR0: TDC0 & TDC1
1676 		 * VR1: TDC2 & TDC3, etc.
1677 		 */
1678 		group = nxge->tx_set.group[rg->gindex];
1679 
1680 		if (group->count > 2) {
1681 			/* a share can have at most 2 rings */
1682 			return (EINVAL);
1683 		}
1684 
1685 		for (i = 0; i < NXGE_MAX_TDCS; i++) {
1686 			if (group->map & (1 << i)) {
1687 				if ((i != shp->index * 2) &&
1688 				    (i != (shp->index * 2 + 1))) {
1689 					/*
1690 					 * A group with invalid rings was
1691 					 * attempted to bind to this share
1692 					 */
1693 					return (EINVAL);
1694 				}
1695 			}
1696 		}
1697 
1698 		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1699 		    rg->gindex;
1700 		rg->sindex = vr->region;
1701 		break;
1702 	}
1703 	return (0);
1704 }
1705 
1706 int
1707 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1708     mac_group_driver_t ghandle)
1709 {
1710 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1711 	nxge_ring_group_t	*group = (nxge_ring_group_t *)ghandle;
1712 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1713 	int			rv = 0;
1714 
1715 	vr = shp->vrp;
1716 
1717 	switch (group->type) {
1718 	case MAC_RING_TYPE_RX:
1719 		group->sindex = 0;
1720 		vr->rdc_tbl = 0;
1721 		shp->rxgroup = 0;
1722 		break;
1723 
1724 	case MAC_RING_TYPE_TX:
1725 		group->sindex = 0;
1726 		vr->tdc_tbl = 0;
1727 		break;
1728 	}
1729 
1730 	return (rv);
1731 }
1732 
1733 int
1734 nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1735     uint64_t *rcookie)
1736 {
1737 	nxge_t			*nxge;
1738 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1739 	nxge_hio_vr_t		*vr;
1740 	uint64_t		rmap, tmap, hv_rmap, hv_tmap;
1741 	int			rv;
1742 
1743 	nxge = shp->nxgep;
1744 	vr = (nxge_hio_vr_t *)shp->vrp;
1745 
1746 	/*
1747 	 * Add resources to the share.
1748 	 * For each DMA channel associated with the VR, bind its resources
1749 	 * to the VR.
1750 	 */
1751 	tmap = 0;
1752 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1753 	if (rv != 0) {
1754 		return (rv);
1755 	}
1756 
1757 	rmap = 0;
1758 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1759 	if (rv != 0) {
1760 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1761 		return (rv);
1762 	}
1763 
1764 	/*
1765 	 * Ask the Hypervisor to set up the VR and allocate slots for
1766 	 * each ring associated with the VR.
1767 	 */
1768 	hv_tmap = hv_rmap = 0;
1769 	if ((rv = nxge_hio_share_assign(nxge, cookie,
1770 	    &hv_tmap, &hv_rmap, vr))) {
1771 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1772 		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1773 		return (rv);
1774 	}
1775 
1776 	shp->active = B_TRUE;
1777 	shp->tmap = hv_tmap;
1778 	shp->rmap = hv_rmap;
1779 
1780 	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1781 	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1782 
1783 	return (0);
1784 }
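
/*
 * A quick sketch of the cookie packing above, using hypothetical
 * values: for niu_cfg_hdl == 0x120 and an HV cookie of 0x8a, the MAC
 * layer is handed back
 *
 *	*rcookie == (0x120ULL << 32) | 0x8a == 0x1200000008aULL
 */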
1785 
1786 void
1787 nxge_hio_share_unbind(mac_share_handle_t shandle)
1788 {
1789 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1790 
1791 	/*
1792 	 * First, unassign the VR (take it back),
1793 	 * so we can enable interrupts again.
1794 	 */
1795 	nxge_hio_share_unassign(shp->vrp);
1796 
1797 	/*
1798 	 * Free Ring Resources for TX and RX
1799 	 */
1800 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1801 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1802 }
1803 
1804 
1805 /*
1806  * nxge_hio_vr_share
1807  *
1808  *	Find an unused Virtualization Region (VR).
1809  *
1810  * Arguments:
1811  * 	nxge
1812  *
1813  * Notes:
1814  *
1815  * Context:
1816  *	Service domain
1817  */
1818 nxge_hio_vr_t *
1819 nxge_hio_vr_share(
1820 	nxge_t *nxge)
1821 {
1822 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1823 	nxge_hio_vr_t *vr;
1824 
1825 	int first, limit, region;
1826 
1827 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1828 
1829 	MUTEX_ENTER(&nhd->lock);
1830 
1831 	if (nhd->vrs == 0) {
1832 		MUTEX_EXIT(&nhd->lock);
1833 		return (0);
1834 	}
1835 
1836 	/* Find an empty virtual region (VR). */
1837 	if (nxge->function_num == 0) {
1838 		// FUNC0_VIR0 'belongs' to NIU port 0.
1839 		first = FUNC0_VIR1;
1840 		limit = FUNC2_VIR0;
1841 	} else if (nxge->function_num == 1) {
1842 		// FUNC2_VIR0 'belongs' to NIU port 1.
1843 		first = FUNC2_VIR1;
1844 		limit = FUNC_VIR_MAX;
1845 	} else {
1846 		cmn_err(CE_WARN,
1847 		    "Shares not supported on function(%d) at this time.\n",
1848 		    nxge->function_num);
1849 	}
1850 
1851 	for (region = first; region < limit; region++) {
1852 		if (nhd->vr[region].nxge == 0)
1853 			break;
1854 	}
1855 
1856 	if (region == limit) {
1857 		MUTEX_EXIT(&nhd->lock);
1858 		return (0);
1859 	}
1860 
1861 	vr = &nhd->vr[region];
1862 	vr->nxge = (uintptr_t)nxge;
1863 	vr->region = (uintptr_t)region;
1864 
1865 	nhd->vrs--;
1866 
1867 	MUTEX_EXIT(&nhd->lock);
1868 
1869 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1870 
1871 	return (vr);
1872 }
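
/*
 * In other words, with the region enumeration used above, port 0
 * allocates shareable regions from [FUNC0_VIR1, FUNC2_VIR0) and port 1
 * from [FUNC2_VIR1, FUNC_VIR_MAX); FUNC0_VIR0 and FUNC2_VIR0 remain
 * with the ports themselves, which is why nxge_hio_init() sets
 * nhd->vrs to NXGE_VR_SR_MAX - 2.
 */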
1873 
1874 void
1875 nxge_hio_unshare(
1876 	nxge_hio_vr_t *vr)
1877 {
1878 	nxge_t *nxge = (nxge_t *)vr->nxge;
1879 	nxge_hio_data_t *nhd;
1880 
1881 	vr_region_t region;
1882 
1883 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1884 
1885 	if (!nxge) {
1886 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1887 		    "vr->nxge is NULL"));
1888 		return;
1889 	}
1890 
1891 	/*
1892 	 * This function is no longer called, but I will keep it
1893 	 * here in case we want to revisit this topic in the future.
1894 	 *
1895 	 * nxge_hio_hostinfo_uninit(nxge, vr);
1896 	 */
1897 
1898 	/*
1899 	 * XXX: This is done by ms_sremove?
1900 	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1901 	 */
1902 
1903 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1904 
1905 	MUTEX_ENTER(&nhd->lock);
1906 
1907 	region = vr->region;
1908 	(void) memset(vr, 0, sizeof (*vr));
1909 	vr->region = region;
1910 
1911 	nhd->vrs++;
1912 
1913 	MUTEX_EXIT(&nhd->lock);
1914 
1915 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1916 }
1917 
1918 int
1919 nxge_hio_addres(
1920 	nxge_hio_vr_t *vr,
1921 	mac_ring_type_t type,
1922 	uint64_t *map)
1923 {
1924 	nxge_t		*nxge = (nxge_t *)vr->nxge;
1925 	nxge_grp_t	*group;
1926 	int		groupid;
1927 	int		i;
1928 	int		max_dcs;
1929 
1930 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1931 
1932 	if (!nxge)
1933 		return (EINVAL);
1934 
1935 	/*
1936 	 * For each ring associated with the group, add the resources
1937 	 * to the group and bind.
1938 	 */
1939 	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1940 	if (type == MAC_RING_TYPE_TX) {
1941 		/* set->group is an array of groups indexed by port group ID */
1942 		groupid = vr->tdc_tbl -
1943 		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
1944 		group = nxge->tx_set.group[groupid];
1945 	} else {
1946 		/* set->group is an array of groups indexed by port group ID */
1947 		groupid = vr->rdc_tbl -
1948 		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1949 		group = nxge->rx_set.group[groupid];
1950 	}
1951 
1952 	if (group->map == 0) {
1953 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There is no rings associated "
1954 		    "with this VR"));
1955 		return (EINVAL);
1956 	}
1957 
1958 	for (i = 0; i < max_dcs; i++) {
1959 		if (group->map & (1 << i)) {
1960 			int rv;
1961 
1962 			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
1963 				if (*map == 0) /* Couldn't get even one DC. */
1964 					return (-rv);
1965 				else
1966 					break;
1967 			}
1968 			*map |= (1 << i);
1969 		}
1970 	}
1971 
1972 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
1973 
1974 	return (0);
1975 }
1976 
1977 /* ARGSUSED */
1978 void
1979 nxge_hio_remres(
1980 	nxge_hio_vr_t *vr,
1981 	mac_ring_type_t type,
1982 	res_map_t res_map)
1983 {
1984 	nxge_t *nxge = (nxge_t *)vr->nxge;
1985 	nxge_grp_t *group;
1986 
1987 	if (!nxge) {
1988 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
1989 		    "vr->nxge is NULL"));
1990 		return;
1991 	}
1992 
1993 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
1994 
1995 	/*
1996 	 * For each ring bound to the group, remove the DMA resources
1997 	 * from the group and unbind.
1998 	 */
1999 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2000 	while (group->dc) {
2001 		nxge_hio_dc_t *dc = group->dc;
2002 		NXGE_DC_RESET(res_map, dc->page);
2003 		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
2004 	}
2005 
2006 	if (res_map) {
2007 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2008 		    "res_map %lx", res_map));
2009 	}
2010 
2011 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
2012 }
2013 
2014 /*
2015  * nxge_hio_tdc_share
2016  *
2017  *	Share an unused TDC channel.
2018  *
2019  * Arguments:
2020  * 	nxge
2021  *
2022  * Notes:
2023  *
2024  * A.7.3 Reconfigure Tx DMA channel
2025  *	Disable TxDMA			A.9.6.10
2026  *     [Rebind TxDMA channel to Port	A.9.6.7]
2027  *
2028  * We don't have to Rebind the TDC to the port - it is always already bound.
2029  *
2030  *	Soft Reset TxDMA		A.9.6.2
2031  *
2032  * This procedure will be executed by nxge_init_txdma_channel() in the
2033  * guest domain:
2034  *
2035  *	Re-initialize TxDMA		A.9.6.8
2036  *	Reconfigure TxDMA
2037  *	Enable TxDMA			A.9.6.9
2038  *
2039  * Context:
2040  *	Service domain
2041  */
2042 int
2043 nxge_hio_tdc_share(
2044 	nxge_t *nxge,
2045 	int channel)
2046 {
2047 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2048 	nxge_grp_set_t *set = &nxge->tx_set;
2049 	tx_ring_t *ring;
2050 	int count;
2051 
2052 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));
2053 
2054 	/*
2055 	 * Wait until this channel is idle.
2056 	 */
2057 	ring = nxge->tx_rings->rings[channel];
2058 
2059 	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
2060 	if (ring->tx_ring_busy) {
2061 		/*
2062 		 * Wait for 30 seconds.
2063 		 */
2064 		for (count = 30 * 1000; count; count--) {
2065 			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
2066 				break;
2067 			}
2068 
2069 			drv_usecwait(1000);
2070 		}
2071 
2072 		if (count == 0) {
2073 			(void) atomic_swap_32(&ring->tx_ring_offline,
2074 			    NXGE_TX_RING_ONLINE);
2075 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2076 			    "nxge_hio_tdc_share: "
2077 			    "Tx ring %d was always BUSY", channel));
2078 			return (-EIO);
2079 		}
2080 	} else {
2081 		(void) atomic_swap_32(&ring->tx_ring_offline,
2082 		    NXGE_TX_RING_OFFLINED);
2083 	}
2084 
2085 	MUTEX_ENTER(&nhd->lock);
2086 	nxge->tdc_is_shared[channel] = B_TRUE;
2087 	MUTEX_EXIT(&nhd->lock);
2088 
2089 	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2090 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
2091 		    "Failed to remove interrupt for TxDMA channel %d",
2092 		    channel));
2093 		return (-EINVAL);
2094 	}
2095 
2096 	/* Disable TxDMA A.9.6.10 */
2097 	(void) nxge_txdma_channel_disable(nxge, channel);
2098 
2099 	/* The SD is sharing this channel. */
2100 	NXGE_DC_SET(set->shared.map, channel);
2101 	set->shared.count++;
2102 
2103 	/* Soft Reset TxDMA A.9.6.2 */
2104 	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
2105 
2106 	/*
2107 	 * Initialize the DC-specific FZC control registers.
2108 	 * -----------------------------------------------------
2109 	 */
2110 	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
2111 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2112 		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
2113 		return (-EIO);
2114 	}
2115 
2116 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));
2117 
2118 	return (0);
2119 }
2120 
2121 /*
2122  * nxge_hio_rdc_share
2123  *
2124  *	Share an unused RDC channel.
2125  *
2126  * Arguments:
2127  * 	nxge
2128  *
2129  * Notes:
2130  *
2131  * This is the latest version of the procedure to
2132  * Reconfigure an Rx DMA channel:
2133  *
2134  * A.6.3 Reconfigure Rx DMA channel
2135  *	Stop RxMAC		A.9.2.6
2136  *	Drain IPP Port		A.9.3.6
2137  *	Stop and reset RxDMA	A.9.5.3
2138  *
2139  * This procedure will be executed by nxge_init_rxdma_channel() in the
2140  * guest domain:
2141  *
2142  *	Initialize RxDMA	A.9.5.4
2143  *	Reconfigure RxDMA
2144  *	Enable RxDMA		A.9.5.5
2145  *
2146  * We will do this here, since the RDC is a canalis non grata:
2147  *	Enable RxMAC		A.9.2.10
2148  *
2149  * Context:
2150  *	Service domain
2151  */
2152 int
2153 nxge_hio_rdc_share(
2154 	nxge_t *nxge,
2155 	nxge_hio_vr_t *vr,
2156 	int channel)
2157 {
2158 	nxge_grp_set_t *set = &nxge->rx_set;
2159 	nxge_rdc_grp_t *rdc_grp;
2160 
2161 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2162 
2163 	/* Disable interrupts. */
2164 	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2165 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2166 		    "Failed to remove interrupt for RxDMA channel %d",
2167 		    channel));
2168 		return (NXGE_ERROR);
2169 	}
2170 
2171 	/* Stop RxMAC = A.9.2.6 */
2172 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2173 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2174 		    "Failed to disable RxMAC"));
2175 	}
2176 
2177 	/* Drain IPP Port = A.9.3.6 */
2178 	(void) nxge_ipp_drain(nxge);
2179 
2180 	/* Stop and reset RxDMA = A.9.5.3 */
2181 	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2182 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2183 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2184 		    "Failed to disable RxDMA channel %d", channel));
2185 	}
2186 
2187 	/* The SD is sharing this channel. */
2188 	NXGE_DC_SET(set->shared.map, channel);
2189 	set->shared.count++;
2190 
2191 	/* Assert RST: RXDMA_CFIG1[30] = 1 */
2192 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
2193 
2194 	/*
2195 	 * The guest domain will reconfigure the RDC later.
2196 	 *
2197 	 * But in the meantime, we must re-enable the Rx MAC so
2198 	 * that we can start receiving packets again on the
2199 	 * remaining RDCs:
2200 	 *
2201 	 * Enable RxMAC = A.9.2.10
2202 	 */
2203 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2204 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2205 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2206 	}
2207 
2208 	/*
2209 	 * Initialize the DC-specific FZC control registers.
2210 	 * -----------------------------------------------------
2211 	 */
2212 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2213 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2214 		    "nxge_hio_rdc_share: RZC RDC failed: %ld", channel));
2215 		return (-EIO);
2216 	}
2217 
2218 	/*
2219 	 * We have to initialize the guest's RDC table, too.
2220 	 * -----------------------------------------------------
2221 	 */
2222 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
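	/*
	 * The first RDC added to the guest's table becomes both its
	 * starting and its default RDC; later additions only grow the
	 * table.
	 */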
2223 	if (rdc_grp->max_rdcs == 0) {
2224 		rdc_grp->start_rdc = (uint8_t)channel;
2225 		rdc_grp->def_rdc = (uint8_t)channel;
2226 		rdc_grp->max_rdcs = 1;
2227 	} else {
2228 		rdc_grp->max_rdcs++;
2229 	}
2230 	NXGE_DC_SET(rdc_grp->map, channel);
2231 
2232 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2233 
2234 	return (0);
2235 }
2236 
2237 /*
2238  * nxge_hio_dc_share
2239  *
2240  *	Share a DMA channel with a guest domain.
2241  *
2242  * Arguments:
2243  * 	nxge
2244  * 	vr	The VR that <channel> will belong to.
2245  * 	type	Tx or Rx.
2246  * 	channel	Channel to share
2247  *
2248  * Notes:
2249  *
2250  * Context:
2251  *	Service domain
2252  */
2253 int
2254 nxge_hio_dc_share(
2255 	nxge_t *nxge,
2256 	nxge_hio_vr_t *vr,
2257 	mac_ring_type_t type,
2258 	int channel)
2259 {
2260 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2261 	nxge_hio_dc_t *dc;
2262 	nxge_grp_t *group;
2263 	int slot;
2264 
2265 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
2266 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2267 
2268 
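	/*
	 * Share the channel.  Both helpers return zero on success; a
	 * negative return causes the partial share to be rolled back below.
	 */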
2269 	/* -------------------------------------------------- */
2270 	slot = (type == MAC_RING_TYPE_TX) ?
2271 	    nxge_hio_tdc_share(nxge, channel) :
2272 	    nxge_hio_rdc_share(nxge, vr, channel);
2273 
2274 	if (slot < 0) {
2275 		if (type == MAC_RING_TYPE_RX) {
2276 			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2277 		} else {
2278 			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2279 		}
2280 		return (slot);
2281 	}
2282 
2283 	MUTEX_ENTER(&nhd->lock);
2284 
2285 	/*
2286 	 * Tag this channel.
2287 	 * --------------------------------------------------
2288 	 */
2289 	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];
2290 
2291 	dc->vr = vr;
2292 	dc->channel = (nxge_channel_t)channel;
2293 
2294 	MUTEX_EXIT(&nhd->lock);
2295 
2296 	/*
2297 	 * vr->[t|r]x_group is used by the service domain to
2298 	 * keep track of its shared DMA channels.
2299 	 */
2300 	MUTEX_ENTER(&nxge->group_lock);
2301 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2302 
2303 	dc->group = group;
2304 	/* Initialize <group>, if necessary */
2305 	if (group->count == 0) {
2306 		group->nxge = nxge;
2307 		group->type = (type == MAC_RING_TYPE_TX) ?
2308 		    VP_BOUND_TX : VP_BOUND_RX;
2309 		group->sequence	= nhd->sequence++;
2310 		group->active = B_TRUE;
2311 	}
2312 
2313 	MUTEX_EXIT(&nxge->group_lock);
2314 
2315 	NXGE_ERROR_MSG((nxge, HIO_CTL,
2316 	    "DC share: %cDC %d was assigned to slot %d",
2317 	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));
2318 
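	/* Finally, link the DMA channel into the VR's group. */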
2319 	nxge_grp_dc_append(nxge, group, dc);
2320 
2321 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));
2322 
2323 	return (0);
2324 }
2325 
2326 /*
2327  * nxge_hio_tdc_unshare
2328  *
2329  *	Unshare a TDC.
2330  *
2331  * Arguments:
2332  * 	nxge
 * 	dev_grpid	The device TDC group id to return <channel> to.
2333  * 	channel	The channel to unshare (add again).
2334  *
2335  * Notes:
2336  *
2337  * Context:
2338  *	Service domain
2339  */
2340 void
2341 nxge_hio_tdc_unshare(
2342 	nxge_t *nxge,
2343 	int dev_grpid,
2344 	int channel)
2345 {
2346 	nxge_grp_set_t *set = &nxge->tx_set;
2347 	nxge_grp_t *group;
2348 	int grpid;
2349 
2350 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));
2351 
2352 	NXGE_DC_RESET(set->shared.map, channel);
2353 	set->shared.count--;
2354 
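	/*
	 * Convert the device-wide TDC group id into this port's group
	 * array index to find the group that takes the channel back.
	 */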
2355 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
2356 	group = set->group[grpid];
2357 
2358 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
2359 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2360 		    "Failed to initialize TxDMA channel %d", channel));
2361 		return;
2362 	}
2363 
2364 	/* Re-add this interrupt. */
2365 	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2366 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2367 		    "Failed to add interrupt for TxDMA channel %d", channel));
2368 	}
2369 
2370 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
2371 }
2372 
2373 /*
2374  * nxge_hio_rdc_unshare
2375  *
2376  *	Unshare an RDC: add it to the SD's RDC groups (tables).
2377  *
2378  * Arguments:
2379  * 	nxge
 * 	dev_grpid	The device RDC group (table) id to return <channel> to.
2380  * 	channel	The channel to unshare (add again).
2381  *
2382  * Notes:
2383  *
2384  * Context:
2385  *	Service domain
2386  */
2387 void
2388 nxge_hio_rdc_unshare(
2389 	nxge_t *nxge,
2390 	int dev_grpid,
2391 	int channel)
2392 {
2393 	nxge_grp_set_t		*set = &nxge->rx_set;
2394 	nxge_grp_t		*group;
2395 	int			grpid;
2396 
2397 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2398 
2399 	/* Stop RxMAC = A.9.2.6 */
2400 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2401 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2402 		    "Failed to disable RxMAC"));
2403 	}
2404 
2405 	/* Drain IPP Port = A.9.3.6 */
2406 	(void) nxge_ipp_drain(nxge);
2407 
2408 	/* Stop and reset RxDMA = A.9.5.3 */
2409 	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2410 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2411 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2412 		    "Failed to disable RxDMA channel %d", channel));
2413 	}
2414 
2415 	NXGE_DC_RESET(set->shared.map, channel);
2416 	set->shared.count--;
2417 
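	/*
	 * Convert the device-wide RDC group id into this port's group
	 * array index to find the group that takes the channel back.
	 */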
2418 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2419 	group = set->group[grpid];
2420 
2421 	/*
2422 	 * Assert RST: RXDMA_CFIG1[30] = 1
2423 	 *
2424 	 * Initialize RxDMA	A.9.5.4
2425 	 * Reconfigure RxDMA
2426 	 * Enable RxDMA		A.9.5.5
2427 	 */
2428 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2429 		/* Be sure to re-enable the RX MAC. */
2430 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2431 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2432 			    "nxge_hio_rdc_share: Rx MAC still disabled"));
2433 		}
2434 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2435 		    "Failed to initialize RxDMA channel %d", channel));
2436 		return;
2437 	}
2438 
2439 	/*
2440 	 * Enable RxMAC = A.9.2.10
2441 	 */
2442 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2443 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2444 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2445 		return;
2446 	}
2447 
2448 	/* Re-add this interrupt. */
2449 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2450 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2451 		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
2452 		    "RxDMA CHANNEL %d", channel));
2453 	}
2454 
2455 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
2456 }
2457 
2458 /*
2459  * nxge_hio_dc_unshare
2460  *
2461  *	Unshare (reuse) a DMA channel.
2462  *
2463  * Arguments:
2464  * 	nxge
2465  * 	vr	The VR that <channel> belongs to.
2466  * 	type	Tx or Rx.
2467  * 	channel	The DMA channel to reuse.
2468  *
2469  * Notes:
2470  *
2471  * Context:
2472  *	Service domain
2473  */
2474 void
2475 nxge_hio_dc_unshare(
2476 	nxge_t *nxge,
2477 	nxge_hio_vr_t *vr,
2478 	mac_ring_type_t type,
2479 	int channel)
2480 {
2481 	nxge_grp_t *group;
2482 	nxge_hio_dc_t *dc;
2483 
2484 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
2485 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2486 
2487 	/* Unlink the channel from its group. */
2488 	/* -------------------------------------------------- */
2489 	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
2490 	NXGE_DC_RESET(group->map, channel);
2491 	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == NULL) {
2492 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2493 		    "nxge_hio_dc_unshare(%d) failed", channel));
2494 		return;
2495 	}
2496 
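	/* Sever the channel's binding to the VR before handing it back. */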
2497 	dc->vr = NULL;
2498 	dc->cookie = 0;
2499 
2500 	if (type == MAC_RING_TYPE_RX) {
2501 		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2502 	} else {
2503 		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2504 	}
2505 
2506 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
2507 }
2508 
2509 
2510 /*
2511  * nxge_hio_rxdma_bind_intr():
2512  *
2513  *	For the guest domain driver, need to bind the interrupt group
2514  *	and state to the rx_rcr_ring_t.
2515  */
2516 
2517 int
2518 nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
2519 {
2520 	nxge_hio_dc_t	*dc;
2521 	nxge_ldgv_t	*control;
2522 	nxge_ldg_t	*group;
2523 	nxge_ldv_t	*device;
2524 
2525 	/*
2526 	 * Find the DMA channel.
2527 	 */
2528 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
2529 		return (NXGE_ERROR);
2530 	}
2531 
2532 	/*
2533 	 * Get the control structure.
2534 	 */
2535 	control = nxge->ldgvp;
2536 	if (control == NULL) {
2537 		return (NXGE_ERROR);
2538 	}
2539 
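	/*
	 * The LDG vector and logical device indices recorded for this
	 * channel locate its interrupt group and logical device.
	 */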
2540 	group = &control->ldgp[dc->ldg.vector];
2541 	device = &control->ldvp[dc->ldg.ldsv];
2542 
2543 	MUTEX_ENTER(&ring->lock);
2544 	ring->ldgp = group;
2545 	ring->ldvp = device;
2546 	MUTEX_EXIT(&ring->lock);
2547 
2548 	return (NXGE_OK);
2549 }
2550 #endif	/* if defined(sun4v) */
2551