xref: /titanic_41/usr/src/uts/common/io/nxge/nxge_hio.c (revision 82629e3015252bf18319ba3815c773df23e21436)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * nxge_hio.c
29  *
30  * This file manages the virtualization resources for Neptune
31  * devices.  That is, it implements a hybrid I/O (HIO) approach in the
32  * Solaris kernel, whereby a guest domain on an LDOMs server may
33  * request & use hardware resources from the service domain.
34  *
35  */
36 
37 #include <sys/mac_provider.h>
38 #include <sys/nxge/nxge_impl.h>
39 #include <sys/nxge/nxge_fzc.h>
40 #include <sys/nxge/nxge_rxdma.h>
41 #include <sys/nxge/nxge_txdma.h>
42 #include <sys/nxge/nxge_hio.h>
43 
44 /*
45  * External prototypes
46  */
47 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
48 
49 /* The following functions may be found in nxge_main.c */
50 extern int nxge_m_mmac_remove(void *arg, int slot);
51 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
52 	boolean_t usetbl);
53 
54 /* The following functions may be found in nxge_[t|r]xdma.c */
55 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
56 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
57 
58 /*
59  * Local prototypes
60  */
61 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
62 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
63 static void nxge_grp_dc_map(nxge_grp_t *group);
64 
65 /*
66  * These functions are used by both service & guest domains to
67  * decide whether they're running in an LDOMs/XEN environment
68  * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
69  */
70 
71 /*
72  * nxge_get_environs
73  *
74  *	Figure out if we are in a guest domain or not.
75  *
76  * Arguments:
77  * 	nxge
78  *
79  * Notes:
80  *
81  * Context:
82  *	Any domain
83  */
84 void
85 nxge_get_environs(
86 	nxge_t *nxge)
87 {
88 	char *string;
89 
90 	/*
91 	 * In the beginning, assume that we are running sans LDOMs/XEN.
92 	 */
93 	nxge->environs = SOLARIS_DOMAIN;
94 
95 	/*
96 	 * Are we a hybrid I/O (HIO) guest domain driver?
97 	 */
98 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
99 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
100 	    "niutype", &string)) == DDI_PROP_SUCCESS) {
101 		if (strcmp(string, "n2niu") == 0) {
102 			nxge->environs = SOLARIS_GUEST_DOMAIN;
103 			/* So we can allocate properly-aligned memory. */
104 			nxge->niu_type = N2_NIU;
105 			NXGE_DEBUG_MSG((nxge, HIO_CTL,
106 			    "Hybrid IO-capable guest domain"));
107 		}
108 		ddi_prop_free(string);
109 	}
110 }
111 
112 #if !defined(sun4v)
113 
114 /*
115  * nxge_hio_init
116  *
117  *	Initialize the HIO module of the NXGE driver.
118  *
119  * Arguments:
120  * 	nxge
121  *
122  * Notes:
123  *	This is the non-hybrid I/O version of this function.
124  *
125  * Context:
126  *	Any domain
127  */
128 int
129 nxge_hio_init(nxge_t *nxge)
130 {
131 	nxge_hio_data_t *nhd;
132 	int i;
133 
134 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
135 	if (nhd == NULL) {
136 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
137 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
138 		nhd->type = NXGE_HIO_TYPE_SERVICE;
139 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
140 	}
141 
142 	/*
143 	 * Initialize share and ring group structures.
144 	 */
145 	for (i = 0; i < NXGE_MAX_TDCS; i++)
146 		nxge->tdc_is_shared[i] = B_FALSE;
147 
148 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
149 		nxge->tx_hio_groups[i].ghandle = NULL;
150 		nxge->tx_hio_groups[i].nxgep = nxge;
151 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
152 		nxge->tx_hio_groups[i].gindex = 0;
153 		nxge->tx_hio_groups[i].sindex = 0;
154 	}
155 
156 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
157 		nxge->rx_hio_groups[i].ghandle = NULL;
158 		nxge->rx_hio_groups[i].nxgep = nxge;
159 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
160 		nxge->rx_hio_groups[i].gindex = 0;
161 		nxge->rx_hio_groups[i].sindex = 0;
162 		nxge->rx_hio_groups[i].started = B_FALSE;
163 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
164 		nxge->rx_hio_groups[i].rdctbl = -1;
165 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
166 	}
167 
168 	nhd->hio.ldoms = B_FALSE;
169 
170 	return (NXGE_OK);
171 }
172 
173 #endif
174 
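/*
 * nxge_hio_uninit
 *
 *	Tear down the HIO module of the NXGE driver, freeing the
 *	shared HIO data structure.
 *
 * Notes:
 *	Called only once no devices remain attached to this piece
 *	of hardware (ndevs == 0).
 *
 * Context:
 *	Any domain
 */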
175 void
176 nxge_hio_uninit(nxge_t *nxge)
177 {
178 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
179 
180 	ASSERT(nxge->nxge_hw_p->ndevs == 0);
181 
182 	if (nhd != NULL) {
183 		MUTEX_DESTROY(&nhd->lock);
184 		KMEM_FREE(nhd, sizeof (*nhd));
185 		nxge->nxge_hw_p->hio = 0;
186 	}
187 }
188 
189 /*
190  * nxge_dci_map
191  *
192  *	Map a DMA channel index to a channel number.
193  *
194  * Arguments:
195  * 	nxge		The nxge instance (driver soft state).
196  * 	type		The type of channel this is: Tx or Rx.
197  * 	index		The index to convert to a channel number
198  *
199  * Notes:
200  *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
201  *
202  * Context:
203  *	Any domain
204  */
205 int
206 nxge_dci_map(
207 	nxge_t *nxge,
208 	vpc_type_t type,
209 	int index)
210 {
211 	nxge_grp_set_t *set;
212 	int dc;
213 
214 	switch (type) {
215 	case VP_BOUND_TX:
216 		set = &nxge->tx_set;
217 		break;
218 	case VP_BOUND_RX:
219 		set = &nxge->rx_set;
220 		break;
	default:
		return (-1);
221 	}
222 
223 	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
224 		if ((1 << dc) & set->owned.map) {
225 			if (index == 0)
226 				return (dc);
227 			else
228 				index--;
229 		}
230 	}
231 
232 	return (-1);
233 }
234 
235 /*
236  * ---------------------------------------------------------------------
237  * These are the general-purpose DMA channel group functions.  That is,
238  * these functions are used to manage groups of TDCs or RDCs in an HIO
239  * environment.
240  *
241  * But it is also expected that in the future they will be able to manage
242  * Crossbow groups.
243  * ---------------------------------------------------------------------
244  */
245 
246 /*
247  * nxge_grp_cleanup(p_nxge_t nxge)
248  *
249  *	Remove all outstanding groups.
250  *
251  * Arguments:
252  *	nxge
253  */
254 void
255 nxge_grp_cleanup(p_nxge_t nxge)
256 {
257 	nxge_grp_set_t *set;
258 	int i;
259 
260 	MUTEX_ENTER(&nxge->group_lock);
261 
262 	/*
263 	 * Find RX groups that need to be cleaned up.
264 	 */
265 	set = &nxge->rx_set;
266 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
267 		if (set->group[i] != NULL) {
268 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
269 			set->group[i] = NULL;
270 		}
271 	}
272 
273 	/*
274 	 * Find TX groups that need to be cleaned up.
275 	 */
276 	set = &nxge->tx_set;
277 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
278 		if (set->group[i] != NULL) {
279 			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
280 			set->group[i] = NULL;
281 		}
282 	}
283 	MUTEX_EXIT(&nxge->group_lock);
284 }
285 
286 
287 /*
288  * nxge_grp_add
289  *
290  *	Add a group to an instance of NXGE.
291  *
292  * Arguments:
293  * 	nxge
294  * 	type	Tx or Rx
295  *
296  * Notes:
297  *
298  * Context:
299  *	Any domain
300  */
301 nxge_grp_t *
302 nxge_grp_add(
303 	nxge_t *nxge,
304 	nxge_grp_type_t type)
305 {
306 	nxge_grp_set_t *set;
307 	nxge_grp_t *group;
308 	int i;
309 
310 	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
311 	group->nxge = nxge;
312 
313 	MUTEX_ENTER(&nxge->group_lock);
314 	switch (type) {
315 	case NXGE_TRANSMIT_GROUP:
316 	case EXT_TRANSMIT_GROUP:
317 		set = &nxge->tx_set;
318 		break;
319 	default:
320 		set = &nxge->rx_set;
321 		break;
322 	}
323 
324 	group->type = type;
325 	group->active = B_TRUE;
326 	group->sequence = set->sequence++;
327 
328 	/* Find an empty slot for this logical group. */
329 	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
330 		if (set->group[i] == 0) {
331 			group->index = i;
332 			set->group[i] = group;
333 			NXGE_DC_SET(set->lg.map, i);
334 			set->lg.count++;
335 			break;
336 		}
337 	}
338 	MUTEX_EXIT(&nxge->group_lock);
339 
340 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
341 	    "nxge_grp_add: %cgroup = %d.%d",
342 	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
343 	    nxge->mac.portnum, group->sequence));
344 
345 	return (group);
346 }
347 
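/*
 * nxge_grp_remove
 *
 *	Remove a group from an instance of NXGE, deactivating it and
 *	removing any DMA channels which still belong to it.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remove
 *
 * Context:
 *	Any domain
 */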
348 void
349 nxge_grp_remove(
350 	nxge_t *nxge,
351 	nxge_grp_t *group)	/* The group to remove. */
352 {
353 	nxge_grp_set_t *set;
354 	vpc_type_t type;
355 
356 	if (group == NULL)
357 		return;
358 
359 	MUTEX_ENTER(&nxge->group_lock);
360 	switch (group->type) {
361 	case NXGE_TRANSMIT_GROUP:
362 	case EXT_TRANSMIT_GROUP:
363 		set = &nxge->tx_set;
364 		break;
365 	default:
366 		set = &nxge->rx_set;
367 		break;
368 	}
369 
370 	if (set->group[group->index] != group) {
371 		MUTEX_EXIT(&nxge->group_lock);
372 		return;
373 	}
374 
375 	set->group[group->index] = 0;
376 	NXGE_DC_RESET(set->lg.map, group->index);
377 	set->lg.count--;
378 
379 	/* While inside the mutex, deactivate <group>. */
380 	group->active = B_FALSE;
381 
382 	MUTEX_EXIT(&nxge->group_lock);
383 
384 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
385 	    "nxge_grp_remove(%c.%d.%d) called",
386 	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
387 	    nxge->mac.portnum, group->sequence));
388 
389 	/* Now, remove any DCs which are still active. */
390 	switch (group->type) {
391 	default:
392 		type = VP_BOUND_TX;
393 		break;
394 	case NXGE_RECEIVE_GROUP:
395 	case EXT_RECEIVE_GROUP:
396 		type = VP_BOUND_RX;
397 	}
398 
399 	while (group->dc) {
400 		nxge_grp_dc_remove(nxge, type, group->dc->channel);
401 	}
402 
403 	KMEM_FREE(group, sizeof (*group));
404 }
405 
406 /*
407  * nxge_grp_dc_add
408  *
409  *	Add a DMA channel to a VR/Group.
410  *
411  * Arguments:
412  * 	nxge
413  * 	channel	The channel to add.
414  * Notes:
415  *
416  * Context:
417  *	Any domain
418  */
419 /* ARGSUSED */
420 int
421 nxge_grp_dc_add(
422 	nxge_t *nxge,
423 	nxge_grp_t *group,	/* The group to add <channel> to. */
424 	vpc_type_t type,	/* Rx or Tx */
425 	int channel)		/* A physical/logical channel number */
426 {
427 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
428 	nxge_hio_dc_t *dc;
429 	nxge_grp_set_t *set;
430 	nxge_status_t status = NXGE_OK;
431 
432 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
433 
434 	if (group == 0)
435 		return (0);
436 
437 	switch (type) {
438 	case VP_BOUND_TX:
439 		set = &nxge->tx_set;
440 		if (channel > NXGE_MAX_TDCS) {
441 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
442 			    "nxge_grp_dc_add: TDC = %d", channel));
443 			return (NXGE_ERROR);
444 		}
445 		break;
446 	case VP_BOUND_RX:
447 		set = &nxge->rx_set;
448 		if (channel > NXGE_MAX_RDCS) {
449 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
450 			    "nxge_grp_dc_add: RDC = %d", channel));
451 			return (NXGE_ERROR);
452 		}
453 		break;
454 
455 	default:
456 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
457 		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
		return (NXGE_ERROR);
458 	}
459 
460 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
461 	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
462 	    type == VP_BOUND_TX ? 't' : 'r',
463 	    nxge->mac.portnum, group->sequence, group->count, channel));
464 
465 	MUTEX_ENTER(&nxge->group_lock);
466 	if (group->active != B_TRUE) {
467 		/* We may be in the process of removing this group. */
468 		MUTEX_EXIT(&nxge->group_lock);
469 		return (NXGE_ERROR);
470 	}
471 	MUTEX_EXIT(&nxge->group_lock);
472 
473 	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
474 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
475 		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
476 		return (NXGE_ERROR);
477 	}
478 
479 	MUTEX_ENTER(&nhd->lock);
480 
481 	if (dc->group) {
482 		MUTEX_EXIT(&nhd->lock);
483 		/* This channel is already in use! */
484 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
485 		    "nxge_grp_dc_add(%d): channel already in group", channel));
486 		return (NXGE_ERROR);
487 	}
488 
489 	dc->next = 0;
490 	dc->page = channel;
491 	dc->channel = (nxge_channel_t)channel;
492 
493 	dc->type = type;
494 	if (type == VP_BOUND_RX) {
495 		dc->init = nxge_init_rxdma_channel;
496 		dc->uninit = nxge_uninit_rxdma_channel;
497 	} else {
498 		dc->init = nxge_init_txdma_channel;
499 		dc->uninit = nxge_uninit_txdma_channel;
500 	}
501 
502 	dc->group = group;
503 
504 	if (isLDOMguest(nxge))
505 		(void) nxge_hio_ldsv_add(nxge, dc);
506 
507 	NXGE_DC_SET(set->owned.map, channel);
508 	set->owned.count++;
509 
510 	MUTEX_EXIT(&nhd->lock);
511 
512 	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
513 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
514 		    "nxge_grp_dc_add(%d): channel init failed", channel));
515 		MUTEX_ENTER(&nhd->lock);
516 		(void) memset(dc, 0, sizeof (*dc));
517 		NXGE_DC_RESET(set->owned.map, channel);
518 		set->owned.count--;
519 		MUTEX_EXIT(&nhd->lock);
520 		return (NXGE_ERROR);
521 	}
522 
523 	nxge_grp_dc_append(nxge, group, dc);
524 
525 	if (type == VP_BOUND_TX) {
526 		MUTEX_ENTER(&nhd->lock);
527 		nxge->tdc_is_shared[channel] = B_FALSE;
528 		MUTEX_EXIT(&nhd->lock);
529 	}
530 
531 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
532 
533 	return ((int)status);
534 }
535 
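/*
 * nxge_grp_dc_remove
 *
 *	Remove a DMA channel from its group and uninitialize it.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 * 	channel	The channel to remove
 *
 * Context:
 *	Any domain
 */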
536 void
537 nxge_grp_dc_remove(
538 	nxge_t *nxge,
539 	vpc_type_t type,
540 	int channel)
541 {
542 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
543 	nxge_hio_dc_t *dc;
544 	nxge_grp_set_t *set;
545 	nxge_grp_t *group;
546 
547 	dc_uninit_t uninit;
548 
549 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
550 
551 	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
552 		goto nxge_grp_dc_remove_exit;
553 
554 	if ((dc->group == NULL) && (dc->next == 0) &&
555 	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
556 		goto nxge_grp_dc_remove_exit;
557 	}
558 
559 	group = (nxge_grp_t *)dc->group;
560 
561 	if (isLDOMguest(nxge)) {
562 		(void) nxge_hio_intr_remove(nxge, type, channel);
563 	}
564 
565 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
566 	    "DC remove: group = %d.%d.%d, %cdc %d",
567 	    nxge->mac.portnum, group->sequence, group->count,
568 	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));
569 
570 	MUTEX_ENTER(&nhd->lock);
571 
572 	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
573 
574 	/* Remove the DC from its group. */
575 	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
576 		MUTEX_EXIT(&nhd->lock);
577 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
578 		    "nxge_grp_dc_remove(%d) failed", channel));
579 		goto nxge_grp_dc_remove_exit;
580 	}
581 
582 	uninit = dc->uninit;
583 	channel = dc->channel;
584 
585 	NXGE_DC_RESET(set->owned.map, channel);
586 	set->owned.count--;
587 
588 	(void) memset(dc, 0, sizeof (*dc));
589 
590 	MUTEX_EXIT(&nhd->lock);
591 
592 	(*uninit)(nxge, channel);
593 
594 nxge_grp_dc_remove_exit:
595 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
596 }
597 
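/*
 * nxge_grp_dc_find
 *
 *	Find the nxge_hio_dc_t data structure for a DMA channel.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 * 	channel	The channel to look up
 *
 * Notes:
 *	In a guest domain, only channels belonging to one of this
 *	instance's virtualization regions are returned.
 *
 * Context:
 *	Any domain
 */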
598 nxge_hio_dc_t *
599 nxge_grp_dc_find(
600 	nxge_t *nxge,
601 	vpc_type_t type,	/* Rx or Tx */
602 	int channel)
603 {
604 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
605 	nxge_hio_dc_t *current;
606 
607 	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
608 
609 	if (!isLDOMguest(nxge)) {
610 		return (&current[channel]);
611 	} else {
612 		/* We're in a guest domain. */
613 		int i, limit = (type == VP_BOUND_TX) ?
614 		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;
615 
616 		MUTEX_ENTER(&nhd->lock);
617 		for (i = 0; i < limit; i++, current++) {
618 			if (current->channel == channel) {
619 				if (current->vr && current->vr->nxge ==
620 				    (uintptr_t)nxge) {
621 					MUTEX_EXIT(&nhd->lock);
622 					return (current);
623 				}
624 			}
625 		}
626 		MUTEX_EXIT(&nhd->lock);
627 	}
628 
629 	return (0);
630 }
631 
632 /*
633  * nxge_grp_dc_append
634  *
635  *	Append a DMA channel to a group.
636  *
637  * Arguments:
638  * 	nxge
639  * 	group	The group to append to
640  * 	dc	The DMA channel to append
641  *
642  * Notes:
643  *
644  * Context:
645  *	Any domain
646  */
647 static
648 void
649 nxge_grp_dc_append(
650 	nxge_t *nxge,
651 	nxge_grp_t *group,
652 	nxge_hio_dc_t *dc)
653 {
654 	MUTEX_ENTER(&nxge->group_lock);
655 
656 	if (group->dc == 0) {
657 		group->dc = dc;
658 	} else {
659 		nxge_hio_dc_t *current = group->dc;
660 		do {
661 			if (current->next == 0) {
662 				current->next = dc;
663 				break;
664 			}
665 			current = current->next;
666 		} while (current);
667 	}
668 
669 	NXGE_DC_SET(group->map, dc->channel);
670 
671 	nxge_grp_dc_map(group);
672 	group->count++;
673 
674 	MUTEX_EXIT(&nxge->group_lock);
675 }
676 
677 /*
678  * nxge_grp_dc_unlink
679  *
680  *	Unlink a DMA channel from its linked list (group).
681  *
682  * Arguments:
683  * 	nxge
684  * 	group	The group (linked list) to unlink from
685  * 	channel	The channel number of the DC to unlink
686  *
687  * Notes:
688  *
689  * Context:
690  *	Any domain
691  */
692 nxge_hio_dc_t *
693 nxge_grp_dc_unlink(
694 	nxge_t *nxge,
695 	nxge_grp_t *group,
696 	int channel)
697 {
698 	nxge_hio_dc_t *current, *previous;
699 
700 	MUTEX_ENTER(&nxge->group_lock);
701 
702 	if (group == NULL) {
703 		MUTEX_EXIT(&nxge->group_lock);
704 		return (0);
705 	}
706 
707 	if ((current = group->dc) == 0) {
708 		MUTEX_EXIT(&nxge->group_lock);
709 		return (0);
710 	}
711 
712 	previous = 0;
713 	do {
714 		if (current->channel == channel) {
715 			if (previous)
716 				previous->next = current->next;
717 			else
718 				group->dc = current->next;
719 			break;
720 		}
721 		previous = current;
722 		current = current->next;
723 	} while (current);
724 
725 	if (current == 0) {
726 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
727 		    "DC unlink: DC %d not found", channel));
728 	} else {
729 		current->next = 0;
730 		current->group = 0;
731 
732 		NXGE_DC_RESET(group->map, channel);
733 		group->count--;
734 	}
735 
736 	nxge_grp_dc_map(group);
737 
738 	MUTEX_EXIT(&nxge->group_lock);
739 
740 	return (current);
741 }
742 
743 /*
744  * nxge_grp_dc_map
745  *
746  *	Map a linked list to an array of channel numbers.
747  *
748  * Arguments:
749  * 	nxge
750  * 	group	The group to remap.
751  *
752  * Notes:
753  *	It is expected that the caller will hold the correct mutex.
754  *
755  * Context:
756  *	Service domain
757  */
758 void
759 nxge_grp_dc_map(
760 	nxge_grp_t *group)
761 {
762 	nxge_channel_t *legend;
763 	nxge_hio_dc_t *dc;
764 
765 	(void) memset(group->legend, 0, sizeof (group->legend));
766 
767 	legend = group->legend;
768 	dc = group->dc;
769 	while (dc) {
770 		*legend = dc->channel;
771 		legend++;
772 		dc = dc->next;
773 	}
774 }
775 
776 /*
777  * ---------------------------------------------------------------------
778  * These are HIO debugging functions.
779  * ---------------------------------------------------------------------
780  */
781 
782 /*
783  * nxge_delay
784  *
785  *	Delay <seconds> number of seconds.
786  *
787  * Arguments:
788  * 	seconds	The number of seconds to delay
791  *
792  * Notes:
793  *	This is a developer-only function.
794  *
795  * Context:
796  *	Any domain
797  */
798 void
799 nxge_delay(
800 	int seconds)
801 {
802 	delay(drv_usectohz(seconds * 1000000));
803 }
804 
805 static dmc_reg_name_t rx_names[] = {
806 	{ "RXDMA_CFIG1",	0 },
807 	{ "RXDMA_CFIG2",	8 },
808 	{ "RBR_CFIG_A",		0x10 },
809 	{ "RBR_CFIG_B",		0x18 },
810 	{ "RBR_KICK",		0x20 },
811 	{ "RBR_STAT",		0x28 },
812 	{ "RBR_HDH",		0x30 },
813 	{ "RBR_HDL",		0x38 },
814 	{ "RCRCFIG_A",		0x40 },
815 	{ "RCRCFIG_B",		0x48 },
816 	{ "RCRSTAT_A",		0x50 },
817 	{ "RCRSTAT_B",		0x58 },
818 	{ "RCRSTAT_C",		0x60 },
819 	{ "RX_DMA_ENT_MSK",	0x68 },
820 	{ "RX_DMA_CTL_STAT",	0x70 },
821 	{ "RCR_FLSH",		0x78 },
822 	{ "RXMISC",		0x90 },
823 	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
824 	{ 0, -1 }
825 };
826 
827 static dmc_reg_name_t tx_names[] = {
828 	{ "Tx_RNG_CFIG",	0 },
829 	{ "Tx_RNG_HDL",		0x10 },
830 	{ "Tx_RNG_KICK",	0x18 },
831 	{ "Tx_ENT_MASK",	0x20 },
832 	{ "Tx_CS",		0x28 },
833 	{ "TxDMA_MBH",		0x30 },
834 	{ "TxDMA_MBL",		0x38 },
835 	{ "TxDMA_PRE_ST",	0x40 },
836 	{ "Tx_RNG_ERR_LOGH",	0x48 },
837 	{ "Tx_RNG_ERR_LOGL",	0x50 },
838 	{ "TDMC_INTR_DBG",	0x60 },
839 	{ "Tx_CS_DBG",		0x68 },
840 	{ 0, -1 }
841 };
842 
843 /*
844  * nxge_xx2str
845  *
846  *	Translate a register address into a string.
847  *
848  * Arguments:
849  * 	offset	The address of the register to translate.
850  *
851  * Notes:
852  *	These are developer-only functions.
853  *
854  * Context:
855  *	Any domain
856  */
857 const char *
858 nxge_rx2str(
859 	int offset)
860 {
861 	dmc_reg_name_t *reg = &rx_names[0];
862 
863 	offset &= DMA_CSR_MASK;
864 
865 	while (reg->name) {
866 		if (offset == reg->offset)
867 			return (reg->name);
868 		reg++;
869 	}
870 
871 	return (0);
872 }
873 
874 const char *
875 nxge_tx2str(
876 	int offset)
877 {
878 	dmc_reg_name_t *reg = &tx_names[0];
879 
880 	offset &= DMA_CSR_MASK;
881 
882 	while (reg->name) {
883 		if (offset == reg->offset)
884 			return (reg->name);
885 		reg++;
886 	}
887 
888 	return (0);
889 }
890 
891 /*
892  * nxge_ddi_perror
893  *
894  *	Map a DDI error number to a string.
895  *
896  * Arguments:
897  * 	ddi_error	The DDI error number to map.
898  *
899  * Notes:
900  *
901  * Context:
902  *	Any domain
903  */
904 const char *
905 nxge_ddi_perror(
906 	int ddi_error)
907 {
908 	switch (ddi_error) {
909 	case DDI_SUCCESS:
910 		return ("DDI_SUCCESS");
911 	case DDI_FAILURE:
912 		return ("DDI_FAILURE");
913 	case DDI_NOT_WELL_FORMED:
914 		return ("DDI_NOT_WELL_FORMED");
915 	case DDI_EAGAIN:
916 		return ("DDI_EAGAIN");
917 	case DDI_EINVAL:
918 		return ("DDI_EINVAL");
919 	case DDI_ENOTSUP:
920 		return ("DDI_ENOTSUP");
921 	case DDI_EPENDING:
922 		return ("DDI_EPENDING");
923 	case DDI_ENOMEM:
924 		return ("DDI_ENOMEM");
925 	case DDI_EBUSY:
926 		return ("DDI_EBUSY");
927 	case DDI_ETRANSPORT:
928 		return ("DDI_ETRANSPORT");
929 	case DDI_ECONTEXT:
930 		return ("DDI_ECONTEXT");
931 	default:
932 		return ("Unknown error");
933 	}
934 }
935 
936 /*
937  * ---------------------------------------------------------------------
938  * These are Sun4v HIO function definitions
939  * ---------------------------------------------------------------------
940  */
941 
942 #if defined(sun4v)
943 
944 /*
945  * Local prototypes
946  */
947 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
948 static void nxge_hio_unshare(nxge_hio_vr_t *);
949 
950 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
951 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
952 
953 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
954 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
955 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
956 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
957     mac_ring_type_t, int);
958 
959 /*
960  * nxge_hio_init
961  *
962  *	Initialize the HIO module of the NXGE driver.
963  *
964  * Arguments:
965  * 	nxge
966  *
967  * Notes:
968  *
969  * Context:
970  *	Any domain
971  */
972 int
973 nxge_hio_init(nxge_t *nxge)
974 {
975 	nxge_hio_data_t *nhd;
976 	int i, region;
977 
978 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
979 	if (nhd == 0) {
980 		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
981 		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
982 		if (isLDOMguest(nxge))
983 			nhd->type = NXGE_HIO_TYPE_GUEST;
984 		else
985 			nhd->type = NXGE_HIO_TYPE_SERVICE;
986 		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
987 	}
988 
989 	if ((nxge->environs == SOLARIS_DOMAIN) &&
990 	    (nxge->niu_type == N2_NIU)) {
991 		if (nxge->niu_hsvc_available == B_TRUE) {
992 			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
993 			/*
994 			 * Versions supported now are:
995 			 *  - major number >= 1 (NIU_MAJOR_VER).
996 			 */
997 			if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
998 			    (niu_hsvc->hsvc_major == 1 &&
999 			    niu_hsvc->hsvc_minor == 1)) {
1000 				nxge->environs = SOLARIS_SERVICE_DOMAIN;
1001 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1002 				    "nxge_hio_init: hypervisor services "
1003 				    "version %d.%d",
1004 				    niu_hsvc->hsvc_major,
1005 				    niu_hsvc->hsvc_minor));
1006 			}
1007 		}
1008 	}
1009 
1010 	/*
1011 	 * Initialize share and ring group structures.
1012 	 */
1013 	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1014 		nxge->tx_hio_groups[i].ghandle = NULL;
1015 		nxge->tx_hio_groups[i].nxgep = nxge;
1016 		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1017 		nxge->tx_hio_groups[i].gindex = 0;
1018 		nxge->tx_hio_groups[i].sindex = 0;
1019 	}
1020 
1021 	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1022 		nxge->rx_hio_groups[i].ghandle = NULL;
1023 		nxge->rx_hio_groups[i].nxgep = nxge;
1024 		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1025 		nxge->rx_hio_groups[i].gindex = 0;
1026 		nxge->rx_hio_groups[i].sindex = 0;
1027 		nxge->rx_hio_groups[i].started = B_FALSE;
1028 		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
1029 		nxge->rx_hio_groups[i].rdctbl = -1;
1030 		nxge->rx_hio_groups[i].n_mac_addrs = 0;
1031 	}
1032 
1033 	if (!isLDOMs(nxge)) {
1034 		nhd->hio.ldoms = B_FALSE;
1035 		return (NXGE_OK);
1036 	}
1037 
1038 	nhd->hio.ldoms = B_TRUE;
1039 
1040 	/*
1041 	 * Fill in what we can.
1042 	 */
1043 	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
1044 		nhd->vr[region].region = region;
1045 	}
1046 	nhd->vrs = NXGE_VR_SR_MAX - 2;
1047 
1048 	/*
1049 	 * Initialize the share structures.
1050 	 */
1051 	for (i = 0; i < NXGE_MAX_TDCS; i++)
1052 		nxge->tdc_is_shared[i] = B_FALSE;
1053 
1054 	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
1055 		nxge->shares[i].nxgep = nxge;
1056 		nxge->shares[i].index = 0;
1057 		nxge->shares[i].vrp = NULL;
1058 		nxge->shares[i].tmap = 0;
1059 		nxge->shares[i].rmap = 0;
1060 		nxge->shares[i].rxgroup = 0;
1061 		nxge->shares[i].active = B_FALSE;
1062 	}
1063 
1064 	/* Fill in the HV HIO function pointers. */
1065 	nxge_hio_hv_init(nxge);
1066 
1067 	if (isLDOMservice(nxge)) {
1068 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
1069 		    "Hybrid IO-capable service domain"));
1070 		return (NXGE_OK);
1071 	}
1072 
1073 	return (0);
1074 }
1075 #endif /* defined(sun4v) */
1076 
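/*
 * nxge_hio_group_mac_add
 *
 *	Add a MAC address to a receive ring group: configure the
 *	group's RDC table (if not already configured) and program the
 *	address into an alternate MAC address slot.
 */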
1077 static int
1078 nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
1079     const uint8_t *macaddr)
1080 {
1081 	int rv;
1082 	nxge_rdc_grp_t *group;
1083 
1084 	mutex_enter(nxge->genlock);
1085 
1086 	/*
1087 	 * Initialize the NXGE RDC table data structure.
1088 	 */
1089 	group = &nxge->pt_config.rdc_grps[g->rdctbl];
1090 	if (!group->flag) {
1091 		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
1092 		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
1093 		group->flag = B_TRUE;	/* This group has been configured. */
1094 	}
1095 
1096 	mutex_exit(nxge->genlock);
1097 
1098 	/*
1099 	 * Add the MAC address.
1100 	 */
1101 	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
1102 	    g->rdctbl, B_TRUE)) != 0) {
1103 		return (rv);
1104 	}
1105 
1106 	mutex_enter(nxge->genlock);
1107 	g->n_mac_addrs++;
1108 	mutex_exit(nxge->genlock);
1109 	return (0);
1110 }
1111 
1112 static int
1113 nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
1114 {
1115 	p_nxge_t		nxgep = (p_nxge_t)arg;
1116 	struct ether_addr	addrp;
1117 
1118 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
1119 	if (nxge_set_mac_addr(nxgep, &addrp)) {
1120 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1121 		    "<== nxge_hio_set_unicst: set unicast failed"));
1122 		return (EINVAL);
1123 	}
1124 
1125 	nxgep->primary = B_TRUE;
1126 
1127 	return (0);
1128 }
1129 
1130 /*ARGSUSED*/
1131 static int
1132 nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
1133 {
1134 	nxgep->primary = B_FALSE;
1135 	return (0);
1136 }
1137 
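/*
 * nxge_hio_add_mac
 *
 *	MAC layer entry point to add a MAC address to a receive ring
 *	group.  On the port's default group the address may become the
 *	primary (unicast) address; a group bound to a VR may be
 *	assigned only one address.
 */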
1138 static int
1139 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
1140 {
1141 	nxge_ring_group_t	*group = (nxge_ring_group_t *)arg;
1142 	p_nxge_t		nxge = group->nxgep;
1143 	int			rv;
1144 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1145 
1146 	ASSERT(group->type == MAC_RING_TYPE_RX);
1147 	ASSERT(group->nxgep != NULL);
1148 
1149 	if (isLDOMguest(group->nxgep))
1150 		return (0);
1151 
1152 	mutex_enter(nxge->genlock);
1153 
1154 	if (!nxge->primary && group->port_default_grp) {
1155 		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
1156 		mutex_exit(nxge->genlock);
1157 		return (rv);
1158 	}
1159 
1160 	/*
1161 	 * If the group is associated with a VR, then only one
1162 	 * address may be assigned to the group.
1163 	 */
1164 	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
1165 	if ((vr != NULL) && (group->n_mac_addrs)) {
1166 		mutex_exit(nxge->genlock);
1167 		return (ENOSPC);
1168 	}
1169 
1170 	mutex_exit(nxge->genlock);
1171 
1172 	/*
1173 	 * Program the mac address for the group.
1174 	 */
1175 	if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
1176 		return (rv);
1177 	}
1178 
1179 	return (0);
1180 }
1181 
1182 static int
1183 find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
1184 {
1185 	int i;
1186 	for (i = 0; i <= mmac_info->num_mmac; i++) {
1187 		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
1188 		    ETHERADDRL) == 0) {
1189 			return (i);
1190 		}
1191 	}
1192 	return (-1);
1193 }
1194 
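/*
 * nxge_hio_rem_mac
 *
 *	MAC layer entry point to remove a MAC address from a receive
 *	ring group.  Removing the primary address of the port's default
 *	group simply clears the primary flag.
 */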
1195 /* ARGSUSED */
1196 static int
1197 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
1198 {
1199 	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1200 	struct ether_addr addrp;
1201 	p_nxge_t nxge = group->nxgep;
1202 	nxge_mmac_t *mmac_info;
1203 	int rv, slot;
1204 
1205 	ASSERT(group->type == MAC_RING_TYPE_RX);
1206 	ASSERT(group->nxgep != NULL);
1207 
1208 	if (isLDOMguest(group->nxgep))
1209 		return (0);
1210 
1211 	mutex_enter(nxge->genlock);
1212 
1213 	mmac_info = &nxge->nxge_mmac_info;
1214 	slot = find_mac_slot(mmac_info, mac_addr);
1215 	if (slot < 0) {
1216 		if (group->port_default_grp && nxge->primary) {
1217 			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
1218 			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
1219 				rv = nxge_hio_clear_unicst(nxge, mac_addr);
1220 				mutex_exit(nxge->genlock);
1221 				return (rv);
1222 			} else {
1223 				mutex_exit(nxge->genlock);
1224 				return (EINVAL);
1225 			}
1226 		} else {
1227 			mutex_exit(nxge->genlock);
1228 			return (EINVAL);
1229 		}
1230 	}
1231 
1232 	mutex_exit(nxge->genlock);
1233 
1234 	/*
1235 	 * Remove the mac address for the group
1236 	 */
1237 	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
1238 		return (rv);
1239 	}
1240 
1241 	mutex_enter(nxge->genlock);
1242 	group->n_mac_addrs--;
1243 	mutex_exit(nxge->genlock);
1244 
1245 	return (0);
1246 }
1247 
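/*
 * nxge_hio_group_start
 *
 *	MAC layer entry point to start a receive ring group: bind an
 *	RDC table to the group (group 0's table was bound at attach
 *	time) and initialize it.
 */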
1248 static int
1249 nxge_hio_group_start(mac_group_driver_t gdriver)
1250 {
1251 	nxge_ring_group_t	*group = (nxge_ring_group_t *)gdriver;
1252 	nxge_rdc_grp_t		*rdc_grp_p;
1253 	int			rdctbl;
1254 	int			dev_gindex;
1255 
1256 	ASSERT(group->type == MAC_RING_TYPE_RX);
1257 	ASSERT(group->nxgep != NULL);
1258 
1259 	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
1260 	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
1261 		return (ENXIO);
1262 
1263 	mutex_enter(group->nxgep->genlock);
1264 	if (isLDOMguest(group->nxgep))
1265 		goto nxge_hio_group_start_exit;
1266 
1267 	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1268 	    group->gindex;
1269 	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];
1270 
1271 	/*
1272 	 * Get an rdc table for this group.
1273 	 * Group ID is given by the caller, and that's the group it needs
1274 	 * to bind to.  The default group is already bound when the driver
1275 	 * was attached.
1276 	 *
1277 	 * For Group 0, its RDC table was allocated at attach time, so
1278 	 * there is no need to allocate a new table.
1279 	 */
1280 	if (group->gindex != 0) {
1281 		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
1282 		    dev_gindex, B_TRUE);
1283 		if (rdctbl < 0) {
1284 			mutex_exit(group->nxgep->genlock);
1285 			return (rdctbl);
1286 		}
1287 	} else {
1288 		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
1289 	}
1290 
1291 	group->rdctbl = rdctbl;
1292 
1293 	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);
1294 
1295 nxge_hio_group_start_exit:
1296 	group->started = B_TRUE;
1297 	mutex_exit(group->nxgep->genlock);
1298 	return (0);
1299 }
1300 
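/*
 * nxge_hio_group_stop
 *
 *	MAC layer entry point to stop a receive ring group: unbind the
 *	RDC table that was bound by nxge_hio_group_start().
 */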
1301 static void
1302 nxge_hio_group_stop(mac_group_driver_t gdriver)
1303 {
1304 	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1305 
1306 	ASSERT(group->type == MAC_RING_TYPE_RX);
1307 
1308 	mutex_enter(group->nxgep->genlock);
1309 	group->started = B_FALSE;
1310 
1311 	if (isLDOMguest(group->nxgep))
1312 		goto nxge_hio_group_stop_exit;
1313 
1314 	/*
1315 	 * Unbind the RDC table previously bound for this group.
1316 	 *
1317 	 * Since RDC table for group 0 was allocated at attach
1318 	 * time, no need to unbind the table here.
1319 	 */
1320 	if (group->gindex != 0)
1321 		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1322 
1323 nxge_hio_group_stop_exit:
1324 	mutex_exit(group->nxgep->genlock);
1325 }
1326 
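/*
 * nxge_hio_group_get
 *
 *	Fill in the mac_group_info_t describing a receive or transmit
 *	ring group for the MAC layer.
 */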
1327 /* ARGSUSED */
1328 void
1329 nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
1330 	mac_group_info_t *infop, mac_group_handle_t ghdl)
1331 {
1332 	p_nxge_t		nxgep = (p_nxge_t)arg;
1333 	nxge_ring_group_t	*group;
1334 	int			dev_gindex;
1335 
1336 	switch (type) {
1337 	case MAC_RING_TYPE_RX:
1338 		group = &nxgep->rx_hio_groups[groupid];
1339 		group->nxgep = nxgep;
1340 		group->ghandle = ghdl;
1341 		group->gindex = groupid;
1342 		group->sindex = 0;	/* not yet bound to a share */
1343 
1344 		if (!isLDOMguest(nxgep)) {
1345 			dev_gindex =
1346 			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1347 			    groupid;
1348 
1349 			if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
1350 			    dev_gindex)
1351 				group->port_default_grp = B_TRUE;
1352 
1353 			infop->mgi_count =
1354 			    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
1355 		} else {
1356 			infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
1357 		}
1358 
1359 		infop->mgi_driver = (mac_group_driver_t)group;
1360 		infop->mgi_start = nxge_hio_group_start;
1361 		infop->mgi_stop = nxge_hio_group_stop;
1362 		infop->mgi_addmac = nxge_hio_add_mac;
1363 		infop->mgi_remmac = nxge_hio_rem_mac;
1364 		break;
1365 
1366 	case MAC_RING_TYPE_TX:
1367 		/*
1368 		 * 'groupid' for TX should be incremented by one since
1369 		 * the default group (groupid 0) is not known by the MAC layer
1370 		 */
1371 		group = &nxgep->tx_hio_groups[groupid + 1];
1372 		group->nxgep = nxgep;
1373 		group->ghandle = ghdl;
1374 		group->gindex = groupid + 1;
1375 		group->sindex = 0;	/* not yet bound to a share */
1376 
1377 		infop->mgi_driver = (mac_group_driver_t)group;
1378 		infop->mgi_start = NULL;
1379 		infop->mgi_stop = NULL;
1380 		infop->mgi_addmac = NULL;	/* not needed */
1381 		infop->mgi_remmac = NULL;	/* not needed */
1382 		/* no rings associated with group initially */
1383 		infop->mgi_count = 0;
1384 		break;
1385 	}
1386 }
1387 
1388 #if defined(sun4v)
1389 
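/*
 * nxge_hio_share_assign
 *
 *	Ask the hypervisor to assign the VR identified by <cookie>, and
 *	to assign a page (slot) for each DMA channel already bound to
 *	the VR.  The chosen slots are returned in <tmap> and <rmap>.
 *
 * Context:
 *	Service domain
 */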
1390 int
1391 nxge_hio_share_assign(
1392 	nxge_t *nxge,
1393 	uint64_t cookie,
1394 	res_map_t *tmap,
1395 	res_map_t *rmap,
1396 	nxge_hio_vr_t *vr)
1397 {
1398 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1399 	uint64_t slot, hv_rv;
1400 	nxge_hio_dc_t *dc;
1401 	nxhv_vr_fp_t *fp;
1402 	int i;
1403 	uint64_t major;
1404 
1405 	/*
1406 	 * Ask the Hypervisor to set up the VR for us
1407 	 */
1408 	fp = &nhd->hio.vr;
1409 	major = nxge->niu_hsvc.hsvc_major;
1410 	switch (major) {
1411 	case NIU_MAJOR_VER: /* 1 */
1412 		if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1413 			NXGE_ERROR_MSG((nxge, HIO_CTL,
1414 			    "nxge_hio_share_assign: major %d "
1415 			    "vr->assign() returned %d", major, hv_rv));
1416 			nxge_hio_unshare(vr);
1417 			return (-EIO);
1418 		}
1419 
1420 		break;
1421 
1422 	case NIU_MAJOR_VER_2: /* 2 */
1423 	default:
1424 		if ((hv_rv = (*fp->cfgh_assign)
1425 		    (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
1426 			NXGE_ERROR_MSG((nxge, HIO_CTL,
1427 			    "nxge_hio_share_assign: major %d "
1428 			    "vr->cfgh_assign() returned %d", major, hv_rv));
1429 			nxge_hio_unshare(vr);
1430 			return (-EIO);
1431 		}
1432 
1433 		break;
1434 	}
1435 
1436 	NXGE_DEBUG_MSG((nxge, HIO_CTL,
1437 	    "nxge_hio_share_assign: major %d "
1438 	    "vr->assign() success", major));
1439 
1440 	/*
1441 	 * For each shared TDC, ask the HV to find us an empty slot.
1442 	 */
1443 	dc = vr->tx_group.dc;
1444 	for (i = 0; i < NXGE_MAX_TDCS; i++) {
1445 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1446 		while (dc) {
1447 			hv_rv = (*tx->assign)
1448 			    (vr->cookie, dc->channel, &slot);
1449 			if (hv_rv != 0) {
1450 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1451 				    "nxge_hio_share_assign: "
1452 				    "tx->assign(%x, %d) failed: %ld",
1453 				    vr->cookie, dc->channel, hv_rv));
1454 				return (-EIO);
1455 			}
1456 
1457 			dc->cookie = vr->cookie;
1458 			dc->page = (vp_channel_t)slot;
1459 
1460 			/* Inform the caller about the slot chosen. */
1461 			(*tmap) |= 1 << slot;
1462 
1463 			dc = dc->next;
1464 		}
1465 	}
1466 
1467 	/*
1468 	 * For each shared RDC, ask the HV to find us an empty slot.
1469 	 */
1470 	dc = vr->rx_group.dc;
1471 	for (i = 0; i < NXGE_MAX_RDCS; i++) {
1472 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1473 		while (dc) {
1474 			hv_rv = (*rx->assign)
1475 			    (vr->cookie, dc->channel, &slot);
1476 			if (hv_rv != 0) {
1477 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1478 				    "nxge_hio_share_assign: "
1479 				    "rx->assign(%x, %d) failed: %ld",
1480 				    vr->cookie, dc->channel, hv_rv));
1481 				return (-EIO);
1482 			}
1483 
1484 			dc->cookie = vr->cookie;
1485 			dc->page = (vp_channel_t)slot;
1486 
1487 			/* Inform the caller about the slot chosen. */
1488 			(*rmap) |= 1 << slot;
1489 
1490 			dc = dc->next;
1491 		}
1492 	}
1493 
1494 	return (0);
1495 }
1496 
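/*
 * nxge_hio_share_unassign
 *
 *	Ask the hypervisor to unassign the DMA channel pages and then
 *	the VR itself.
 *
 * Context:
 *	Service domain
 */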
1497 void
1498 nxge_hio_share_unassign(
1499 	nxge_hio_vr_t *vr)
1500 {
1501 	nxge_t *nxge = (nxge_t *)vr->nxge;
1502 	nxge_hio_data_t *nhd;
1503 	nxge_hio_dc_t *dc;
1504 	nxhv_vr_fp_t *fp;
1505 	uint64_t hv_rv;
1506 
1507 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1508 
1509 	dc = vr->tx_group.dc;
1510 	while (dc) {
1511 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1512 		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1513 		if (hv_rv != 0) {
1514 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1515 			    "nxge_hio_share_unassign: "
1516 			    "tx->unassign(%x, %d) failed: %ld",
1517 			    vr->cookie, dc->page, hv_rv));
1518 		}
1519 		dc = dc->next;
1520 	}
1521 
1522 	dc = vr->rx_group.dc;
1523 	while (dc) {
1524 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1525 		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1526 		if (hv_rv != 0) {
1527 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1528 			    "nxge_hio_share_unassign: "
1529 			    "rx->unassign(%x, %d) failed: %ld",
1530 			    vr->cookie, dc->page, hv_rv));
1531 		}
1532 		dc = dc->next;
1533 	}
1534 
1535 	fp = &nhd->hio.vr;
1536 	if (fp->unassign) {
1537 		hv_rv = (*fp->unassign)(vr->cookie);
1538 		if (hv_rv != 0) {
1539 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1540 			    "nxge_hio_share_unassign: "
1541 			    "vr->unassign(%x) failed: %ld",
1542 			    vr->cookie, hv_rv));
1543 		}
1544 	}
1545 }
1546 
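/*
 * nxge_hio_share_alloc
 *
 *	Share allocation entry point called by the MAC layer: reserve
 *	an unused VR and return it wrapped in a share handle.
 *
 * Context:
 *	Service domain
 */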
1547 int
1548 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1549 {
1550 	p_nxge_t		nxge = (p_nxge_t)arg;
1551 	nxge_share_handle_t	*shp;
1552 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1553 	nxge_hio_data_t		*nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1554 
1555 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1556 
1557 	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1558 	    nhd->hio.rx.assign == 0) {
1559 		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1560 		return (EIO);
1561 	}
1562 
1563 	/*
1564 	 * Get a VR.
1565 	 */
1566 	if ((vr = nxge_hio_vr_share(nxge)) == 0)
1567 		return (EAGAIN);
1568 
1569 	shp = &nxge->shares[vr->region];
1570 	shp->nxgep = nxge;
1571 	shp->index = vr->region;
1572 	shp->vrp = (void *)vr;
1573 	shp->tmap = shp->rmap = 0;	/* to be assigned by ms_sbind */
1574 	shp->rxgroup = 0;		/* to be assigned by ms_sadd */
1575 	shp->active = B_FALSE;		/* not bound yet */
1576 
1577 	*shandle = (mac_share_handle_t)shp;
1578 
1579 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1580 	return (0);
1581 }
1582 
1583 
1584 void
1585 nxge_hio_share_free(mac_share_handle_t shandle)
1586 {
1587 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1588 	nxge_hio_vr_t		*vr;
1589 
1590 	/*
1591 	 * Clear internal handle state.
1592 	 */
1593 	vr = shp->vrp;
1594 	shp->vrp = (void *)NULL;
1595 	shp->index = 0;
1596 	shp->tmap = 0;
1597 	shp->rmap = 0;
1598 	shp->rxgroup = 0;
1599 	shp->active = B_FALSE;
1600 
1601 	/*
1602 	 * Free VR resource.
1603 	 */
1604 	nxge_hio_unshare(vr);
1605 }
1606 
1607 
1608 void
1609 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1610     mac_ring_handle_t *rings, uint_t *n_rings)
1611 {
1612 	nxge_t			*nxge;
1613 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1614 	nxge_ring_handle_t	*rh;
1615 	uint32_t		offset;
1616 
1617 	nxge = shp->nxgep;
1618 
1619 	switch (type) {
1620 	case MAC_RING_TYPE_RX:
1621 		rh = nxge->rx_ring_handles;
1622 		offset = nxge->pt_config.hw_config.start_rdc;
1623 		break;
1624 
1625 	case MAC_RING_TYPE_TX:
1626 		rh = nxge->tx_ring_handles;
1627 		offset = nxge->pt_config.hw_config.tdc.start;
1628 		break;
1629 	}
1630 
1631 	/*
1632 	 * In version 1.0, we may only give a VR 2 RDCs/TDCs.  Not only that,
1633 	 * but the HV has statically assigned the channels like so:
1634 	 * VR0: RDC0 & RDC1
1635 	 * VR1: RDC2 & RDC3, etc.
1636 	 * The TDCs are assigned in exactly the same way.
1637 	 */
1638 	if (rings != NULL) {
1639 		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1640 		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1641 	}
1642 	if (n_rings != NULL) {
1643 		*n_rings = 2;
1644 	}
1645 }
1646 
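/*
 * nxge_hio_share_add_group
 *
 *	Bind a ring group to a share, after verifying that the group's
 *	rings are the channels statically assigned to the share's VR.
 *
 * Context:
 *	Service domain
 */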
1647 int
1648 nxge_hio_share_add_group(mac_share_handle_t shandle,
1649     mac_group_driver_t ghandle)
1650 {
1651 	nxge_t			*nxge;
1652 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1653 	nxge_ring_group_t	*rg = (nxge_ring_group_t *)ghandle;
1654 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1655 	nxge_grp_t		*group;
1656 	int			i;
1657 
1658 	if (rg->sindex != 0) {
1659 		/* the group is already bound to a share */
1660 		return (EALREADY);
1661 	}
1662 
1663 	/*
1664 	 * Adding group 0 (the port's default group) to a share is
1665 	 * not allowed.
1666 	 */
1667 	ASSERT(rg->gindex != 0);
1668 
1669 	nxge = rg->nxgep;
1670 	vr = shp->vrp;
1671 
1672 	switch (rg->type) {
1673 	case MAC_RING_TYPE_RX:
1674 		/*
1675 		 * Make sure that the group has the right rings associated
1676 		 * for the share. In version 1.0, we may only give a VR
1677 		 * 2 RDCs.  Not only that, but the HV has statically
1678 		 * assigned the channels like so:
1679 		 * VR0: RDC0 & RDC1
1680 		 * VR1: RDC2 & RDC3, etc.
1681 		 */
1682 		group = nxge->rx_set.group[rg->gindex];
1683 
1684 		if (group->count > 2) {
1685 			/* a share can have at most 2 rings */
1686 			return (EINVAL);
1687 		}
1688 
1689 		for (i = 0; i < NXGE_MAX_RDCS; i++) {
1690 			if (group->map & (1 << i)) {
1691 				if ((i != shp->index * 2) &&
1692 				    (i != (shp->index * 2 + 1))) {
1693 					/*
1694 					 * A group with invalid rings was
1695 					 * attempted to bind to this share
1696 					 */
1697 					return (EINVAL);
1698 				}
1699 			}
1700 		}
1701 
1702 		rg->sindex = vr->region;
1703 		vr->rdc_tbl = rg->rdctbl;
1704 		shp->rxgroup = vr->rdc_tbl;
1705 		break;
1706 
1707 	case MAC_RING_TYPE_TX:
1708 		/*
1709 		 * Make sure that the group has the right rings associated
1710 		 * for the share. In version 1.0, we may only give a VR
1711 		 * 2 TDCs.  Not only that, but the HV has statically
1712 		 * assigned the channels like so:
1713 		 * VR0: TDC0 & TDC1
1714 		 * VR1: TDC2 & TDC3, etc.
1715 		 */
1716 		group = nxge->tx_set.group[rg->gindex];
1717 
1718 		if (group->count > 2) {
1719 			/* a share can have at most 2 rings */
1720 			return (EINVAL);
1721 		}
1722 
1723 		for (i = 0; i < NXGE_MAX_TDCS; i++) {
1724 			if (group->map & (1 << i)) {
1725 				if ((i != shp->index * 2) &&
1726 				    (i != (shp->index * 2 + 1))) {
1727 					/*
1728 					 * A group with invalid rings was
1729 					 * attempted to bind to this share
1730 					 */
1731 					return (EINVAL);
1732 				}
1733 			}
1734 		}
1735 
1736 		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1737 		    rg->gindex;
1738 		rg->sindex = vr->region;
1739 		break;
1740 	}
1741 	return (0);
1742 }
1743 
1744 int
1745 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1746     mac_group_driver_t ghandle)
1747 {
1748 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1749 	nxge_ring_group_t	*group = (nxge_ring_group_t *)ghandle;
1750 	nxge_hio_vr_t		*vr;	/* The Virtualization Region */
1751 	int			rv = 0;
1752 
1753 	vr = shp->vrp;
1754 
1755 	switch (group->type) {
1756 	case MAC_RING_TYPE_RX:
1757 		group->sindex = 0;
1758 		vr->rdc_tbl = 0;
1759 		shp->rxgroup = 0;
1760 		break;
1761 
1762 	case MAC_RING_TYPE_TX:
1763 		group->sindex = 0;
1764 		vr->tdc_tbl = 0;
1765 		break;
1766 	}
1767 
1768 	return (rv);
1769 }
1770 
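/*
 * nxge_hio_share_bind
 *
 *	Bind the DMA resources of the share's groups to its VR, ask the
 *	hypervisor to assign them, and return the cookie (cfg_hdl plus
 *	HV cookie) that the guest domain will use.
 *
 * Context:
 *	Service domain
 */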
1771 int
1772 nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1773     uint64_t *rcookie)
1774 {
1775 	nxge_t			*nxge;
1776 	nxge_share_handle_t	*shp = (nxge_share_handle_t *)shandle;
1777 	nxge_hio_vr_t		*vr;
1778 	uint64_t		rmap, tmap, hv_rmap, hv_tmap;
1779 	int			rv;
1780 
1781 	nxge = shp->nxgep;
1782 	vr = (nxge_hio_vr_t *)shp->vrp;
1783 
1784 	/*
1785 	 * Add resources to the share.
1786 	 * For each DMA channel associated with the VR, bind its resources
1787 	 * to the VR.
1788 	 */
1789 	tmap = 0;
1790 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1791 	if (rv != 0) {
1792 		return (rv);
1793 	}
1794 
1795 	rmap = 0;
1796 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1797 	if (rv != 0) {
1798 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1799 		return (rv);
1800 	}
1801 
1802 	/*
1803 	 * Ask the Hypervisor to set up the VR and allocate slots for
1804 	 * each ring associated with the VR.
1805 	 */
1806 	hv_tmap = hv_rmap = 0;
1807 	if ((rv = nxge_hio_share_assign(nxge, cookie,
1808 	    &hv_tmap, &hv_rmap, vr))) {
1809 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1810 		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1811 		return (rv);
1812 	}
1813 
1814 	shp->active = B_TRUE;
1815 	shp->tmap = hv_tmap;
1816 	shp->rmap = hv_rmap;
1817 
1818 	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1819 	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1820 
1821 	return (0);
1822 }
1823 
1824 void
1825 nxge_hio_share_unbind(mac_share_handle_t shandle)
1826 {
1827 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1828 
1829 	/*
1830 	 * First, unassign the VR (take it back),
1831 	 * so we can enable interrupts again.
1832 	 */
1833 	nxge_hio_share_unassign(shp->vrp);
1834 
1835 	/*
1836 	 * Free Ring Resources for TX and RX
1837 	 */
1838 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1839 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1840 }
1841 
1842 
1843 /*
1844  * nxge_hio_vr_share
1845  *
1846  *	Find an unused Virtualization Region (VR).
1847  *
1848  * Arguments:
1849  * 	nxge
1850  *
1851  * Notes:
1852  *
1853  * Context:
1854  *	Service domain
1855  */
1856 nxge_hio_vr_t *
1857 nxge_hio_vr_share(
1858 	nxge_t *nxge)
1859 {
1860 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1861 	nxge_hio_vr_t *vr;
1862 
1863 	int first, limit, region;
1864 
1865 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1866 
1867 	MUTEX_ENTER(&nhd->lock);
1868 
1869 	if (nhd->vrs == 0) {
1870 		MUTEX_EXIT(&nhd->lock);
1871 		return (0);
1872 	}
1873 
1874 	/* Find an empty virtual region (VR). */
1875 	if (nxge->function_num == 0) {
1876 		// FUNC0_VIR0 'belongs' to NIU port 0.
1877 		first = FUNC0_VIR1;
1878 		limit = FUNC2_VIR0;
1879 	} else if (nxge->function_num == 1) {
1880 		// FUNC2_VIR0 'belongs' to NIU port 1.
1881 		first = FUNC2_VIR1;
1882 		limit = FUNC_VIR_MAX;
1883 	} else {
1884 		cmn_err(CE_WARN,
1885 		    "Shares not supported on function(%d) at this time.\n",
1886 		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
1887 	}
1888 
1889 	for (region = first; region < limit; region++) {
1890 		if (nhd->vr[region].nxge == 0)
1891 			break;
1892 	}
1893 
1894 	if (region == limit) {
1895 		MUTEX_EXIT(&nhd->lock);
1896 		return (0);
1897 	}
1898 
1899 	vr = &nhd->vr[region];
1900 	vr->nxge = (uintptr_t)nxge;
1901 	vr->region = (uintptr_t)region;
1902 
1903 	nhd->vrs--;
1904 
1905 	MUTEX_EXIT(&nhd->lock);
1906 
1907 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1908 
1909 	return (vr);
1910 }
1911 
1912 void
1913 nxge_hio_unshare(
1914 	nxge_hio_vr_t *vr)
1915 {
1916 	nxge_t *nxge = (nxge_t *)vr->nxge;
1917 	nxge_hio_data_t *nhd;
1918 
1919 	vr_region_t region;
1920 
1921 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1922 
1923 	if (!nxge) {
1924 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1925 		    "vr->nxge is NULL"));
1926 		return;
1927 	}
1928 
1929 	/*
1930 	 * This function is no longer called, but I will keep it
1931 	 * here in case we want to revisit this topic in the future.
1932 	 *
1933 	 * nxge_hio_hostinfo_uninit(nxge, vr);
1934 	 */
1935 
1936 	/*
1937 	 * XXX: This is done by ms_sremove?
1938 	 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1939 	 */
1940 
1941 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1942 
1943 	MUTEX_ENTER(&nhd->lock);
1944 
1945 	region = vr->region;
1946 	(void) memset(vr, 0, sizeof (*vr));
1947 	vr->region = region;
1948 
1949 	nhd->vrs++;
1950 
1951 	MUTEX_EXIT(&nhd->lock);
1952 
1953 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1954 }
1955 
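/*
 * nxge_hio_addres
 *
 *	Share each DMA channel of the group associated with this VR,
 *	recording the channels actually shared in <map>.
 *
 * Context:
 *	Service domain
 */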
1956 int
1957 nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
1958 {
1959 	nxge_t		*nxge = (nxge_t *)vr->nxge;
1960 	nxge_grp_t	*group;
1961 	int		groupid;
1962 	int		i, rv = 0;
1963 	int		max_dcs;
1964 
1965 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1966 
1967 	if (!nxge)
1968 		return (EINVAL);
1969 
1970 	/*
1971 	 * For each ring associated with the group, add the resources
1972 	 * to the group and bind.
1973 	 */
1974 	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1975 	if (type == MAC_RING_TYPE_TX) {
1976 		/* set->group is an array of groups indexed by a port group id */
1977 		groupid = vr->tdc_tbl -
1978 		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
1979 		group = nxge->tx_set.group[groupid];
1980 	} else {
1981 		/* set->group is an array of groups indexed by a port group id */
1982 		groupid = vr->rdc_tbl -
1983 		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1984 		group = nxge->rx_set.group[groupid];
1985 	}
1986 
1987 	if (group->map == 0) {
1988 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
1989 		    "with this VR"));
1990 		return (EINVAL);
1991 	}
1992 
1993 	for (i = 0; i < max_dcs; i++) {
1994 		if (group->map & (1 << i)) {
1995 			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
1996 				if (*map == 0) /* Couldn't get even one DC. */
1997 					return (-rv);
1998 				else
1999 					break;
2000 			}
2001 			*map |= (1 << i);
2002 		}
2003 	}
2004 
2005 	if ((*map == 0) || (rv != 0)) {
2006 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
2007 		    "<== nxge_hio_addres: rv(%x)", rv));
2008 		return (EIO);
2009 	}
2010 
2011 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
2012 	return (0);
2013 }
2014 
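/*
 * nxge_hio_remres
 *
 *	Un-share the DMA channels of a VR's Tx or Rx group; the inverse
 *	of nxge_hio_addres().
 *
 * Context:
 *	Service domain
 */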
2015 /* ARGSUSED */
2016 void
2017 nxge_hio_remres(
2018 	nxge_hio_vr_t *vr,
2019 	mac_ring_type_t type,
2020 	res_map_t res_map)
2021 {
2022 	nxge_t *nxge = (nxge_t *)vr->nxge;
2023 	nxge_grp_t *group;
2024 
2025 	if (!nxge) {
2026 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2027 		    "vr->nxge is NULL"));
2028 		return;
2029 	}
2030 
2031 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
2032 
2033 	/*
2034 	 * For each ring bound to the group, remove the DMA resources
2035 	 * from the group and unbind.
2036 	 */
2037 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2038 	while (group->dc) {
2039 		nxge_hio_dc_t *dc = group->dc;
2040 		NXGE_DC_RESET(res_map, dc->page);
2041 		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
2042 	}
2043 
2044 	if (res_map) {
2045 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2046 		    "res_map %lx", res_map));
2047 	}
2048 
2049 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
2050 }
2051 
2052 /*
2053  * nxge_hio_tdc_share
2054  *
2055  *	Share an unused TDC channel.
2056  *
2057  * Arguments:
2058  * 	nxge
2059  *
2060  * Notes:
2061  *
2062  * A.7.3 Reconfigure Tx DMA channel
2063  *	Disable TxDMA			A.9.6.10
2064  *     [Rebind TxDMA channel to Port	A.9.6.7]
2065  *
2066  * We don't have to rebind the TDC to the port - it is always already bound.
2067  *
2068  *	Soft Reset TxDMA		A.9.6.2
2069  *
2070  * This procedure will be executed by nxge_init_txdma_channel() in the
2071  * guest domain:
2072  *
2073  *	Re-initialize TxDMA		A.9.6.8
2074  *	Reconfigure TxDMA
2075  *	Enable TxDMA			A.9.6.9
2076  *
2077  * Context:
2078  *	Service domain
2079  */
2080 int
2081 nxge_hio_tdc_share(
2082 	nxge_t *nxge,
2083 	int channel)
2084 {
2085 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2086 	nxge_grp_set_t *set = &nxge->tx_set;
2087 	tx_ring_t *ring;
2088 	int count;
2089 
2090 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));
2091 
2092 	/*
2093 	 * Wait until this channel is idle.
2094 	 */
2095 	ring = nxge->tx_rings->rings[channel];
2096 	ASSERT(ring != NULL);
2097 
2098 	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
2099 	if (ring->tx_ring_busy) {
2100 		/*
2101 		 * Wait for 30 seconds.
2102 		 */
2103 		for (count = 30 * 1000; count; count--) {
2104 			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
2105 				break;
2106 			}
2107 
2108 			drv_usecwait(1000);
2109 		}
2110 
2111 		if (count == 0) {
2112 			(void) atomic_swap_32(&ring->tx_ring_offline,
2113 			    NXGE_TX_RING_ONLINE);
2114 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2115 			    "nxge_hio_tdc_share: "
2116 			    "Tx ring %d was always BUSY", channel));
2117 			return (-EIO);
2118 		}
2119 	} else {
2120 		(void) atomic_swap_32(&ring->tx_ring_offline,
2121 		    NXGE_TX_RING_OFFLINED);
2122 	}
2123 
2124 	MUTEX_ENTER(&nhd->lock);
2125 	nxge->tdc_is_shared[channel] = B_TRUE;
2126 	MUTEX_EXIT(&nhd->lock);
2127 
2128 	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2129 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
2130 		    "Failed to remove interrupt for TxDMA channel %d",
2131 		    channel));
2132 		return (-EINVAL);
2133 	}
2134 
2135 	/* Disable TxDMA A.9.6.10 */
2136 	(void) nxge_txdma_channel_disable(nxge, channel);
2137 
2138 	/* The SD is sharing this channel. */
2139 	NXGE_DC_SET(set->shared.map, channel);
2140 	set->shared.count++;
2141 
2142 	/* Soft Reset TxDMA A.9.6.2 */
2143 	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
2144 
2145 	/*
2146 	 * Initialize the DC-specific FZC control registers.
2147 	 * -----------------------------------------------------
2148 	 */
2149 	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
2150 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2151 		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
2152 		return (-EIO);
2153 	}
2154 
2155 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));
2156 
2157 	return (0);
2158 }
2159 
2160 /*
2161  * nxge_hio_rdc_share
2162  *
2163  *	Share an unused RDC channel.
2164  *
2165  * Arguments:
2166  * 	nxge
2167  *
2168  * Notes:
2169  *
2170  * This is the latest version of the procedure to
2171  * Reconfigure an Rx DMA channel:
2172  *
2173  * A.6.3 Reconfigure Rx DMA channel
2174  *	Stop RxMAC		A.9.2.6
2175  *	Drain IPP Port		A.9.3.6
2176  *	Stop and reset RxDMA	A.9.5.3
2177  *
2178  * This procedure will be executed by nxge_init_rxdma_channel() in the
2179  * guest domain:
2180  *
2181  *	Initialize RxDMA	A.9.5.4
2182  *	Reconfigure RxDMA
2183  *	Enable RxDMA		A.9.5.5
2184  *
2185  * We will do this here, since the shared RDC is no longer part of the
 * SD's configuration:
2186  *	Enable RxMAC		A.9.2.10
2187  *
2188  * Context:
2189  *	Service domain
2190  */
2191 int
2192 nxge_hio_rdc_share(
2193 	nxge_t *nxge,
2194 	nxge_hio_vr_t *vr,
2195 	int channel)
2196 {
2197 	nxge_grp_set_t *set = &nxge->rx_set;
2198 	nxge_rdc_grp_t *rdc_grp;
2199 
2200 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2201 
2202 	/* Disable interrupts. */
2203 	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2204 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2205 		    "Failed to remove interrupt for RxDMA channel %d",
2206 		    channel));
2207 		return (NXGE_ERROR);
2208 	}
2209 
2210 	/* Stop RxMAC = A.9.2.6 */
2211 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2212 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2213 		    "Failed to disable RxMAC"));
2214 	}
2215 
2216 	/* Drain IPP Port = A.9.3.6 */
2217 	(void) nxge_ipp_drain(nxge);
2218 
2219 	/* Stop and reset RxDMA = A.9.5.3 */
2220 	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2221 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2222 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2223 		    "Failed to disable RxDMA channel %d", channel));
2224 	}
2225 
2226 	/* The SD is sharing this channel. */
2227 	NXGE_DC_SET(set->shared.map, channel);
2228 	set->shared.count++;
2229 
2230 	/* Assert RST: RXDMA_CFIG1[30] = 1 */
2231 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
2232 
2233 	/*
2234 	 * The guest domain will reconfigure the RDC later.
2235 	 *
2236 	 * But in the meantime, we must re-enable the Rx MAC so
2237 	 * that we can start receiving packets again on the
2238 	 * remaining RDCs:
2239 	 *
2240 	 * Enable RxMAC = A.9.2.10
2241 	 */
2242 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2243 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2244 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2245 	}
2246 
2247 	/*
2248 	 * Initialize the DC-specific FZC control registers.
2249 	 * -----------------------------------------------------
2250 	 */
2251 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2252 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2253 		    "nxge_hio_rdc_share: RZC RDC failed: %ld", channel));
2254 		return (-EIO);
2255 	}
2256 
2257 	/*
2258 	 * Update the RDC group.
2259 	 */
2260 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
2261 	NXGE_DC_SET(rdc_grp->map, channel);
2262 
2263 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2264 
2265 	return (0);
2266 }
2267 
2268 /*
2269  * nxge_hio_dc_share
2270  *
2271  *	Share a DMA channel with a guest domain.
2272  *
2273  * Arguments:
2274  * 	nxge
2275  * 	vr	The VR that <channel> will belong to.
2276  * 	type	Tx or Rx.
2277  * 	channel	Channel to share
2278  *
2279  * Notes:
2280  *
2281  * Context:
2282  *	Service domain
2283  */
2284 int
2285 nxge_hio_dc_share(
2286 	nxge_t *nxge,
2287 	nxge_hio_vr_t *vr,
2288 	mac_ring_type_t type,
2289 	int channel)
2290 {
2291 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2292 	nxge_hio_dc_t *dc;
2293 	nxge_grp_t *group;
2294 	int slot;
2295 
2296 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
2297 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2298 
2300 	/* -------------------------------------------------- */
2301 	slot = (type == MAC_RING_TYPE_TX) ?
2302 	    nxge_hio_tdc_share(nxge, channel) :
2303 	    nxge_hio_rdc_share(nxge, vr, channel);
2304 
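	/*
	 * A negative return value means the share failed; undo any
	 * partial sharing before returning the error to the caller.
	 */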
2305 	if (slot < 0) {
2306 		if (type == MAC_RING_TYPE_RX) {
2307 			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2308 		} else {
2309 			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2310 		}
2311 		return (slot);
2312 	}
2313 
2314 	MUTEX_ENTER(&nhd->lock);
2315 
2316 	/*
2317 	 * Tag this channel.
2318 	 * --------------------------------------------------
2319 	 */
2320 	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];
2321 
2322 	dc->vr = vr;
2323 	dc->channel = (nxge_channel_t)channel;
2324 
2325 	MUTEX_EXIT(&nhd->lock);
2326 
2327 	/*
2328 	 * vr->[t|r]x_group is used by the service domain to
2329 	 * keep track of its shared DMA channels.
2330 	 */
2331 	MUTEX_ENTER(&nxge->group_lock);
2332 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2333 
2334 	dc->group = group;
2335 	/* Initialize <group>, if necessary */
2336 	if (group->count == 0) {
2337 		group->nxge = nxge;
2338 		group->type = (type == MAC_RING_TYPE_TX) ?
2339 		    VP_BOUND_TX : VP_BOUND_RX;
2340 		group->sequence	= nhd->sequence++;
2341 		group->active = B_TRUE;
2342 	}
2343 
2344 	MUTEX_EXIT(&nxge->group_lock);
2345 
2346 	NXGE_ERROR_MSG((nxge, HIO_CTL,
2347 	    "DC share: %cDC %d was assigned to slot %d",
2348 	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));
2349 
2350 	nxge_grp_dc_append(nxge, group, dc);
2351 
2352 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));
2353 
2354 	return (0);
2355 }
2356 
2357 /*
2358  * nxge_hio_tdc_unshare
2359  *
2360  *	Unshare a TDC.
2361  *
2362  * Arguments:
2363  * 	nxge
 * 	dev_grpid	The device group id to which <channel> is returned.
2364  * 	channel	The channel to unshare (add again).
2365  *
2366  * Notes:
2367  *
2368  * Context:
2369  *	Service domain
2370  */
2371 void
2372 nxge_hio_tdc_unshare(
2373 	nxge_t *nxge,
2374 	int dev_grpid,
2375 	int channel)
2376 {
2377 	nxge_grp_set_t *set = &nxge->tx_set;
2378 	nxge_grp_t *group;
2379 	int grpid;
2380 
2381 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));
2382 
2383 	NXGE_DC_RESET(set->shared.map, channel);
2384 	set->shared.count--;
2385 
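	/*
	 * Convert the device-wide TDC group id back into this port's
	 * group index; def_mac_txdma_grpid is assumed to be the first
	 * TDC group id assigned to this port.
	 */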
2386 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
2387 	group = set->group[grpid];
2388 
2389 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
2390 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2391 		    "Failed to initialize TxDMA channel %d", channel));
2392 		return;
2393 	}
2394 
2395 	/* Re-add this interrupt. */
2396 	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2397 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
2398 		    "Failed to add interrupt for TxDMA channel %d", channel));
2399 	}
2400 
2401 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
2402 }
2403 
2404 /*
2405  * nxge_hio_rdc_unshare
2406  *
2407  *	Unshare an RDC: add it to the SD's RDC groups (tables).
2408  *
2409  * Arguments:
2410  * 	nxge
 * 	dev_grpid	The device RDC group (table) id to which <channel> is returned.
2411  * 	channel	The channel to unshare (add again).
2412  *
2413  * Notes:
2414  *
2415  * Context:
2416  *	Service domain
2417  */
2418 void
2419 nxge_hio_rdc_unshare(
2420 	nxge_t *nxge,
2421 	int dev_grpid,
2422 	int channel)
2423 {
2424 	nxge_grp_set_t		*set = &nxge->rx_set;
2425 	nxge_grp_t		*group;
2426 	int			grpid;
2427 
2428 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2429 
2430 	/* Stop RxMAC = A.9.2.6 */
2431 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2432 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2433 		    "Failed to disable RxMAC"));
2434 	}
2435 
2436 	/* Drain IPP Port = A.9.3.6 */
2437 	(void) nxge_ipp_drain(nxge);
2438 
2439 	/* Stop and reset RxDMA = A.9.5.3 */
2440 	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
2441 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2442 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2443 		    "Failed to disable RxDMA channel %d", channel));
2444 	}
2445 
2446 	NXGE_DC_RESET(set->shared.map, channel);
2447 	set->shared.count--;
2448 
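	/*
	 * Convert the device-wide RDC group id back into this port's
	 * group index; def_mac_rxdma_grpid is assumed to be the first
	 * RDC group id assigned to this port.
	 */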
2449 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2450 	group = set->group[grpid];
2451 
2452 	/*
2453 	 * Assert RST: RXDMA_CFIG1[30] = 1
2454 	 *
2455 	 * Initialize RxDMA	A.9.5.4
2456 	 * Reconfigure RxDMA
2457 	 * Enable RxDMA		A.9.5.5
2458 	 */
2459 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2460 		/* Be sure to re-enable the RX MAC. */
2461 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2462 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2463 			    "nxge_hio_rdc_share: Rx MAC still disabled"));
2464 		}
2465 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2466 		    "Failed to initialize RxDMA channel %d", channel));
2467 		return;
2468 	}
2469 
2470 	/*
2471 	 * Enable RxMAC = A.9.2.10
2472 	 */
2473 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2474 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2475 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2476 		return;
2477 	}
2478 
2479 	/* Re-add this interrupt. */
2480 	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2481 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2482 		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
2483 		    "RxDMA CHANNEL %d", channel));
2484 	}
2485 
2486 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
2487 }
2488 
2489 /*
2490  * nxge_hio_dc_unshare
2491  *
2492  *	Unshare (reuse) a DMA channel.
2493  *
2494  * Arguments:
2495  * 	nxge
2496  * 	vr	The VR that <channel> belongs to.
2497  * 	type	Tx or Rx.
2498  * 	channel	The DMA channel to reuse.
2499  *
2500  * Notes:
2501  *
2502  * Context:
2503  *	Service domain
2504  */
2505 void
2506 nxge_hio_dc_unshare(
2507 	nxge_t *nxge,
2508 	nxge_hio_vr_t *vr,
2509 	mac_ring_type_t type,
2510 	int channel)
2511 {
2512 	nxge_grp_t *group;
2513 	nxge_hio_dc_t *dc;
2514 
2515 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
2516 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2517 
2518 	/* Unlink the channel from its group. */
2519 	/* -------------------------------------------------- */
2520 	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
2521 	NXGE_DC_RESET(group->map, channel);
2522 	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == NULL) {
2523 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2524 		    "nxge_hio_dc_unshare(%d) failed", channel));
2525 		return;
2526 	}
2527 
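	/*
	 * Detach the descriptor from the VR; the channel now reverts
	 * to the service domain.
	 */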
2528 	dc->vr = 0;
2529 	dc->cookie = 0;
2530 
2531 	if (type == MAC_RING_TYPE_RX) {
2532 		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
2533 	} else {
2534 		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
2535 	}
2536 
2537 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
2538 }
2539 
2540 
2541 /*
2542  * nxge_hio_rxdma_bind_intr():
2543  *
2544  *	For the guest domain driver, need to bind the interrupt group
2545  *	and state to the rx_rcr_ring_t.
2546  */
2547 
2548 int
2549 nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
2550 {
2551 	nxge_hio_dc_t	*dc;
2552 	nxge_ldgv_t	*control;
2553 	nxge_ldg_t	*group;
2554 	nxge_ldv_t	*device;
2555 
2556 	/*
2557 	 * Find the DMA channel.
2558 	 */
2559 	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
2560 		return (NXGE_ERROR);
2561 	}
2562 
2563 	/*
2564 	 * Get the control structure.
2565 	 */
2566 	control = nxge->ldgvp;
2567 	if (control == NULL) {
2568 		return (NXGE_ERROR);
2569 	}
2570 
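	/*
	 * Look up the logical device group (LDG) and logical device
	 * (LDV) for this RDC's interrupt, using the vector and LDSV
	 * indices recorded in dc->ldg for this shared channel.
	 */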
2571 	group = &control->ldgp[dc->ldg.vector];
2572 	device = &control->ldvp[dc->ldg.ldsv];
2573 
2574 	MUTEX_ENTER(&ring->lock);
2575 	ring->ldgp = group;
2576 	ring->ldvp = device;
2577 	MUTEX_EXIT(&ring->lock);
2578 
2579 	return (NXGE_OK);
2580 }
2581 #endif	/* if defined(sun4v) */
2582