1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * nxge_hio.c
29 *
30 * This file manages the virtualization resources for Neptune
31 * devices. That is, it implements a hybrid I/O (HIO) approach in the
32 * Solaris kernel, whereby a guest domain on an LDOMs server may
33 * request & use hardware resources from the service domain.
34 *
35 */
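/*
 * Roughly, this file is organized as follows (see the banner comments
 * below): environment detection, general-purpose DMA channel (TDC/RDC)
 * group management, HIO debugging helpers, and, under #if defined(sun4v),
 * the share/VR management functions used by the service domain.
 */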
36
37 #include <sys/mac_provider.h>
38 #include <sys/nxge/nxge_impl.h>
39 #include <sys/nxge/nxge_fzc.h>
40 #include <sys/nxge/nxge_rxdma.h>
41 #include <sys/nxge/nxge_txdma.h>
42 #include <sys/nxge/nxge_hio.h>
43
44 /*
45 * External prototypes
46 */
47 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
48
49 /* The following functions may be found in nxge_main.c */
50 extern int nxge_m_mmac_remove(void *arg, int slot);
51 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
52 boolean_t usetbl);
53 extern int nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num);
54
55 /* The following functions may be found in nxge_[t|r]xdma.c */
56 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
57 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
58
59 /*
60 * Local prototypes
61 */
62 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
63 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
64 static void nxge_grp_dc_map(nxge_grp_t *group);
65
66 /*
67 * These functions are used by both service & guest domains to
68 * decide whether they're running in an LDOMs/XEN environment
69 * or not. If so, then the Hybrid I/O (HIO) module is initialized.
70 */
71
72 /*
73 * nxge_get_environs
74 *
75 * Figure out if we are in a guest domain or not.
76 *
77 * Arguments:
78 * nxge
79 *
80 * Notes:
81 *
82 * Context:
83 * Any domain
84 */
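/*
 * Note: the "niutype" property checked below is expected to be present
 * (with the value "n2niu") only on the virtualized NIU node exported to
 * a guest domain; its presence is what marks this instance as a hybrid
 * I/O guest.
 */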
85 void
86 nxge_get_environs(
87 nxge_t *nxge)
88 {
89 char *string;
90
91 /*
92 * In the beginning, assume that we are running sans LDOMs/XEN.
93 */
94 nxge->environs = SOLARIS_DOMAIN;
95
96 /*
97 * Are we a hybrid I/O (HIO) guest domain driver?
98 */
99 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
100 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
101 "niutype", &string)) == DDI_PROP_SUCCESS) {
102 if (strcmp(string, "n2niu") == 0) {
103 nxge->environs = SOLARIS_GUEST_DOMAIN;
104 /* So we can allocate properly-aligned memory. */
105 nxge->niu_type = N2_NIU;
106 NXGE_DEBUG_MSG((nxge, HIO_CTL,
107 "Hybrid IO-capable guest domain"));
108 }
109 ddi_prop_free(string);
110 }
111 }
112
113 #if !defined(sun4v)
114
115 /*
116 * nxge_hio_init
117 *
118 * Initialize the HIO module of the NXGE driver.
119 *
120 * Arguments:
121 * nxge
122 *
123 * Notes:
124 * This is the non-hybrid I/O version of this function.
125 *
126 * Context:
127 * Any domain
128 */
129 int
130 nxge_hio_init(nxge_t *nxge)
131 {
132 nxge_hio_data_t *nhd;
133 int i;
134
135 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
136 if (nhd == NULL) {
137 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
138 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
139 nhd->type = NXGE_HIO_TYPE_SERVICE;
140 nxge->nxge_hw_p->hio = (uintptr_t)nhd;
141 }
142
143 /*
144 * Initialize share and ring group structures.
145 */
146 for (i = 0; i < NXGE_MAX_TDCS; i++)
147 nxge->tdc_is_shared[i] = B_FALSE;
148
149 for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
150 nxge->tx_hio_groups[i].ghandle = NULL;
151 nxge->tx_hio_groups[i].nxgep = nxge;
152 nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
153 nxge->tx_hio_groups[i].gindex = 0;
154 nxge->tx_hio_groups[i].sindex = 0;
155 }
156
157 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
158 nxge->rx_hio_groups[i].ghandle = NULL;
159 nxge->rx_hio_groups[i].nxgep = nxge;
160 nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
161 nxge->rx_hio_groups[i].gindex = 0;
162 nxge->rx_hio_groups[i].sindex = 0;
163 nxge->rx_hio_groups[i].started = B_FALSE;
164 nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
165 nxge->rx_hio_groups[i].rdctbl = -1;
166 nxge->rx_hio_groups[i].n_mac_addrs = 0;
167 }
168
169 nhd->hio.ldoms = B_FALSE;
170
171 return (NXGE_OK);
172 }
173
174 #endif
175
176 void
177 nxge_hio_uninit(nxge_t *nxge)
178 {
179 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
180
181 ASSERT(nxge->nxge_hw_p->ndevs == 0);
182
183 if (nhd != NULL) {
184 MUTEX_DESTROY(&nhd->lock);
185 KMEM_FREE(nhd, sizeof (*nhd));
186 nxge->nxge_hw_p->hio = 0;
187 }
188 }
189
190 /*
191 * nxge_dci_map
192 *
193 * Map a DMA channel index to a channel number.
194 *
195 * Arguments:
196 * instance The instance number of the driver.
197 * type The type of channel this is: Tx or Rx.
198 * index The index to convert to a channel number
199 *
200 * Notes:
201 * This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
202 *
203 * Context:
204 * Any domain
205 */
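/*
 * Illustrative example (not from the original source): if this instance
 * owns channels 2 and 3, then set->owned.map == 0x0c, so an index of 0
 * maps to channel 2 and an index of 1 maps to channel 3.
 */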
206 int
207 nxge_dci_map(nxge_t *nxge, vpc_type_t type, int index)
208 {
209 nxge_grp_set_t *set;
210 int dc;
211
212 switch (type) {
213 case VP_BOUND_TX:
214 set = &nxge->tx_set;
215 break;
216 case VP_BOUND_RX:
217 set = &nxge->rx_set;
218 break;
219 default:
220 return (-1);
221 }
222
223 for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
224 if ((1 << dc) & set->owned.map) {
225 if (index == 0)
226 return (dc);
227 else
228 index--;
229 }
230 }
231
232 return (-1);
233 }
234
235 /*
236 * ---------------------------------------------------------------------
237 * These are the general-purpose DMA channel group functions. That is,
238 * these functions are used to manage groups of TDCs or RDCs in an HIO
239 * environment.
240 *
241  * But it is also expected that in the future they will be able to manage
242 * Crossbow groups.
243 * ---------------------------------------------------------------------
244 */
245
246 /*
247 * nxge_grp_cleanup(p_nxge_t nxge)
248 *
249 * Remove all outstanding groups.
250 *
251 * Arguments:
252 * nxge
253 */
254 void
255 nxge_grp_cleanup(p_nxge_t nxge)
256 {
257 nxge_grp_set_t *set;
258 int i;
259
260 MUTEX_ENTER(&nxge->group_lock);
261
262 /*
263 * Find RX groups that need to be cleaned up.
264 */
265 set = &nxge->rx_set;
266 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
267 if (set->group[i] != NULL) {
268 KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
269 set->group[i] = NULL;
270 }
271 }
272
273 /*
274 * Find TX groups that need to be cleaned up.
275 */
276 set = &nxge->tx_set;
277 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
278 if (set->group[i] != NULL) {
279 KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
280 set->group[i] = NULL;
281 }
282 }
283 MUTEX_EXIT(&nxge->group_lock);
284 }
285
286
287 /*
288 * nxge_grp_add
289 *
290 * Add a group to an instance of NXGE.
291 *
292 * Arguments:
293 * nxge
294 * type Tx or Rx
295 *
296 * Notes:
297 *
298 * Context:
299 * Any domain
300 */
301 nxge_grp_t *
302 nxge_grp_add(
303 nxge_t *nxge,
304 nxge_grp_type_t type)
305 {
306 nxge_grp_set_t *set;
307 nxge_grp_t *group;
308 int i;
309
310 group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
311 group->nxge = nxge;
312
313 MUTEX_ENTER(&nxge->group_lock);
314 switch (type) {
315 case NXGE_TRANSMIT_GROUP:
316 case EXT_TRANSMIT_GROUP:
317 set = &nxge->tx_set;
318 break;
319 default:
320 set = &nxge->rx_set;
321 break;
322 }
323
324 group->type = type;
325 group->active = B_TRUE;
326 group->sequence = set->sequence++;
327
328 /* Find an empty slot for this logical group. */
329 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
330 if (set->group[i] == 0) {
331 group->index = i;
332 set->group[i] = group;
333 NXGE_DC_SET(set->lg.map, i);
334 set->lg.count++;
335 break;
336 }
337 }
338 MUTEX_EXIT(&nxge->group_lock);
339
340 NXGE_DEBUG_MSG((nxge, HIO_CTL,
341 "nxge_grp_add: %cgroup = %d.%d",
342 type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
343 nxge->mac.portnum, group->sequence));
344
345 return (group);
346 }
347
348 void
349 nxge_grp_remove(
350 nxge_t *nxge,
351 nxge_grp_t *group) /* The group to remove. */
352 {
353 nxge_grp_set_t *set;
354 vpc_type_t type;
355
356 if (group == NULL)
357 return;
358
359 MUTEX_ENTER(&nxge->group_lock);
360 switch (group->type) {
361 case NXGE_TRANSMIT_GROUP:
362 case EXT_TRANSMIT_GROUP:
363 set = &nxge->tx_set;
364 break;
365 default:
366 set = &nxge->rx_set;
367 break;
368 }
369
370 if (set->group[group->index] != group) {
371 MUTEX_EXIT(&nxge->group_lock);
372 return;
373 }
374
375 set->group[group->index] = 0;
376 NXGE_DC_RESET(set->lg.map, group->index);
377 set->lg.count--;
378
379 /* While inside the mutex, deactivate <group>. */
380 group->active = B_FALSE;
381
382 MUTEX_EXIT(&nxge->group_lock);
383
384 NXGE_DEBUG_MSG((nxge, HIO_CTL,
385 "nxge_grp_remove(%c.%d.%d) called",
386 group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
387 nxge->mac.portnum, group->sequence));
388
389 /* Now, remove any DCs which are still active. */
390 switch (group->type) {
391 default:
392 type = VP_BOUND_TX;
393 break;
394 case NXGE_RECEIVE_GROUP:
395 case EXT_RECEIVE_GROUP:
396 type = VP_BOUND_RX;
397 }
398
399 while (group->dc) {
400 nxge_grp_dc_remove(nxge, type, group->dc->channel);
401 }
402
403 KMEM_FREE(group, sizeof (*group));
404 }
405
406 /*
407 * nxge_grp_dc_add
408 *
409 * Add a DMA channel to a VR/Group.
410 *
411 * Arguments:
412 * nxge
413 * channel The channel to add.
414 * Notes:
415 *
416 * Context:
417 * Any domain
418 */
419 /* ARGSUSED */
420 int
421 nxge_grp_dc_add(
422 nxge_t *nxge,
423 nxge_grp_t *group, /* The group to add <channel> to. */
424 vpc_type_t type, /* Rx or Tx */
425 int channel) /* A physical/logical channel number */
426 {
427 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
428 nxge_hio_dc_t *dc;
429 nxge_grp_set_t *set = NULL;
430 nxge_status_t status = NXGE_OK;
431 int error = 0;
432
433 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
434
435 if (group == 0)
436 return (0);
437
438 switch (type) {
439 case VP_BOUND_TX:
440 set = &nxge->tx_set;
441 		if (channel >= NXGE_MAX_TDCS) {
442 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
443 "nxge_grp_dc_add: TDC = %d", channel));
444 return (NXGE_ERROR);
445 }
446 break;
447 case VP_BOUND_RX:
448 set = &nxge->rx_set;
449 		if (channel >= NXGE_MAX_RDCS) {
450 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
451 "nxge_grp_dc_add: RDC = %d", channel));
452 return (NXGE_ERROR);
453 }
454 break;
455
456 default:
457 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
458 		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
		return (NXGE_ERROR);
459 	}
460
461 NXGE_DEBUG_MSG((nxge, HIO_CTL,
462 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
463 type == VP_BOUND_TX ? 't' : 'r',
464 nxge->mac.portnum, group->sequence, group->count, channel));
465
466 MUTEX_ENTER(&nxge->group_lock);
467 if (group->active != B_TRUE) {
468 /* We may be in the process of removing this group. */
469 MUTEX_EXIT(&nxge->group_lock);
470 return (NXGE_ERROR);
471 }
472 MUTEX_EXIT(&nxge->group_lock);
473
474 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
475 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
476 "nxge_grp_dc_add(%d): DC FIND failed", channel));
477 return (NXGE_ERROR);
478 }
479
480 MUTEX_ENTER(&nhd->lock);
481
482 if (dc->group) {
483 MUTEX_EXIT(&nhd->lock);
484 /* This channel is already in use! */
485 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
486 "nxge_grp_dc_add(%d): channel already in group", channel));
487 return (NXGE_ERROR);
488 }
489
490 dc->next = 0;
491 dc->page = channel;
492 dc->channel = (nxge_channel_t)channel;
493
494 dc->type = type;
495 if (type == VP_BOUND_RX) {
496 dc->init = nxge_init_rxdma_channel;
497 dc->uninit = nxge_uninit_rxdma_channel;
498 } else {
499 dc->init = nxge_init_txdma_channel;
500 dc->uninit = nxge_uninit_txdma_channel;
501 }
502
503 dc->group = group;
504
505 if (isLDOMguest(nxge)) {
506 error = nxge_hio_ldsv_add(nxge, dc);
507 if (error != 0) {
508 MUTEX_EXIT(&nhd->lock);
509 return (NXGE_ERROR);
510 }
511 }
512
513 NXGE_DC_SET(set->owned.map, channel);
514 set->owned.count++;
515
516 MUTEX_EXIT(&nhd->lock);
517
518 if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
519 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
520 "nxge_grp_dc_add(%d): channel init failed", channel));
521 MUTEX_ENTER(&nhd->lock);
522 (void) memset(dc, 0, sizeof (*dc));
523 NXGE_DC_RESET(set->owned.map, channel);
524 set->owned.count--;
525 MUTEX_EXIT(&nhd->lock);
526 return (NXGE_ERROR);
527 }
528
529 nxge_grp_dc_append(nxge, group, dc);
530
531 if (type == VP_BOUND_TX) {
532 MUTEX_ENTER(&nhd->lock);
533 nxge->tdc_is_shared[channel] = B_FALSE;
534 MUTEX_EXIT(&nhd->lock);
535 }
536
537 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
538
539 return ((int)status);
540 }
541
542 void
543 nxge_grp_dc_remove(nxge_t *nxge, vpc_type_t type, int channel)
544 {
545 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
546 nxge_hio_dc_t *dc;
547 nxge_grp_set_t *set;
548 nxge_grp_t *group;
549
550 dc_uninit_t uninit;
551
552 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
553
554 if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
555 goto nxge_grp_dc_remove_exit;
556
557 if ((dc->group == NULL) && (dc->next == 0) &&
558 (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
559 goto nxge_grp_dc_remove_exit;
560 }
561
562 group = (nxge_grp_t *)dc->group;
563
564 if (isLDOMguest(nxge)) {
565 (void) nxge_hio_intr_remove(nxge, type, channel);
566 }
567
568 NXGE_DEBUG_MSG((nxge, HIO_CTL,
569 "DC remove: group = %d.%d.%d, %cdc %d",
570 nxge->mac.portnum, group->sequence, group->count,
571 type == VP_BOUND_TX ? 't' : 'r', dc->channel));
572
573 MUTEX_ENTER(&nhd->lock);
574
575 set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
576
577 /* Remove the DC from its group. */
578 if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
579 MUTEX_EXIT(&nhd->lock);
580 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
581 "nxge_grp_dc_remove(%d) failed", channel));
582 goto nxge_grp_dc_remove_exit;
583 }
584
585 uninit = dc->uninit;
586 channel = dc->channel;
587
588 NXGE_DC_RESET(set->owned.map, channel);
589 set->owned.count--;
590
591 (void) memset(dc, 0, sizeof (*dc));
592
593 MUTEX_EXIT(&nhd->lock);
594
595 (*uninit)(nxge, channel);
596
597 nxge_grp_dc_remove_exit:
598 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
599 }
600
601 nxge_hio_dc_t *
602 nxge_grp_dc_find(
603 nxge_t *nxge,
604 vpc_type_t type, /* Rx or Tx */
605 int channel)
606 {
607 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
608 nxge_hio_dc_t *current;
609
610 current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
611
612 if (!isLDOMguest(nxge)) {
613 		return (&current[channel]);
614 } else {
615 /* We're in a guest domain. */
616 int i, limit = (type == VP_BOUND_TX) ?
617 NXGE_MAX_TDCS : NXGE_MAX_RDCS;
618
619 MUTEX_ENTER(&nhd->lock);
620 for (i = 0; i < limit; i++, current++) {
621 if (current->channel == channel) {
622 if (current->vr && current->vr->nxge ==
623 (uintptr_t)nxge) {
624 MUTEX_EXIT(&nhd->lock);
625 return (current);
626 }
627 }
628 }
629 MUTEX_EXIT(&nhd->lock);
630 }
631
632 return (0);
633 }
634
635 /*
636 * nxge_grp_dc_append
637 *
638 * Append a DMA channel to a group.
639 *
640 * Arguments:
641 * nxge
642 * group The group to append to
643 * dc The DMA channel to append
644 *
645 * Notes:
646 *
647 * Context:
648 * Any domain
649 */
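/*
 * The group's DCs are kept on a singly-linked list (group->dc), in the
 * order in which they were appended.  group->map and group->legend are
 * kept in sync with that list via nxge_grp_dc_map().
 */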
650 static
651 void
652 nxge_grp_dc_append(
653 nxge_t *nxge,
654 nxge_grp_t *group,
655 nxge_hio_dc_t *dc)
656 {
657 MUTEX_ENTER(&nxge->group_lock);
658
659 if (group->dc == 0) {
660 group->dc = dc;
661 } else {
662 nxge_hio_dc_t *current = group->dc;
663 do {
664 if (current->next == 0) {
665 current->next = dc;
666 break;
667 }
668 current = current->next;
669 } while (current);
670 }
671
672 NXGE_DC_SET(group->map, dc->channel);
673
674 nxge_grp_dc_map(group);
675 group->count++;
676
677 MUTEX_EXIT(&nxge->group_lock);
678 }
679
680 /*
681 * nxge_grp_dc_unlink
682 *
683  * Unlink a DMA channel from its linked list (group).
684 *
685 * Arguments:
686 * nxge
687 * group The group (linked list) to unlink from
688  *	channel	The number of the DMA channel to unlink
689 *
690 * Notes:
691 *
692 * Context:
693 * Any domain
694 */
695 nxge_hio_dc_t *
696 nxge_grp_dc_unlink(
697 nxge_t *nxge,
698 nxge_grp_t *group,
699 int channel)
700 {
701 nxge_hio_dc_t *current, *previous;
702
703 MUTEX_ENTER(&nxge->group_lock);
704
705 if (group == NULL) {
706 MUTEX_EXIT(&nxge->group_lock);
707 return (0);
708 }
709
710 if ((current = group->dc) == 0) {
711 MUTEX_EXIT(&nxge->group_lock);
712 return (0);
713 }
714
715 previous = 0;
716 do {
717 if (current->channel == channel) {
718 if (previous)
719 previous->next = current->next;
720 else
721 group->dc = current->next;
722 break;
723 }
724 previous = current;
725 current = current->next;
726 } while (current);
727
728 if (current == 0) {
729 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
730 "DC unlink: DC %d not found", channel));
731 } else {
732 current->next = 0;
733 current->group = 0;
734
735 NXGE_DC_RESET(group->map, channel);
736 group->count--;
737 }
738
739 nxge_grp_dc_map(group);
740
741 MUTEX_EXIT(&nxge->group_lock);
742
743 return (current);
744 }
745
746 /*
747 * nxge_grp_dc_map
748 *
749 * Map a linked list to an array of channel numbers.
750 *
751 * Arguments:
752  *	group	The group to remap.
754 *
755 * Notes:
756 * It is expected that the caller will hold the correct mutex.
757 *
758 * Context:
759 * Service domain
760 */
761 void
762 nxge_grp_dc_map(
763 nxge_grp_t *group)
764 {
765 nxge_channel_t *legend;
766 nxge_hio_dc_t *dc;
767
768 (void) memset(group->legend, 0, sizeof (group->legend));
769
770 legend = group->legend;
771 dc = group->dc;
772 while (dc) {
773 *legend = dc->channel;
774 legend++;
775 dc = dc->next;
776 }
777 }
778
779 /*
780 * ---------------------------------------------------------------------
781 * These are HIO debugging functions.
782 * ---------------------------------------------------------------------
783 */
784
785 /*
786 * nxge_delay
787 *
788 * Delay <seconds> number of seconds.
789 *
790 * Arguments:
791  *	seconds	The number of seconds to delay.
794 *
795 * Notes:
796 * This is a developer-only function.
797 *
798 * Context:
799 * Any domain
800 */
801 void
802 nxge_delay(
803 int seconds)
804 {
805 delay(drv_usectohz(seconds * 1000000));
806 }
807
808 static dmc_reg_name_t rx_names[] = {
809 { "RXDMA_CFIG1", 0 },
810 { "RXDMA_CFIG2", 8 },
811 { "RBR_CFIG_A", 0x10 },
812 { "RBR_CFIG_B", 0x18 },
813 { "RBR_KICK", 0x20 },
814 { "RBR_STAT", 0x28 },
815 { "RBR_HDH", 0x30 },
816 { "RBR_HDL", 0x38 },
817 { "RCRCFIG_A", 0x40 },
818 { "RCRCFIG_B", 0x48 },
819 { "RCRSTAT_A", 0x50 },
820 { "RCRSTAT_B", 0x58 },
821 { "RCRSTAT_C", 0x60 },
822 { "RX_DMA_ENT_MSK", 0x68 },
823 { "RX_DMA_CTL_STAT", 0x70 },
824 { "RCR_FLSH", 0x78 },
825 { "RXMISC", 0x90 },
826 { "RX_DMA_CTL_STAT_DBG", 0x98 },
827 { 0, -1 }
828 };
829
830 static dmc_reg_name_t tx_names[] = {
831 { "Tx_RNG_CFIG", 0 },
832 { "Tx_RNG_HDL", 0x10 },
833 { "Tx_RNG_KICK", 0x18 },
834 { "Tx_ENT_MASK", 0x20 },
835 { "Tx_CS", 0x28 },
836 { "TxDMA_MBH", 0x30 },
837 { "TxDMA_MBL", 0x38 },
838 { "TxDMA_PRE_ST", 0x40 },
839 { "Tx_RNG_ERR_LOGH", 0x48 },
840 { "Tx_RNG_ERR_LOGL", 0x50 },
841 { "TDMC_INTR_DBG", 0x60 },
842 { "Tx_CS_DBG", 0x68 },
843 { 0, -1 }
844 };
845
846 /*
847 * nxge_xx2str
848 *
849 * Translate a register address into a string.
850 *
851 * Arguments:
852 * offset The address of the register to translate.
853 *
854 * Notes:
855  * These are developer-only functions.
856 *
857 * Context:
858 * Any domain
859 */
860 const char *
861 nxge_rx2str(
862 int offset)
863 {
864 dmc_reg_name_t *reg = &rx_names[0];
865
866 offset &= DMA_CSR_MASK;
867
868 while (reg->name) {
869 if (offset == reg->offset)
870 return (reg->name);
871 reg++;
872 }
873
874 return (0);
875 }
876
877 const char *
878 nxge_tx2str(
879 int offset)
880 {
881 dmc_reg_name_t *reg = &tx_names[0];
882
883 offset &= DMA_CSR_MASK;
884
885 while (reg->name) {
886 if (offset == reg->offset)
887 return (reg->name);
888 reg++;
889 }
890
891 return (0);
892 }
893
894 /*
895 * nxge_ddi_perror
896 *
897 * Map a DDI error number to a string.
898 *
899 * Arguments:
900 * ddi_error The DDI error number to map.
901 *
902 * Notes:
903 *
904 * Context:
905 * Any domain
906 */
907 const char *
908 nxge_ddi_perror(
909 int ddi_error)
910 {
911 switch (ddi_error) {
912 case DDI_SUCCESS:
913 return ("DDI_SUCCESS");
914 case DDI_FAILURE:
915 return ("DDI_FAILURE");
916 case DDI_NOT_WELL_FORMED:
917 return ("DDI_NOT_WELL_FORMED");
918 case DDI_EAGAIN:
919 return ("DDI_EAGAIN");
920 case DDI_EINVAL:
921 return ("DDI_EINVAL");
922 case DDI_ENOTSUP:
923 return ("DDI_ENOTSUP");
924 case DDI_EPENDING:
925 return ("DDI_EPENDING");
926 case DDI_ENOMEM:
927 return ("DDI_ENOMEM");
928 case DDI_EBUSY:
929 return ("DDI_EBUSY");
930 case DDI_ETRANSPORT:
931 return ("DDI_ETRANSPORT");
932 case DDI_ECONTEXT:
933 return ("DDI_ECONTEXT");
934 default:
935 return ("Unknown error");
936 }
937 }
938
939 /*
940 * ---------------------------------------------------------------------
941 * These are Sun4v HIO function definitions
942 * ---------------------------------------------------------------------
943 */
944
945 #if defined(sun4v)
946
947 /*
948 * Local prototypes
949 */
950 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
951 static void nxge_hio_unshare(nxge_hio_vr_t *);
952
953 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
954 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
955
956 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
957 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
958 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
959 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
960 mac_ring_type_t, int);
961
962 /*
963 * nxge_hio_init
964 *
965 * Initialize the HIO module of the NXGE driver.
966 *
967 * Arguments:
968 * nxge
969 *
970 * Notes:
971 *
972 * Context:
973 * Any domain
974 */
975 int
976 nxge_hio_init(nxge_t *nxge)
977 {
978 nxge_hio_data_t *nhd;
979 int i, region;
980
981 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
982 if (nhd == 0) {
983 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
984 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
985 if (isLDOMguest(nxge))
986 nhd->type = NXGE_HIO_TYPE_GUEST;
987 else
988 nhd->type = NXGE_HIO_TYPE_SERVICE;
989 nxge->nxge_hw_p->hio = (uintptr_t)nhd;
990 }
991
992 if ((nxge->environs == SOLARIS_DOMAIN) &&
993 (nxge->niu_type == N2_NIU)) {
994 if (nxge->niu_hsvc_available == B_TRUE) {
995 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
996 /*
997 * Versions supported now are:
998 * - major number >= 1 (NIU_MAJOR_VER).
999 */
1000 if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
1001 (niu_hsvc->hsvc_major == 1 &&
1002 niu_hsvc->hsvc_minor == 1)) {
1003 nxge->environs = SOLARIS_SERVICE_DOMAIN;
1004 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1005 "nxge_hio_init: hypervisor services "
1006 "version %d.%d",
1007 niu_hsvc->hsvc_major,
1008 niu_hsvc->hsvc_minor));
1009 }
1010 }
1011 }
1012
1013 /*
1014 * Initialize share and ring group structures.
1015 */
1016 for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1017 nxge->tx_hio_groups[i].ghandle = NULL;
1018 nxge->tx_hio_groups[i].nxgep = nxge;
1019 nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1020 nxge->tx_hio_groups[i].gindex = 0;
1021 nxge->tx_hio_groups[i].sindex = 0;
1022 }
1023
1024 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1025 nxge->rx_hio_groups[i].ghandle = NULL;
1026 nxge->rx_hio_groups[i].nxgep = nxge;
1027 nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1028 nxge->rx_hio_groups[i].gindex = 0;
1029 nxge->rx_hio_groups[i].sindex = 0;
1030 nxge->rx_hio_groups[i].started = B_FALSE;
1031 nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
1032 nxge->rx_hio_groups[i].rdctbl = -1;
1033 nxge->rx_hio_groups[i].n_mac_addrs = 0;
1034 }
1035
1036 if (!isLDOMs(nxge)) {
1037 nhd->hio.ldoms = B_FALSE;
1038 return (NXGE_OK);
1039 }
1040
1041 nhd->hio.ldoms = B_TRUE;
1042
1043 /*
1044 * Fill in what we can.
1045 */
1046 for (region = 0; region < NXGE_VR_SR_MAX; region++) {
1047 nhd->vr[region].region = region;
1048 }
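	/*
	 * Two of the regions (FUNC0_VIR0 and FUNC2_VIR0) belong to the
	 * two NIU ports themselves (see nxge_hio_vr_share()), so they are
	 * never handed out as shares; hence the "- 2" below.
	 */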
1049 nhd->vrs = NXGE_VR_SR_MAX - 2;
1050
1051 /*
1052  * Initialize the share structures.
1053 */
1054 for (i = 0; i < NXGE_MAX_TDCS; i++)
1055 nxge->tdc_is_shared[i] = B_FALSE;
1056
1057 for (i = 0; i < NXGE_VR_SR_MAX; i++) {
1058 nxge->shares[i].nxgep = nxge;
1059 nxge->shares[i].index = 0;
1060 nxge->shares[i].vrp = NULL;
1061 nxge->shares[i].tmap = 0;
1062 nxge->shares[i].rmap = 0;
1063 nxge->shares[i].rxgroup = 0;
1064 nxge->shares[i].active = B_FALSE;
1065 }
1066
1067 /* Fill in the HV HIO function pointers. */
1068 nxge_hio_hv_init(nxge);
1069
1070 if (isLDOMservice(nxge)) {
1071 NXGE_DEBUG_MSG((nxge, HIO_CTL,
1072 "Hybrid IO-capable service domain"));
1073 return (NXGE_OK);
1074 }
1075
1076 return (0);
1077 }
1078 #endif /* defined(sun4v) */
1079
1080 static int
1081 nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
1082 const uint8_t *macaddr)
1083 {
1084 int rv;
1085 nxge_rdc_grp_t *group;
1086
1087 mutex_enter(nxge->genlock);
1088
1089 /*
1090 * Initialize the NXGE RDC table data structure.
1091 */
1092 group = &nxge->pt_config.rdc_grps[g->rdctbl];
1093 if (!group->flag) {
1094 group->port = NXGE_GET_PORT_NUM(nxge->function_num);
1095 group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
1096 group->flag = B_TRUE; /* This group has been configured. */
1097 }
1098
1099 mutex_exit(nxge->genlock);
1100
1101 /*
1102 * Add the MAC address.
1103 */
1104 if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
1105 g->rdctbl, B_TRUE)) != 0) {
1106 return (rv);
1107 }
1108
1109 mutex_enter(nxge->genlock);
1110 g->n_mac_addrs++;
1111 mutex_exit(nxge->genlock);
1112 return (0);
1113 }
1114
1115 static int
1116 nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
1117 {
1118 p_nxge_t nxgep = (p_nxge_t)arg;
1119 struct ether_addr addrp;
1120
1121 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
1122 if (nxge_set_mac_addr(nxgep, &addrp)) {
1123 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1124 		    "<== nxge_m_unicst: set unicast failed"));
1125 return (EINVAL);
1126 }
1127
1128 nxgep->primary = B_TRUE;
1129
1130 return (0);
1131 }
1132
1133 /*ARGSUSED*/
1134 static int
1135 nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
1136 {
1137 nxgep->primary = B_FALSE;
1138 return (0);
1139 }
1140
1141 static int
1142 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
1143 {
1144 nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1145 p_nxge_t nxge = group->nxgep;
1146 int rv;
1147 nxge_hio_vr_t *vr; /* The Virtualization Region */
1148
1149 ASSERT(group->type == MAC_RING_TYPE_RX);
1150 ASSERT(group->nxgep != NULL);
1151
1152 if (isLDOMguest(group->nxgep))
1153 return (0);
1154
1155 mutex_enter(nxge->genlock);
1156
1157 if (!nxge->primary && group->port_default_grp) {
1158 rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
1159 mutex_exit(nxge->genlock);
1160 return (rv);
1161 }
1162
1163 /*
1164 * If the group is associated with a VR, then only one
1165 * address may be assigned to the group.
1166 */
1167 vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
1168 if ((vr != NULL) && (group->n_mac_addrs)) {
1169 mutex_exit(nxge->genlock);
1170 return (ENOSPC);
1171 }
1172
1173 mutex_exit(nxge->genlock);
1174
1175 /*
1176 * Program the mac address for the group.
1177 */
1178 if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
1179 return (rv);
1180 }
1181
1182 return (0);
1183 }
1184
1185 static int
1186 find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
1187 {
1188 int i;
1189 for (i = 0; i <= mmac_info->num_mmac; i++) {
1190 if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
1191 ETHERADDRL) == 0) {
1192 return (i);
1193 }
1194 }
1195 return (-1);
1196 }
1197
1198 /* ARGSUSED */
1199 static int
1200 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
1201 {
1202 nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1203 struct ether_addr addrp;
1204 p_nxge_t nxge = group->nxgep;
1205 nxge_mmac_t *mmac_info;
1206 int rv, slot;
1207
1208 ASSERT(group->type == MAC_RING_TYPE_RX);
1209 ASSERT(group->nxgep != NULL);
1210
1211 if (isLDOMguest(group->nxgep))
1212 return (0);
1213
1214 mutex_enter(nxge->genlock);
1215
1216 mmac_info = &nxge->nxge_mmac_info;
1217 slot = find_mac_slot(mmac_info, mac_addr);
1218 if (slot < 0) {
1219 if (group->port_default_grp && nxge->primary) {
1220 bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
1221 if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
1222 rv = nxge_hio_clear_unicst(nxge, mac_addr);
1223 mutex_exit(nxge->genlock);
1224 return (rv);
1225 } else {
1226 mutex_exit(nxge->genlock);
1227 return (EINVAL);
1228 }
1229 } else {
1230 mutex_exit(nxge->genlock);
1231 return (EINVAL);
1232 }
1233 }
1234
1235 mutex_exit(nxge->genlock);
1236
1237 /*
1238 * Remove the mac address for the group
1239 */
1240 if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
1241 return (rv);
1242 }
1243
1244 mutex_enter(nxge->genlock);
1245 group->n_mac_addrs--;
1246 mutex_exit(nxge->genlock);
1247
1248 return (0);
1249 }
1250
1251 static int
1252 nxge_hio_group_start(mac_group_driver_t gdriver)
1253 {
1254 nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1255 nxge_rdc_grp_t *rdc_grp_p;
1256 int rdctbl;
1257 int dev_gindex;
1258
1259 ASSERT(group->type == MAC_RING_TYPE_RX);
1260 ASSERT(group->nxgep != NULL);
1261
1262 ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
1263 if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
1264 return (ENXIO);
1265
1266 mutex_enter(group->nxgep->genlock);
1267 if (isLDOMguest(group->nxgep))
1268 goto nxge_hio_group_start_exit;
1269
1270 dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1271 group->gindex;
1272 rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];
1273
1274 /*
1275 * Get an rdc table for this group.
1276 * Group ID is given by the caller, and that's the group it needs
1277 * to bind to. The default group is already bound when the driver
1278 * was attached.
1279 *
1280 	 * For group 0, its RDC table was allocated at attach time;
1281 	 * there is no need to allocate a new table.
1282 */
1283 if (group->gindex != 0) {
1284 rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
1285 dev_gindex, B_TRUE);
1286 if (rdctbl < 0) {
1287 mutex_exit(group->nxgep->genlock);
1288 return (rdctbl);
1289 }
1290 } else {
1291 rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
1292 }
1293
1294 group->rdctbl = rdctbl;
1295
1296 (void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);
1297
1298 nxge_hio_group_start_exit:
1299 group->started = B_TRUE;
1300 mutex_exit(group->nxgep->genlock);
1301 return (0);
1302 }
1303
1304 static void
1305 nxge_hio_group_stop(mac_group_driver_t gdriver)
1306 {
1307 nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1308
1309 ASSERT(group->type == MAC_RING_TYPE_RX);
1310
1311 mutex_enter(group->nxgep->genlock);
1312 group->started = B_FALSE;
1313
1314 if (isLDOMguest(group->nxgep))
1315 goto nxge_hio_group_stop_exit;
1316
1317 /*
1318 * Unbind the RDC table previously bound for this group.
1319 *
1320 * Since RDC table for group 0 was allocated at attach
1321 * time, no need to unbind the table here.
1322 */
1323 if (group->gindex != 0)
1324 (void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1325
1326 nxge_hio_group_stop_exit:
1327 mutex_exit(group->nxgep->genlock);
1328 }
1329
1330 /* ARGSUSED */
1331 void
1332 nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
1333 mac_group_info_t *infop, mac_group_handle_t ghdl)
1334 {
1335 p_nxge_t nxgep = (p_nxge_t)arg;
1336 nxge_ring_group_t *group;
1337 int dev_gindex;
1338
1339 switch (type) {
1340 case MAC_RING_TYPE_RX:
1341 group = &nxgep->rx_hio_groups[groupid];
1342 group->nxgep = nxgep;
1343 group->ghandle = ghdl;
1344 group->gindex = groupid;
1345 group->sindex = 0; /* not yet bound to a share */
1346
1347 if (!isLDOMguest(nxgep)) {
1348 dev_gindex =
1349 nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1350 groupid;
1351
1352 if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
1353 dev_gindex)
1354 group->port_default_grp = B_TRUE;
1355
1356 infop->mgi_count =
1357 nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
1358 } else {
1359 infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
1360 }
1361
1362 infop->mgi_driver = (mac_group_driver_t)group;
1363 infop->mgi_start = nxge_hio_group_start;
1364 infop->mgi_stop = nxge_hio_group_stop;
1365 infop->mgi_addmac = nxge_hio_add_mac;
1366 infop->mgi_remmac = nxge_hio_rem_mac;
1367 break;
1368
1369 case MAC_RING_TYPE_TX:
1370 /*
1371 * 'groupid' for TX should be incremented by one since
1372 * the default group (groupid 0) is not known by the MAC layer
1373 */
1374 group = &nxgep->tx_hio_groups[groupid + 1];
1375 group->nxgep = nxgep;
1376 group->ghandle = ghdl;
1377 group->gindex = groupid + 1;
1378 group->sindex = 0; /* not yet bound to a share */
1379
1380 infop->mgi_driver = (mac_group_driver_t)group;
1381 infop->mgi_start = NULL;
1382 infop->mgi_stop = NULL;
1383 infop->mgi_addmac = NULL; /* not needed */
1384 infop->mgi_remmac = NULL; /* not needed */
1385 /* no rings associated with group initially */
1386 infop->mgi_count = 0;
1387 break;
1388 }
1389 }
1390
1391 #if defined(sun4v)
1392
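/*
 * nxge_hio_share_assign
 *
 * Ask the hypervisor to assign the VR, and then to assign each of the
 * VR's shared DMA channels to a page (slot) within that VR.  The TDC
 * and RDC slot maps chosen by the hypervisor are returned to the caller
 * through <tmap> and <rmap>.
 *
 * Context:
 * 	Service domain
 */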
1393 int
1394 nxge_hio_share_assign(
1395 nxge_t *nxge,
1396 uint64_t cookie,
1397 res_map_t *tmap,
1398 res_map_t *rmap,
1399 nxge_hio_vr_t *vr)
1400 {
1401 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1402 uint64_t slot, hv_rv;
1403 nxge_hio_dc_t *dc;
1404 nxhv_vr_fp_t *fp;
1405 int i;
1406 uint64_t major;
1407
1408 /*
1409 * Ask the Hypervisor to set up the VR for us
1410 */
1411 fp = &nhd->hio.vr;
1412 major = nxge->niu_hsvc.hsvc_major;
1413 switch (major) {
1414 case NIU_MAJOR_VER: /* 1 */
1415 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1416 NXGE_ERROR_MSG((nxge, HIO_CTL,
1417 "nxge_hio_share_assign: major %d "
1418 "vr->assign() returned %d", major, hv_rv));
1419 nxge_hio_unshare(vr);
1420 return (-EIO);
1421 }
1422
1423 break;
1424
1425 case NIU_MAJOR_VER_2: /* 2 */
1426 default:
1427 if ((hv_rv = (*fp->cfgh_assign)
1428 (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
1429 NXGE_ERROR_MSG((nxge, HIO_CTL,
1430 "nxge_hio_share_assign: major %d "
1431 "vr->assign() returned %d", major, hv_rv));
1432 nxge_hio_unshare(vr);
1433 return (-EIO);
1434 }
1435
1436 break;
1437 }
1438
1439 NXGE_DEBUG_MSG((nxge, HIO_CTL,
1440 "nxge_hio_share_assign: major %d "
1441 "vr->assign() success", major));
1442
1443 /*
1444 * For each shared TDC, ask the HV to find us an empty slot.
1445 */
1446 dc = vr->tx_group.dc;
1447 for (i = 0; i < NXGE_MAX_TDCS; i++) {
1448 nxhv_dc_fp_t *tx = &nhd->hio.tx;
1449 while (dc) {
1450 hv_rv = (*tx->assign)
1451 (vr->cookie, dc->channel, &slot);
1452 if (hv_rv != 0) {
1453 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1454 "nxge_hio_share_assign: "
1455 "tx->assign(%x, %d) failed: %ld",
1456 vr->cookie, dc->channel, hv_rv));
1457 return (-EIO);
1458 }
1459
1460 dc->cookie = vr->cookie;
1461 dc->page = (vp_channel_t)slot;
1462
1463 /* Inform the caller about the slot chosen. */
1464 (*tmap) |= 1 << slot;
1465
1466 dc = dc->next;
1467 }
1468 }
1469
1470 /*
1471 * For each shared RDC, ask the HV to find us an empty slot.
1472 */
1473 dc = vr->rx_group.dc;
1474 for (i = 0; i < NXGE_MAX_RDCS; i++) {
1475 nxhv_dc_fp_t *rx = &nhd->hio.rx;
1476 while (dc) {
1477 hv_rv = (*rx->assign)
1478 (vr->cookie, dc->channel, &slot);
1479 if (hv_rv != 0) {
1480 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1481 "nxge_hio_share_assign: "
1482 "rx->assign(%x, %d) failed: %ld",
1483 vr->cookie, dc->channel, hv_rv));
1484 return (-EIO);
1485 }
1486
1487 dc->cookie = vr->cookie;
1488 dc->page = (vp_channel_t)slot;
1489
1490 /* Inform the caller about the slot chosen. */
1491 (*rmap) |= 1 << slot;
1492
1493 dc = dc->next;
1494 }
1495 }
1496
1497 return (0);
1498 }
1499
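/*
 * nxge_hio_share_unassign
 *
 * Undo nxge_hio_share_assign(): ask the hypervisor to unassign each of
 * the VR's DMA channel pages, then to unassign the VR itself.
 *
 * Context:
 * 	Service domain
 */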
1500 void
1501 nxge_hio_share_unassign(
1502 nxge_hio_vr_t *vr)
1503 {
1504 nxge_t *nxge = (nxge_t *)vr->nxge;
1505 nxge_hio_data_t *nhd;
1506 nxge_hio_dc_t *dc;
1507 nxhv_vr_fp_t *fp;
1508 uint64_t hv_rv;
1509
1510 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1511
1512 dc = vr->tx_group.dc;
1513 while (dc) {
1514 nxhv_dc_fp_t *tx = &nhd->hio.tx;
1515 hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1516 if (hv_rv != 0) {
1517 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1518 "nxge_hio_share_unassign: "
1519 "tx->unassign(%x, %d) failed: %ld",
1520 vr->cookie, dc->page, hv_rv));
1521 }
1522 dc = dc->next;
1523 }
1524
1525 dc = vr->rx_group.dc;
1526 while (dc) {
1527 nxhv_dc_fp_t *rx = &nhd->hio.rx;
1528 hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1529 if (hv_rv != 0) {
1530 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1531 "nxge_hio_share_unassign: "
1532 "rx->unassign(%x, %d) failed: %ld",
1533 vr->cookie, dc->page, hv_rv));
1534 }
1535 dc = dc->next;
1536 }
1537
1538 fp = &nhd->hio.vr;
1539 if (fp->unassign) {
1540 hv_rv = (*fp->unassign)(vr->cookie);
1541 if (hv_rv != 0) {
1542 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1543 "nxge_hio_share_unassign: "
1544 			    "vr->unassign(%x) failed: %ld",
1545 vr->cookie, hv_rv));
1546 }
1547 }
1548 }
1549
1550 int
1551 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1552 {
1553 p_nxge_t nxge = (p_nxge_t)arg;
1554 nxge_share_handle_t *shp;
1555 nxge_hio_vr_t *vr; /* The Virtualization Region */
1556 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1557
1558 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1559
1560 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1561 nhd->hio.rx.assign == 0) {
1562 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1563 return (EIO);
1564 }
1565
1566 /*
1567 * Get a VR.
1568 */
1569 if ((vr = nxge_hio_vr_share(nxge)) == 0)
1570 return (EAGAIN);
1571
1572 shp = &nxge->shares[vr->region];
1573 shp->nxgep = nxge;
1574 shp->index = vr->region;
1575 shp->vrp = (void *)vr;
1576 shp->tmap = shp->rmap = 0; /* to be assigned by ms_sbind */
1577 shp->rxgroup = 0; /* to be assigned by ms_sadd */
1578 shp->active = B_FALSE; /* not bound yet */
1579
1580 *shandle = (mac_share_handle_t)shp;
1581
1582 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1583 return (0);
1584 }
1585
1586
1587 void
1588 nxge_hio_share_free(mac_share_handle_t shandle)
1589 {
1590 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1591 nxge_hio_vr_t *vr;
1592
1593 /*
1594 * Clear internal handle state.
1595 */
1596 vr = shp->vrp;
1597 shp->vrp = (void *)NULL;
1598 shp->index = 0;
1599 shp->tmap = 0;
1600 shp->rmap = 0;
1601 shp->rxgroup = 0;
1602 shp->active = B_FALSE;
1603
1604 /*
1605 * Free VR resource.
1606 */
1607 nxge_hio_unshare(vr);
1608 }
1609
1610
1611 void
1612 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1613 mac_ring_handle_t *rings, uint_t *n_rings)
1614 {
1615 nxge_t *nxge;
1616 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1617 nxge_ring_handle_t *rh;
1618 uint32_t offset;
1619
1620 nxge = shp->nxgep;
1621
1622 switch (type) {
1623 case MAC_RING_TYPE_RX:
1624 rh = nxge->rx_ring_handles;
1625 offset = nxge->pt_config.hw_config.start_rdc;
1626 break;
1627
1628 case MAC_RING_TYPE_TX:
1629 rh = nxge->tx_ring_handles;
1630 offset = nxge->pt_config.hw_config.tdc.start;
1631 break;
1632 }
1633
1634 /*
1635 * In version 1.0, we may only give a VR 2 RDCs/TDCs. Not only that,
1636 * but the HV has statically assigned the channels like so:
1637 * VR0: RDC0 & RDC1
1638 * VR1: RDC2 & RDC3, etc.
1639 * The TDCs are assigned in exactly the same way.
1640 */
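	/*
	 * Illustrative example (assuming the static assignment described
	 * above): a share whose VR index (shp->index) is 3, with
	 * start_rdc == 0, reports rh[6] and rh[7], i.e. RDC6 and RDC7.
	 */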
1641 if (rings != NULL) {
1642 rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1643 rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1644 }
1645 if (n_rings != NULL) {
1646 *n_rings = 2;
1647 }
1648 }
1649
1650 int
1651 nxge_hio_share_add_group(mac_share_handle_t shandle,
1652 mac_group_driver_t ghandle)
1653 {
1654 nxge_t *nxge;
1655 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1656 nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle;
1657 nxge_hio_vr_t *vr; /* The Virtualization Region */
1658 nxge_grp_t *group;
1659 int i;
1660
1661 if (rg->sindex != 0) {
1662 /* the group is already bound to a share */
1663 return (EALREADY);
1664 }
1665
1666 /*
1667 	 * Adding group 0 to a share is never correct.
1669 */
1670 ASSERT(rg->gindex != 0);
1671
1672 nxge = rg->nxgep;
1673 vr = shp->vrp;
1674
1675 switch (rg->type) {
1676 case MAC_RING_TYPE_RX:
1677 /*
1678 * Make sure that the group has the right rings associated
1679 * for the share. In version 1.0, we may only give a VR
1680 * 2 RDCs. Not only that, but the HV has statically
1681 * assigned the channels like so:
1682 * VR0: RDC0 & RDC1
1683 * VR1: RDC2 & RDC3, etc.
1684 */
1685 group = nxge->rx_set.group[rg->gindex];
1686
1687 if (group->count > 2) {
1688 /* a share can have at most 2 rings */
1689 return (EINVAL);
1690 }
1691
1692 for (i = 0; i < NXGE_MAX_RDCS; i++) {
1693 if (group->map & (1 << i)) {
1694 if ((i != shp->index * 2) &&
1695 (i != (shp->index * 2 + 1))) {
1696 /*
1697 				 * An attempt was made to bind a group
1698 				 * with invalid rings to this share.
1699 */
1700 return (EINVAL);
1701 }
1702 }
1703 }
1704
1705 rg->sindex = vr->region;
1706 vr->rdc_tbl = rg->rdctbl;
1707 shp->rxgroup = vr->rdc_tbl;
1708 break;
1709
1710 case MAC_RING_TYPE_TX:
1711 /*
1712 * Make sure that the group has the right rings associated
1713 * for the share. In version 1.0, we may only give a VR
1714 * 2 TDCs. Not only that, but the HV has statically
1715 * assigned the channels like so:
1716 * VR0: TDC0 & TDC1
1717 * VR1: TDC2 & TDC3, etc.
1718 */
1719 group = nxge->tx_set.group[rg->gindex];
1720
1721 if (group->count > 2) {
1722 /* a share can have at most 2 rings */
1723 return (EINVAL);
1724 }
1725
1726 for (i = 0; i < NXGE_MAX_TDCS; i++) {
1727 if (group->map & (1 << i)) {
1728 if ((i != shp->index * 2) &&
1729 (i != (shp->index * 2 + 1))) {
1730 /*
1731 				 * An attempt was made to bind a group
1732 				 * with invalid rings to this share.
1733 */
1734 return (EINVAL);
1735 }
1736 }
1737 }
1738
1739 vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1740 rg->gindex;
1741 rg->sindex = vr->region;
1742 break;
1743 }
1744 return (0);
1745 }
1746
1747 int
1748 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1749 mac_group_driver_t ghandle)
1750 {
1751 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1752 nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle;
1753 nxge_hio_vr_t *vr; /* The Virtualization Region */
1754 int rv = 0;
1755
1756 vr = shp->vrp;
1757
1758 switch (group->type) {
1759 case MAC_RING_TYPE_RX:
1760 group->sindex = 0;
1761 vr->rdc_tbl = 0;
1762 shp->rxgroup = 0;
1763 break;
1764
1765 case MAC_RING_TYPE_TX:
1766 group->sindex = 0;
1767 vr->tdc_tbl = 0;
1768 break;
1769 }
1770
1771 return (rv);
1772 }
1773
1774 int
1775 nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1776 uint64_t *rcookie)
1777 {
1778 nxge_t *nxge;
1779 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1780 nxge_hio_vr_t *vr;
1781 uint64_t rmap, tmap, hv_rmap, hv_tmap;
1782 int rv;
1783
1784 ASSERT(shp != NULL);
1785 ASSERT(shp->nxgep != NULL);
1786 ASSERT(shp->vrp != NULL);
1787
1788 nxge = shp->nxgep;
1789 vr = (nxge_hio_vr_t *)shp->vrp;
1790
1791 /*
1792 * Add resources to the share.
1793 * For each DMA channel associated with the VR, bind its resources
1794 * to the VR.
1795 */
1796 tmap = 0;
1797 rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1798 if (rv != 0) {
1799 return (rv);
1800 }
1801
1802 rmap = 0;
1803 rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1804 if (rv != 0) {
1805 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1806 return (rv);
1807 }
1808
1809 /*
1810 * Ask the Hypervisor to set up the VR and allocate slots for
1811 	 * each ring associated with the VR.
1812 */
1813 hv_tmap = hv_rmap = 0;
1814 if ((rv = nxge_hio_share_assign(nxge, cookie,
1815 &hv_tmap, &hv_rmap, vr))) {
1816 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1817 nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1818 return (rv);
1819 }
1820
1821 shp->active = B_TRUE;
1822 shp->tmap = hv_tmap;
1823 shp->rmap = hv_rmap;
1824
1825 /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1826 *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1827
1828 return (0);
1829 }
1830
1831 void
1832 nxge_hio_share_unbind(mac_share_handle_t shandle)
1833 {
1834 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1835
1836 /*
1837 * First, unassign the VR (take it back),
1838 * so we can enable interrupts again.
1839 */
1840 nxge_hio_share_unassign(shp->vrp);
1841
1842 /*
1843 * Free Ring Resources for TX and RX
1844 */
1845 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1846 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1847 }
1848
1849
1850 /*
1851 * nxge_hio_vr_share
1852 *
1853 * Find an unused Virtualization Region (VR).
1854 *
1855 * Arguments:
1856 * nxge
1857 *
1858 * Notes:
1859 *
1860 * Context:
1861 * Service domain
1862 */
1863 nxge_hio_vr_t *
1864 nxge_hio_vr_share(
1865 nxge_t *nxge)
1866 {
1867 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1868 nxge_hio_vr_t *vr;
1869
1870 int first, limit, region;
1871
1872 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1873
1874 MUTEX_ENTER(&nhd->lock);
1875
1876 if (nhd->vrs == 0) {
1877 MUTEX_EXIT(&nhd->lock);
1878 return (0);
1879 }
1880
1881 /* Find an empty virtual region (VR). */
1882 if (nxge->function_num == 0) {
1883 // FUNC0_VIR0 'belongs' to NIU port 0.
1884 first = FUNC0_VIR1;
1885 limit = FUNC2_VIR0;
1886 } else if (nxge->function_num == 1) {
1887 // FUNC2_VIR0 'belongs' to NIU port 1.
1888 first = FUNC2_VIR1;
1889 limit = FUNC_VIR_MAX;
1890 } else {
1891 cmn_err(CE_WARN,
1892 "Shares not supported on function(%d) at this time.\n",
1893 		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
1894 	}
1895
1896 for (region = first; region < limit; region++) {
1897 if (nhd->vr[region].nxge == 0)
1898 break;
1899 }
1900
1901 if (region == limit) {
1902 MUTEX_EXIT(&nhd->lock);
1903 return (0);
1904 }
1905
1906 vr = &nhd->vr[region];
1907 vr->nxge = (uintptr_t)nxge;
1908 vr->region = (uintptr_t)region;
1909
1910 nhd->vrs--;
1911
1912 MUTEX_EXIT(&nhd->lock);
1913
1914 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1915
1916 return (vr);
1917 }
1918
1919 void
1920 nxge_hio_unshare(
1921 nxge_hio_vr_t *vr)
1922 {
1923 nxge_t *nxge = (nxge_t *)vr->nxge;
1924 nxge_hio_data_t *nhd;
1925
1926 vr_region_t region;
1927
1928 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1929
1930 if (!nxge) {
1931 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1932 "vr->nxge is NULL"));
1933 return;
1934 }
1935
1936 /*
1937 * This function is no longer called, but I will keep it
1938 * here in case we want to revisit this topic in the future.
1939 *
1940 * nxge_hio_hostinfo_uninit(nxge, vr);
1941 */
1942
1943 /*
1944 * XXX: This is done by ms_sremove?
1945 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1946 */
1947
1948 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1949
1950 MUTEX_ENTER(&nhd->lock);
1951
1952 region = vr->region;
1953 (void) memset(vr, 0, sizeof (*vr));
1954 vr->region = region;
1955
1956 nhd->vrs++;
1957
1958 MUTEX_EXIT(&nhd->lock);
1959
1960 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1961 }
1962
1963 int
1964 nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
1965 {
1966 nxge_t *nxge;
1967 nxge_grp_t *group;
1968 int groupid;
1969 int i, rv = 0;
1970 int max_dcs;
1971
1972 ASSERT(vr != NULL);
1973 ASSERT(vr->nxge != NULL);
1974 nxge = (nxge_t *)vr->nxge;
1975
1976 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1977
1978 /*
1979 * For each ring associated with the group, add the resources
1980 * to the group and bind.
1981 */
1982 max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1983 if (type == MAC_RING_TYPE_TX) {
1984 		/* set->group is an array of groups indexed by port group id */
1985 groupid = vr->tdc_tbl -
1986 nxge->pt_config.hw_config.def_mac_txdma_grpid;
1987 group = nxge->tx_set.group[groupid];
1988 } else {
1989 		/* set->group is an array of groups indexed by port group id */
1990 groupid = vr->rdc_tbl -
1991 nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1992 group = nxge->rx_set.group[groupid];
1993 }
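	/*
	 * The subtractions above recover the logical group index: the
	 * tdc_tbl/rdc_tbl values stored in the VR appear to be expressed
	 * relative to the port's default group ids (see
	 * nxge_hio_share_add_group() and nxge_hio_group_start()).
	 */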
1994
1995 ASSERT(group != NULL);
1996
1997 if (group->map == 0) {
1998 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
1999 "with this VR"));
2000 return (EINVAL);
2001 }
2002
2003 for (i = 0; i < max_dcs; i++) {
2004 if (group->map & (1 << i)) {
2005 if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
2006 if (*map == 0) /* Couldn't get even one DC. */
2007 return (-rv);
2008 else
2009 break;
2010 }
2011 *map |= (1 << i);
2012 }
2013 }
2014
2015 if ((*map == 0) || (rv != 0)) {
2016 NXGE_DEBUG_MSG((nxge, HIO_CTL,
2017 "<== nxge_hio_addres: rv(%x)", rv));
2018 return (EIO);
2019 }
2020
2021 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
2022 return (0);
2023 }
2024
2025 /* ARGSUSED */
2026 void
2027 nxge_hio_remres(
2028 nxge_hio_vr_t *vr,
2029 mac_ring_type_t type,
2030 res_map_t res_map)
2031 {
2032 nxge_t *nxge = (nxge_t *)vr->nxge;
2033 nxge_grp_t *group;
2034
2035 if (!nxge) {
2036 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2037 "vr->nxge is NULL"));
2038 return;
2039 }
2040
2041 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
2042
2043 /*
2044 * For each ring bound to the group, remove the DMA resources
2045 * from the group and unbind.
2046 */
2047 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2048 while (group->dc) {
2049 nxge_hio_dc_t *dc = group->dc;
2050 NXGE_DC_RESET(res_map, dc->page);
2051 nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
2052 }
2053
2054 if (res_map) {
2055 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2056 "res_map %lx", res_map));
2057 }
2058
2059 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
2060 }
2061
2062 /*
2063 * nxge_hio_tdc_share
2064 *
2065 * Share an unused TDC channel.
2066 *
2067 * Arguments:
2068 * nxge
2069 *
2070 * Notes:
2071 *
2072 * A.7.3 Reconfigure Tx DMA channel
2073 * Disable TxDMA A.9.6.10
2074 * [Rebind TxDMA channel to Port A.9.6.7]
2075 *
2076  * We don't have to rebind the TDC to the port; it is always already bound.
2077 *
2078 * Soft Reset TxDMA A.9.6.2
2079 *
2080 * This procedure will be executed by nxge_init_txdma_channel() in the
2081 * guest domain:
2082 *
2083 * Re-initialize TxDMA A.9.6.8
2084 * Reconfigure TxDMA
2085 * Enable TxDMA A.9.6.9
2086 *
2087 * Context:
2088 * Service domain
2089 */
2090 int
2091 nxge_hio_tdc_share(
2092 nxge_t *nxge,
2093 int channel)
2094 {
2095 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2096 nxge_grp_set_t *set = &nxge->tx_set;
2097 tx_ring_t *ring;
2098 int count;
2099
2100 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));
2101
2102 /*
2103 * Wait until this channel is idle.
2104 */
2105 ring = nxge->tx_rings->rings[channel];
2106 ASSERT(ring != NULL);
2107
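	/*
	 * Mark the ring as going offline; if the transmit path is busy,
	 * the loop below waits (up to 30 seconds) for the send path to
	 * acknowledge by setting NXGE_TX_RING_OFFLINED.
	 */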
2108 (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
2109 if (ring->tx_ring_busy) {
2110 /*
2111 * Wait for 30 seconds.
2112 */
2113 for (count = 30 * 1000; count; count--) {
2114 if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
2115 break;
2116 }
2117
2118 drv_usecwait(1000);
2119 }
2120
2121 if (count == 0) {
2122 (void) atomic_swap_32(&ring->tx_ring_offline,
2123 NXGE_TX_RING_ONLINE);
2124 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2125 "nxge_hio_tdc_share: "
2126 "Tx ring %d was always BUSY", channel));
2127 return (-EIO);
2128 }
2129 } else {
2130 (void) atomic_swap_32(&ring->tx_ring_offline,
2131 NXGE_TX_RING_OFFLINED);
2132 }
2133
2134 MUTEX_ENTER(&nhd->lock);
2135 nxge->tdc_is_shared[channel] = B_TRUE;
2136 MUTEX_EXIT(&nhd->lock);
2137
2138 if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2139 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
2140 "Failed to remove interrupt for TxDMA channel %d",
2141 channel));
2142 return (-EINVAL);
2143 }
2144
2145 /* Disable TxDMA A.9.6.10 */
2146 (void) nxge_txdma_channel_disable(nxge, channel);
2147
2148 /* The SD is sharing this channel. */
2149 NXGE_DC_SET(set->shared.map, channel);
2150 set->shared.count++;
2151
2152 /* Soft Reset TxDMA A.9.6.2 */
2153 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
2154
2155 /*
2156 * Initialize the DC-specific FZC control registers.
2157 * -----------------------------------------------------
2158 */
2159 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
2160 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2161 "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
2162 return (-EIO);
2163 }
2164
2165 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));
2166
2167 return (0);
2168 }
2169
2170 /*
2171 * nxge_hio_rdc_share
2172 *
2173 * Share an unused RDC channel.
2174 *
2175 * Arguments:
2176 * nxge
2177 *
2178 * Notes:
2179 *
2180 * This is the latest version of the procedure to
2181 * Reconfigure an Rx DMA channel:
2182 *
2183 * A.6.3 Reconfigure Rx DMA channel
2184 * Stop RxMAC A.9.2.6
2185 * Drain IPP Port A.9.3.6
2186 * Stop and reset RxDMA A.9.5.3
2187 *
2188 * This procedure will be executed by nxge_init_rxdma_channel() in the
2189 * guest domain:
2190 *
2191 * Initialize RxDMA A.9.5.4
2192 * Reconfigure RxDMA
2193 * Enable RxDMA A.9.5.5
2194 *
2195 * We will do this here, since the RDC is a canalis non grata:
2196 * Enable RxMAC A.9.2.10
2197 *
2198 * Context:
2199 * Service domain
2200 */
2201 int
2202 nxge_hio_rdc_share(
2203 nxge_t *nxge,
2204 nxge_hio_vr_t *vr,
2205 int channel)
2206 {
2207 nxge_grp_set_t *set = &nxge->rx_set;
2208 nxge_rdc_grp_t *rdc_grp;
2209
2210 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));
2211
2212 /* Disable interrupts. */
2213 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
2214 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2215 "Failed to remove interrupt for RxDMA channel %d",
2216 channel));
2217 return (NXGE_ERROR);
2218 }
2219
2220 /* Stop RxMAC = A.9.2.6 */
2221 if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2222 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
2223 "Failed to disable RxMAC"));
2224 }
2225
2226 /* Drain IPP Port = A.9.3.6 */
2227 (void) nxge_ipp_drain(nxge);
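	/*
	 * Draining the IPP here is meant to ensure that no packets are
	 * still in flight toward the RDCs while the channel below is
	 * stopped and handed over.
	 */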

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	// Assert RST: RXDMA_CFIG1[30] = 1
	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

	/*
	 * The guest domain will reconfigure the RDC later.
	 *
	 * But in the meantime, we must re-enable the Rx MAC so
	 * that we can start receiving packets again on the
	 * remaining RDCs:
	 *
	 * Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: Rx MAC still disabled"));
	}

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
		return (-EIO);
	}

	/*
	 * Update the RDC group.
	 */
	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
	NXGE_DC_SET(rdc_grp->map, channel);
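	/*
	 * Note (assumption): the group map updated above is what later
	 * RDC-table programming for this VR's group is driven from, so
	 * the shared channel becomes visible to the guest's table.
	 */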

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));

	return (0);
}

/*
 * nxge_hio_dc_share
 *
 * Share a DMA channel with a guest domain.
 *
 * Arguments:
 * nxge
 * vr The VR that <channel> will belong to.
 * type Tx or Rx.
 * channel Channel to share
 *
 * Notes:
 *
 * Context:
 * Service domain
 */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_t *group;
	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* -------------------------------------------------- */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);
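	/*
	 * Both share routines return 0 on success; on failure the channel
	 * is handed straight back via the matching unshare routine below.
	 */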

	if (slot < 0) {
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;
	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence = nhd->sequence++;
		group->active = B_TRUE;
	}
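	/*
	 * The VR's group is initialized lazily on its first shared
	 * channel; nxge_grp_dc_append() below then links this DC into
	 * the group.
	 */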

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 * Unshare a TDC.
 *
 * Arguments:
 * nxge
 * dev_grpid The device-level group to which the channel is returned.
 * channel The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 * Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int dev_grpid,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group;
	int grpid;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

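	/*
	 * dev_grpid is the device-wide group id; subtracting the default
	 * TDC group id for this port gives the zero-based index into
	 * set->group[].
	 */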
	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
	group = set->group[grpid];

	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 * Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 * nxge
 * dev_grpid The device-level group to which the channel is returned.
 * channel The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 * Service domain
 */
void
nxge_hio_rdc_unshare(
	nxge_t *nxge,
	int dev_grpid,
	int channel)
{
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_grp_t *group;
	int grpid;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
	group = set->group[grpid];

	/*
	 * Assert RST: RXDMA_CFIG1[30] = 1
	 *
	 * Initialize RxDMA A.9.5.4
	 * Reconfigure RxDMA
	 * Enable RxDMA A.9.5.5
	 */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
		/* Be sure to re-enable the RX MAC. */
		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		}
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to initialize RxDMA channel %d", channel));
		return;
	}

	/*
	 * Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
2494 "RxDMA CHANNEL %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));

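	/*
	 * Restart the mac-layer ring that maps to this channel so the
	 * stack resumes receiving on it.
	 */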
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		if (nxge->rx_ring_handles[i].channel == channel) {
			(void) nxge_rx_ring_start(
			    (mac_ring_driver_t)&nxge->rx_ring_handles[i],
			    nxge->rx_ring_handles[i].ring_gen_num);
		}
	}
}

/*
 * nxge_hio_dc_unshare
 *
 * Unshare (reuse) a DMA channel.
 *
 * Arguments:
 * nxge
 * vr The VR that <channel> belongs to.
 * type Tx or Rx.
 * channel The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 * Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == NULL) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_dc_unshare(%d) failed", channel));
		return;
	}

	dc->vr = 0;
	dc->cookie = 0;

	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}

/*
 * nxge_hio_rxdma_bind_intr():
 *
 * For the guest domain driver, bind the interrupt group and logical
 * device to the rx_rcr_ring_t.
 */

int
nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
{
	nxge_hio_dc_t *dc;
	nxge_ldgv_t *control;
	nxge_ldg_t *group;
	nxge_ldv_t *device;

	/*
	 * Find the DMA channel.
	 */
	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
		return (NXGE_ERROR);
	}

	/*
	 * Get the control structure.
	 */
	control = nxge->ldgvp;
	if (control == NULL) {
		return (NXGE_ERROR);
	}

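	/*
	 * dc->ldg records which logical device group (interrupt vector)
	 * and logical device this channel was bound to; caching both on
	 * the RCR ring lets the receive path find them directly.
	 */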
	group = &control->ldgp[dc->ldg.vector];
	device = &control->ldvp[dc->ldg.ldsv];

	MUTEX_ENTER(&ring->lock);
	ring->ldgp = group;
	ring->ldvp = device;
	MUTEX_EXIT(&ring->lock);

	return (NXGE_OK);
}
#endif	/* if defined(sun4v) */