1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * nxge_hio.c
29 *
30 * This file manages the virtualization resources for Neptune
31 * devices. That is, it implements a hybrid I/O (HIO) approach in the
32 * Solaris kernel, whereby a guest domain on an LDOMs server may
33 * request & use hardware resources from the service domain.
34 *
35 */
36
37 #include <sys/mac_provider.h>
38 #include <sys/nxge/nxge_impl.h>
39 #include <sys/nxge/nxge_fzc.h>
40 #include <sys/nxge/nxge_rxdma.h>
41 #include <sys/nxge/nxge_txdma.h>
42 #include <sys/nxge/nxge_hio.h>
43
44 /*
45 * External prototypes
46 */
47 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);
48
49 /* The following functions may be found in nxge_main.c */
50 extern int nxge_m_mmac_remove(void *arg, int slot);
51 extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
52 boolean_t usetbl);
53 extern int nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num);
54
55 /* The following functions may be found in nxge_[t|r]xdma.c */
56 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
57 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);
58
59 /*
60 * Local prototypes
61 */
62 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
63 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
64 static void nxge_grp_dc_map(nxge_grp_t *group);
65
66 /*
67 * These functions are used by both service & guest domains to
68 * decide whether they're running in an LDOMs/XEN environment
69 * or not. If so, then the Hybrid I/O (HIO) module is initialized.
70 */
71
72 /*
73 * nxge_get_environs
74 *
75 * Figure out if we are in a guest domain or not.
76 *
77 * Arguments:
78 * nxge
79 *
80 * Notes:
81 *
82 * Context:
83 * Any domain
84 */
85 void
86 nxge_get_environs(
87 nxge_t *nxge)
88 {
89 char *string;
90
91 /*
92 * In the beginning, assume that we are running sans LDOMs/XEN.
93 */
94 nxge->environs = SOLARIS_DOMAIN;
95
96 /*
97 * Are we a hybrid I/O (HIO) guest domain driver?
98 */
99 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
100 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
101 "niutype", &string)) == DDI_PROP_SUCCESS) {
102 if (strcmp(string, "n2niu") == 0) {
103 nxge->environs = SOLARIS_GUEST_DOMAIN;
104 /* So we can allocate properly-aligned memory. */
105 nxge->niu_type = N2_NIU;
106 NXGE_DEBUG_MSG((nxge, HIO_CTL,
107 "Hybrid IO-capable guest domain"));
108 }
109 ddi_prop_free(string);
110 }
111 }
112
113 #if !defined(sun4v)
114
115 /*
116 * nxge_hio_init
117 *
118 * Initialize the HIO module of the NXGE driver.
119 *
120 * Arguments:
121 * nxge
122 *
123 * Notes:
124 * This is the non-hybrid I/O version of this function.
125 *
126 * Context:
127 * Any domain
128 */
129 int
130 nxge_hio_init(nxge_t *nxge)
131 {
132 nxge_hio_data_t *nhd;
133 int i;
134
135 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
136 if (nhd == NULL) {
137 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
138 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
139 nhd->type = NXGE_HIO_TYPE_SERVICE;
140 nxge->nxge_hw_p->hio = (uintptr_t)nhd;
141 }
142
143 /*
144 * Initialize share and ring group structures.
145 */
146 for (i = 0; i < NXGE_MAX_TDCS; i++)
147 nxge->tdc_is_shared[i] = B_FALSE;
148
149 for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
150 nxge->tx_hio_groups[i].ghandle = NULL;
151 nxge->tx_hio_groups[i].nxgep = nxge;
152 nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
153 nxge->tx_hio_groups[i].gindex = 0;
154 nxge->tx_hio_groups[i].sindex = 0;
155 }
156
157 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
158 nxge->rx_hio_groups[i].ghandle = NULL;
159 nxge->rx_hio_groups[i].nxgep = nxge;
160 nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
161 nxge->rx_hio_groups[i].gindex = 0;
162 nxge->rx_hio_groups[i].sindex = 0;
163 nxge->rx_hio_groups[i].started = B_FALSE;
164 nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
165 nxge->rx_hio_groups[i].rdctbl = -1;
166 nxge->rx_hio_groups[i].n_mac_addrs = 0;
167 }
168
169 nhd->hio.ldoms = B_FALSE;
170
171 return (NXGE_OK);
172 }
173
174 #endif
175
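/*
 * nxge_hio_uninit
 *
 *	Tear down the HIO module state for this device.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	Assumes that all device instances have already detached
 *	(nxge_hw_p->ndevs == 0).
 *
 * Context:
 *	Any domain
 */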
176 void
177 nxge_hio_uninit(nxge_t *nxge)
178 {
179 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
180
181 ASSERT(nxge->nxge_hw_p->ndevs == 0);
182
183 if (nhd != NULL) {
184 MUTEX_DESTROY(&nhd->lock);
185 KMEM_FREE(nhd, sizeof (*nhd));
186 nxge->nxge_hw_p->hio = 0;
187 }
188 }
189
190 /*
191 * nxge_dci_map
192 *
193 * Map a DMA channel index to a channel number.
194 *
195 * Arguments:
196  * 	nxge
197 * type The type of channel this is: Tx or Rx.
198 * index The index to convert to a channel number
199 *
200 * Notes:
201 * This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
202 *
203 * Context:
204 * Any domain
205 */
206 int
207 nxge_dci_map(
208 nxge_t *nxge,
209 vpc_type_t type,
210 int index)
211 {
212 nxge_grp_set_t *set;
213 int dc;
214
215 switch (type) {
216 case VP_BOUND_TX:
217 set = &nxge->tx_set;
218 break;
219 case VP_BOUND_RX:
220 set = &nxge->rx_set;
221 break;
222 }
223
224 for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
225 if ((1 << dc) & set->owned.map) {
226 if (index == 0)
227 return (dc);
228 else
229 index--;
230 }
231 }
232
233 return (-1);
234 }
235
236 /*
237 * ---------------------------------------------------------------------
238 * These are the general-purpose DMA channel group functions. That is,
239 * these functions are used to manage groups of TDCs or RDCs in an HIO
240 * environment.
241 *
242  * It is also expected that, in the future, they will be able to manage
243 * Crossbow groups.
244 * ---------------------------------------------------------------------
245 */
246
247 /*
248 * nxge_grp_cleanup(p_nxge_t nxge)
249 *
250 * Remove all outstanding groups.
251 *
252 * Arguments:
253 * nxge
254 */
255 void
256 nxge_grp_cleanup(p_nxge_t nxge)
257 {
258 nxge_grp_set_t *set;
259 int i;
260
261 MUTEX_ENTER(&nxge->group_lock);
262
263 /*
264 * Find RX groups that need to be cleaned up.
265 */
266 set = &nxge->rx_set;
267 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
268 if (set->group[i] != NULL) {
269 KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
270 set->group[i] = NULL;
271 }
272 }
273
274 /*
275 * Find TX groups that need to be cleaned up.
276 */
277 set = &nxge->tx_set;
278 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
279 if (set->group[i] != NULL) {
280 KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
281 set->group[i] = NULL;
282 }
283 }
284 MUTEX_EXIT(&nxge->group_lock);
285 }
286
287
288 /*
289 * nxge_grp_add
290 *
291 * Add a group to an instance of NXGE.
292 *
293 * Arguments:
294 * nxge
295 * type Tx or Rx
296 *
297 * Notes:
298 *
299 * Context:
300 * Any domain
301 */
302 nxge_grp_t *
303 nxge_grp_add(
304 nxge_t *nxge,
305 nxge_grp_type_t type)
306 {
307 nxge_grp_set_t *set;
308 nxge_grp_t *group;
309 int i;
310
311 group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
312 group->nxge = nxge;
313
314 MUTEX_ENTER(&nxge->group_lock);
315 switch (type) {
316 case NXGE_TRANSMIT_GROUP:
317 case EXT_TRANSMIT_GROUP:
318 set = &nxge->tx_set;
319 break;
320 default:
321 set = &nxge->rx_set;
322 break;
323 }
324
325 group->type = type;
326 group->active = B_TRUE;
327 group->sequence = set->sequence++;
328
329 /* Find an empty slot for this logical group. */
330 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
331 if (set->group[i] == 0) {
332 group->index = i;
333 set->group[i] = group;
334 NXGE_DC_SET(set->lg.map, i);
335 set->lg.count++;
336 break;
337 }
338 }
339 MUTEX_EXIT(&nxge->group_lock);
340
341 NXGE_DEBUG_MSG((nxge, HIO_CTL,
342 "nxge_grp_add: %cgroup = %d.%d",
343 type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
344 nxge->mac.portnum, group->sequence));
345
346 return (group);
347 }
348
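/*
 * nxge_grp_remove
 *
 *	Remove a group from an instance of NXGE and free it, after
 *	removing any DMA channels still linked to it.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remove
 *
 * Context:
 *	Any domain
 */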
349 void
350 nxge_grp_remove(
351 nxge_t *nxge,
352 nxge_grp_t *group) /* The group to remove. */
353 {
354 nxge_grp_set_t *set;
355 vpc_type_t type;
356
357 if (group == NULL)
358 return;
359
360 MUTEX_ENTER(&nxge->group_lock);
361 switch (group->type) {
362 case NXGE_TRANSMIT_GROUP:
363 case EXT_TRANSMIT_GROUP:
364 set = &nxge->tx_set;
365 break;
366 default:
367 set = &nxge->rx_set;
368 break;
369 }
370
371 if (set->group[group->index] != group) {
372 MUTEX_EXIT(&nxge->group_lock);
373 return;
374 }
375
376 set->group[group->index] = 0;
377 NXGE_DC_RESET(set->lg.map, group->index);
378 set->lg.count--;
379
380 /* While inside the mutex, deactivate <group>. */
381 group->active = B_FALSE;
382
383 MUTEX_EXIT(&nxge->group_lock);
384
385 NXGE_DEBUG_MSG((nxge, HIO_CTL,
386 "nxge_grp_remove(%c.%d.%d) called",
387 group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
388 nxge->mac.portnum, group->sequence));
389
390 /* Now, remove any DCs which are still active. */
391 switch (group->type) {
392 default:
393 type = VP_BOUND_TX;
394 break;
395 case NXGE_RECEIVE_GROUP:
396 case EXT_RECEIVE_GROUP:
397 type = VP_BOUND_RX;
398 }
399
400 while (group->dc) {
401 nxge_grp_dc_remove(nxge, type, group->dc->channel);
402 }
403
404 KMEM_FREE(group, sizeof (*group));
405 }
406
407 /*
408 * nxge_grp_dc_add
409 *
410 * Add a DMA channel to a VR/Group.
411 *
412 * Arguments:
413 * nxge
414 * channel The channel to add.
415 * Notes:
416 *
417 * Context:
418 * Any domain
419 */
420 /* ARGSUSED */
421 int
422 nxge_grp_dc_add(
423 nxge_t *nxge,
424 nxge_grp_t *group, /* The group to add <channel> to. */
425 vpc_type_t type, /* Rx or Tx */
426 int channel) /* A physical/logical channel number */
427 {
428 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
429 nxge_hio_dc_t *dc;
430 nxge_grp_set_t *set;
431 nxge_status_t status = NXGE_OK;
432 int error = 0;
433
434 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));
435
436 if (group == 0)
437 return (0);
438
439 switch (type) {
440 case VP_BOUND_TX:
441 set = &nxge->tx_set;
442 if (channel > NXGE_MAX_TDCS) {
443 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
444 "nxge_grp_dc_add: TDC = %d", channel));
445 return (NXGE_ERROR);
446 }
447 break;
448 case VP_BOUND_RX:
449 set = &nxge->rx_set;
450 if (channel > NXGE_MAX_RDCS) {
451 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
452 "nxge_grp_dc_add: RDC = %d", channel));
453 return (NXGE_ERROR);
454 }
455 break;
456
457 default:
458 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
459 "nxge_grp_dc_add: unknown type channel(%d)", channel));
460 }
461
462 NXGE_DEBUG_MSG((nxge, HIO_CTL,
463 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
464 type == VP_BOUND_TX ? 't' : 'r',
465 nxge->mac.portnum, group->sequence, group->count, channel));
466
467 MUTEX_ENTER(&nxge->group_lock);
468 if (group->active != B_TRUE) {
469 /* We may be in the process of removing this group. */
470 MUTEX_EXIT(&nxge->group_lock);
471 return (NXGE_ERROR);
472 }
473 MUTEX_EXIT(&nxge->group_lock);
474
475 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
476 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
477 "nxge_grp_dc_add(%d): DC FIND failed", channel));
478 return (NXGE_ERROR);
479 }
480
481 MUTEX_ENTER(&nhd->lock);
482
483 if (dc->group) {
484 MUTEX_EXIT(&nhd->lock);
485 /* This channel is already in use! */
486 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
487 "nxge_grp_dc_add(%d): channel already in group", channel));
488 return (NXGE_ERROR);
489 }
490
491 dc->next = 0;
492 dc->page = channel;
493 dc->channel = (nxge_channel_t)channel;
494
495 dc->type = type;
496 if (type == VP_BOUND_RX) {
497 dc->init = nxge_init_rxdma_channel;
498 dc->uninit = nxge_uninit_rxdma_channel;
499 } else {
500 dc->init = nxge_init_txdma_channel;
501 dc->uninit = nxge_uninit_txdma_channel;
502 }
503
504 dc->group = group;
505
506 if (isLDOMguest(nxge)) {
507 error = nxge_hio_ldsv_add(nxge, dc);
508 if (error != 0) {
509 MUTEX_EXIT(&nhd->lock);
510 return (NXGE_ERROR);
511 }
512 }
513
514 NXGE_DC_SET(set->owned.map, channel);
515 set->owned.count++;
516
517 MUTEX_EXIT(&nhd->lock);
518
519 if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
520 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
521 "nxge_grp_dc_add(%d): channel init failed", channel));
522 MUTEX_ENTER(&nhd->lock);
523 (void) memset(dc, 0, sizeof (*dc));
524 NXGE_DC_RESET(set->owned.map, channel);
525 set->owned.count--;
526 MUTEX_EXIT(&nhd->lock);
527 return (NXGE_ERROR);
528 }
529
530 nxge_grp_dc_append(nxge, group, dc);
531
532 if (type == VP_BOUND_TX) {
533 MUTEX_ENTER(&nhd->lock);
534 nxge->tdc_is_shared[channel] = B_FALSE;
535 MUTEX_EXIT(&nhd->lock);
536 }
537
538 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));
539
540 return ((int)status);
541 }
542
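/*
 * nxge_grp_dc_remove
 *
 *	Remove a DMA channel from its group and uninitialize it.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 * 	channel	The channel to remove
 *
 * Context:
 *	Any domain
 */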
543 void
544 nxge_grp_dc_remove(
545 nxge_t *nxge,
546 vpc_type_t type,
547 int channel)
548 {
549 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
550 nxge_hio_dc_t *dc;
551 nxge_grp_set_t *set;
552 nxge_grp_t *group;
553
554 dc_uninit_t uninit;
555
556 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));
557
558 if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
559 goto nxge_grp_dc_remove_exit;
560
561 if ((dc->group == NULL) && (dc->next == 0) &&
562 (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
563 goto nxge_grp_dc_remove_exit;
564 }
565
566 group = (nxge_grp_t *)dc->group;
567
568 if (isLDOMguest(nxge)) {
569 (void) nxge_hio_intr_remove(nxge, type, channel);
570 }
571
572 NXGE_DEBUG_MSG((nxge, HIO_CTL,
573 "DC remove: group = %d.%d.%d, %cdc %d",
574 nxge->mac.portnum, group->sequence, group->count,
575 type == VP_BOUND_TX ? 't' : 'r', dc->channel));
576
577 MUTEX_ENTER(&nhd->lock);
578
579 set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
580
581 /* Remove the DC from its group. */
582 if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
583 MUTEX_EXIT(&nhd->lock);
584 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
585 "nxge_grp_dc_remove(%d) failed", channel));
586 goto nxge_grp_dc_remove_exit;
587 }
588
589 uninit = dc->uninit;
590 channel = dc->channel;
591
592 NXGE_DC_RESET(set->owned.map, channel);
593 set->owned.count--;
594
595 (void) memset(dc, 0, sizeof (*dc));
596
597 MUTEX_EXIT(&nhd->lock);
598
599 (*uninit)(nxge, channel);
600
601 nxge_grp_dc_remove_exit:
602 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
603 }
604
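/*
 * nxge_grp_dc_find
 *
 *	Find the data structure representing a DMA channel.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 * 	channel	The channel to look up
 *
 * Notes:
 *	In a guest domain, only channels belonging to this domain's
 *	VR are considered.
 *
 * Context:
 *	Any domain
 */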
605 nxge_hio_dc_t *
606 nxge_grp_dc_find(
607 nxge_t *nxge,
608 vpc_type_t type, /* Rx or Tx */
609 int channel)
610 {
611 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
612 nxge_hio_dc_t *current;
613
614 current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];
615
616 if (!isLDOMguest(nxge)) {
617 return (&current[channel]);
618 } else {
619 /* We're in a guest domain. */
620 int i, limit = (type == VP_BOUND_TX) ?
621 NXGE_MAX_TDCS : NXGE_MAX_RDCS;
622
623 MUTEX_ENTER(&nhd->lock);
624 for (i = 0; i < limit; i++, current++) {
625 if (current->channel == channel) {
626 if (current->vr && current->vr->nxge ==
627 (uintptr_t)nxge) {
628 MUTEX_EXIT(&nhd->lock);
629 return (current);
630 }
631 }
632 }
633 MUTEX_EXIT(&nhd->lock);
634 }
635
636 return (0);
637 }
638
639 /*
640 * nxge_grp_dc_append
641 *
642 * Append a DMA channel to a group.
643 *
644 * Arguments:
645 * nxge
646 * group The group to append to
647 * dc The DMA channel to append
648 *
649 * Notes:
650 *
651 * Context:
652 * Any domain
653 */
654 static
655 void
656 nxge_grp_dc_append(
657 nxge_t *nxge,
658 nxge_grp_t *group,
659 nxge_hio_dc_t *dc)
660 {
661 MUTEX_ENTER(&nxge->group_lock);
662
663 if (group->dc == 0) {
664 group->dc = dc;
665 } else {
666 nxge_hio_dc_t *current = group->dc;
667 do {
668 if (current->next == 0) {
669 current->next = dc;
670 break;
671 }
672 current = current->next;
673 } while (current);
674 }
675
676 NXGE_DC_SET(group->map, dc->channel);
677
678 nxge_grp_dc_map(group);
679 group->count++;
680
681 MUTEX_EXIT(&nxge->group_lock);
682 }
683
684 /*
685 * nxge_grp_dc_unlink
686 *
687  * Unlink a DMA channel from its linked list (group).
688 *
689 * Arguments:
690 * nxge
691 * group The group (linked list) to unlink from
692  * 	channel	The channel number of the DC to unlink
693 *
694 * Notes:
695 *
696 * Context:
697 * Any domain
698 */
699 nxge_hio_dc_t *
700 nxge_grp_dc_unlink(
701 nxge_t *nxge,
702 nxge_grp_t *group,
703 int channel)
704 {
705 nxge_hio_dc_t *current, *previous;
706
707 MUTEX_ENTER(&nxge->group_lock);
708
709 if (group == NULL) {
710 MUTEX_EXIT(&nxge->group_lock);
711 return (0);
712 }
713
714 if ((current = group->dc) == 0) {
715 MUTEX_EXIT(&nxge->group_lock);
716 return (0);
717 }
718
719 previous = 0;
720 do {
721 if (current->channel == channel) {
722 if (previous)
723 previous->next = current->next;
724 else
725 group->dc = current->next;
726 break;
727 }
728 previous = current;
729 current = current->next;
730 } while (current);
731
732 if (current == 0) {
733 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
734 "DC unlink: DC %d not found", channel));
735 } else {
736 current->next = 0;
737 current->group = 0;
738
739 NXGE_DC_RESET(group->map, channel);
740 group->count--;
741 }
742
743 nxge_grp_dc_map(group);
744
745 MUTEX_EXIT(&nxge->group_lock);
746
747 return (current);
748 }
749
750 /*
751 * nxge_grp_dc_map
752 *
753 * Map a linked list to an array of channel numbers.
754 *
755 * Arguments:
756 * nxge
757 * group The group to remap.
758 *
759 * Notes:
760 * It is expected that the caller will hold the correct mutex.
761 *
762 * Context:
763 * Service domain
764 */
765 void
766 nxge_grp_dc_map(
767 nxge_grp_t *group)
768 {
769 nxge_channel_t *legend;
770 nxge_hio_dc_t *dc;
771
772 (void) memset(group->legend, 0, sizeof (group->legend));
773
774 legend = group->legend;
775 dc = group->dc;
776 while (dc) {
777 *legend = dc->channel;
778 legend++;
779 dc = dc->next;
780 }
781 }
782
783 /*
784 * ---------------------------------------------------------------------
785 * These are HIO debugging functions.
786 * ---------------------------------------------------------------------
787 */
788
789 /*
790 * nxge_delay
791 *
792 * Delay <seconds> number of seconds.
793 *
794 * Arguments:
795  * 	seconds	The number of seconds to delay.
798 *
799 * Notes:
800 * This is a developer-only function.
801 *
802 * Context:
803 * Any domain
804 */
805 void
806 nxge_delay(
807 int seconds)
808 {
809 delay(drv_usectohz(seconds * 1000000));
810 }
811
812 static dmc_reg_name_t rx_names[] = {
813 { "RXDMA_CFIG1", 0 },
814 { "RXDMA_CFIG2", 8 },
815 { "RBR_CFIG_A", 0x10 },
816 { "RBR_CFIG_B", 0x18 },
817 { "RBR_KICK", 0x20 },
818 { "RBR_STAT", 0x28 },
819 { "RBR_HDH", 0x30 },
820 { "RBR_HDL", 0x38 },
821 { "RCRCFIG_A", 0x40 },
822 { "RCRCFIG_B", 0x48 },
823 { "RCRSTAT_A", 0x50 },
824 { "RCRSTAT_B", 0x58 },
825 { "RCRSTAT_C", 0x60 },
826 { "RX_DMA_ENT_MSK", 0x68 },
827 { "RX_DMA_CTL_STAT", 0x70 },
828 { "RCR_FLSH", 0x78 },
829 { "RXMISC", 0x90 },
830 { "RX_DMA_CTL_STAT_DBG", 0x98 },
831 { 0, -1 }
832 };
833
834 static dmc_reg_name_t tx_names[] = {
835 { "Tx_RNG_CFIG", 0 },
836 { "Tx_RNG_HDL", 0x10 },
837 { "Tx_RNG_KICK", 0x18 },
838 { "Tx_ENT_MASK", 0x20 },
839 { "Tx_CS", 0x28 },
840 { "TxDMA_MBH", 0x30 },
841 { "TxDMA_MBL", 0x38 },
842 { "TxDMA_PRE_ST", 0x40 },
843 { "Tx_RNG_ERR_LOGH", 0x48 },
844 { "Tx_RNG_ERR_LOGL", 0x50 },
845 { "TDMC_INTR_DBG", 0x60 },
846 { "Tx_CS_DBG", 0x68 },
847 { 0, -1 }
848 };
849
850 /*
851 * nxge_xx2str
852 *
853 * Translate a register address into a string.
854 *
855 * Arguments:
856 * offset The address of the register to translate.
857 *
858 * Notes:
859  * These are developer-only functions.
860 *
861 * Context:
862 * Any domain
863 */
864 const char *
865 nxge_rx2str(
866 int offset)
867 {
868 dmc_reg_name_t *reg = &rx_names[0];
869
870 offset &= DMA_CSR_MASK;
871
872 while (reg->name) {
873 if (offset == reg->offset)
874 return (reg->name);
875 reg++;
876 }
877
878 return (0);
879 }
880
881 const char *
882 nxge_tx2str(
883 int offset)
884 {
885 dmc_reg_name_t *reg = &tx_names[0];
886
887 offset &= DMA_CSR_MASK;
888
889 while (reg->name) {
890 if (offset == reg->offset)
891 return (reg->name);
892 reg++;
893 }
894
895 return (0);
896 }
897
898 /*
899 * nxge_ddi_perror
900 *
901 * Map a DDI error number to a string.
902 *
903 * Arguments:
904 * ddi_error The DDI error number to map.
905 *
906 * Notes:
907 *
908 * Context:
909 * Any domain
910 */
911 const char *
912 nxge_ddi_perror(
913 int ddi_error)
914 {
915 switch (ddi_error) {
916 case DDI_SUCCESS:
917 return ("DDI_SUCCESS");
918 case DDI_FAILURE:
919 return ("DDI_FAILURE");
920 case DDI_NOT_WELL_FORMED:
921 return ("DDI_NOT_WELL_FORMED");
922 case DDI_EAGAIN:
923 return ("DDI_EAGAIN");
924 case DDI_EINVAL:
925 return ("DDI_EINVAL");
926 case DDI_ENOTSUP:
927 return ("DDI_ENOTSUP");
928 case DDI_EPENDING:
929 return ("DDI_EPENDING");
930 case DDI_ENOMEM:
931 return ("DDI_ENOMEM");
932 case DDI_EBUSY:
933 return ("DDI_EBUSY");
934 case DDI_ETRANSPORT:
935 return ("DDI_ETRANSPORT");
936 case DDI_ECONTEXT:
937 return ("DDI_ECONTEXT");
938 default:
939 return ("Unknown error");
940 }
941 }
942
943 /*
944 * ---------------------------------------------------------------------
945 * These are Sun4v HIO function definitions
946 * ---------------------------------------------------------------------
947 */
948
949 #if defined(sun4v)
950
951 /*
952 * Local prototypes
953 */
954 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
955 static void nxge_hio_unshare(nxge_hio_vr_t *);
956
957 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
958 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);
959
960 static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
961 static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
962 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
963 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
964 mac_ring_type_t, int);
965
966 /*
967 * nxge_hio_init
968 *
969 * Initialize the HIO module of the NXGE driver.
970 *
971 * Arguments:
972 * nxge
973 *
974 * Notes:
975 *
976 * Context:
977 * Any domain
978 */
979 int
980 nxge_hio_init(nxge_t *nxge)
981 {
982 nxge_hio_data_t *nhd;
983 int i, region;
984
985 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
986 if (nhd == 0) {
987 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
988 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
989 if (isLDOMguest(nxge))
990 nhd->type = NXGE_HIO_TYPE_GUEST;
991 else
992 nhd->type = NXGE_HIO_TYPE_SERVICE;
993 nxge->nxge_hw_p->hio = (uintptr_t)nhd;
994 }
995
996 if ((nxge->environs == SOLARIS_DOMAIN) &&
997 (nxge->niu_type == N2_NIU)) {
998 if (nxge->niu_hsvc_available == B_TRUE) {
999 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
1000 /*
1001 * Versions supported now are:
1002 * - major number >= 1 (NIU_MAJOR_VER).
1003 */
1004 if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
1005 (niu_hsvc->hsvc_major == 1 &&
1006 niu_hsvc->hsvc_minor == 1)) {
1007 nxge->environs = SOLARIS_SERVICE_DOMAIN;
1008 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1009 "nxge_hio_init: hypervisor services "
1010 "version %d.%d",
1011 niu_hsvc->hsvc_major,
1012 niu_hsvc->hsvc_minor));
1013 }
1014 }
1015 }
1016
1017 /*
1018 * Initialize share and ring group structures.
1019 */
1020 for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
1021 nxge->tx_hio_groups[i].ghandle = NULL;
1022 nxge->tx_hio_groups[i].nxgep = nxge;
1023 nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
1024 nxge->tx_hio_groups[i].gindex = 0;
1025 nxge->tx_hio_groups[i].sindex = 0;
1026 }
1027
1028 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
1029 nxge->rx_hio_groups[i].ghandle = NULL;
1030 nxge->rx_hio_groups[i].nxgep = nxge;
1031 nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
1032 nxge->rx_hio_groups[i].gindex = 0;
1033 nxge->rx_hio_groups[i].sindex = 0;
1034 nxge->rx_hio_groups[i].started = B_FALSE;
1035 nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
1036 nxge->rx_hio_groups[i].rdctbl = -1;
1037 nxge->rx_hio_groups[i].n_mac_addrs = 0;
1038 }
1039
1040 if (!isLDOMs(nxge)) {
1041 nhd->hio.ldoms = B_FALSE;
1042 return (NXGE_OK);
1043 }
1044
1045 nhd->hio.ldoms = B_TRUE;
1046
1047 /*
1048 * Fill in what we can.
1049 */
1050 for (region = 0; region < NXGE_VR_SR_MAX; region++) {
1051 nhd->vr[region].region = region;
1052 }
1053 nhd->vrs = NXGE_VR_SR_MAX - 2;
1054
1055 /*
1056  * Initialize the share structures.
1057 */
1058 for (i = 0; i < NXGE_MAX_TDCS; i++)
1059 nxge->tdc_is_shared[i] = B_FALSE;
1060
1061 for (i = 0; i < NXGE_VR_SR_MAX; i++) {
1062 nxge->shares[i].nxgep = nxge;
1063 nxge->shares[i].index = 0;
1064 nxge->shares[i].vrp = NULL;
1065 nxge->shares[i].tmap = 0;
1066 nxge->shares[i].rmap = 0;
1067 nxge->shares[i].rxgroup = 0;
1068 nxge->shares[i].active = B_FALSE;
1069 }
1070
1071 /* Fill in the HV HIO function pointers. */
1072 nxge_hio_hv_init(nxge);
1073
1074 if (isLDOMservice(nxge)) {
1075 NXGE_DEBUG_MSG((nxge, HIO_CTL,
1076 "Hybrid IO-capable service domain"));
1077 return (NXGE_OK);
1078 }
1079
1080 return (0);
1081 }
1082 #endif /* defined(sun4v) */
1083
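/*
 * nxge_hio_group_mac_add
 *
 *	Add a MAC address to a ring group, configuring the group's
 *	RDC table if it has not been configured yet.
 *
 * Context:
 *	Service domain
 */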
1084 static int
1085 nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
1086 const uint8_t *macaddr)
1087 {
1088 int rv;
1089 nxge_rdc_grp_t *group;
1090
1091 mutex_enter(nxge->genlock);
1092
1093 /*
1094 * Initialize the NXGE RDC table data structure.
1095 */
1096 group = &nxge->pt_config.rdc_grps[g->rdctbl];
1097 if (!group->flag) {
1098 group->port = NXGE_GET_PORT_NUM(nxge->function_num);
1099 group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
1100 group->flag = B_TRUE; /* This group has been configured. */
1101 }
1102
1103 mutex_exit(nxge->genlock);
1104
1105 /*
1106 * Add the MAC address.
1107 */
1108 if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
1109 g->rdctbl, B_TRUE)) != 0) {
1110 return (rv);
1111 }
1112
1113 mutex_enter(nxge->genlock);
1114 g->n_mac_addrs++;
1115 mutex_exit(nxge->genlock);
1116 return (0);
1117 }
1118
1119 static int
1120 nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
1121 {
1122 p_nxge_t nxgep = (p_nxge_t)arg;
1123 struct ether_addr addrp;
1124
1125 bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
1126 if (nxge_set_mac_addr(nxgep, &addrp)) {
1127 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1128 "<== nxge_m_unicst: set unitcast failed"));
1129 return (EINVAL);
1130 }
1131
1132 nxgep->primary = B_TRUE;
1133
1134 return (0);
1135 }
1136
1137 /*ARGSUSED*/
1138 static int
1139 nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
1140 {
1141 nxgep->primary = B_FALSE;
1142 return (0);
1143 }
1144
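/*
 * nxge_hio_add_mac
 *
 *	MAC layer callback to add a MAC address to an Rx ring group.
 *	A group bound to a VR may carry only one MAC address.
 *
 * Context:
 *	Any domain (a no-op in a guest domain)
 */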
1145 static int
1146 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
1147 {
1148 nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1149 p_nxge_t nxge = group->nxgep;
1150 int rv;
1151 nxge_hio_vr_t *vr; /* The Virtualization Region */
1152
1153 ASSERT(group->type == MAC_RING_TYPE_RX);
1154 ASSERT(group->nxgep != NULL);
1155
1156 if (isLDOMguest(group->nxgep))
1157 return (0);
1158
1159 mutex_enter(nxge->genlock);
1160
1161 if (!nxge->primary && group->port_default_grp) {
1162 rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
1163 mutex_exit(nxge->genlock);
1164 return (rv);
1165 }
1166
1167 /*
1168 * If the group is associated with a VR, then only one
1169 * address may be assigned to the group.
1170 */
1171 vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
1172 if ((vr != NULL) && (group->n_mac_addrs)) {
1173 mutex_exit(nxge->genlock);
1174 return (ENOSPC);
1175 }
1176
1177 mutex_exit(nxge->genlock);
1178
1179 /*
1180 * Program the mac address for the group.
1181 */
1182 if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
1183 return (rv);
1184 }
1185
1186 return (0);
1187 }
1188
1189 static int
1190 find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
1191 {
1192 int i;
1193 for (i = 0; i <= mmac_info->num_mmac; i++) {
1194 if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
1195 ETHERADDRL) == 0) {
1196 return (i);
1197 }
1198 }
1199 return (-1);
1200 }
1201
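/*
 * nxge_hio_rem_mac
 *
 *	MAC layer callback to remove a MAC address from an Rx ring group.
 *
 * Context:
 *	Any domain (a no-op in a guest domain)
 */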
1202 /* ARGSUSED */
1203 static int
1204 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
1205 {
1206 nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
1207 struct ether_addr addrp;
1208 p_nxge_t nxge = group->nxgep;
1209 nxge_mmac_t *mmac_info;
1210 int rv, slot;
1211
1212 ASSERT(group->type == MAC_RING_TYPE_RX);
1213 ASSERT(group->nxgep != NULL);
1214
1215 if (isLDOMguest(group->nxgep))
1216 return (0);
1217
1218 mutex_enter(nxge->genlock);
1219
1220 mmac_info = &nxge->nxge_mmac_info;
1221 slot = find_mac_slot(mmac_info, mac_addr);
1222 if (slot < 0) {
1223 if (group->port_default_grp && nxge->primary) {
1224 bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
1225 if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
1226 rv = nxge_hio_clear_unicst(nxge, mac_addr);
1227 mutex_exit(nxge->genlock);
1228 return (rv);
1229 } else {
1230 mutex_exit(nxge->genlock);
1231 return (EINVAL);
1232 }
1233 } else {
1234 mutex_exit(nxge->genlock);
1235 return (EINVAL);
1236 }
1237 }
1238
1239 mutex_exit(nxge->genlock);
1240
1241 /*
1242 * Remove the mac address for the group
1243 */
1244 if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
1245 return (rv);
1246 }
1247
1248 mutex_enter(nxge->genlock);
1249 group->n_mac_addrs--;
1250 mutex_exit(nxge->genlock);
1251
1252 return (0);
1253 }
1254
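/*
 * nxge_hio_group_start
 *
 *	MAC layer callback to start an Rx ring group: bind an RDC table
 *	for the group (except for group 0, which was bound at attach
 *	time) and initialize it.
 *
 * Context:
 *	Any domain
 */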
1255 static int
1256 nxge_hio_group_start(mac_group_driver_t gdriver)
1257 {
1258 nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1259 nxge_rdc_grp_t *rdc_grp_p;
1260 int rdctbl;
1261 int dev_gindex;
1262
1263 ASSERT(group->type == MAC_RING_TYPE_RX);
1264 ASSERT(group->nxgep != NULL);
1265
1266 ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
1267 if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
1268 return (ENXIO);
1269
1270 mutex_enter(group->nxgep->genlock);
1271 if (isLDOMguest(group->nxgep))
1272 goto nxge_hio_group_start_exit;
1273
1274 dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1275 group->gindex;
1276 rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];
1277
1278 /*
1279 * Get an rdc table for this group.
1280 * Group ID is given by the caller, and that's the group it needs
1281 * to bind to. The default group is already bound when the driver
1282 * was attached.
1283 *
1284  * For Group 0, its RDC table was allocated at attach time;
1285 * no need to allocate a new table.
1286 */
1287 if (group->gindex != 0) {
1288 rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
1289 dev_gindex, B_TRUE);
1290 if (rdctbl < 0) {
1291 mutex_exit(group->nxgep->genlock);
1292 return (rdctbl);
1293 }
1294 } else {
1295 rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
1296 }
1297
1298 group->rdctbl = rdctbl;
1299
1300 (void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);
1301
1302 nxge_hio_group_start_exit:
1303 group->started = B_TRUE;
1304 mutex_exit(group->nxgep->genlock);
1305 return (0);
1306 }
1307
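/*
 * nxge_hio_group_stop
 *
 *	MAC layer callback to stop an Rx ring group and unbind its
 *	RDC table (except for group 0).
 *
 * Context:
 *	Any domain
 */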
1308 static void
1309 nxge_hio_group_stop(mac_group_driver_t gdriver)
1310 {
1311 nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
1312
1313 ASSERT(group->type == MAC_RING_TYPE_RX);
1314
1315 mutex_enter(group->nxgep->genlock);
1316 group->started = B_FALSE;
1317
1318 if (isLDOMguest(group->nxgep))
1319 goto nxge_hio_group_stop_exit;
1320
1321 /*
1322 * Unbind the RDC table previously bound for this group.
1323 *
1324 * Since RDC table for group 0 was allocated at attach
1325 * time, no need to unbind the table here.
1326 */
1327 if (group->gindex != 0)
1328 (void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);
1329
1330 nxge_hio_group_stop_exit:
1331 mutex_exit(group->nxgep->genlock);
1332 }
1333
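/*
 * nxge_hio_group_get
 *
 *	MAC layer callback to fill in the group information (driver
 *	handle, start/stop and MAC address entry points) for a Tx or
 *	Rx ring group.
 *
 * Context:
 *	Any domain
 */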
1334 /* ARGSUSED */
1335 void
1336 nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
1337 mac_group_info_t *infop, mac_group_handle_t ghdl)
1338 {
1339 p_nxge_t nxgep = (p_nxge_t)arg;
1340 nxge_ring_group_t *group;
1341 int dev_gindex;
1342
1343 switch (type) {
1344 case MAC_RING_TYPE_RX:
1345 group = &nxgep->rx_hio_groups[groupid];
1346 group->nxgep = nxgep;
1347 group->ghandle = ghdl;
1348 group->gindex = groupid;
1349 group->sindex = 0; /* not yet bound to a share */
1350
1351 if (!isLDOMguest(nxgep)) {
1352 dev_gindex =
1353 nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
1354 groupid;
1355
1356 if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
1357 dev_gindex)
1358 group->port_default_grp = B_TRUE;
1359
1360 infop->mgi_count =
1361 nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
1362 } else {
1363 infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
1364 }
1365
1366 infop->mgi_driver = (mac_group_driver_t)group;
1367 infop->mgi_start = nxge_hio_group_start;
1368 infop->mgi_stop = nxge_hio_group_stop;
1369 infop->mgi_addmac = nxge_hio_add_mac;
1370 infop->mgi_remmac = nxge_hio_rem_mac;
1371 break;
1372
1373 case MAC_RING_TYPE_TX:
1374 /*
1375 * 'groupid' for TX should be incremented by one since
1376 * the default group (groupid 0) is not known by the MAC layer
1377 */
1378 group = &nxgep->tx_hio_groups[groupid + 1];
1379 group->nxgep = nxgep;
1380 group->ghandle = ghdl;
1381 group->gindex = groupid + 1;
1382 group->sindex = 0; /* not yet bound to a share */
1383
1384 infop->mgi_driver = (mac_group_driver_t)group;
1385 infop->mgi_start = NULL;
1386 infop->mgi_stop = NULL;
1387 infop->mgi_addmac = NULL; /* not needed */
1388 infop->mgi_remmac = NULL; /* not needed */
1389 /* no rings associated with group initially */
1390 infop->mgi_count = 0;
1391 break;
1392 }
1393 }
1394
1395 #if defined(sun4v)
1396
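/*
 * nxge_hio_share_assign
 *
 *	Ask the Hypervisor to set up a VR and to assign a slot in it
 *	for each shared TDC and RDC.  The chosen slots are reported
 *	back through <tmap> and <rmap>.
 *
 * Context:
 *	Service domain
 */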
1397 int
1398 nxge_hio_share_assign(
1399 nxge_t *nxge,
1400 uint64_t cookie,
1401 res_map_t *tmap,
1402 res_map_t *rmap,
1403 nxge_hio_vr_t *vr)
1404 {
1405 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1406 uint64_t slot, hv_rv;
1407 nxge_hio_dc_t *dc;
1408 nxhv_vr_fp_t *fp;
1409 int i;
1410 uint64_t major;
1411
1412 /*
1413 * Ask the Hypervisor to set up the VR for us
1414 */
1415 fp = &nhd->hio.vr;
1416 major = nxge->niu_hsvc.hsvc_major;
1417 switch (major) {
1418 case NIU_MAJOR_VER: /* 1 */
1419 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
1420 NXGE_ERROR_MSG((nxge, HIO_CTL,
1421 "nxge_hio_share_assign: major %d "
1422 "vr->assign() returned %d", major, hv_rv));
1423 nxge_hio_unshare(vr);
1424 return (-EIO);
1425 }
1426
1427 break;
1428
1429 case NIU_MAJOR_VER_2: /* 2 */
1430 default:
1431 if ((hv_rv = (*fp->cfgh_assign)
1432 (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
1433 NXGE_ERROR_MSG((nxge, HIO_CTL,
1434 "nxge_hio_share_assign: major %d "
1435 "vr->assign() returned %d", major, hv_rv));
1436 nxge_hio_unshare(vr);
1437 return (-EIO);
1438 }
1439
1440 break;
1441 }
1442
1443 NXGE_DEBUG_MSG((nxge, HIO_CTL,
1444 "nxge_hio_share_assign: major %d "
1445 "vr->assign() success", major));
1446
1447 /*
1448 * For each shared TDC, ask the HV to find us an empty slot.
1449 */
1450 dc = vr->tx_group.dc;
1451 for (i = 0; i < NXGE_MAX_TDCS; i++) {
1452 nxhv_dc_fp_t *tx = &nhd->hio.tx;
1453 while (dc) {
1454 hv_rv = (*tx->assign)
1455 (vr->cookie, dc->channel, &slot);
1456 if (hv_rv != 0) {
1457 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1458 "nxge_hio_share_assign: "
1459 "tx->assign(%x, %d) failed: %ld",
1460 vr->cookie, dc->channel, hv_rv));
1461 return (-EIO);
1462 }
1463
1464 dc->cookie = vr->cookie;
1465 dc->page = (vp_channel_t)slot;
1466
1467 /* Inform the caller about the slot chosen. */
1468 (*tmap) |= 1 << slot;
1469
1470 dc = dc->next;
1471 }
1472 }
1473
1474 /*
1475 * For each shared RDC, ask the HV to find us an empty slot.
1476 */
1477 dc = vr->rx_group.dc;
1478 for (i = 0; i < NXGE_MAX_RDCS; i++) {
1479 nxhv_dc_fp_t *rx = &nhd->hio.rx;
1480 while (dc) {
1481 hv_rv = (*rx->assign)
1482 (vr->cookie, dc->channel, &slot);
1483 if (hv_rv != 0) {
1484 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1485 "nxge_hio_share_assign: "
1486 "rx->assign(%x, %d) failed: %ld",
1487 vr->cookie, dc->channel, hv_rv));
1488 return (-EIO);
1489 }
1490
1491 dc->cookie = vr->cookie;
1492 dc->page = (vp_channel_t)slot;
1493
1494 /* Inform the caller about the slot chosen. */
1495 (*rmap) |= 1 << slot;
1496
1497 dc = dc->next;
1498 }
1499 }
1500
1501 return (0);
1502 }
1503
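/*
 * nxge_hio_share_unassign
 *
 *	Ask the Hypervisor to unassign the DMA channels of a VR, and
 *	then the VR itself.
 *
 * Context:
 *	Service domain
 */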
1504 void
1505 nxge_hio_share_unassign(
1506 nxge_hio_vr_t *vr)
1507 {
1508 nxge_t *nxge = (nxge_t *)vr->nxge;
1509 nxge_hio_data_t *nhd;
1510 nxge_hio_dc_t *dc;
1511 nxhv_vr_fp_t *fp;
1512 uint64_t hv_rv;
1513
1514 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1515
1516 dc = vr->tx_group.dc;
1517 while (dc) {
1518 nxhv_dc_fp_t *tx = &nhd->hio.tx;
1519 hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1520 if (hv_rv != 0) {
1521 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1522 "nxge_hio_share_unassign: "
1523 "tx->unassign(%x, %d) failed: %ld",
1524 vr->cookie, dc->page, hv_rv));
1525 }
1526 dc = dc->next;
1527 }
1528
1529 dc = vr->rx_group.dc;
1530 while (dc) {
1531 nxhv_dc_fp_t *rx = &nhd->hio.rx;
1532 hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1533 if (hv_rv != 0) {
1534 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1535 "nxge_hio_share_unassign: "
1536 "rx->unassign(%x, %d) failed: %ld",
1537 vr->cookie, dc->page, hv_rv));
1538 }
1539 dc = dc->next;
1540 }
1541
1542 fp = &nhd->hio.vr;
1543 if (fp->unassign) {
1544 hv_rv = (*fp->unassign)(vr->cookie);
1545 if (hv_rv != 0) {
1546 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1547 "nxge_hio_share_unassign: "
1548 "vr->assign(%x) failed: %ld",
1549 vr->cookie, hv_rv));
1550 }
1551 }
1552 }
1553
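/*
 * nxge_hio_share_alloc
 *
 *	MAC layer callback to allocate a share: find an unused VR and
 *	return a handle for it.
 *
 * Context:
 *	Service domain
 */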
1554 int
1555 nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1556 {
1557 p_nxge_t nxge = (p_nxge_t)arg;
1558 nxge_share_handle_t *shp;
1559 nxge_hio_vr_t *vr; /* The Virtualization Region */
1560 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1561
1562 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1563
1564 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
1565 nhd->hio.rx.assign == 0) {
1566 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
1567 return (EIO);
1568 }
1569
1570 /*
1571 * Get a VR.
1572 */
1573 if ((vr = nxge_hio_vr_share(nxge)) == 0)
1574 return (EAGAIN);
1575
1576 shp = &nxge->shares[vr->region];
1577 shp->nxgep = nxge;
1578 shp->index = vr->region;
1579 shp->vrp = (void *)vr;
1580 shp->tmap = shp->rmap = 0; /* to be assigned by ms_sbind */
1581 shp->rxgroup = 0; /* to be assigned by ms_sadd */
1582 shp->active = B_FALSE; /* not bound yet */
1583
1584 *shandle = (mac_share_handle_t)shp;
1585
1586 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
1587 return (0);
1588 }
1589
1590
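/*
 * nxge_hio_share_free
 *
 *	MAC layer callback to free a share and release its VR.
 *
 * Context:
 *	Service domain
 */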
1591 void
1592 nxge_hio_share_free(mac_share_handle_t shandle)
1593 {
1594 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1595 nxge_hio_vr_t *vr;
1596
1597 /*
1598 * Clear internal handle state.
1599 */
1600 vr = shp->vrp;
1601 shp->vrp = (void *)NULL;
1602 shp->index = 0;
1603 shp->tmap = 0;
1604 shp->rmap = 0;
1605 shp->rxgroup = 0;
1606 shp->active = B_FALSE;
1607
1608 /*
1609 * Free VR resource.
1610 */
1611 nxge_hio_unshare(vr);
1612 }
1613
1614
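/*
 * nxge_hio_share_query
 *
 *	MAC layer callback to report the rings (two per VR) that
 *	belong to a share.
 *
 * Context:
 *	Service domain
 */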
1615 void
1616 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
1617 mac_ring_handle_t *rings, uint_t *n_rings)
1618 {
1619 nxge_t *nxge;
1620 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1621 nxge_ring_handle_t *rh;
1622 uint32_t offset;
1623
1624 nxge = shp->nxgep;
1625
1626 switch (type) {
1627 case MAC_RING_TYPE_RX:
1628 rh = nxge->rx_ring_handles;
1629 offset = nxge->pt_config.hw_config.start_rdc;
1630 break;
1631
1632 case MAC_RING_TYPE_TX:
1633 rh = nxge->tx_ring_handles;
1634 offset = nxge->pt_config.hw_config.tdc.start;
1635 break;
1636 }
1637
1638 /*
1639 * In version 1.0, we may only give a VR 2 RDCs/TDCs. Not only that,
1640 * but the HV has statically assigned the channels like so:
1641 * VR0: RDC0 & RDC1
1642 * VR1: RDC2 & RDC3, etc.
1643 * The TDCs are assigned in exactly the same way.
1644 */
1645 if (rings != NULL) {
1646 rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1647 rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1648 }
1649 if (n_rings != NULL) {
1650 *n_rings = 2;
1651 }
1652 }
1653
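/*
 * nxge_hio_share_add_group
 *
 *	MAC layer callback to add a ring group to a share, after
 *	verifying that the group's rings are the ones statically
 *	assigned to the share's VR.
 *
 * Context:
 *	Service domain
 */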
1654 int
1655 nxge_hio_share_add_group(mac_share_handle_t shandle,
1656 mac_group_driver_t ghandle)
1657 {
1658 nxge_t *nxge;
1659 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1660 nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle;
1661 nxge_hio_vr_t *vr; /* The Virtualization Region */
1662 nxge_grp_t *group;
1663 int i;
1664
1665 if (rg->sindex != 0) {
1666 /* the group is already bound to a share */
1667 return (EALREADY);
1668 }
1669
1670 /*
1671  * Adding group 0 (the default group) to a share is not allowed.
1673 */
1674 ASSERT(rg->gindex != 0);
1675
1676 nxge = rg->nxgep;
1677 vr = shp->vrp;
1678
1679 switch (rg->type) {
1680 case MAC_RING_TYPE_RX:
1681 /*
1682 * Make sure that the group has the right rings associated
1683 * for the share. In version 1.0, we may only give a VR
1684 * 2 RDCs. Not only that, but the HV has statically
1685 * assigned the channels like so:
1686 * VR0: RDC0 & RDC1
1687 * VR1: RDC2 & RDC3, etc.
1688 */
1689 group = nxge->rx_set.group[rg->gindex];
1690
1691 if (group->count > 2) {
1692 /* a share can have at most 2 rings */
1693 return (EINVAL);
1694 }
1695
1696 for (i = 0; i < NXGE_MAX_RDCS; i++) {
1697 if (group->map & (1 << i)) {
1698 if ((i != shp->index * 2) &&
1699 (i != (shp->index * 2 + 1))) {
1700 /*
1701  * An attempt was made to bind a group with
1702  * invalid rings to this share.
1703 */
1704 return (EINVAL);
1705 }
1706 }
1707 }
1708
1709 rg->sindex = vr->region;
1710 vr->rdc_tbl = rg->rdctbl;
1711 shp->rxgroup = vr->rdc_tbl;
1712 break;
1713
1714 case MAC_RING_TYPE_TX:
1715 /*
1716 * Make sure that the group has the right rings associated
1717 * for the share. In version 1.0, we may only give a VR
1718 * 2 TDCs. Not only that, but the HV has statically
1719 * assigned the channels like so:
1720 * VR0: TDC0 & TDC1
1721 * VR1: TDC2 & TDC3, etc.
1722 */
1723 group = nxge->tx_set.group[rg->gindex];
1724
1725 if (group->count > 2) {
1726 /* a share can have at most 2 rings */
1727 return (EINVAL);
1728 }
1729
1730 for (i = 0; i < NXGE_MAX_TDCS; i++) {
1731 if (group->map & (1 << i)) {
1732 if ((i != shp->index * 2) &&
1733 (i != (shp->index * 2 + 1))) {
1734 /*
1735  * An attempt was made to bind a group with
1736  * invalid rings to this share.
1737 */
1738 return (EINVAL);
1739 }
1740 }
1741 }
1742
1743 vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1744 rg->gindex;
1745 rg->sindex = vr->region;
1746 break;
1747 }
1748 return (0);
1749 }
1750
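/*
 * nxge_hio_share_rem_group
 *
 *	MAC layer callback to remove a ring group from a share.
 *
 * Context:
 *	Service domain
 */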
1751 int
1752 nxge_hio_share_rem_group(mac_share_handle_t shandle,
1753 mac_group_driver_t ghandle)
1754 {
1755 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1756 nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle;
1757 nxge_hio_vr_t *vr; /* The Virtualization Region */
1758 int rv = 0;
1759
1760 vr = shp->vrp;
1761
1762 switch (group->type) {
1763 case MAC_RING_TYPE_RX:
1764 group->sindex = 0;
1765 vr->rdc_tbl = 0;
1766 shp->rxgroup = 0;
1767 break;
1768
1769 case MAC_RING_TYPE_TX:
1770 group->sindex = 0;
1771 vr->tdc_tbl = 0;
1772 break;
1773 }
1774
1775 return (rv);
1776 }
1777
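/*
 * nxge_hio_share_bind
 *
 *	MAC layer callback to bind a share: add the DMA resources of
 *	the VR's groups and ask the Hypervisor to assign the VR and
 *	its channels.
 *
 * Context:
 *	Service domain
 */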
1778 int
1779 nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1780 uint64_t *rcookie)
1781 {
1782 nxge_t *nxge;
1783 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1784 nxge_hio_vr_t *vr;
1785 uint64_t rmap, tmap, hv_rmap, hv_tmap;
1786 int rv;
1787
1788 ASSERT(shp != NULL);
1789 ASSERT(shp->nxgep != NULL);
1790 ASSERT(shp->vrp != NULL);
1791
1792 nxge = shp->nxgep;
1793 vr = (nxge_hio_vr_t *)shp->vrp;
1794
1795 /*
1796 * Add resources to the share.
1797 * For each DMA channel associated with the VR, bind its resources
1798 * to the VR.
1799 */
1800 tmap = 0;
1801 rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1802 if (rv != 0) {
1803 return (rv);
1804 }
1805
1806 rmap = 0;
1807 rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1808 if (rv != 0) {
1809 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1810 return (rv);
1811 }
1812
1813 /*
1814 * Ask the Hypervisor to set up the VR and allocate slots for
1815  * each ring associated with the VR.
1816 */
1817 hv_tmap = hv_rmap = 0;
1818 if ((rv = nxge_hio_share_assign(nxge, cookie,
1819 &hv_tmap, &hv_rmap, vr))) {
1820 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1821 nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1822 return (rv);
1823 }
1824
1825 shp->active = B_TRUE;
1826 shp->tmap = hv_tmap;
1827 shp->rmap = hv_rmap;
1828
1829 /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1830 *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1831
1832 return (0);
1833 }
1834
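/*
 * nxge_hio_share_unbind
 *
 *	MAC layer callback to unbind a share: unassign the VR and
 *	free its Tx and Rx ring resources.
 *
 * Context:
 *	Service domain
 */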
1835 void
1836 nxge_hio_share_unbind(mac_share_handle_t shandle)
1837 {
1838 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1839
1840 /*
1841 * First, unassign the VR (take it back),
1842 * so we can enable interrupts again.
1843 */
1844 nxge_hio_share_unassign(shp->vrp);
1845
1846 /*
1847 * Free Ring Resources for TX and RX
1848 */
1849 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
1850 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1851 }
1852
1853
1854 /*
1855 * nxge_hio_vr_share
1856 *
1857 * Find an unused Virtualization Region (VR).
1858 *
1859 * Arguments:
1860 * nxge
1861 *
1862 * Notes:
1863 *
1864 * Context:
1865 * Service domain
1866 */
1867 nxge_hio_vr_t *
1868 nxge_hio_vr_share(
1869 nxge_t *nxge)
1870 {
1871 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1872 nxge_hio_vr_t *vr;
1873
1874 int first, limit, region;
1875
1876 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1877
1878 MUTEX_ENTER(&nhd->lock);
1879
1880 if (nhd->vrs == 0) {
1881 MUTEX_EXIT(&nhd->lock);
1882 return (0);
1883 }
1884
1885 /* Find an empty virtual region (VR). */
1886 if (nxge->function_num == 0) {
1887 // FUNC0_VIR0 'belongs' to NIU port 0.
1888 first = FUNC0_VIR1;
1889 limit = FUNC2_VIR0;
1890 } else if (nxge->function_num == 1) {
1891 // FUNC2_VIR0 'belongs' to NIU port 1.
1892 first = FUNC2_VIR1;
1893 limit = FUNC_VIR_MAX;
1894 } else {
1895 cmn_err(CE_WARN,
1896 "Shares not supported on function(%d) at this time.\n",
1897 nxge->function_num);
1898 }
1899
1900 for (region = first; region < limit; region++) {
1901 if (nhd->vr[region].nxge == 0)
1902 break;
1903 }
1904
1905 if (region == limit) {
1906 MUTEX_EXIT(&nhd->lock);
1907 return (0);
1908 }
1909
1910 vr = &nhd->vr[region];
1911 vr->nxge = (uintptr_t)nxge;
1912 vr->region = (uintptr_t)region;
1913
1914 nhd->vrs--;
1915
1916 MUTEX_EXIT(&nhd->lock);
1917
1918 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1919
1920 return (vr);
1921 }
1922
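/*
 * nxge_hio_unshare
 *
 *	Return a VR to the free pool.
 *
 * Context:
 *	Service domain
 */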
1923 void
1924 nxge_hio_unshare(
1925 nxge_hio_vr_t *vr)
1926 {
1927 nxge_t *nxge = (nxge_t *)vr->nxge;
1928 nxge_hio_data_t *nhd;
1929
1930 vr_region_t region;
1931
1932 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1933
1934 if (!nxge) {
1935 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1936 "vr->nxge is NULL"));
1937 return;
1938 }
1939
1940 /*
1941 * This function is no longer called, but I will keep it
1942 * here in case we want to revisit this topic in the future.
1943 *
1944 * nxge_hio_hostinfo_uninit(nxge, vr);
1945 */
1946
1947 /*
1948 * XXX: This is done by ms_sremove?
1949 * (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1950 */
1951
1952 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1953
1954 MUTEX_ENTER(&nhd->lock);
1955
1956 region = vr->region;
1957 (void) memset(vr, 0, sizeof (*vr));
1958 vr->region = region;
1959
1960 nhd->vrs++;
1961
1962 MUTEX_EXIT(&nhd->lock);
1963
1964 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1965 }
1966
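/*
 * nxge_hio_addres
 *
 *	Share all DMA channels of the group bound to a VR, recording
 *	the channels actually shared in <map>.
 *
 * Context:
 *	Service domain
 */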
1967 int
1968 nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
1969 {
1970 nxge_t *nxge;
1971 nxge_grp_t *group;
1972 int groupid;
1973 int i, rv = 0;
1974 int max_dcs;
1975
1976 ASSERT(vr != NULL);
1977 ASSERT(vr->nxge != NULL);
1978 nxge = (nxge_t *)vr->nxge;
1979
1980 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1981
1982 /*
1983 * For each ring associated with the group, add the resources
1984 * to the group and bind.
1985 */
1986 max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1987 if (type == MAC_RING_TYPE_TX) {
1988 /* set->group is an array of groups indexed by a port group id */
1989 groupid = vr->tdc_tbl -
1990 nxge->pt_config.hw_config.def_mac_txdma_grpid;
1991 group = nxge->tx_set.group[groupid];
1992 } else {
1993 /* set->group is an array of groups indexed by a port group id */
1994 groupid = vr->rdc_tbl -
1995 nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1996 group = nxge->rx_set.group[groupid];
1997 }
1998
1999 ASSERT(group != NULL);
2000
2001 if (group->map == 0) {
2002 NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
2003 "with this VR"));
2004 return (EINVAL);
2005 }
2006
2007 for (i = 0; i < max_dcs; i++) {
2008 if (group->map & (1 << i)) {
2009 if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
2010 if (*map == 0) /* Couldn't get even one DC. */
2011 return (-rv);
2012 else
2013 break;
2014 }
2015 *map |= (1 << i);
2016 }
2017 }
2018
2019 if ((*map == 0) || (rv != 0)) {
2020 NXGE_DEBUG_MSG((nxge, HIO_CTL,
2021 "<== nxge_hio_addres: rv(%x)", rv));
2022 return (EIO);
2023 }
2024
2025 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
2026 return (0);
2027 }
2028
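/*
 * nxge_hio_remres
 *
 *	Un-share the DMA channels of a VR and return them to the
 *	service domain.
 *
 * Context:
 *	Service domain
 */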
2029 /* ARGSUSED */
2030 void
2031 nxge_hio_remres(
2032 nxge_hio_vr_t *vr,
2033 mac_ring_type_t type,
2034 res_map_t res_map)
2035 {
2036 nxge_t *nxge = (nxge_t *)vr->nxge;
2037 nxge_grp_t *group;
2038
2039 if (!nxge) {
2040 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2041 "vr->nxge is NULL"));
2042 return;
2043 }
2044
2045 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
2046
2047 /*
2048 * For each ring bound to the group, remove the DMA resources
2049 * from the group and unbind.
2050 */
2051 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2052 while (group->dc) {
2053 nxge_hio_dc_t *dc = group->dc;
2054 NXGE_DC_RESET(res_map, dc->page);
2055 nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
2056 }
2057
2058 if (res_map) {
2059 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2060 "res_map %lx", res_map));
2061 }
2062
2063 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
2064 }
2065
2066 /*
2067 * nxge_hio_tdc_share
2068 *
2069 * Share an unused TDC channel.
2070 *
2071 * Arguments:
2072 * nxge
2073 *
2074 * Notes:
2075 *
2076 * A.7.3 Reconfigure Tx DMA channel
2077 * Disable TxDMA A.9.6.10
2078 * [Rebind TxDMA channel to Port A.9.6.7]
2079 *
2080  * We don't have to rebind the TDC to the port - it is already bound.
2081 *
2082 * Soft Reset TxDMA A.9.6.2
2083 *
2084 * This procedure will be executed by nxge_init_txdma_channel() in the
2085 * guest domain:
2086 *
2087 * Re-initialize TxDMA A.9.6.8
2088 * Reconfigure TxDMA
2089 * Enable TxDMA A.9.6.9
2090 *
2091 * Context:
2092 * Service domain
2093 */
2094 int
2095 nxge_hio_tdc_share(
2096 nxge_t *nxge,
2097 int channel)
2098 {
2099 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2100 nxge_grp_set_t *set = &nxge->tx_set;
2101 tx_ring_t *ring;
2102 int count;
2103
2104 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));
2105
2106 /*
2107 * Wait until this channel is idle.
2108 */
2109 ring = nxge->tx_rings->rings[channel];
2110 ASSERT(ring != NULL);
2111
2112 (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
2113 if (ring->tx_ring_busy) {
2114 /*
2115 * Wait for 30 seconds.
2116 */
2117 for (count = 30 * 1000; count; count--) {
2118 if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
2119 break;
2120 }
2121
2122 drv_usecwait(1000);
2123 }
2124
2125 if (count == 0) {
2126 (void) atomic_swap_32(&ring->tx_ring_offline,
2127 NXGE_TX_RING_ONLINE);
2128 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2129 "nxge_hio_tdc_share: "
2130 "Tx ring %d was always BUSY", channel));
2131 return (-EIO);
2132 }
2133 } else {
2134 (void) atomic_swap_32(&ring->tx_ring_offline,
2135 NXGE_TX_RING_OFFLINED);
2136 }
2137
2138 MUTEX_ENTER(&nhd->lock);
2139 nxge->tdc_is_shared[channel] = B_TRUE;
2140 MUTEX_EXIT(&nhd->lock);
2141
2142 if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
2143 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
2144 "Failed to remove interrupt for TxDMA channel %d",
2145 channel));
2146 return (-EINVAL);
2147 }
2148
2149 /* Disable TxDMA A.9.6.10 */
2150 (void) nxge_txdma_channel_disable(nxge, channel);
2151
2152 /* The SD is sharing this channel. */
2153 NXGE_DC_SET(set->shared.map, channel);
2154 set->shared.count++;
2155
2156 /* Soft Reset TxDMA A.9.6.2 */
2157 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
2158
2159 /*
2160 * Initialize the DC-specific FZC control registers.
2161 * -----------------------------------------------------
2162 */
2163 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
2164 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2165 "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
2166 return (-EIO);
2167 }
2168
2169 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));
2170
2171 return (0);
2172 }
2173
2174 /*
2175 * nxge_hio_rdc_share
2176 *
2177 * Share an unused RDC channel.
2178 *
2179 * Arguments:
2180 * nxge
2181 *
2182 * Notes:
2183 *
2184 * This is the latest version of the procedure to
2185 * Reconfigure an Rx DMA channel:
2186 *
2187 * A.6.3 Reconfigure Rx DMA channel
2188 * Stop RxMAC A.9.2.6
2189 * Drain IPP Port A.9.3.6
2190 * Stop and reset RxDMA A.9.5.3
2191 *
2192 * This procedure will be executed by nxge_init_rxdma_channel() in the
2193 * guest domain:
2194 *
2195 * Initialize RxDMA A.9.5.4
2196 * Reconfigure RxDMA
2197 * Enable RxDMA A.9.5.5
2198 *
2199 * We will do this here, since the RDC is a canalis non grata:
2200 * Enable RxMAC A.9.2.10
2201 *
2202 * Context:
2203 * Service domain
2204 */
2205 int
2206 nxge_hio_rdc_share(
2207 nxge_t *nxge,
2208 nxge_hio_vr_t *vr,
2209 int channel)
2210 {
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_rdc_grp_t *rdc_grp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

	/* Disable interrupts. */
	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to remove interrupt for RxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

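	/*
	 * The RxMAC and IPP feed every RDC, so the whole Rx datapath is
	 * quiesced while this one channel is stopped, reset, and handed
	 * over; the RxMAC is re-enabled further down.
	 */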
	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

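	/*
	 * Remove the channel from the service domain's RDC group.  The
	 * guest domain will run the initialize/reconfigure/enable steps
	 * (A.9.5.4, A.9.5.5) via nxge_init_rxdma_channel(), as described
	 * above.
	 */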
	/* Assert RST: RXDMA_CFIG1[30] = 1 */
	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

	/*
	 * The guest domain will reconfigure the RDC later.
	 *
	 * But in the meantime, we must re-enable the Rx MAC so
	 * that we can start receiving packets again on the
	 * remaining RDCs:
	 *
	 *	Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: Rx MAC still disabled"));
	}

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
		return (-EIO);
	}

	/*
	 * Update the RDC group.
	 */
	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
	NXGE_DC_SET(rdc_grp->map, channel);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));

	return (0);
}

/*
 * nxge_hio_dc_share
 *
 * Share a DMA channel with a guest domain.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that <channel> will belong to.
 * 	type	Tx or Rx.
 * 	channel	Channel to share
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_t *group;
	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* -------------------------------------------------- */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);

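	/*
	 * On failure, hand the channel straight back to the service
	 * domain: the unshare path undoes the shared map/count
	 * bookkeeping and re-adds the DMA channel and its interrupt.
	 */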
	if (slot < 0) {
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;
	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence = nhd->sequence++;
		group->active = B_TRUE;
	}

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 * Unshare a TDC.
 *
 * Arguments:
 * 	nxge
 * 	dev_grpid	The device (MAC) group id of the TDC group to
 * 			which the channel is returned.
 * 	channel		The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int dev_grpid,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group;
	int grpid;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

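	/*
	 * <dev_grpid> is a device-level (MAC) group id; subtracting the
	 * port's default TxDMA group id yields the index into this
	 * nxge's tx_set groups.
	 */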
	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid;
	group = set->group[grpid];

	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 * Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 * 	nxge
 * 	dev_grpid	The device (MAC) group id of the RDC group to
 * 			which the channel is returned.
 * 	channel		The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_rdc_unshare(
	nxge_t *nxge,
	int dev_grpid,
	int channel)
{
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_grp_t *group;
	int grpid;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	/* De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000) */
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
	group = set->group[grpid];

	/*
	 * Assert RST: RXDMA_CFIG1[30] = 1
	 *
	 * Initialize RxDMA	A.9.5.4
	 * Reconfigure RxDMA
	 * Enable RxDMA		A.9.5.5
	 */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
		/* Be sure to re-enable the RX MAC. */
		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		}
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to initialize RxDMA channel %d", channel));
		return;
	}

	/*
	 * Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
		    "RxDMA channel %d", channel));
	}

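	/*
	 * Restart any MAC-layer Rx ring bound to this channel so that
	 * the stack can resume receiving on it now that it is back
	 * under service-domain control.
	 */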
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		if (nxge->rx_ring_handles[i].channel == channel) {
			(void) nxge_rx_ring_start(
			    (mac_ring_driver_t)&nxge->rx_ring_handles[i],
			    nxge->rx_ring_handles[i].ring_gen_num);
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
}

/*
 * nxge_hio_dc_unshare
 *
 * Unshare (reuse) a DMA channel.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that <channel> belongs to.
 * 	type	Tx or Rx.
 * 	channel	The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == NULL) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_dc_unshare(%d) failed", channel));
		return;
	}

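	/* The channel no longer belongs to any VR; clear its guest binding. */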
	dc->vr = NULL;
	dc->cookie = 0;

	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}


/*
 * nxge_hio_rxdma_bind_intr():
 *
 * The guest domain driver needs to bind the interrupt group
 * and state to the rx_rcr_ring_t.
 */

int
nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel)
{
	nxge_hio_dc_t *dc;
	nxge_ldgv_t *control;
	nxge_ldg_t *group;
	nxge_ldv_t *device;

	/*
	 * Find the DMA channel.
	 */
	if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) {
		return (NXGE_ERROR);
	}

	/*
	 * Get the control structure.
	 */
	control = nxge->ldgvp;
	if (control == NULL) {
		return (NXGE_ERROR);
	}

	group = &control->ldgp[dc->ldg.vector];
	device = &control->ldvp[dc->ldg.ldsv];

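	/*
	 * Record the logical device group (interrupt vector) and logical
	 * device state in the ring, under its lock, so that the receive
	 * path can reference them.
	 */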
	MUTEX_ENTER(&ring->lock);
	ring->ldgp = group;
	ring->ldvp = device;
	MUTEX_EXIT(&ring->lock);

	return (NXGE_OK);
}
#endif	/* if defined(sun4v) */