/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define	NXGE_HIO_SHARE_MIN_CHANNELS	2
#define	NXGE_HIO_SHARE_MAX_CHANNELS	2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 *	Figure out if we are in a guest domain or not.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
void
nxge_get_environs(
	nxge_t *nxge)
{
	char *string;

	/*
	 * In the beginning, assume that we are running sans LDOMs/XEN.
	 */
	nxge->environs = SOLARIS_DOMAIN;

	/*
	 * Are we a hybrid I/O (HIO) guest domain driver?
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "niutype", &string)) == DDI_PROP_SUCCESS) {
		if (strcmp(string, "n2niu") == 0) {
			nxge->environs = SOLARIS_GUEST_DOMAIN;
			/* So we can allocate properly-aligned memory. */
			nxge->niu_type = N2_NIU;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "Hybrid IO-capable guest domain"));
		}
		ddi_prop_free(string);
	}
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 *	Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
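 *	It simply allocates the HIO data structure, if necessary, and
 *	records that LDoms/HIO is not in use (nhd->hio.ldoms = B_FALSE).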
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	ASSERT(nxge->nxge_hw_p->ndevs == 0);

	if (nhd != NULL) {
		MUTEX_DESTROY(&nhd->lock);
		KMEM_FREE(nhd, sizeof (*nhd));
		nxge->nxge_hw_p->hio = 0;
	}
}

/*
 * nxge_dci_map
 *
 *	Map a DMA channel index to a channel number.
 *
 * Arguments:
 * 	instance	The instance number of the driver.
 * 	type		The type of channel this is: Tx or Rx.
 * 	index		The index to convert to a channel number
 *
 * Notes:
 *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *	Any domain
 */
int
nxge_dci_map(
	nxge_t *nxge,
	vpc_type_t type,
	int index)
{
	nxge_grp_set_t *set;
	int dc;

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		break;
	}

	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
		if ((1 << dc) & set->owned.map) {
			if (index == 0)
				return (dc);
			else
				index--;
		}
	}

	return (-1);
}

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions.  That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_cleanup(p_nxge_t nxge)
 *
 *	Remove all outstanding groups.
 *
 * Arguments:
 *	nxge
 */
void
nxge_grp_cleanup(p_nxge_t nxge)
{
	nxge_grp_set_t *set;
	int i;

	MUTEX_ENTER(&nxge->group_lock);

	/*
	 * Find RX groups that need to be cleaned up.
	 */
	set = &nxge->rx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}

	/*
	 * Find TX groups that need to be cleaned up.
	 */
	set = &nxge->tx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);
}


/*
 * nxge_grp_add
 *
 *	Add a group to an instance of NXGE.
 *
 * Arguments:
 * 	nxge
 *	type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_grp_t *
nxge_grp_add(
	nxge_t *nxge,
	nxge_grp_type_t type)
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	int i;

	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
	group->nxge = nxge;

	MUTEX_ENTER(&nxge->group_lock);
	switch (type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	group->type = type;
	group->active = B_TRUE;
	group->sequence = set->sequence++;

	/* Find an empty slot for this logical group. */
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] == 0) {
			group->index = i;
			set->group[i] = group;
			NXGE_DC_SET(set->lg.map, i);
			set->lg.count++;
			break;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_add: %cgroup = %d.%d",
	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	return (group);
}

void
nxge_grp_remove(
	nxge_t *nxge,
	nxge_grp_t *group)	/* The group to remove. */
{
	nxge_grp_set_t *set;
	vpc_type_t type;

	MUTEX_ENTER(&nxge->group_lock);
	switch (group->type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	if (set->group[group->index] != group) {
		MUTEX_EXIT(&nxge->group_lock);
		return;
	}

	set->group[group->index] = 0;
	NXGE_DC_RESET(set->lg.map, group->index);
	set->lg.count--;

	/* While inside the mutex, deactivate <group>. */
	group->active = B_FALSE;

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_remove(%c.%d.%d) called",
	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	/* Now, remove any DCs which are still active. */
	switch (group->type) {
	default:
		type = VP_BOUND_TX;
		break;
	case NXGE_RECEIVE_GROUP:
	case EXT_RECEIVE_GROUP:
		type = VP_BOUND_RX;
	}

	while (group->dc) {
		nxge_grp_dc_remove(nxge, type, group->dc->channel);
	}

	KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 *	Add a DMA channel to a VR/Group.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to add.
 * Notes:
 *
 * Context:
 *	Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
	nxge_t *nxge,
	nxge_grp_t *group,	/* The group to add <channel> to. */
	vpc_type_t type,	/* Rx or Tx */
	int channel)		/* A physical/logical channel number */
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

	if (group == NULL)
		return (0);

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		if (channel >= NXGE_MAX_TDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: TDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		if (channel >= NXGE_MAX_RDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: RDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;

	default:
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
	    type == VP_BOUND_TX ? 't' : 'r',
	    nxge->mac.portnum, group->sequence, group->count, channel));

	MUTEX_ENTER(&nxge->group_lock);
	if (group->active != B_TRUE) {
		/* We may be in the process of removing this group. */
		MUTEX_EXIT(&nxge->group_lock);
		return (NXGE_ERROR);
	}
	MUTEX_EXIT(&nxge->group_lock);

	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
		return (NXGE_ERROR);
	}

	MUTEX_ENTER(&nhd->lock);

	if (dc->group) {
		MUTEX_EXIT(&nhd->lock);
		/* This channel is already in use! */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = group;

	if (isLDOMguest(nxge))
		(void) nxge_hio_ldsv_add(nxge, dc);

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		MUTEX_ENTER(&nhd->lock);
		(void) memset(dc, 0, sizeof (*dc));
		NXGE_DC_RESET(set->owned.map, channel);
		set->owned.count--;
		MUTEX_EXIT(&nhd->lock);
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	if (type == VP_BOUND_TX) {
		MUTEX_ENTER(&nhd->lock);
		nxge->tdc_is_shared[channel] = B_FALSE;
		MUTEX_EXIT(&nhd->lock);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
		goto nxge_grp_dc_remove_exit;

	if ((dc->group == NULL) && (dc->next == 0) &&
	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
		goto nxge_grp_dc_remove_exit;
	}

	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
	if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
		NXGE_DC_RESET(group->map, channel);
	}

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_remove(%d) failed", channel));
		goto nxge_grp_dc_remove_exit;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

nxge_grp_dc_remove_exit:
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 *	Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 *	Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(nxge_t *nxge, nxge_grp_t *group, int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if (group == NULL) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 *	Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 *	Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1", 0 },
	{ "RXDMA_CFIG2", 8 },
	{ "RBR_CFIG_A", 0x10 },
	{ "RBR_CFIG_B", 0x18 },
	{ "RBR_KICK", 0x20 },
	{ "RBR_STAT", 0x28 },
	{ "RBR_HDH", 0x30 },
	{ "RBR_HDL", 0x38 },
	{ "RCRCFIG_A", 0x40 },
	{ "RCRCFIG_B", 0x48 },
	{ "RCRSTAT_A", 0x50 },
	{ "RCRSTAT_B", 0x58 },
	{ "RCRSTAT_C", 0x60 },
	{ "RX_DMA_ENT_MSK", 0x68 },
	{ "RX_DMA_CTL_STAT", 0x70 },
	{ "RCR_FLSH", 0x78 },
	{ "RXMISC", 0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG", 0 },
	{ "Tx_RNG_HDL", 0x10 },
	{ "Tx_RNG_KICK", 0x18 },
	{ "Tx_ENT_MASK", 0x20 },
	{ "Tx_CS", 0x28 },
	{ "TxDMA_MBH", 0x30 },
	{ "TxDMA_MBL", 0x38 },
	{ "TxDMA_PRE_ST", 0x40 },
	{ "Tx_RNG_ERR_LOGH", 0x48 },
	{ "Tx_RNG_ERR_LOGL", 0x50 },
	{ "TDMC_INTR_DBG", 0x60 },
	{ "Tx_CS_DBG", 0x68 },
	{ 0, -1 }
};

/*
 * nxge_xx2str
 *
 *	Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
 *
 * Context:
 *	Any domain
 */
const char *
nxge_rx2str(
	int offset)
{
	dmc_reg_name_t *reg = &rx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

const char *
nxge_tx2str(
	int offset)
{
	dmc_reg_name_t *reg = &tx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

/*
 * nxge_ddi_perror
 *
 *	Map a DDI error number to a string.
 *
 * Arguments:
 * 	ddi_error	The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
const char *
nxge_ddi_perror(
	int ddi_error)
{
	switch (ddi_error) {
	case DDI_SUCCESS:
		return ("DDI_SUCCESS");
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("Unknown error");
	}
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 *	Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
			if (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "nxge_hio_init: hypervisor services "
			    "version %d.%d",
			    niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
		}
	}

	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	/* FUNC0_VIR0 & FUNC2_VIR0 belong to the two NIU ports themselves. */
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize tdc share state, shares and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = (void *)NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	} else {
		/*
		 * isLDOMguest(nxge) == B_TRUE
		 */
		nx_vio_fp_t *vio;
		nhd->type = NXGE_HIO_TYPE_GUEST;

		vio = &nhd->hio.vio;
		vio->__register = (vio_net_resource_reg_t)
		    modgetsymvalue("vio_net_resource_reg", 0);
		vio->unregister = (vio_net_resource_unreg_t)
		    modgetsymvalue("vio_net_resource_unreg", 0);

		if (vio->__register == 0 || vio->unregister == 0) {
			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
			return (NXGE_ERROR);
		}
	}

	return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int rv, sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Program the mac address for the group/share.
	 */
	if ((rv = nxge_hio_hostinfo_init(nxge, vr,
	    (ether_addr_t *)mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Remove the mac address for the group/share.
	 */
	nxge_hio_hostinfo_uninit(nxge, vr);

	return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;

	switch (type) {
	case MAC_RING_TYPE_RX:
		rxgroup = &nxgep->rx_hio_groups[group];
		rxgroup->gindex = group;

		infop->mrg_driver = (mac_group_driver_t)rxgroup;
		infop->mrg_start = NULL;
		infop->mrg_stop = NULL;
		infop->mrg_addmac = nxge_hio_add_mac;
		infop->mrg_remmac = nxge_hio_rem_mac;
		infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		break;

	case MAC_RING_TYPE_TX:
		break;
	}
}

int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
		NXGE_ERROR_MSG((nxge, HIO_CTL,
		    "nxge_hio_share_assign: "
		    "vr->assign() returned %d", hv_rv));
		nxge_hio_unshare(vr);
		return (-EIO);
	}

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	/*
	 * For each shared RDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->rx_group.dc;
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		while (dc) {
			hv_rv = (*rx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "rx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
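			/* (tmap/rmap are maps of HV slots, not channels.) */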
			(*rmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	return (0);
}

int
nxge_hio_share_unassign(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	uint64_t hv_rv;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	dc = vr->tx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "tx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	dc = vr->rx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "rx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	fp = &nhd->hio.vr;
	if (fp->unassign) {
		hv_rv = (*fp->unassign)(vr->cookie);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_share_unassign: "
			    "vr->unassign(%x) failed: %ld",
			    vr->cookie, hv_rv));
		}
	}

	return (0);
}

int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
    mac_share_handle_t *shandle)
{
	p_nxge_t nxge = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;
	nxge_share_handle_t *shp;

	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	uint64_t rmap, tmap;
	int rdctbl, rv;

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
	    nhd->hio.rx.assign == 0) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
		return (EIO);
	}

	/*
	 * Get a VR.
	 */
	if ((vr = nxge_hio_vr_share(nxge)) == 0)
		return (EAGAIN);

	/*
	 * Get an RDC group for us to use.
	 */
	if ((rdctbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
		nxge_hio_unshare(vr);
		return (EBUSY);
	}
	vr->rdc_tbl = (uint8_t)rdctbl;

	/*
	 * Add resources to the share.
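	 * Tx channels are added first; if the Rx additions or the HV
	 * assignment fail, the resources already added are released
	 * again before returning.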
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX,
	    NXGE_HIO_SHARE_MAX_CHANNELS);
	if (rv != 0) {
		nxge_hio_unshare(vr);
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX,
	    NXGE_HIO_SHARE_MAX_CHANNELS);
	if (rv != 0) {
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_unshare(vr);
		return (rv);
	}

	if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_unshare(vr);
		return (rv);
	}

	rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
	rxgroup->gindex = vr->rdc_tbl;
	rxgroup->sindex = vr->region;

	shp = &nxge->shares[vr->region];
	shp->index = vr->region;
	shp->vrp = (void *)vr;
	shp->tmap = tmap;
	shp->rmap = rmap;
	shp->rxgroup = vr->rdc_tbl;
	shp->active = B_TRUE;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	*shandle = (mac_share_handle_t)shp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
	return (0);
}

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	/*
	 * First, unassign the VR (take it back),
	 * so we can enable interrupts again.
	 */
	(void) nxge_hio_share_unassign(shp->vrp);

	/*
	 * Free Ring Resources for TX and RX
	 */
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

	/*
	 * Free VR resource.
	 */
	nxge_hio_unshare(shp->vrp);

	/*
	 * Clear internal handle state.
	 */
	shp->index = 0;
	shp->vrp = (void *)NULL;
	shp->tmap = 0;
	shp->rmap = 0;
	shp->rxgroup = 0;
	shp->active = B_FALSE;
}

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	switch (type) {
	case MAC_RING_TYPE_RX:
		*rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
		*rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
		*rmap = shp->rmap;
		*gnum = shp->rxgroup;
		break;

	case MAC_RING_TYPE_TX:
		*rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
		*rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
		*rmap = shp->tmap;
		*gnum = 0;
		break;
	}
}

/*
 * nxge_hio_vr_share
 *
 *	Find an unused Virtualization Region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
nxge_hio_vr_t *
nxge_hio_vr_share(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;

	int first, limit, region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

	MUTEX_ENTER(&nhd->lock);

	if (nhd->vrs == 0) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	/* Find an empty virtual region (VR). */
	if (nxge->function_num == 0) {
		// FUNC0_VIR0 'belongs' to NIU port 0.
		first = FUNC0_VIR1;
		limit = FUNC2_VIR0;
	} else if (nxge->function_num == 1) {
		// FUNC2_VIR0 'belongs' to NIU port 1.
		first = FUNC2_VIR1;
		limit = FUNC_VIR_MAX;
	} else {
		cmn_err(CE_WARN,
		    "Shares not supported on function(%d) at this time.\n",
		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	for (region = first; region < limit; region++) {
		if (nhd->vr[region].nxge == 0)
			break;
	}

	if (region == limit) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	vr = &nhd->vr[region];
	vr->nxge = (uintptr_t)nxge;
	vr->region = (uintptr_t)region;

	nhd->vrs--;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

	return (vr);
}

void
nxge_hio_unshare(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;

	vr_region_t region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
		    "vr->nxge is NULL"));
		return;
	}

	/*
	 * This function is no longer called, but I will keep it
	 * here in case we want to revisit this topic in the future.
	 *
	 * nxge_hio_hostinfo_uninit(nxge, vr);
	 */
	(void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int count)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	for (i = 0; i < count; i++) {
		int rv;
		if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
			if (i == 0) /* Couldn't get even one DC. */
				return (-rv);
			else
				break;
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 *	Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA			A.9.6.10
 *	[Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already bound.
 *
 *	Soft Reset TxDMA		A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *	Re-initialize TxDMA		A.9.6.8
 *	Reconfigure TxDMA
 *	Enable TxDMA			A.9.6.9
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];

	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	MUTEX_ENTER(&nhd->lock);
	nxge->tdc_is_shared[channel] = B_TRUE;
	MUTEX_EXIT(&nhd->lock);


	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_tdc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}

/*
 * nxge_hio_rdc_share
 *
 *	Share an unused RDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * This is the latest version of the procedure to
 * Reconfigure an Rx DMA channel:
 *
 * A.6.3 Reconfigure Rx DMA channel
 *	Stop RxMAC		A.9.2.6
 *	Drain IPP Port		A.9.3.6
 *	Stop and reset RxDMA	A.9.5.3
 *
 * This procedure will be executed by nxge_init_rxdma_channel() in the
 * guest domain:
 *
 *	Initialize RxDMA	A.9.5.4
 *	Reconfigure RxDMA
 *	Enable RxDMA		A.9.5.5
 *
 * We will do this here, since the RDC is a canalis non grata:
 *	Enable RxMAC		A.9.2.10
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_rdc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_rdc_grp_t *rdc_grp;

	int current, last;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

	/* Disable interrupts. */
	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to remove interrupt for RxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	// Assert RST: RXDMA_CFIG1[30] = 1
	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

	/*
	 * We have to reconfigure the RDC table(s)
	 * to which this channel belongs.
	 */
	current = hardware->def_mac_rxdma_grpid;
	last = current + hardware->max_rdc_grpids;
	for (; current < last; current++) {
		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
			rdc_grp = &nxge->pt_config.rdc_grps[current];
			rdc_grp->map = set->owned.map;
			rdc_grp->max_rdcs--;
			(void) nxge_init_fzc_rdc_tbl(nxge, current);
		}
	}

	/*
	 * The guest domain will reconfigure the RDC later.
	 *
	 * But in the meantime, we must re-enable the Rx MAC so
	 * that we can start receiving packets again on the
	 * remaining RDCs:
	 *
	 *	Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: Rx MAC still disabled"));
	}

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
		return (-EIO);
	}

	/*
	 * We have to initialize the guest's RDC table, too.
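	 * The guest's table starts out empty: the first RDC shared into it
	 * seeds start_rdc and def_rdc; further RDCs simply extend max_rdcs.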
	 * -----------------------------------------------------
	 */
	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
	if (rdc_grp->max_rdcs == 0) {
		rdc_grp->start_rdc = (uint8_t)channel;
		rdc_grp->def_rdc = (uint8_t)channel;
		rdc_grp->max_rdcs = 1;
	} else {
		rdc_grp->max_rdcs++;
	}
	NXGE_DC_SET(rdc_grp->map, channel);

	if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_share: nxge_init_fzc_rdc_tbl failed"));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));

	return (0);
}

/*
 * nxge_hio_dc_share
 *
 *	Share a DMA channel with a guest domain.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that <channel> will belong to.
 * 	type	Tx or Rx.
 * 	res_map	The resource map used by the caller, which we will
 *		update if successful.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware;
	nxge_hio_dc_t *dc;
	int channel, limit;

	nxge_grp_set_t *set;
	nxge_grp_t *group;

	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/*
	 * In version 1.0, we may only give a VR 2 RDCs or TDCs.
	 * Not only that, but the HV has statically assigned the
	 * channels like so:
	 * VR0: RDC0 & RDC1
	 * VR1: RDC2 & RDC3, etc.
	 * The TDCs are assigned in exactly the same way.
	 *
	 * So, for example
	 *	hardware->start_rdc + vr->region * 2;
	 *	VR1: hardware->start_rdc + 1 * 2;
	 *	VR3: hardware->start_rdc + 3 * 2;
	 * If start_rdc is 0, we end up with 2 or 6.
	 * If start_rdc is 8, we end up with 10 or 14.
	 */

	set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set);
	hardware = &nxge->pt_config.hw_config;

	// This code is still NIU-specific (assuming only 2 ports)
	channel = hardware->start_rdc + (vr->region % 4) * 2;
	limit = channel + 2;

	MUTEX_ENTER(&nhd->lock);
	for (; channel < limit; channel++) {
		if ((1 << channel) & set->owned.map) {
			break;
		}
	}

	if (channel == limit) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_dc_share: there are no channels to share"));
		return (-EIO);
	}

	MUTEX_EXIT(&nhd->lock);

	/* -------------------------------------------------- */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);

	if (slot < 0) {
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;

	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence = nhd->sequence++;
		group->active = B_TRUE;
	}

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 *	Unshare a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group = set->group[0];

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 *	Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_rdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;

	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_grp_t *group = set->group[0];
	int current, last;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	/*
	 * Assert RST: RXDMA_CFIG1[30] = 1
	 *
	 * Initialize RxDMA	A.9.5.4
	 * Reconfigure RxDMA
	 * Enable RxDMA		A.9.5.5
	 */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
		/* Be sure to re-enable the RX MAC. */
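		/* (It was disabled at the top of this function.) */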
		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		}
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to initialize RxDMA channel %d", channel));
		return;
	}

	/*
	 * We have to reconfigure the RDC table(s)
	 * to which this channel once again belongs.
	 */
	current = hardware->def_mac_rxdma_grpid;
	last = current + hardware->max_rdc_grpids;
	for (; current < last; current++) {
		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
			nxge_rdc_grp_t *group;
			group = &nxge->pt_config.rdc_grps[current];
			group->map = set->owned.map;
			group->max_rdcs++;
			(void) nxge_init_fzc_rdc_tbl(nxge, current);
		}
	}

	/*
	 * Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Failed to add interrupt for "
		    "RxDMA CHANNEL %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
}

/*
 * nxge_hio_dc_unshare
 *
 *	Unshare (reuse) a DMA channel.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that <channel> belongs to.
 * 	type	Tx or Rx.
 * 	channel	The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_dc_unshare(%d) failed", channel));
		return;
	}

	dc->vr = 0;
	dc->cookie = 0;

	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}

#endif	/* if defined(sun4v) */