/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 *
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define	NXGE_HIO_SHARE_MIN_CHANNELS 2
#define	NXGE_HIO_SHARE_MAX_CHANNELS 2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 *	Figure out if we are in a guest domain or not.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
void
nxge_get_environs(
	nxge_t *nxge)
{
	char *string;

	/*
	 * In the beginning, assume that we are running sans LDOMs/XEN.
	 */
	nxge->environs = SOLARIS_DOMAIN;

	/*
	 * Are we a hybrid I/O (HIO) guest domain driver?
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "niutype", &string)) == DDI_PROP_SUCCESS) {
		if (strcmp(string, "n2niu") == 0) {
			nxge->environs = SOLARIS_GUEST_DOMAIN;
			/* So we can allocate properly-aligned memory. */
			nxge->niu_type = N2_NIU;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "Hybrid IO-capable guest domain"));
		}
		ddi_prop_free(string);
	}
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 *	Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	ASSERT(nxge->nxge_hw_p->ndevs == 0);

	if (nhd != NULL) {
		MUTEX_DESTROY(&nhd->lock);
		KMEM_FREE(nhd, sizeof (*nhd));
		nxge->nxge_hw_p->hio = 0;
	}
}

/*
 * nxge_dci_map
 *
 *	Map a DMA channel index to a channel number.
 *
 * Arguments:
 * 	nxge
 * 	type	The type of channel this is: Tx or Rx.
 * 	index	The index to convert to a channel number
 *
 * Notes:
 *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *	Any domain
 */
int
nxge_dci_map(
	nxge_t *nxge,
	vpc_type_t type,
	int index)
{
	nxge_grp_set_t *set;
	int dc;

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		break;
	default:
		return (-1);
	}

	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
		if ((1 << dc) & set->owned.map) {
			if (index == 0)
				return (dc);
			else
				index--;
		}
	}

	return (-1);
}
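
/*
 * Illustrative only (hypothetical channel numbers): if a port owns
 * TDCs 4 through 7, set->owned.map is 0xf0, so
 * nxge_dci_map(nxge, VP_BOUND_TX, 2) walks that map and returns
 * channel 6.
 */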

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions.  That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_cleanup(p_nxge_t nxge)
 *
 *	Remove all outstanding groups.
 *
 * Arguments:
 *	nxge
 */
void
nxge_grp_cleanup(p_nxge_t nxge)
{
	nxge_grp_set_t *set;
	int i;

	MUTEX_ENTER(&nxge->group_lock);

	/*
	 * Find RX groups that need to be cleaned up.
	 */
	set = &nxge->rx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}

	/*
	 * Find TX groups that need to be cleaned up.
	 */
	set = &nxge->tx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);
}


/*
 * nxge_grp_add
 *
 *	Add a group to an instance of NXGE.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_grp_t *
nxge_grp_add(
	nxge_t *nxge,
	nxge_grp_type_t type)
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	int i;

	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
	group->nxge = nxge;

	MUTEX_ENTER(&nxge->group_lock);
	switch (type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	group->type = type;
	group->active = B_TRUE;
	group->sequence = set->sequence++;

	/* Find an empty slot for this logical group. */
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] == 0) {
			group->index = i;
			set->group[i] = group;
			NXGE_DC_SET(set->lg.map, i);
			set->lg.count++;
			break;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_add: %cgroup = %d.%d",
	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	return (group);
}

void
nxge_grp_remove(
	nxge_t *nxge,
	nxge_grp_t *group)	/* The group to remove. */
{
	nxge_grp_set_t *set;
	vpc_type_t type;

	MUTEX_ENTER(&nxge->group_lock);
	switch (group->type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	if (set->group[group->index] != group) {
		MUTEX_EXIT(&nxge->group_lock);
		return;
	}

	set->group[group->index] = 0;
	NXGE_DC_RESET(set->lg.map, group->index);
	set->lg.count--;

	/* While inside the mutex, deactivate <group>. */
	group->active = B_FALSE;

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_remove(%c.%d.%d) called",
	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	/* Now, remove any DCs which are still active. */
	switch (group->type) {
	default:
		type = VP_BOUND_TX;
		break;
	case NXGE_RECEIVE_GROUP:
	case EXT_RECEIVE_GROUP:
		type = VP_BOUND_RX;
	}

	while (group->dc) {
		nxge_grp_dc_remove(nxge, type, group->dc->channel);
	}

	KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 *	Add a DMA channel to a VR/Group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to add <channel> to.
 * 	type	Tx or Rx.
 * 	channel	The channel to add.
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
	nxge_t *nxge,
	nxge_grp_t *group,	/* The group to add <channel> to. */
	vpc_type_t type,	/* Rx or Tx */
	int channel)		/* A physical/logical channel number */
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

	if (group == NULL)
		return (0);

	switch (type) {
	default:
		set = &nxge->tx_set;
		if (channel >= NXGE_MAX_TDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: TDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		if (channel >= NXGE_MAX_RDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: RDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
	    type == VP_BOUND_TX ? 't' : 'r',
	    nxge->mac.portnum, group->sequence, group->count, channel));

	MUTEX_ENTER(&nxge->group_lock);
	if (group->active != B_TRUE) {
		/* We may be in the process of removing this group. */
		MUTEX_EXIT(&nxge->group_lock);
		return (NXGE_ERROR);
	}
	MUTEX_EXIT(&nxge->group_lock);

	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
		return (NXGE_ERROR);
	}

	MUTEX_ENTER(&nhd->lock);

	if (dc->group) {
		MUTEX_EXIT(&nhd->lock);
		/* This channel is already in use! */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = group;

	if (isLDOMguest(nxge))
		(void) nxge_hio_ldsv_add(nxge, dc);

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	if (type == VP_BOUND_TX) {
		MUTEX_ENTER(&nhd->lock);
		nxge->tdc_is_shared[channel] = B_FALSE;
		MUTEX_EXIT(&nhd->lock);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove: find(%d) failed", channel));
		return;
	}
	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
	if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
		NXGE_DC_RESET(group->map, channel);
	}

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove(%d) failed", channel));
		return;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 *	Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}
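
/*
 * Illustrative only (hypothetical channel numbers): after appending
 * channels 4, 6 and 7 to a group, the bookkeeping maintained by
 * nxge_grp_dc_append(), nxge_grp_dc_unlink() and nxge_grp_dc_map()
 * looks like
 *
 *	group->dc:	dc(4) -> dc(6) -> dc(7) -> NULL
 *	group->map:	0xd0
 *	group->count:	3
 *	group->legend:	{ 4, 6, 7, 0, ... }
 *
 * Unlinking channel 6 leaves count 2 and legend { 4, 7, 0, ... };
 * clearing the corresponding bit in group->map is left to the caller
 * (see nxge_hio_dc_unshare()).
 */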

/*
 * nxge_grp_dc_unlink
 *
 *	Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	channel	The channel to unlink
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(nxge_t *nxge, nxge_grp_t *group, int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if (group == NULL) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 *	Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 *	Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1", 0 },
	{ "RXDMA_CFIG2", 8 },
	{ "RBR_CFIG_A", 0x10 },
	{ "RBR_CFIG_B", 0x18 },
	{ "RBR_KICK", 0x20 },
	{ "RBR_STAT", 0x28 },
	{ "RBR_HDH", 0x30 },
	{ "RBR_HDL", 0x38 },
	{ "RCRCFIG_A", 0x40 },
	{ "RCRCFIG_B", 0x48 },
	{ "RCRSTAT_A", 0x50 },
	{ "RCRSTAT_B", 0x58 },
	{ "RCRSTAT_C", 0x60 },
	{ "RX_DMA_ENT_MSK", 0x68 },
	{ "RX_DMA_CTL_STAT", 0x70 },
	{ "RCR_FLSH", 0x78 },
	{ "RXMISC", 0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG", 0 },
	{ "Tx_RNG_HDL", 0x10 },
	{ "Tx_RNG_KICK", 0x18 },
	{ "Tx_ENT_MASK", 0x20 },
	{ "Tx_CS", 0x28 },
	{ "TxDMA_MBH", 0x30 },
	{ "TxDMA_MBL", 0x38 },
	{ "TxDMA_PRE_ST", 0x40 },
	{ "Tx_RNG_ERR_LOGH", 0x48 },
	{ "Tx_RNG_ERR_LOGL", 0x50 },
	{ "TDMC_INTR_DBG", 0x60 },
	{ "Tx_CS_DBG", 0x68 },
	{ 0, -1 }
};

/*
 * nxge_xx2str
 *
 *	Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
808 * 809 * Context: 810 * Any domain 811 */ 812 const char * 813 nxge_rx2str( 814 int offset) 815 { 816 dmc_reg_name_t *reg = &rx_names[0]; 817 818 offset &= DMA_CSR_MASK; 819 820 while (reg->name) { 821 if (offset == reg->offset) 822 return (reg->name); 823 reg++; 824 } 825 826 return (0); 827 } 828 829 const char * 830 nxge_tx2str( 831 int offset) 832 { 833 dmc_reg_name_t *reg = &tx_names[0]; 834 835 offset &= DMA_CSR_MASK; 836 837 while (reg->name) { 838 if (offset == reg->offset) 839 return (reg->name); 840 reg++; 841 } 842 843 return (0); 844 } 845 846 /* 847 * nxge_ddi_perror 848 * 849 * Map a DDI error number to a string. 850 * 851 * Arguments: 852 * ddi_error The DDI error number to map. 853 * 854 * Notes: 855 * 856 * Context: 857 * Any domain 858 */ 859 const char * 860 nxge_ddi_perror( 861 int ddi_error) 862 { 863 switch (ddi_error) { 864 case DDI_SUCCESS: 865 return ("DDI_SUCCESS"); 866 case DDI_FAILURE: 867 return ("DDI_FAILURE"); 868 case DDI_NOT_WELL_FORMED: 869 return ("DDI_NOT_WELL_FORMED"); 870 case DDI_EAGAIN: 871 return ("DDI_EAGAIN"); 872 case DDI_EINVAL: 873 return ("DDI_EINVAL"); 874 case DDI_ENOTSUP: 875 return ("DDI_ENOTSUP"); 876 case DDI_EPENDING: 877 return ("DDI_EPENDING"); 878 case DDI_ENOMEM: 879 return ("DDI_ENOMEM"); 880 case DDI_EBUSY: 881 return ("DDI_EBUSY"); 882 case DDI_ETRANSPORT: 883 return ("DDI_ETRANSPORT"); 884 case DDI_ECONTEXT: 885 return ("DDI_ECONTEXT"); 886 default: 887 return ("Unknown error"); 888 } 889 } 890 891 /* 892 * --------------------------------------------------------------------- 893 * These are Sun4v HIO function definitions 894 * --------------------------------------------------------------------- 895 */ 896 897 #if defined(sun4v) 898 899 /* 900 * Local prototypes 901 */ 902 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *); 903 904 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t); 905 static void nxge_hio_unshare(nxge_hio_vr_t *); 906 907 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int); 908 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t); 909 910 static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel); 911 static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel); 912 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 913 mac_ring_type_t, int); 914 915 /* 916 * nxge_hio_init 917 * 918 * Initialize the HIO module of the NXGE driver. 919 * 920 * Arguments: 921 * nxge 922 * 923 * Notes: 924 * 925 * Context: 926 * Any domain 927 */ 928 int 929 nxge_hio_init( 930 nxge_t *nxge) 931 { 932 nxge_hio_data_t *nhd; 933 int i, region; 934 935 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 936 if (nhd == 0) { 937 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 938 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 939 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 940 } 941 942 if ((nxge->environs == SOLARIS_DOMAIN) && 943 (nxge->niu_type == N2_NIU)) { 944 if (nxge->niu_hsvc_available == B_TRUE) { 945 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 946 if (niu_hsvc->hsvc_major == 1 && 947 niu_hsvc->hsvc_minor == 1) 948 nxge->environs = SOLARIS_SERVICE_DOMAIN; 949 NXGE_DEBUG_MSG((nxge, HIO_CTL, 950 "nxge_hio_init: hypervisor services " 951 "version %d.%d", 952 niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 953 } 954 } 955 956 if (!isLDOMs(nxge)) { 957 nhd->hio.ldoms = B_FALSE; 958 return (NXGE_OK); 959 } 960 961 nhd->hio.ldoms = B_TRUE; 962 963 /* 964 * Fill in what we can. 
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize tdc share state, shares and ring group structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = (void *)NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	} else {
		/*
		 * isLDOMguest(nxge) == B_TRUE
		 */
		nx_vio_fp_t *vio;
		nhd->type = NXGE_HIO_TYPE_GUEST;

		vio = &nhd->hio.vio;
		vio->__register = (vio_net_resource_reg_t)
		    modgetsymvalue("vio_net_resource_reg", 0);
		vio->unregister = (vio_net_resource_unreg_t)
		    modgetsymvalue("vio_net_resource_unreg", 0);

		if (vio->__register == 0 || vio->unregister == 0) {
			NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
			return (NXGE_ERROR);
		}
	}

	return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int rv, sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Program the mac address for the group/share.
	 */
	if ((rv = nxge_hio_hostinfo_init(nxge, vr,
	    (ether_addr_t *)mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
	p_nxge_t nxge = rxgroup->nxgep;
	int group = rxgroup->gindex;
	int sindex;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	sindex = nxge->rx_hio_groups[group].sindex;
	vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

	/*
	 * Remove the mac address for the group/share.
	 */
	nxge_hio_hostinfo_uninit(nxge, vr);

	return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
	mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;

	switch (type) {
	case MAC_RING_TYPE_RX:
		rxgroup = &nxgep->rx_hio_groups[group];
		rxgroup->gindex = group;

		infop->mrg_driver = (mac_group_driver_t)rxgroup;
		infop->mrg_start = NULL;
		infop->mrg_stop = NULL;
		infop->mrg_addmac = nxge_hio_add_mac;
		infop->mrg_remmac = nxge_hio_rem_mac;
		infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		break;

	case MAC_RING_TYPE_TX:
		break;
	}
}

int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
		NXGE_ERROR_MSG((nxge, HIO_CTL,
		    "nx_hio_share_assign: "
		    "vr->assign() returned %d", hv_rv));
		nxge_hio_unshare(vr);
		return (-EIO);
	}

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nx_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	/*
	 * For each shared RDC, ask the HV to find us an empty slot.
	 * -----------------------------------------------------
	 */
	dc = vr->rx_group.dc;
	for (i = 0; i < NXGE_MAX_RDCS; i++) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		while (dc) {
			hv_rv = (*rx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nx_hio_share_assign: "
				    "rx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/*
			 * Inform the caller about the slot chosen.
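			 *
			 * For example, if the HV hands back slots 0 and 1
			 * for the two RDCs of a share, the caller's rmap
			 * ends up as 0x3.  (Illustrative values only.)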
			 */
			(*rmap) |= 1 << slot;

			dc = dc->next;
		}
	}

	return (0);
}

int
nxge_hio_share_unassign(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	uint64_t hv_rv;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	dc = vr->tx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;
		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nx_hio_dc_unshare: "
			    "tx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	dc = vr->rx_group.dc;
	while (dc) {
		nxhv_dc_fp_t *rx = &nhd->hio.rx;
		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nx_hio_dc_unshare: "
			    "rx->unassign(%x, %d) failed: %ld",
			    vr->cookie, dc->page, hv_rv));
		}
		dc = dc->next;
	}

	fp = &nhd->hio.vr;
	if (fp->unassign) {
		hv_rv = (*fp->unassign)(vr->cookie);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
			    "vr->unassign(%x) failed: %ld",
			    vr->cookie, hv_rv));
		}
	}

	return (0);
}

int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
	mac_share_handle_t *shandle)
{
	p_nxge_t nxge = (p_nxge_t)arg;
	nxge_rx_ring_group_t *rxgroup;
	nxge_share_handle_t *shp;

	nxge_hio_vr_t *vr;	/* The Virtualization Region */
	uint64_t rmap, tmap;
	int rv;

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

	if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
	    nhd->hio.rx.assign == 0) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
		return (EIO);
	}

	/*
	 * Get a VR.
	 */
	if ((vr = nxge_hio_vr_share(nxge)) == 0)
		return (EAGAIN);

	/*
	 * Get an RDC group for us to use.
	 */
	if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
		nxge_hio_unshare(vr);
		return (EBUSY);
	}

	/*
	 * Add resources to the share.
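	 * With the current NXGE_HIO_SHARE_MAX_CHANNELS definition that
	 * means two TDCs and two RDCs per share, matching the HV's static
	 * VR-to-channel assignment described in nxge_hio_dc_share().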
	 */
	tmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX,
	    NXGE_HIO_SHARE_MAX_CHANNELS);
	if (rv != 0) {
		nxge_hio_unshare(vr);
		return (rv);
	}

	rmap = 0;
	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX,
	    NXGE_HIO_SHARE_MAX_CHANNELS);
	if (rv != 0) {
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_unshare(vr);
		return (rv);
	}

	if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
		nxge_hio_unshare(vr);
		return (rv);
	}

	rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
	rxgroup->gindex = vr->rdc_tbl;
	rxgroup->sindex = vr->region;

	shp = &nxge->shares[vr->region];
	shp->index = vr->region;
	shp->vrp = (void *)vr;
	shp->tmap = tmap;
	shp->rmap = rmap;
	shp->rxgroup = vr->rdc_tbl;
	shp->active = B_TRUE;

	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

	*shandle = (mac_share_handle_t)shp;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
	return (0);
}

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	/*
	 * First, unassign the VR (take it back),
	 * so we can enable interrupts again.
	 */
	(void) nxge_hio_share_unassign(shp->vrp);

	/*
	 * Free Ring Resources for TX and RX
	 */
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

	/*
	 * Free VR resource.
	 */
	nxge_hio_unshare(shp->vrp);

	/*
	 * Clear internal handle state.
	 */
	shp->index = 0;
	shp->vrp = (void *)NULL;
	shp->tmap = 0;
	shp->rmap = 0;
	shp->rxgroup = 0;
	shp->active = B_FALSE;
}

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
	uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

	switch (type) {
	case MAC_RING_TYPE_RX:
		*rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
		*rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
		*rmap = shp->rmap;
		*gnum = shp->rxgroup;
		break;

	case MAC_RING_TYPE_TX:
		*rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
		*rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
		*rmap = shp->tmap;
		*gnum = 0;
		break;
	}
}

/*
 * nxge_hio_vr_share
 *
 *	Find an unused Virtualization Region (VR).
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
nxge_hio_vr_t *
nxge_hio_vr_share(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;

	int first, limit, region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

	MUTEX_ENTER(&nhd->lock);

	if (nhd->vrs == 0) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	/* Find an empty virtual region (VR). */
	if (nxge->function_num == 0) {
		// FUNC0_VIR0 'belongs' to NIU port 0.
		first = FUNC0_VIR1;
		limit = FUNC2_VIR0;
	} else if (nxge->function_num == 1) {
		// FUNC2_VIR0 'belongs' to NIU port 1.
		first = FUNC2_VIR1;
		limit = FUNC_VIR_MAX;
	} else {
		cmn_err(CE_WARN,
		    "Shares not supported on function(%d) at this time.\n",
		    nxge->function_num);
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	for (region = first; region < limit; region++) {
		if (nhd->vr[region].nxge == 0)
			break;
	}

	if (region == limit) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	vr = &nhd->vr[region];
	vr->nxge = (uintptr_t)nxge;
	vr->region = (uintptr_t)region;

	nhd->vrs--;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

	return (vr);
}

void
nxge_hio_unshare(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;

	vr_region_t region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
		    "vr->nxge is NULL"));
		return;
	}

	/*
	 * This function is no longer called, but I will keep it
	 * here in case we want to revisit this topic in the future.
	 *
	 * nxge_hio_hostinfo_uninit(nxge, vr);
	 */
	(void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int count)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	for (i = 0; i < count; i++) {
		int rv;
		if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
			if (i == 0) /* Couldn't get even one DC. */
				return (-rv);
			else
				break;
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 *	Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA			A.9.6.10
 *	[Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already bound.
 *
 *	Soft Reset TxDMA		A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *	Re-initialize TxDMA		A.9.6.8
 *	Reconfigure TxDMA
 *	Enable TxDMA			A.9.6.9
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];

	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	if (ring->tx_ring_busy) {
		/*
		 * Wait for 30 seconds.
		 */
		for (count = 30 * 1000; count; count--) {
			if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
				break;
			}

			drv_usecwait(1000);
		}

		if (count == 0) {
			(void) atomic_swap_32(&ring->tx_ring_offline,
			    NXGE_TX_RING_ONLINE);
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
			    "Tx ring %d was always BUSY", channel));
			return (-EIO);
		}
	} else {
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_OFFLINED);
	}

	MUTEX_ENTER(&nhd->lock);
	nxge->tdc_is_shared[channel] = B_TRUE;
	MUTEX_EXIT(&nhd->lock);

	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_share: FZC TDC failed: %d", channel));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

	return (0);
}
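
/*
 * A sketch of the offline handshake used by nxge_hio_tdc_share() above,
 * assuming the transmit path (in nxge_txdma.c, not shown here) honours
 * the tx_ring_offline flags:
 *
 *	service domain (above)		transmit path (assumed)
 *	----------------------		-----------------------
 *	swap in NXGE_TX_RING_OFFLINING	sees OFFLINING while busy,
 *	poll up to 30 seconds for	finishes outstanding work and
 *	NXGE_TX_RING_OFFLINED		marks the ring OFFLINED
 *	share the channel, or on
 *	timeout swap back to
 *	NXGE_TX_RING_ONLINE
 *
 * This is an illustration of the code above, not a definitive
 * description of the transmit-side protocol.
 */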

/*
 * nxge_hio_rdc_share
 *
 *	Share an unused RDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * This is the latest version of the procedure to
 * Reconfigure an Rx DMA channel:
 *
 * A.6.3 Reconfigure Rx DMA channel
 *	Stop RxMAC		A.9.2.6
 *	Drain IPP Port		A.9.3.6
 *	Stop and reset RxDMA	A.9.5.3
 *
 * This procedure will be executed by nxge_init_rxdma_channel() in the
 * guest domain:
 *
 *	Initialize RxDMA	A.9.5.4
 *	Reconfigure RxDMA
 *	Enable RxDMA		A.9.5.5
 *
 * We will do this here, since the RDC is a canalis non grata:
 *	Enable RxMAC		A.9.2.10
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_rdc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;
	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_rdc_grp_t *rdc_grp;

	int current, last;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

	/* Disable interrupts. */
	if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: "
		    "Failed to remove interrupt for RxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	// Assert RST: RXDMA_CFIG1[30] = 1
	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

	/*
	 * We have to reconfigure the RDC table(s)
	 * to which this channel belongs.
	 */
	current = hardware->def_mac_rxdma_grpid;
	last = current + hardware->max_rdc_grpids;
	for (; current < last; current++) {
		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
			rdc_grp = &nxge->pt_config.rdc_grps[current];
			rdc_grp->map = set->owned.map;
			rdc_grp->max_rdcs--;
			(void) nxge_init_fzc_rdc_tbl(nxge, current);
		}
	}

	/*
	 * The guest domain will reconfigure the RDC later.
	 *
	 * But in the meantime, we must re-enable the Rx MAC so
	 * that we can start receiving packets again on the
	 * remaining RDCs:
	 *
	 *	Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_share: Rx MAC still disabled"));
	}

	/*
	 * Initialize the DC-specific FZC control registers.
	 * -----------------------------------------------------
	 */
	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_share: FZC RDC failed: %d", channel));
		return (-EIO);
	}

	/*
	 * We have to initialize the guest's RDC table, too.
	 * -----------------------------------------------------
	 */
	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
	if (rdc_grp->max_rdcs == 0) {
		rdc_grp->start_rdc = (uint8_t)channel;
		rdc_grp->def_rdc = (uint8_t)channel;
		rdc_grp->max_rdcs = 1;
	} else {
		rdc_grp->max_rdcs++;
	}
	NXGE_DC_SET(rdc_grp->map, channel);

	if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed"));
		return (-EIO);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));

	return (0);
}

/*
 * nxge_hio_dc_share
 *
 *	Share a DMA channel with a guest domain.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that <channel> will belong to.
 * 	type	Tx or Rx.
 * 	res_map	The resource map used by the caller, which we will
 *		update if successful.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_dc_share(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware;
	nxge_hio_dc_t *dc;
	int channel, limit;

	nxge_grp_set_t *set;
	nxge_grp_t *group;

	int slot;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r'));

	/*
	 * In version 1.0, we may only give a VR 2 RDCs or TDCs.
	 * Not only that, but the HV has statically assigned the
	 * channels like so:
	 * VR0: RDC0 & RDC1
	 * VR1: RDC2 & RDC3, etc.
	 * The TDCs are assigned in exactly the same way.
	 *
	 * So, for example
	 *	hardware->start_rdc + vr->region * 2;
	 *	VR1: hardware->start_rdc + 1 * 2;
	 *	VR3: hardware->start_rdc + 3 * 2;
	 *	If start_rdc is 0, we end up with 2 or 6.
	 *	If start_rdc is 8, we end up with 10 or 14.
	 */

	set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set);
	hardware = &nxge->pt_config.hw_config;

	// This code is still NIU-specific (assuming only 2 ports)
	channel = hardware->start_rdc + (vr->region % 4) * 2;
	limit = channel + 2;

	MUTEX_ENTER(&nhd->lock);
	for (; channel < limit; channel++) {
		if ((1 << channel) & set->owned.map) {
			break;
		}
	}

	if (channel == limit) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_share: there are no channels to share"));
		return (-EIO);
	}

	MUTEX_EXIT(&nhd->lock);

	/* -------------------------------------------------- */
	slot = (type == MAC_RING_TYPE_TX) ?
	    nxge_hio_tdc_share(nxge, channel) :
	    nxge_hio_rdc_share(nxge, vr, channel);

	if (slot < 0) {
		if (type == MAC_RING_TYPE_RX) {
			nxge_hio_rdc_unshare(nxge, channel);
		} else {
			nxge_hio_tdc_unshare(nxge, channel);
		}
		return (slot);
	}

	MUTEX_ENTER(&nhd->lock);

	/*
	 * Tag this channel.
	 * --------------------------------------------------
	 */
	dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel];

	dc->vr = vr;
	dc->channel = (nxge_channel_t)channel;

	MUTEX_EXIT(&nhd->lock);

	/*
	 * vr->[t|r]x_group is used by the service domain to
	 * keep track of its shared DMA channels.
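	 *
	 * (Illustrative state: after two RDCs, say channels 2 and 3, have
	 * been shared into a VR, vr->rx_group.map is 0xc and .count is 2,
	 * while the same bits remain set in the port's rx_set.shared.map.)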
	 */
	MUTEX_ENTER(&nxge->group_lock);
	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

	dc->group = group;

	/* Initialize <group>, if necessary */
	if (group->count == 0) {
		group->nxge = nxge;
		group->type = (type == MAC_RING_TYPE_TX) ?
		    VP_BOUND_TX : VP_BOUND_RX;
		group->sequence = nhd->sequence++;
		group->active = B_TRUE;
	}

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_ERROR_MSG((nxge, HIO_CTL,
	    "DC share: %cDC %d was assigned to slot %d",
	    type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

	return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 *	Unshare a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	nxge_grp_t *group = set->group[0];

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 *	Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_rdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;

	nxge_grp_set_t *set = &nxge->rx_set;
	nxge_grp_t *group = set->group[0];
	int current, last;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

	/* Stop RxMAC = A.9.2.6 */
	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxMAC"));
	}

	/* Drain IPP Port = A.9.3.6 */
	(void) nxge_ipp_drain(nxge);

	/* Stop and reset RxDMA = A.9.5.3 */
	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to disable RxDMA channel %d", channel));
	}

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	/*
	 * Assert RST: RXDMA_CFIG1[30] = 1
	 *
	 * Initialize RxDMA	A.9.5.4
	 * Reconfigure RxDMA
	 * Enable RxDMA		A.9.5.5
	 */
	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
		/* Be sure to re-enable the RX MAC. */
		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		}
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
		    "Failed to initialize RxDMA channel %d", channel));
		return;
	}

	/*
	 * We have to reconfigure the RDC table(s)
	 * to which this channel once again belongs.
	 */
	current = hardware->def_mac_rxdma_grpid;
	last = current + hardware->max_rdc_grpids;
	for (; current < last; current++) {
		if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
			nxge_rdc_grp_t *group;
			group = &nxge->pt_config.rdc_grps[current];
			group->map = set->owned.map;
			group->max_rdcs++;
			(void) nxge_init_fzc_rdc_tbl(nxge, current);
		}
	}

	/*
	 * Enable RxMAC = A.9.2.10
	 */
	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_rdc_unshare: Failed to add interrupt for "
		    "RxDMA CHANNEL %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
}

/*
 * nxge_hio_dc_unshare
 *
 *	Unshare (reuse) a DMA channel.
 *
 * Arguments:
 * 	nxge
 * 	vr	The VR that <channel> belongs to.
 * 	type	Tx or Rx.
 * 	channel	The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_dc_unshare(
	nxge_t *nxge,
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int channel)
{
	nxge_grp_t *group;
	nxge_hio_dc_t *dc;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

	/* Unlink the channel from its group. */
	/* -------------------------------------------------- */
	group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group;
	NXGE_DC_RESET(group->map, channel);
	if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_unshare(%d) failed", channel));
		return;
	}

	dc->vr = 0;
	dc->cookie = 0;

	if (type == MAC_RING_TYPE_RX) {
		nxge_hio_rdc_unshare(nxge, channel);
	} else {
		nxge_hio_tdc_unshare(nxge, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}

#endif	/* if defined(sun4v) */