1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * nxge_hio.c 31 * 32 * This file manages the virtualization resources for Neptune 33 * devices. That is, it implements a hybrid I/O (HIO) approach in the 34 * Solaris kernel, whereby a guest domain on an LDOMs server may 35 * request & use hardware resources from the service domain. 36 * 37 */ 38 39 #include <sys/nxge/nxge_impl.h> 40 #include <sys/nxge/nxge_fzc.h> 41 #include <sys/nxge/nxge_rxdma.h> 42 #include <sys/nxge/nxge_txdma.h> 43 #include <sys/nxge/nxge_hio.h> 44 45 #define NXGE_HIO_SHARE_MIN_CHANNELS 2 46 #define NXGE_HIO_SHARE_MAX_CHANNELS 2 47 48 /* 49 * External prototypes 50 */ 51 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 52 53 /* The following function may be found in nxge_main.c */ 54 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 55 56 /* The following function may be found in nxge_[t|r]xdma.c */ 57 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 58 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 59 60 /* 61 * Local prototypes 62 */ 63 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 64 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 65 static void nxge_grp_dc_map(nxge_grp_t *group); 66 67 /* 68 * These functions are used by both service & guest domains to 69 * decide whether they're running in an LDOMs/XEN environment 70 * or not. If so, then the Hybrid I/O (HIO) module is initialized. 71 */ 72 73 /* 74 * nxge_get_environs 75 * 76 * Figure out if we are in a guest domain or not. 77 * 78 * Arguments: 79 * nxge 80 * 81 * Notes: 82 * 83 * Context: 84 * Any domain 85 */ 86 void 87 nxge_get_environs( 88 nxge_t *nxge) 89 { 90 char *string; 91 92 /* 93 * In the beginning, assume that we are running sans LDOMs/XEN. 94 */ 95 nxge->environs = SOLARIS_DOMAIN; 96 97 /* 98 * Are we a hybrid I/O (HIO) guest domain driver? 99 */ 100 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 101 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 102 "niutype", &string)) == DDI_PROP_SUCCESS) { 103 if (strcmp(string, "n2niu") == 0) { 104 nxge->environs = SOLARIS_GUEST_DOMAIN; 105 /* So we can allocate properly-aligned memory. */ 106 nxge->niu_type = N2_NIU; 107 NXGE_DEBUG_MSG((nxge, HIO_CTL, 108 "Hybrid IO-capable guest domain")); 109 } 110 ddi_prop_free(string); 111 } 112 } 113 114 #if !defined(sun4v) 115 116 /* 117 * nxge_hio_init 118 * 119 * Initialize the HIO module of the NXGE driver. 
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == 0) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	ASSERT(nhd != NULL);
	ASSERT(nxge->nxge_hw_p->ndevs == 0);

	MUTEX_DESTROY(&nhd->lock);

	KMEM_FREE(nhd, sizeof (*nhd));

	nxge->nxge_hw_p->hio = 0;
}

/*
 * nxge_dci_map
 *
 *	Map a DMA channel index to a channel number.
 *
 * Arguments:
 * 	instance	The instance number of the driver.
 * 	type		The type of channel this is: Tx or Rx.
 * 	index		The index to convert to a channel number
 *
 * Notes:
 *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *	Any domain
 */
int
nxge_dci_map(
	nxge_t *nxge,
	vpc_type_t type,
	int index)
{
	nxge_grp_set_t *set;
	int dc;

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		break;
	}

	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
		if ((1 << dc) & set->owned.map) {
			if (index == 0)
				return (dc);
			else
				index--;
		}
	}

	return (-1);
}

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions.  That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_add
 *
 *	Add a group to an instance of NXGE.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
vr_handle_t
nxge_grp_add(
	nxge_t *nxge,
	nxge_grp_type_t type)
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	int i;

	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
	group->nxge = nxge;

	MUTEX_ENTER(&nxge->group_lock);
	switch (type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	group->type = type;
	group->active = B_TRUE;
	group->sequence = set->sequence++;

	/* Find an empty slot for this logical group. */
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] == 0) {
			group->index = i;
			set->group[i] = group;
			NXGE_DC_SET(set->lg.map, i);
			set->lg.count++;
			break;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_add: %cgroup = %d.%d",
	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	return ((vr_handle_t)group);
}

void
nxge_grp_remove(
	nxge_t *nxge,
	vr_handle_t handle)	/* The group to remove.
*/ 288 { 289 nxge_grp_set_t *set; 290 nxge_grp_t *group; 291 vpc_type_t type; 292 293 group = (nxge_grp_t *)handle; 294 295 MUTEX_ENTER(&nxge->group_lock); 296 switch (group->type) { 297 case NXGE_TRANSMIT_GROUP: 298 case EXT_TRANSMIT_GROUP: 299 set = &nxge->tx_set; 300 break; 301 default: 302 set = &nxge->rx_set; 303 break; 304 } 305 306 if (set->group[group->index] != group) { 307 MUTEX_EXIT(&nxge->group_lock); 308 return; 309 } 310 311 set->group[group->index] = 0; 312 NXGE_DC_RESET(set->lg.map, group->index); 313 set->lg.count--; 314 315 /* While inside the mutex, deactivate <group>. */ 316 group->active = B_FALSE; 317 318 MUTEX_EXIT(&nxge->group_lock); 319 320 NXGE_DEBUG_MSG((nxge, HIO_CTL, 321 "nxge_grp_remove(%c.%d.%d) called", 322 group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 323 nxge->mac.portnum, group->sequence)); 324 325 /* Now, remove any DCs which are still active. */ 326 switch (group->type) { 327 default: 328 type = VP_BOUND_TX; 329 break; 330 case NXGE_RECEIVE_GROUP: 331 case EXT_RECEIVE_GROUP: 332 type = VP_BOUND_RX; 333 } 334 335 while (group->dc) { 336 nxge_grp_dc_remove(nxge, type, group->dc->channel); 337 } 338 339 KMEM_FREE(group, sizeof (*group)); 340 } 341 342 /* 343 * nx_hio_dc_add 344 * 345 * Add a DMA channel to a VR/Group. 346 * 347 * Arguments: 348 * nxge 349 * channel The channel to add. 350 * Notes: 351 * 352 * Context: 353 * Any domain 354 */ 355 /* ARGSUSED */ 356 int 357 nxge_grp_dc_add( 358 nxge_t *nxge, 359 vr_handle_t handle, /* The group to add <channel> to. */ 360 vpc_type_t type, /* Rx or Tx */ 361 int channel) /* A physical/logical channel number */ 362 { 363 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 364 nxge_hio_dc_t *dc; 365 nxge_grp_set_t *set; 366 nxge_grp_t *group; 367 nxge_status_t status = NXGE_OK; 368 369 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 370 371 if (handle == 0) 372 return (0); 373 374 switch (type) { 375 default: 376 set = &nxge->tx_set; 377 if (channel > NXGE_MAX_TDCS) { 378 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 379 "nxge_grp_dc_add: TDC = %d", channel)); 380 return (NXGE_ERROR); 381 } 382 break; 383 case VP_BOUND_RX: 384 set = &nxge->rx_set; 385 if (channel > NXGE_MAX_RDCS) { 386 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 387 "nxge_grp_dc_add: RDC = %d", channel)); 388 return (NXGE_ERROR); 389 } 390 break; 391 } 392 393 group = (nxge_grp_t *)handle; 394 NXGE_DEBUG_MSG((nxge, HIO_CTL, 395 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 396 type == VP_BOUND_TX ? 't' : 'r', 397 nxge->mac.portnum, group->sequence, group->count, channel)); 398 399 MUTEX_ENTER(&nxge->group_lock); 400 if (group->active != B_TRUE) { 401 /* We may be in the process of removing this group. */ 402 MUTEX_EXIT(&nxge->group_lock); 403 return (NXGE_ERROR); 404 } 405 MUTEX_EXIT(&nxge->group_lock); 406 407 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 408 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 409 "nxge_grp_dc_add(%d): DC FIND failed", channel)); 410 return (NXGE_ERROR); 411 } 412 413 MUTEX_ENTER(&nhd->lock); 414 415 if (dc->group) { 416 MUTEX_EXIT(&nhd->lock); 417 /* This channel is already in use! 
 */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = handle;

	if (isLDOMguest(nxge))
		(void) nxge_hio_ldsv_add(nxge, dc);

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove: find(%d) failed", channel));
		return;
	}
	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
	if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
		NXGE_DC_RESET(group->map, channel);
	}

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove(%d) failed", channel));
		return;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}
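/*
 * Note (illustrative, not part of the original source): the three helpers
 * below keep a group's bookkeeping consistent.  nxge_grp_dc_append() links
 * a DC onto group->dc, sets the channel's bit in group->map and bumps
 * group->count; nxge_grp_dc_unlink() undoes that; and nxge_grp_dc_map()
 * rebuilds group->legend, the flat array of channel numbers, from the
 * linked list.  A hypothetical service-domain caller would reach them
 * indirectly, roughly like this sketch:
 *
 *	vr_handle_t handle = nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
 *	(void) nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel);
 *	...
 *	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);
 *	nxge_grp_remove(nxge, handle);
 */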
/*
 * nxge_grp_dc_append
 *
 *	Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 *	Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	channel	The channel number of the DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
	nxge_t *nxge,
	nxge_grp_t *group,
	int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 *	Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 *	Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}
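/*
 * The tables below map DMC register offsets to printable names for the
 * debug helpers that follow.  For example (illustrative only), a CSR
 * offset whose low bits are 0x70 is reported by nxge_rx2str() as
 * "RX_DMA_CTL_STAT" after masking with DMA_CSR_MASK, while an unknown
 * offset yields a null pointer, so callers should check the return
 * value before printing it.
 */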
static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1",	0 },
	{ "RXDMA_CFIG2",	8 },
	{ "RBR_CFIG_A",		0x10 },
	{ "RBR_CFIG_B",		0x18 },
	{ "RBR_KICK",		0x20 },
	{ "RBR_STAT",		0x28 },
	{ "RBR_HDH",		0x30 },
	{ "RBR_HDL",		0x38 },
	{ "RCRCFIG_A",		0x40 },
	{ "RCRCFIG_B",		0x48 },
	{ "RCRSTAT_A",		0x50 },
	{ "RCRSTAT_B",		0x58 },
	{ "RCRSTAT_C",		0x60 },
	{ "RX_DMA_ENT_MSK",	0x68 },
	{ "RX_DMA_CTL_STAT",	0x70 },
	{ "RCR_FLSH",		0x78 },
	{ "RXMISC",		0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG",	0 },
	{ "Tx_RNG_HDL",		0x10 },
	{ "Tx_RNG_KICK",	0x18 },
	{ "Tx_ENT_MASK",	0x20 },
	{ "Tx_CS",		0x28 },
	{ "TxDMA_MBH",		0x30 },
	{ "TxDMA_MBL",		0x38 },
	{ "TxDMA_PRE_ST",	0x40 },
	{ "Tx_RNG_ERR_LOGH",	0x48 },
	{ "Tx_RNG_ERR_LOGL",	0x50 },
	{ "TDMC_INTR_DBG",	0x60 },
	{ "Tx_CS_DBG",		0x68 },
	{ 0, -1 }
};

/*
 * nxge_xx2str
 *
 *	Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
 *
 * Context:
 *	Any domain
 */
const char *
nxge_rx2str(
	int offset)
{
	dmc_reg_name_t *reg = &rx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

const char *
nxge_tx2str(
	int offset)
{
	dmc_reg_name_t *reg = &tx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

/*
 * nxge_ddi_perror
 *
 *	Map a DDI error number to a string.
 *
 * Arguments:
 * 	ddi_error	The DDI error number to map.
814 * 815 * Notes: 816 * 817 * Context: 818 * Any domain 819 */ 820 const char * 821 nxge_ddi_perror( 822 int ddi_error) 823 { 824 switch (ddi_error) { 825 case DDI_SUCCESS: 826 return ("DDI_SUCCESS"); 827 case DDI_FAILURE: 828 return ("DDI_FAILURE"); 829 case DDI_NOT_WELL_FORMED: 830 return ("DDI_NOT_WELL_FORMED"); 831 case DDI_EAGAIN: 832 return ("DDI_EAGAIN"); 833 case DDI_EINVAL: 834 return ("DDI_EINVAL"); 835 case DDI_ENOTSUP: 836 return ("DDI_ENOTSUP"); 837 case DDI_EPENDING: 838 return ("DDI_EPENDING"); 839 case DDI_ENOMEM: 840 return ("DDI_ENOMEM"); 841 case DDI_EBUSY: 842 return ("DDI_EBUSY"); 843 case DDI_ETRANSPORT: 844 return ("DDI_ETRANSPORT"); 845 case DDI_ECONTEXT: 846 return ("DDI_ECONTEXT"); 847 default: 848 return ("Unknown error"); 849 } 850 } 851 852 /* 853 * --------------------------------------------------------------------- 854 * These are Sun4v HIO function definitions 855 * --------------------------------------------------------------------- 856 */ 857 858 #if defined(sun4v) 859 860 /* 861 * Local prototypes 862 */ 863 static vr_handle_t nxge_hio_vr_share(nxge_t *); 864 865 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t); 866 static void nxge_hio_unshare(vr_handle_t); 867 868 static int nxge_hio_addres(vr_handle_t, mac_ring_type_t, int); 869 static void nxge_hio_remres(vr_handle_t, mac_ring_type_t, res_map_t); 870 871 static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel); 872 static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel); 873 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 874 mac_ring_type_t, int); 875 876 /* 877 * nxge_hio_init 878 * 879 * Initialize the HIO module of the NXGE driver. 880 * 881 * Arguments: 882 * nxge 883 * 884 * Notes: 885 * 886 * Context: 887 * Any domain 888 */ 889 int 890 nxge_hio_init( 891 nxge_t *nxge) 892 { 893 nxge_hio_data_t *nhd; 894 int i, region; 895 896 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 897 if (nhd == 0) { 898 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 899 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 900 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 901 } 902 903 if ((nxge->environs == SOLARIS_DOMAIN) && 904 (nxge->niu_type == N2_NIU)) { 905 if (nxge->niu_hsvc_available == B_TRUE) { 906 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 907 if (niu_hsvc->hsvc_major == 1 && 908 niu_hsvc->hsvc_minor == 1) 909 nxge->environs = SOLARIS_SERVICE_DOMAIN; 910 NXGE_DEBUG_MSG((nxge, HIO_CTL, 911 "nxge_hio_init: hypervisor services " 912 "version %d.%d", 913 niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 914 } 915 } 916 917 if (!isLDOMs(nxge)) { 918 nhd->hio.ldoms = B_FALSE; 919 return (NXGE_OK); 920 } 921 922 nhd->hio.ldoms = B_TRUE; 923 924 /* 925 * Fill in what we can. 926 */ 927 for (region = 0; region < NXGE_VR_SR_MAX; region++) { 928 nhd->vr[region].region = region; 929 } 930 nhd->available.vrs = NXGE_VR_SR_MAX - 2; 931 932 /* 933 * Initialize share and ring group structures. 934 */ 935 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 936 nxge->rx_hio_groups[i].ghandle = NULL; 937 nxge->rx_hio_groups[i].nxgep = nxge; 938 nxge->rx_hio_groups[i].gindex = 0; 939 nxge->rx_hio_groups[i].sindex = 0; 940 } 941 942 for (i = 0; i < NXGE_VR_SR_MAX; i++) { 943 nxge->shares[i].nxgep = nxge; 944 nxge->shares[i].index = 0; 945 nxge->shares[i].vrp = (void *)NULL; 946 nxge->shares[i].tmap = 0; 947 nxge->shares[i].rmap = 0; 948 nxge->shares[i].rxgroup = 0; 949 nxge->shares[i].active = B_FALSE; 950 } 951 952 /* Fill in the HV HIO function pointers. 
*/ 953 nxge_hio_hv_init(nxge); 954 955 if (isLDOMservice(nxge)) { 956 NXGE_DEBUG_MSG((nxge, HIO_CTL, 957 "Hybrid IO-capable service domain")); 958 return (NXGE_OK); 959 } else { 960 /* 961 * isLDOMguest(nxge) == B_TRUE 962 */ 963 nx_vio_fp_t *vio; 964 nhd->type = NXGE_HIO_TYPE_GUEST; 965 966 vio = &nhd->hio.vio; 967 vio->__register = (vio_net_resource_reg_t) 968 modgetsymvalue("vio_net_resource_reg", 0); 969 vio->unregister = (vio_net_resource_unreg_t) 970 modgetsymvalue("vio_net_resource_unreg", 0); 971 972 if (vio->__register == 0 || vio->unregister == 0) { 973 NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!")); 974 return (NXGE_ERROR); 975 } 976 } 977 978 return (0); 979 } 980 981 static int 982 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) 983 { 984 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 985 p_nxge_t nxge = rxgroup->nxgep; 986 int group = rxgroup->gindex; 987 int rv, sindex; 988 nxge_hio_vr_t *vr; /* The Virtualization Region */ 989 990 sindex = nxge->rx_hio_groups[group].sindex; 991 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 992 993 /* 994 * Program the mac address for the group/share. 995 */ 996 if ((rv = nxge_hio_hostinfo_init(nxge, vr, 997 (ether_addr_t *)mac_addr)) != 0) { 998 return (rv); 999 } 1000 1001 return (0); 1002 } 1003 1004 /* ARGSUSED */ 1005 static int 1006 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 1007 { 1008 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 1009 p_nxge_t nxge = rxgroup->nxgep; 1010 int group = rxgroup->gindex; 1011 int sindex; 1012 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1013 1014 sindex = nxge->rx_hio_groups[group].sindex; 1015 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 1016 1017 /* 1018 * Remove the mac address for the group/share. 1019 */ 1020 nxge_hio_hostinfo_uninit(nxge, vr); 1021 1022 return (0); 1023 } 1024 1025 /* ARGSUSED */ 1026 void 1027 nxge_hio_group_get(void *arg, mac_ring_type_t type, int group, 1028 mac_group_info_t *infop, mac_group_handle_t ghdl) 1029 { 1030 p_nxge_t nxgep = (p_nxge_t)arg; 1031 nxge_rx_ring_group_t *rxgroup; 1032 1033 switch (type) { 1034 case MAC_RING_TYPE_RX: 1035 rxgroup = &nxgep->rx_hio_groups[group]; 1036 rxgroup->gindex = group; 1037 1038 infop->mrg_driver = (mac_group_driver_t)rxgroup; 1039 infop->mrg_start = NULL; 1040 infop->mrg_stop = NULL; 1041 infop->mrg_addmac = nxge_hio_add_mac; 1042 infop->mrg_remmac = nxge_hio_rem_mac; 1043 infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS; 1044 break; 1045 1046 case MAC_RING_TYPE_TX: 1047 break; 1048 } 1049 } 1050 1051 int 1052 nxge_hio_share_assign( 1053 nxge_t *nxge, 1054 uint64_t cookie, 1055 res_map_t *tmap, 1056 res_map_t *rmap, 1057 nxge_hio_vr_t *vr) 1058 { 1059 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1060 uint64_t slot, hv_rv; 1061 nxge_hio_dc_t *dc; 1062 nxhv_vr_fp_t *fp; 1063 int i; 1064 1065 /* 1066 * Ask the Hypervisor to set up the VR for us 1067 */ 1068 fp = &nhd->hio.vr; 1069 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 1070 NXGE_ERROR_MSG((nxge, HIO_CTL, 1071 "nx_hio_share_assign: " 1072 "vr->assign() returned %d", hv_rv)); 1073 nxge_hio_unshare((vr_handle_t)vr); 1074 return (-EIO); 1075 } 1076 1077 /* 1078 * For each shared TDC, ask the HV to find us an empty slot. 
1079 * ----------------------------------------------------- 1080 */ 1081 dc = vr->tx_group.dc; 1082 for (i = 0; i < NXGE_MAX_TDCS; i++) { 1083 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1084 while (dc) { 1085 hv_rv = (*tx->assign) 1086 (vr->cookie, dc->channel, &slot); 1087 if (hv_rv != 0) { 1088 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1089 "nx_hio_share_assign: " 1090 "tx->assign(%x, %d) failed: %ld", 1091 vr->cookie, dc->channel, hv_rv)); 1092 return (-EIO); 1093 } 1094 1095 dc->cookie = vr->cookie; 1096 dc->page = (vp_channel_t)slot; 1097 1098 /* Inform the caller about the slot chosen. */ 1099 (*tmap) |= 1 << slot; 1100 1101 dc = dc->next; 1102 } 1103 } 1104 1105 /* 1106 * For each shared RDC, ask the HV to find us an empty slot. 1107 * ----------------------------------------------------- 1108 */ 1109 dc = vr->rx_group.dc; 1110 for (i = 0; i < NXGE_MAX_RDCS; i++) { 1111 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1112 while (dc) { 1113 hv_rv = (*rx->assign) 1114 (vr->cookie, dc->channel, &slot); 1115 if (hv_rv != 0) { 1116 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1117 "nx_hio_share_assign: " 1118 "rx->assign(%x, %d) failed: %ld", 1119 vr->cookie, dc->channel, hv_rv)); 1120 return (-EIO); 1121 } 1122 1123 dc->cookie = vr->cookie; 1124 dc->page = (vp_channel_t)slot; 1125 1126 /* Inform the caller about the slot chosen. */ 1127 (*rmap) |= 1 << slot; 1128 1129 dc = dc->next; 1130 } 1131 } 1132 1133 return (0); 1134 } 1135 1136 int 1137 nxge_hio_share_unassign( 1138 nxge_hio_vr_t *vr) 1139 { 1140 nxge_t *nxge = (nxge_t *)vr->nxge; 1141 nxge_hio_data_t *nhd; 1142 nxge_hio_dc_t *dc; 1143 nxhv_vr_fp_t *fp; 1144 uint64_t hv_rv; 1145 1146 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1147 1148 dc = vr->tx_group.dc; 1149 while (dc) { 1150 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1151 hv_rv = (*tx->unassign)(vr->cookie, dc->page); 1152 if (hv_rv != 0) { 1153 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1154 "nx_hio_dc_unshare: " 1155 "tx->unassign(%x, %d) failed: %ld", 1156 vr->cookie, dc->page, hv_rv)); 1157 } 1158 dc = dc->next; 1159 } 1160 1161 dc = vr->rx_group.dc; 1162 while (dc) { 1163 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1164 hv_rv = (*rx->unassign)(vr->cookie, dc->page); 1165 if (hv_rv != 0) { 1166 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1167 "nx_hio_dc_unshare: " 1168 "rx->unassign(%x, %d) failed: %ld", 1169 vr->cookie, dc->page, hv_rv)); 1170 } 1171 dc = dc->next; 1172 } 1173 1174 fp = &nhd->hio.vr; 1175 if (fp->unassign) { 1176 hv_rv = (*fp->unassign)(vr->cookie); 1177 if (hv_rv != 0) { 1178 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1179 "vr->assign(%x) failed: %ld", 1180 vr->cookie, hv_rv)); 1181 } 1182 } 1183 1184 return (0); 1185 } 1186 1187 int 1188 nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie, 1189 mac_share_handle_t *shandle) 1190 { 1191 p_nxge_t nxge = (p_nxge_t)arg; 1192 nxge_rx_ring_group_t *rxgroup; 1193 nxge_share_handle_t *shp; 1194 1195 vr_handle_t shared; /* The VR being shared */ 1196 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1197 uint64_t rmap, tmap; 1198 int rv; 1199 1200 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1201 1202 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 1203 1204 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 1205 nhd->hio.rx.assign == 0) { 1206 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 1207 return (EIO); 1208 } 1209 1210 /* 1211 * Get a VR. 
1212 */ 1213 if ((shared = nxge_hio_vr_share(nxge)) == 0) 1214 return (EAGAIN); 1215 vr = (nxge_hio_vr_t *)shared; 1216 1217 /* 1218 * Get an RDC group for us to use. 1219 */ 1220 if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) { 1221 nxge_hio_unshare(shared); 1222 return (EBUSY); 1223 } 1224 1225 /* 1226 * Add resources to the share. 1227 */ 1228 tmap = 0; 1229 rv = nxge_hio_addres(shared, MAC_RING_TYPE_TX, 1230 NXGE_HIO_SHARE_MAX_CHANNELS); 1231 if (rv != 0) { 1232 nxge_hio_unshare(shared); 1233 return (rv); 1234 } 1235 1236 rmap = 0; 1237 rv = nxge_hio_addres(shared, MAC_RING_TYPE_RX, 1238 NXGE_HIO_SHARE_MAX_CHANNELS); 1239 if (rv != 0) { 1240 nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap); 1241 nxge_hio_unshare(shared); 1242 return (rv); 1243 } 1244 1245 if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) { 1246 nxge_hio_remres(shared, MAC_RING_TYPE_RX, tmap); 1247 nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap); 1248 nxge_hio_unshare(shared); 1249 return (rv); 1250 } 1251 1252 rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl]; 1253 rxgroup->gindex = vr->rdc_tbl; 1254 rxgroup->sindex = vr->region; 1255 1256 shp = &nxge->shares[vr->region]; 1257 shp->index = vr->region; 1258 shp->vrp = (void *)vr; 1259 shp->tmap = tmap; 1260 shp->rmap = rmap; 1261 shp->rxgroup = vr->rdc_tbl; 1262 shp->active = B_TRUE; 1263 1264 /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 1265 *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 1266 1267 *shandle = (mac_share_handle_t)shp; 1268 1269 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 1270 return (0); 1271 } 1272 1273 void 1274 nxge_hio_share_free(mac_share_handle_t shandle) 1275 { 1276 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1277 1278 /* 1279 * First, unassign the VR (take it back), 1280 * so we can enable interrupts again. 1281 */ 1282 (void) nxge_hio_share_unassign(shp->vrp); 1283 1284 /* 1285 * Free Ring Resources for TX and RX 1286 */ 1287 nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 1288 nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 1289 1290 /* 1291 * Free VR resource. 1292 */ 1293 nxge_hio_unshare((vr_handle_t)shp->vrp); 1294 1295 /* 1296 * Clear internal handle state. 1297 */ 1298 shp->index = 0; 1299 shp->vrp = (void *)NULL; 1300 shp->tmap = 0; 1301 shp->rmap = 0; 1302 shp->rxgroup = 0; 1303 shp->active = B_FALSE; 1304 } 1305 1306 void 1307 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 1308 uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum) 1309 { 1310 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1311 1312 switch (type) { 1313 case MAC_RING_TYPE_RX: 1314 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1315 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1316 *rmap = shp->rmap; 1317 *gnum = shp->rxgroup; 1318 break; 1319 1320 case MAC_RING_TYPE_TX: 1321 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1322 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1323 *rmap = shp->tmap; 1324 *gnum = 0; 1325 break; 1326 } 1327 } 1328 1329 /* 1330 * nxge_hio_vr_share 1331 * 1332 * Find an unused Virtualization Region (VR). 
1333 * 1334 * Arguments: 1335 * nxge 1336 * 1337 * Notes: 1338 * 1339 * Context: 1340 * Service domain 1341 */ 1342 vr_handle_t 1343 nxge_hio_vr_share( 1344 nxge_t *nxge) 1345 { 1346 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1347 nxge_hio_vr_t *vr; 1348 1349 int first, limit, region; 1350 1351 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 1352 1353 MUTEX_ENTER(&nhd->lock); 1354 1355 if (nhd->available.vrs == 0) { 1356 MUTEX_EXIT(&nhd->lock); 1357 return (0); 1358 } 1359 1360 /* Find an empty virtual region (VR). */ 1361 if (nxge->function_num == 0) { 1362 // FUNC0_VIR0 'belongs' to NIU port 0. 1363 first = FUNC0_VIR1; 1364 limit = FUNC2_VIR0; 1365 } else if (nxge->function_num == 1) { 1366 // FUNC2_VIR0 'belongs' to NIU port 1. 1367 first = FUNC2_VIR1; 1368 limit = FUNC_VIR_MAX; 1369 } else { 1370 cmn_err(CE_WARN, 1371 "Shares not supported on function(%d) at this time.\n", 1372 nxge->function_num); 1373 } 1374 1375 for (region = first; region < limit; region++) { 1376 if (nhd->vr[region].nxge == 0) 1377 break; 1378 } 1379 1380 if (region == limit) { 1381 MUTEX_EXIT(&nhd->lock); 1382 return (0); 1383 } 1384 1385 vr = &nhd->vr[region]; 1386 vr->nxge = (uintptr_t)nxge; 1387 vr->region = (uintptr_t)region; 1388 1389 nhd->available.vrs--; 1390 1391 MUTEX_EXIT(&nhd->lock); 1392 1393 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 1394 1395 return ((vr_handle_t)vr); 1396 } 1397 1398 void 1399 nxge_hio_unshare( 1400 vr_handle_t shared) 1401 { 1402 nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 1403 nxge_t *nxge = (nxge_t *)vr->nxge; 1404 nxge_hio_data_t *nhd; 1405 1406 vr_region_t region; 1407 1408 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 1409 1410 if (!nxge) { 1411 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1412 "vr->nxge is NULL")); 1413 return; 1414 } 1415 1416 /* 1417 * This function is no longer called, but I will keep it 1418 * here in case we want to revisit this topic in the future. 1419 * 1420 * nxge_hio_hostinfo_uninit(nxge, vr); 1421 */ 1422 (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl); 1423 1424 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1425 1426 MUTEX_ENTER(&nhd->lock); 1427 1428 region = vr->region; 1429 (void) memset(vr, 0, sizeof (*vr)); 1430 vr->region = region; 1431 1432 nhd->available.vrs++; 1433 1434 MUTEX_EXIT(&nhd->lock); 1435 1436 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare")); 1437 } 1438 1439 int 1440 nxge_hio_addres( 1441 vr_handle_t shared, 1442 mac_ring_type_t type, 1443 int count) 1444 { 1445 nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 1446 nxge_t *nxge = (nxge_t *)vr->nxge; 1447 int i; 1448 1449 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres")); 1450 1451 if (!nxge) 1452 return (EINVAL); 1453 1454 for (i = 0; i < count; i++) { 1455 int rv; 1456 if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) { 1457 if (i == 0) /* Couldn't get even one DC. 
 */
				return (-rv);
			else
				break;
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
	vr_handle_t shared,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 *	Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA			A.9.6.10
 *	[Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already bound.
 *
 *	Soft Reset TxDMA		A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *	Re-initialize TxDMA		A.9.6.8
 *	Reconfigure TxDMA
 *	Enable TxDMA			A.9.6.9
 *
 * Context:
 *	Service domain
 */
int
nxge_hio_tdc_share(
	nxge_t *nxge,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	tx_ring_t *ring;
	int count;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

	/*
	 * Wait until this channel is idle.
	 */
	ring = nxge->tx_rings->rings[channel];

	/*
	 * Wait for 30 seconds.
	 */
	(void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
	for (count = 30 * 1000; count; count--) {
		if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
			break;
		}

		drv_usecwait(1000);
	}

	if (count == 0) {
		(void) atomic_swap_32(&ring->tx_ring_offline,
		    NXGE_TX_RING_ONLINE);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
		    "Tx ring %d was always BUSY", channel));
		return (-EIO);
	}

	if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
		    "Failed to remove interrupt for TxDMA channel %d",
		    channel));
		return (NXGE_ERROR);
	}

	/* Disable TxDMA A.9.6.10 */
	(void) nxge_txdma_channel_disable(nxge, channel);

	/* The SD is sharing this channel. */
	NXGE_DC_SET(set->shared.map, channel);
	set->shared.count++;

	/* Soft Reset TxDMA A.9.6.2 */
	nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

	/*
	 * Initialize the DC-specific FZC control registers.
1586 * ----------------------------------------------------- 1587 */ 1588 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 1589 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1590 "nx_hio_dc_share: FZC TDC failed: %d", channel)); 1591 return (-EIO); 1592 } 1593 1594 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 1595 1596 return (0); 1597 } 1598 1599 /* 1600 * nxge_hio_rdc_share 1601 * 1602 * Share an unused RDC channel. 1603 * 1604 * Arguments: 1605 * nxge 1606 * 1607 * Notes: 1608 * 1609 * This is the latest version of the procedure to 1610 * Reconfigure an Rx DMA channel: 1611 * 1612 * A.6.3 Reconfigure Rx DMA channel 1613 * Stop RxMAC A.9.2.6 1614 * Drain IPP Port A.9.3.6 1615 * Stop and reset RxDMA A.9.5.3 1616 * 1617 * This procedure will be executed by nxge_init_rxdma_channel() in the 1618 * guest domain: 1619 * 1620 * Initialize RxDMA A.9.5.4 1621 * Reconfigure RxDMA 1622 * Enable RxDMA A.9.5.5 1623 * 1624 * We will do this here, since the RDC is a canalis non grata: 1625 * Enable RxMAC A.9.2.10 1626 * 1627 * Context: 1628 * Service domain 1629 */ 1630 int 1631 nxge_hio_rdc_share( 1632 nxge_t *nxge, 1633 nxge_hio_vr_t *vr, 1634 int channel) 1635 { 1636 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1637 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1638 nxge_grp_set_t *set = &nxge->rx_set; 1639 nxge_rdc_grp_t *rdc_grp; 1640 1641 int current, last; 1642 1643 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 1644 1645 /* Disable interrupts. */ 1646 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1647 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 1648 "Failed to remove interrupt for RxDMA channel %d", 1649 channel)); 1650 return (NXGE_ERROR); 1651 } 1652 1653 /* Stop RxMAC = A.9.2.6 */ 1654 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1655 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1656 "Failed to disable RxMAC")); 1657 } 1658 1659 /* Drain IPP Port = A.9.3.6 */ 1660 (void) nxge_ipp_drain(nxge); 1661 1662 /* Stop and reset RxDMA = A.9.5.3 */ 1663 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1664 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1665 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1666 "Failed to disable RxDMA channel %d", channel)); 1667 } 1668 1669 /* The SD is sharing this channel. */ 1670 NXGE_DC_SET(set->shared.map, channel); 1671 set->shared.count++; 1672 1673 // Assert RST: RXDMA_CFIG1[30] = 1 1674 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 1675 1676 /* 1677 * We have to reconfigure the RDC table(s) 1678 * to which this channel belongs. 1679 */ 1680 current = hardware->def_mac_rxdma_grpid; 1681 last = current + hardware->max_rdc_grpids; 1682 for (; current < last; current++) { 1683 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1684 rdc_grp = &nxge->pt_config.rdc_grps[current]; 1685 rdc_grp->map = set->owned.map; 1686 rdc_grp->max_rdcs--; 1687 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1688 } 1689 } 1690 1691 /* 1692 * The guest domain will reconfigure the RDC later. 1693 * 1694 * But in the meantime, we must re-enable the Rx MAC so 1695 * that we can start receiving packets again on the 1696 * remaining RDCs: 1697 * 1698 * Enable RxMAC = A.9.2.10 1699 */ 1700 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1701 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1702 "nx_hio_rdc_share: Rx MAC still disabled")); 1703 } 1704 1705 /* 1706 * Initialize the DC-specific FZC control registers. 
1707 * ----------------------------------------------------- 1708 */ 1709 if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 1710 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1711 "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 1712 return (-EIO); 1713 } 1714 1715 /* 1716 * We have to initialize the guest's RDC table, too. 1717 * ----------------------------------------------------- 1718 */ 1719 rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 1720 if (rdc_grp->max_rdcs == 0) { 1721 rdc_grp->start_rdc = (uint8_t)channel; 1722 rdc_grp->def_rdc = (uint8_t)channel; 1723 rdc_grp->max_rdcs = 1; 1724 } else { 1725 rdc_grp->max_rdcs++; 1726 } 1727 NXGE_DC_SET(rdc_grp->map, channel); 1728 1729 if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 1730 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1731 "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 1732 return (-EIO); 1733 } 1734 1735 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 1736 1737 return (0); 1738 } 1739 1740 /* 1741 * nxge_hio_dc_share 1742 * 1743 * Share a DMA channel with a guest domain. 1744 * 1745 * Arguments: 1746 * nxge 1747 * vr The VR that <channel> will belong to. 1748 * type Tx or Rx. 1749 * res_map The resource map used by the caller, which we will 1750 * update if successful. 1751 * 1752 * Notes: 1753 * 1754 * Context: 1755 * Service domain 1756 */ 1757 int 1758 nxge_hio_dc_share( 1759 nxge_t *nxge, 1760 nxge_hio_vr_t *vr, 1761 mac_ring_type_t type) 1762 { 1763 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1764 nxge_hw_pt_cfg_t *hardware; 1765 nxge_hio_dc_t *dc; 1766 int channel, limit; 1767 1768 nxge_grp_set_t *set; 1769 nxge_grp_t *group; 1770 1771 int slot; 1772 1773 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 1774 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 1775 1776 /* 1777 * In version 1.0, we may only give a VR 2 RDCs or TDCs. 1778 * Not only that, but the HV has statically assigned the 1779 * channels like so: 1780 * VR0: RDC0 & RDC1 1781 * VR1: RDC2 & RDC3, etc. 1782 * The TDCs are assigned in exactly the same way. 1783 * 1784 * So, for example 1785 * hardware->start_rdc + vr->region * 2; 1786 * VR1: hardware->start_rdc + 1 * 2; 1787 * VR3: hardware->start_rdc + 3 * 2; 1788 * If start_rdc is 0, we end up with 2 or 6. 1789 * If start_rdc is 8, we end up with 10 or 14. 1790 */ 1791 1792 set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set); 1793 hardware = &nxge->pt_config.hw_config; 1794 1795 // This code is still NIU-specific (assuming only 2 ports) 1796 channel = hardware->start_rdc + (vr->region % 4) * 2; 1797 limit = channel + 2; 1798 1799 MUTEX_ENTER(&nhd->lock); 1800 for (; channel < limit; channel++) { 1801 if ((1 << channel) & set->owned.map) { 1802 break; 1803 } 1804 } 1805 1806 if (channel == limit) { 1807 MUTEX_EXIT(&nhd->lock); 1808 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1809 "nx_hio_dc_share: there are no channels to share")); 1810 return (-EIO); 1811 } 1812 1813 MUTEX_EXIT(&nhd->lock); 1814 1815 /* -------------------------------------------------- */ 1816 slot = (type == MAC_RING_TYPE_TX) ? 1817 nxge_hio_tdc_share(nxge, channel) : 1818 nxge_hio_rdc_share(nxge, vr, channel); 1819 1820 if (slot < 0) { 1821 if (type == MAC_RING_TYPE_RX) { 1822 nxge_hio_rdc_unshare(nxge, channel); 1823 } else { 1824 nxge_hio_tdc_unshare(nxge, channel); 1825 } 1826 return (slot); 1827 } 1828 1829 MUTEX_ENTER(&nhd->lock); 1830 1831 /* 1832 * Tag this channel. 1833 * -------------------------------------------------- 1834 */ 1835 dc = type == MAC_RING_TYPE_TX ? 
/*
 * nxge_hio_tdc_unshare
 *
 *	Unshare a TDC.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *	Service domain
 */
void
nxge_hio_tdc_unshare(
	nxge_t *nxge,
	int channel)
{
	nxge_grp_set_t *set = &nxge->tx_set;
	vr_handle_t handle = (vr_handle_t)set->group[0];

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

	NXGE_DC_RESET(set->shared.map, channel);
	set->shared.count--;

	if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to initialize TxDMA channel %d", channel));
		return;
	}

	/* Re-add this interrupt. */
	if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
		    "Failed to add interrupt for TxDMA channel %d", channel));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 *	Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to unshare (add again).
1923 * 1924 * Notes: 1925 * 1926 * Context: 1927 * Service domain 1928 */ 1929 void 1930 nxge_hio_rdc_unshare( 1931 nxge_t *nxge, 1932 int channel) 1933 { 1934 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1935 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1936 1937 nxge_grp_set_t *set = &nxge->rx_set; 1938 vr_handle_t handle = (vr_handle_t)set->group[0]; 1939 int current, last; 1940 1941 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 1942 1943 /* Stop RxMAC = A.9.2.6 */ 1944 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1945 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1946 "Failed to disable RxMAC")); 1947 } 1948 1949 /* Drain IPP Port = A.9.3.6 */ 1950 (void) nxge_ipp_drain(nxge); 1951 1952 /* Stop and reset RxDMA = A.9.5.3 */ 1953 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1954 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1955 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1956 "Failed to disable RxDMA channel %d", channel)); 1957 } 1958 1959 NXGE_DC_RESET(set->shared.map, channel); 1960 set->shared.count--; 1961 1962 /* 1963 * Assert RST: RXDMA_CFIG1[30] = 1 1964 * 1965 * Initialize RxDMA A.9.5.4 1966 * Reconfigure RxDMA 1967 * Enable RxDMA A.9.5.5 1968 */ 1969 if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_RX, channel))) { 1970 /* Be sure to re-enable the RX MAC. */ 1971 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1972 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1973 "nx_hio_rdc_share: Rx MAC still disabled")); 1974 } 1975 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1976 "Failed to initialize RxDMA channel %d", channel)); 1977 return; 1978 } 1979 1980 /* 1981 * We have to reconfigure the RDC table(s) 1982 * to which this channel once again belongs. 1983 */ 1984 current = hardware->def_mac_rxdma_grpid; 1985 last = current + hardware->max_rdc_grpids; 1986 for (; current < last; current++) { 1987 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1988 nxge_rdc_grp_t *group; 1989 group = &nxge->pt_config.rdc_grps[current]; 1990 group->map = set->owned.map; 1991 group->max_rdcs++; 1992 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1993 } 1994 } 1995 1996 /* 1997 * Enable RxMAC = A.9.2.10 1998 */ 1999 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 2000 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2001 "nx_hio_rdc_share: Rx MAC still disabled")); 2002 return; 2003 } 2004 2005 /* Re-add this interrupt. */ 2006 if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 2007 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2008 "nx_hio_rdc_unshare: Failed to add interrupt for " 2009 "RxDMA CHANNEL %d", channel)); 2010 } 2011 2012 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 2013 } 2014 2015 /* 2016 * nxge_hio_dc_unshare 2017 * 2018 * Unshare (reuse) a DMA channel. 2019 * 2020 * Arguments: 2021 * nxge 2022 * vr The VR that <channel> belongs to. 2023 * type Tx or Rx. 2024 * channel The DMA channel to reuse. 2025 * 2026 * Notes: 2027 * 2028 * Context: 2029 * Service domain 2030 */ 2031 void 2032 nxge_hio_dc_unshare( 2033 nxge_t *nxge, 2034 nxge_hio_vr_t *vr, 2035 mac_ring_type_t type, 2036 int channel) 2037 { 2038 nxge_grp_t *group; 2039 nxge_hio_dc_t *dc; 2040 2041 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 2042 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 2043 2044 /* Unlink the channel from its group. */ 2045 /* -------------------------------------------------- */ 2046 group = (type == MAC_RING_TYPE_TX) ? 
&vr->tx_group : &vr->rx_group; 2047 NXGE_DC_RESET(group->map, channel); 2048 if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 2049 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2050 "nx_hio_dc_unshare(%d) failed", channel)); 2051 return; 2052 } 2053 2054 dc->vr = 0; 2055 dc->cookie = 0; 2056 2057 if (type == MAC_RING_TYPE_RX) { 2058 nxge_hio_rdc_unshare(nxge, channel); 2059 } else { 2060 nxge_hio_tdc_unshare(nxge, channel); 2061 } 2062 2063 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 2064 } 2065 2066 #endif /* if defined(sun4v) */ 2067