1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * nxge_hio.c 29 * 30 * This file manages the virtualization resources for Neptune 31 * devices. That is, it implements a hybrid I/O (HIO) approach in the 32 * Solaris kernel, whereby a guest domain on an LDOMs server may 33 * request & use hardware resources from the service domain. 34 * 35 */ 36 37 #include <sys/nxge/nxge_impl.h> 38 #include <sys/nxge/nxge_fzc.h> 39 #include <sys/nxge/nxge_rxdma.h> 40 #include <sys/nxge/nxge_txdma.h> 41 #include <sys/nxge/nxge_hio.h> 42 43 #define NXGE_HIO_SHARE_MIN_CHANNELS 2 44 #define NXGE_HIO_SHARE_MAX_CHANNELS 2 45 46 /* 47 * External prototypes 48 */ 49 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 50 51 /* The following function may be found in nxge_main.c */ 52 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 53 54 /* The following function may be found in nxge_[t|r]xdma.c */ 55 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 56 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 57 58 /* 59 * Local prototypes 60 */ 61 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 62 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 63 static void nxge_grp_dc_map(nxge_grp_t *group); 64 65 /* 66 * These functions are used by both service & guest domains to 67 * decide whether they're running in an LDOMs/XEN environment 68 * or not. If so, then the Hybrid I/O (HIO) module is initialized. 69 */ 70 71 /* 72 * nxge_get_environs 73 * 74 * Figure out if we are in a guest domain or not. 75 * 76 * Arguments: 77 * nxge 78 * 79 * Notes: 80 * 81 * Context: 82 * Any domain 83 */ 84 void 85 nxge_get_environs( 86 nxge_t *nxge) 87 { 88 char *string; 89 90 /* 91 * In the beginning, assume that we are running sans LDOMs/XEN. 92 */ 93 nxge->environs = SOLARIS_DOMAIN; 94 95 /* 96 * Are we a hybrid I/O (HIO) guest domain driver? 97 */ 98 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 99 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 100 "niutype", &string)) == DDI_PROP_SUCCESS) { 101 if (strcmp(string, "n2niu") == 0) { 102 nxge->environs = SOLARIS_GUEST_DOMAIN; 103 /* So we can allocate properly-aligned memory. */ 104 nxge->niu_type = N2_NIU; 105 NXGE_DEBUG_MSG((nxge, HIO_CTL, 106 "Hybrid IO-capable guest domain")); 107 } 108 ddi_prop_free(string); 109 } 110 } 111 112 #if !defined(sun4v) 113 114 /* 115 * nxge_hio_init 116 * 117 * Initialize the HIO module of the NXGE driver. 118 * 119 * Arguments: 120 * nxge 121 * 122 * Notes: 123 * This is the non-hybrid I/O version of this function. 124 * 125 * Context: 126 * Any domain 127 */ 128 int 129 nxge_hio_init(nxge_t *nxge) 130 { 131 nxge_hio_data_t *nhd; 132 133 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 134 if (nhd == 0) { 135 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 136 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 137 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 138 } 139 140 nhd->hio.ldoms = B_FALSE; 141 142 return (NXGE_OK); 143 } 144 145 #endif 146 147 void 148 nxge_hio_uninit(nxge_t *nxge) 149 { 150 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 151 152 ASSERT(nxge->nxge_hw_p->ndevs == 0); 153 154 if (nhd != NULL) { 155 MUTEX_DESTROY(&nhd->lock); 156 KMEM_FREE(nhd, sizeof (*nhd)); 157 nxge->nxge_hw_p->hio = 0; 158 } 159 } 160 161 /* 162 * nxge_dci_map 163 * 164 * Map a DMA channel index to a channel number. 165 * 166 * Arguments: 167 * instance The instance number of the driver. 168 * type The type of channel this is: Tx or Rx. 169 * index The index to convert to a channel number 170 * 171 * Notes: 172 * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 173 * 174 * Context: 175 * Any domain 176 */ 177 int 178 nxge_dci_map( 179 nxge_t *nxge, 180 vpc_type_t type, 181 int index) 182 { 183 nxge_grp_set_t *set; 184 int dc; 185 186 switch (type) { 187 case VP_BOUND_TX: 188 set = &nxge->tx_set; 189 break; 190 case VP_BOUND_RX: 191 set = &nxge->rx_set; 192 break; 193 } 194 195 for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 196 if ((1 << dc) & set->owned.map) { 197 if (index == 0) 198 return (dc); 199 else 200 index--; 201 } 202 } 203 204 return (-1); 205 } 206 207 /* 208 * --------------------------------------------------------------------- 209 * These are the general-purpose DMA channel group functions. That is, 210 * these functions are used to manage groups of TDCs or RDCs in an HIO 211 * environment. 212 * 213 * But is also expected that in the future they will be able to manage 214 * Crossbow groups. 215 * --------------------------------------------------------------------- 216 */ 217 218 /* 219 * nxge_grp_cleanup(p_nxge_t nxge) 220 * 221 * Remove all outstanding groups. 222 * 223 * Arguments: 224 * nxge 225 */ 226 void 227 nxge_grp_cleanup(p_nxge_t nxge) 228 { 229 nxge_grp_set_t *set; 230 int i; 231 232 MUTEX_ENTER(&nxge->group_lock); 233 234 /* 235 * Find RX groups that need to be cleaned up. 236 */ 237 set = &nxge->rx_set; 238 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 239 if (set->group[i] != NULL) { 240 KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 241 set->group[i] = NULL; 242 } 243 } 244 245 /* 246 * Find TX groups that need to be cleaned up. 247 */ 248 set = &nxge->tx_set; 249 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 250 if (set->group[i] != NULL) { 251 KMEM_FREE(set->group[i], sizeof (nxge_grp_t)); 252 set->group[i] = NULL; 253 } 254 } 255 MUTEX_EXIT(&nxge->group_lock); 256 } 257 258 259 /* 260 * nxge_grp_add 261 * 262 * Add a group to an instance of NXGE. 263 * 264 * Arguments: 265 * nxge 266 * type Tx or Rx 267 * 268 * Notes: 269 * 270 * Context: 271 * Any domain 272 */ 273 nxge_grp_t * 274 nxge_grp_add( 275 nxge_t *nxge, 276 nxge_grp_type_t type) 277 { 278 nxge_grp_set_t *set; 279 nxge_grp_t *group; 280 int i; 281 282 group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 283 group->nxge = nxge; 284 285 MUTEX_ENTER(&nxge->group_lock); 286 switch (type) { 287 case NXGE_TRANSMIT_GROUP: 288 case EXT_TRANSMIT_GROUP: 289 set = &nxge->tx_set; 290 break; 291 default: 292 set = &nxge->rx_set; 293 break; 294 } 295 296 group->type = type; 297 group->active = B_TRUE; 298 group->sequence = set->sequence++; 299 300 /* Find an empty slot for this logical group. */ 301 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 302 if (set->group[i] == 0) { 303 group->index = i; 304 set->group[i] = group; 305 NXGE_DC_SET(set->lg.map, i); 306 set->lg.count++; 307 break; 308 } 309 } 310 MUTEX_EXIT(&nxge->group_lock); 311 312 NXGE_DEBUG_MSG((nxge, HIO_CTL, 313 "nxge_grp_add: %cgroup = %d.%d", 314 type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 315 nxge->mac.portnum, group->sequence)); 316 317 return (group); 318 } 319 320 void 321 nxge_grp_remove( 322 nxge_t *nxge, 323 nxge_grp_t *group) /* The group to remove. */ 324 { 325 nxge_grp_set_t *set; 326 vpc_type_t type; 327 328 MUTEX_ENTER(&nxge->group_lock); 329 switch (group->type) { 330 case NXGE_TRANSMIT_GROUP: 331 case EXT_TRANSMIT_GROUP: 332 set = &nxge->tx_set; 333 break; 334 default: 335 set = &nxge->rx_set; 336 break; 337 } 338 339 if (set->group[group->index] != group) { 340 MUTEX_EXIT(&nxge->group_lock); 341 return; 342 } 343 344 set->group[group->index] = 0; 345 NXGE_DC_RESET(set->lg.map, group->index); 346 set->lg.count--; 347 348 /* While inside the mutex, deactivate <group>. */ 349 group->active = B_FALSE; 350 351 MUTEX_EXIT(&nxge->group_lock); 352 353 NXGE_DEBUG_MSG((nxge, HIO_CTL, 354 "nxge_grp_remove(%c.%d.%d) called", 355 group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 356 nxge->mac.portnum, group->sequence)); 357 358 /* Now, remove any DCs which are still active. */ 359 switch (group->type) { 360 default: 361 type = VP_BOUND_TX; 362 break; 363 case NXGE_RECEIVE_GROUP: 364 case EXT_RECEIVE_GROUP: 365 type = VP_BOUND_RX; 366 } 367 368 while (group->dc) { 369 nxge_grp_dc_remove(nxge, type, group->dc->channel); 370 } 371 372 KMEM_FREE(group, sizeof (*group)); 373 } 374 375 /* 376 * nx_hio_dc_add 377 * 378 * Add a DMA channel to a VR/Group. 379 * 380 * Arguments: 381 * nxge 382 * channel The channel to add. 383 * Notes: 384 * 385 * Context: 386 * Any domain 387 */ 388 /* ARGSUSED */ 389 int 390 nxge_grp_dc_add( 391 nxge_t *nxge, 392 nxge_grp_t *group, /* The group to add <channel> to. */ 393 vpc_type_t type, /* Rx or Tx */ 394 int channel) /* A physical/logical channel number */ 395 { 396 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 397 nxge_hio_dc_t *dc; 398 nxge_grp_set_t *set; 399 nxge_status_t status = NXGE_OK; 400 401 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 402 403 if (group == NULL) 404 return (0); 405 406 switch (type) { 407 default: 408 set = &nxge->tx_set; 409 if (channel > NXGE_MAX_TDCS) { 410 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 411 "nxge_grp_dc_add: TDC = %d", channel)); 412 return (NXGE_ERROR); 413 } 414 break; 415 case VP_BOUND_RX: 416 set = &nxge->rx_set; 417 if (channel > NXGE_MAX_RDCS) { 418 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 419 "nxge_grp_dc_add: RDC = %d", channel)); 420 return (NXGE_ERROR); 421 } 422 break; 423 } 424 425 NXGE_DEBUG_MSG((nxge, HIO_CTL, 426 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 427 type == VP_BOUND_TX ? 't' : 'r', 428 nxge->mac.portnum, group->sequence, group->count, channel)); 429 430 MUTEX_ENTER(&nxge->group_lock); 431 if (group->active != B_TRUE) { 432 /* We may be in the process of removing this group. */ 433 MUTEX_EXIT(&nxge->group_lock); 434 return (NXGE_ERROR); 435 } 436 MUTEX_EXIT(&nxge->group_lock); 437 438 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 439 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 440 "nxge_grp_dc_add(%d): DC FIND failed", channel)); 441 return (NXGE_ERROR); 442 } 443 444 MUTEX_ENTER(&nhd->lock); 445 446 if (dc->group) { 447 MUTEX_EXIT(&nhd->lock); 448 /* This channel is already in use! */ 449 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 450 "nxge_grp_dc_add(%d): channel already in group", channel)); 451 return (NXGE_ERROR); 452 } 453 454 dc->next = 0; 455 dc->page = channel; 456 dc->channel = (nxge_channel_t)channel; 457 458 dc->type = type; 459 if (type == VP_BOUND_RX) { 460 dc->init = nxge_init_rxdma_channel; 461 dc->uninit = nxge_uninit_rxdma_channel; 462 } else { 463 dc->init = nxge_init_txdma_channel; 464 dc->uninit = nxge_uninit_txdma_channel; 465 } 466 467 dc->group = group; 468 469 if (isLDOMguest(nxge)) 470 (void) nxge_hio_ldsv_add(nxge, dc); 471 472 NXGE_DC_SET(set->owned.map, channel); 473 set->owned.count++; 474 475 MUTEX_EXIT(&nhd->lock); 476 477 if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) { 478 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 479 "nxge_grp_dc_add(%d): channel init failed", channel)); 480 return (NXGE_ERROR); 481 } 482 483 nxge_grp_dc_append(nxge, group, dc); 484 485 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 486 487 return ((int)status); 488 } 489 490 void 491 nxge_grp_dc_remove( 492 nxge_t *nxge, 493 vpc_type_t type, 494 int channel) 495 { 496 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 497 nxge_hio_dc_t *dc; 498 nxge_grp_set_t *set; 499 nxge_grp_t *group; 500 501 dc_uninit_t uninit; 502 503 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 504 505 if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) { 506 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 507 "nx_hio_dc_remove: find(%d) failed", channel)); 508 return; 509 } 510 group = (nxge_grp_t *)dc->group; 511 512 if (isLDOMguest(nxge)) { 513 (void) nxge_hio_intr_remove(nxge, type, channel); 514 } 515 516 NXGE_DEBUG_MSG((nxge, HIO_CTL, 517 "DC remove: group = %d.%d.%d, %cdc %d", 518 nxge->mac.portnum, group->sequence, group->count, 519 type == VP_BOUND_TX ? 't' : 'r', dc->channel)); 520 521 MUTEX_ENTER(&nhd->lock); 522 523 set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set; 524 if (isLDOMs(nxge) && ((1 << channel) && set->shared.map)) { 525 NXGE_DC_RESET(group->map, channel); 526 } 527 528 /* Remove the DC from its group. */ 529 if (nxge_grp_dc_unlink(nxge, group, channel) != dc) { 530 MUTEX_EXIT(&nhd->lock); 531 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 532 "nx_hio_dc_remove(%d) failed", channel)); 533 return; 534 } 535 536 uninit = dc->uninit; 537 channel = dc->channel; 538 539 NXGE_DC_RESET(set->owned.map, channel); 540 set->owned.count--; 541 542 (void) memset(dc, 0, sizeof (*dc)); 543 544 MUTEX_EXIT(&nhd->lock); 545 546 (*uninit)(nxge, channel); 547 548 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove")); 549 } 550 551 nxge_hio_dc_t * 552 nxge_grp_dc_find( 553 nxge_t *nxge, 554 vpc_type_t type, /* Rx or Tx */ 555 int channel) 556 { 557 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 558 nxge_hio_dc_t *current; 559 560 current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0]; 561 562 if (!isLDOMguest(nxge)) { 563 return (¤t[channel]); 564 } else { 565 /* We're in a guest domain. */ 566 int i, limit = (type == VP_BOUND_TX) ? 567 NXGE_MAX_TDCS : NXGE_MAX_RDCS; 568 569 MUTEX_ENTER(&nhd->lock); 570 for (i = 0; i < limit; i++, current++) { 571 if (current->channel == channel) { 572 if (current->vr && current->vr->nxge == 573 (uintptr_t)nxge) { 574 MUTEX_EXIT(&nhd->lock); 575 return (current); 576 } 577 } 578 } 579 MUTEX_EXIT(&nhd->lock); 580 } 581 582 return (0); 583 } 584 585 /* 586 * nxge_grp_dc_append 587 * 588 * Append a DMA channel to a group. 589 * 590 * Arguments: 591 * nxge 592 * group The group to append to 593 * dc The DMA channel to append 594 * 595 * Notes: 596 * 597 * Context: 598 * Any domain 599 */ 600 static 601 void 602 nxge_grp_dc_append( 603 nxge_t *nxge, 604 nxge_grp_t *group, 605 nxge_hio_dc_t *dc) 606 { 607 MUTEX_ENTER(&nxge->group_lock); 608 609 if (group->dc == 0) { 610 group->dc = dc; 611 } else { 612 nxge_hio_dc_t *current = group->dc; 613 do { 614 if (current->next == 0) { 615 current->next = dc; 616 break; 617 } 618 current = current->next; 619 } while (current); 620 } 621 622 NXGE_DC_SET(group->map, dc->channel); 623 624 nxge_grp_dc_map(group); 625 group->count++; 626 627 MUTEX_EXIT(&nxge->group_lock); 628 } 629 630 /* 631 * nxge_grp_dc_unlink 632 * 633 * Unlink a DMA channel fromits linked list (group). 634 * 635 * Arguments: 636 * nxge 637 * group The group (linked list) to unlink from 638 * dc The DMA channel to append 639 * 640 * Notes: 641 * 642 * Context: 643 * Any domain 644 */ 645 nxge_hio_dc_t * 646 nxge_grp_dc_unlink( 647 nxge_t *nxge, 648 nxge_grp_t *group, 649 int channel) 650 { 651 nxge_hio_dc_t *current, *previous; 652 653 MUTEX_ENTER(&nxge->group_lock); 654 655 if ((current = group->dc) == 0) { 656 MUTEX_EXIT(&nxge->group_lock); 657 return (0); 658 } 659 660 previous = 0; 661 do { 662 if (current->channel == channel) { 663 if (previous) 664 previous->next = current->next; 665 else 666 group->dc = current->next; 667 break; 668 } 669 previous = current; 670 current = current->next; 671 } while (current); 672 673 if (current == 0) { 674 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 675 "DC unlink: DC %d not found", channel)); 676 } else { 677 current->next = 0; 678 current->group = 0; 679 680 group->count--; 681 } 682 683 nxge_grp_dc_map(group); 684 685 MUTEX_EXIT(&nxge->group_lock); 686 687 return (current); 688 } 689 690 /* 691 * nxge_grp_dc_map 692 * 693 * Map a linked list to an array of channel numbers. 694 * 695 * Arguments: 696 * nxge 697 * group The group to remap. 698 * 699 * Notes: 700 * It is expected that the caller will hold the correct mutex. 701 * 702 * Context: 703 * Service domain 704 */ 705 void 706 nxge_grp_dc_map( 707 nxge_grp_t *group) 708 { 709 nxge_channel_t *legend; 710 nxge_hio_dc_t *dc; 711 712 (void) memset(group->legend, 0, sizeof (group->legend)); 713 714 legend = group->legend; 715 dc = group->dc; 716 while (dc) { 717 *legend = dc->channel; 718 legend++; 719 dc = dc->next; 720 } 721 } 722 723 /* 724 * --------------------------------------------------------------------- 725 * These are HIO debugging functions. 726 * --------------------------------------------------------------------- 727 */ 728 729 /* 730 * nxge_delay 731 * 732 * Delay <seconds> number of seconds. 733 * 734 * Arguments: 735 * nxge 736 * group The group to append to 737 * dc The DMA channel to append 738 * 739 * Notes: 740 * This is a developer-only function. 741 * 742 * Context: 743 * Any domain 744 */ 745 void 746 nxge_delay( 747 int seconds) 748 { 749 delay(drv_usectohz(seconds * 1000000)); 750 } 751 752 static dmc_reg_name_t rx_names[] = { 753 { "RXDMA_CFIG1", 0 }, 754 { "RXDMA_CFIG2", 8 }, 755 { "RBR_CFIG_A", 0x10 }, 756 { "RBR_CFIG_B", 0x18 }, 757 { "RBR_KICK", 0x20 }, 758 { "RBR_STAT", 0x28 }, 759 { "RBR_HDH", 0x30 }, 760 { "RBR_HDL", 0x38 }, 761 { "RCRCFIG_A", 0x40 }, 762 { "RCRCFIG_B", 0x48 }, 763 { "RCRSTAT_A", 0x50 }, 764 { "RCRSTAT_B", 0x58 }, 765 { "RCRSTAT_C", 0x60 }, 766 { "RX_DMA_ENT_MSK", 0x68 }, 767 { "RX_DMA_CTL_STAT", 0x70 }, 768 { "RCR_FLSH", 0x78 }, 769 { "RXMISC", 0x90 }, 770 { "RX_DMA_CTL_STAT_DBG", 0x98 }, 771 { 0, -1 } 772 }; 773 774 static dmc_reg_name_t tx_names[] = { 775 { "Tx_RNG_CFIG", 0 }, 776 { "Tx_RNG_HDL", 0x10 }, 777 { "Tx_RNG_KICK", 0x18 }, 778 { "Tx_ENT_MASK", 0x20 }, 779 { "Tx_CS", 0x28 }, 780 { "TxDMA_MBH", 0x30 }, 781 { "TxDMA_MBL", 0x38 }, 782 { "TxDMA_PRE_ST", 0x40 }, 783 { "Tx_RNG_ERR_LOGH", 0x48 }, 784 { "Tx_RNG_ERR_LOGL", 0x50 }, 785 { "TDMC_INTR_DBG", 0x60 }, 786 { "Tx_CS_DBG", 0x68 }, 787 { 0, -1 } 788 }; 789 790 /* 791 * nxge_xx2str 792 * 793 * Translate a register address into a string. 794 * 795 * Arguments: 796 * offset The address of the register to translate. 797 * 798 * Notes: 799 * These are developer-only function. 800 * 801 * Context: 802 * Any domain 803 */ 804 const char * 805 nxge_rx2str( 806 int offset) 807 { 808 dmc_reg_name_t *reg = &rx_names[0]; 809 810 offset &= DMA_CSR_MASK; 811 812 while (reg->name) { 813 if (offset == reg->offset) 814 return (reg->name); 815 reg++; 816 } 817 818 return (0); 819 } 820 821 const char * 822 nxge_tx2str( 823 int offset) 824 { 825 dmc_reg_name_t *reg = &tx_names[0]; 826 827 offset &= DMA_CSR_MASK; 828 829 while (reg->name) { 830 if (offset == reg->offset) 831 return (reg->name); 832 reg++; 833 } 834 835 return (0); 836 } 837 838 /* 839 * nxge_ddi_perror 840 * 841 * Map a DDI error number to a string. 842 * 843 * Arguments: 844 * ddi_error The DDI error number to map. 845 * 846 * Notes: 847 * 848 * Context: 849 * Any domain 850 */ 851 const char * 852 nxge_ddi_perror( 853 int ddi_error) 854 { 855 switch (ddi_error) { 856 case DDI_SUCCESS: 857 return ("DDI_SUCCESS"); 858 case DDI_FAILURE: 859 return ("DDI_FAILURE"); 860 case DDI_NOT_WELL_FORMED: 861 return ("DDI_NOT_WELL_FORMED"); 862 case DDI_EAGAIN: 863 return ("DDI_EAGAIN"); 864 case DDI_EINVAL: 865 return ("DDI_EINVAL"); 866 case DDI_ENOTSUP: 867 return ("DDI_ENOTSUP"); 868 case DDI_EPENDING: 869 return ("DDI_EPENDING"); 870 case DDI_ENOMEM: 871 return ("DDI_ENOMEM"); 872 case DDI_EBUSY: 873 return ("DDI_EBUSY"); 874 case DDI_ETRANSPORT: 875 return ("DDI_ETRANSPORT"); 876 case DDI_ECONTEXT: 877 return ("DDI_ECONTEXT"); 878 default: 879 return ("Unknown error"); 880 } 881 } 882 883 /* 884 * --------------------------------------------------------------------- 885 * These are Sun4v HIO function definitions 886 * --------------------------------------------------------------------- 887 */ 888 889 #if defined(sun4v) 890 891 /* 892 * Local prototypes 893 */ 894 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *); 895 896 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t); 897 static void nxge_hio_unshare(nxge_hio_vr_t *); 898 899 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int); 900 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t); 901 902 static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel); 903 static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel); 904 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 905 mac_ring_type_t, int); 906 907 /* 908 * nxge_hio_init 909 * 910 * Initialize the HIO module of the NXGE driver. 911 * 912 * Arguments: 913 * nxge 914 * 915 * Notes: 916 * 917 * Context: 918 * Any domain 919 */ 920 int 921 nxge_hio_init( 922 nxge_t *nxge) 923 { 924 nxge_hio_data_t *nhd; 925 int i, region; 926 927 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 928 if (nhd == 0) { 929 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 930 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 931 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 932 } 933 934 if ((nxge->environs == SOLARIS_DOMAIN) && 935 (nxge->niu_type == N2_NIU)) { 936 if (nxge->niu_hsvc_available == B_TRUE) { 937 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 938 if (niu_hsvc->hsvc_major == 1 && 939 niu_hsvc->hsvc_minor == 1) 940 nxge->environs = SOLARIS_SERVICE_DOMAIN; 941 NXGE_DEBUG_MSG((nxge, HIO_CTL, 942 "nxge_hio_init: hypervisor services " 943 "version %d.%d", 944 niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 945 } 946 } 947 948 if (!isLDOMs(nxge)) { 949 nhd->hio.ldoms = B_FALSE; 950 return (NXGE_OK); 951 } 952 953 nhd->hio.ldoms = B_TRUE; 954 955 /* 956 * Fill in what we can. 957 */ 958 for (region = 0; region < NXGE_VR_SR_MAX; region++) { 959 nhd->vr[region].region = region; 960 } 961 nhd->vrs = NXGE_VR_SR_MAX - 2; 962 963 /* 964 * Initialize share and ring group structures. 965 */ 966 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 967 nxge->rx_hio_groups[i].ghandle = NULL; 968 nxge->rx_hio_groups[i].nxgep = nxge; 969 nxge->rx_hio_groups[i].gindex = 0; 970 nxge->rx_hio_groups[i].sindex = 0; 971 } 972 973 for (i = 0; i < NXGE_VR_SR_MAX; i++) { 974 nxge->shares[i].nxgep = nxge; 975 nxge->shares[i].index = 0; 976 nxge->shares[i].vrp = (void *)NULL; 977 nxge->shares[i].tmap = 0; 978 nxge->shares[i].rmap = 0; 979 nxge->shares[i].rxgroup = 0; 980 nxge->shares[i].active = B_FALSE; 981 } 982 983 /* Fill in the HV HIO function pointers. */ 984 nxge_hio_hv_init(nxge); 985 986 if (isLDOMservice(nxge)) { 987 NXGE_DEBUG_MSG((nxge, HIO_CTL, 988 "Hybrid IO-capable service domain")); 989 return (NXGE_OK); 990 } else { 991 /* 992 * isLDOMguest(nxge) == B_TRUE 993 */ 994 nx_vio_fp_t *vio; 995 nhd->type = NXGE_HIO_TYPE_GUEST; 996 997 vio = &nhd->hio.vio; 998 vio->__register = (vio_net_resource_reg_t) 999 modgetsymvalue("vio_net_resource_reg", 0); 1000 vio->unregister = (vio_net_resource_unreg_t) 1001 modgetsymvalue("vio_net_resource_unreg", 0); 1002 1003 if (vio->__register == 0 || vio->unregister == 0) { 1004 NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!")); 1005 return (NXGE_ERROR); 1006 } 1007 } 1008 1009 return (0); 1010 } 1011 1012 static int 1013 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) 1014 { 1015 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 1016 p_nxge_t nxge = rxgroup->nxgep; 1017 int group = rxgroup->gindex; 1018 int rv, sindex; 1019 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1020 1021 sindex = nxge->rx_hio_groups[group].sindex; 1022 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 1023 1024 /* 1025 * Program the mac address for the group/share. 1026 */ 1027 if ((rv = nxge_hio_hostinfo_init(nxge, vr, 1028 (ether_addr_t *)mac_addr)) != 0) { 1029 return (rv); 1030 } 1031 1032 return (0); 1033 } 1034 1035 /* ARGSUSED */ 1036 static int 1037 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 1038 { 1039 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 1040 p_nxge_t nxge = rxgroup->nxgep; 1041 int group = rxgroup->gindex; 1042 int sindex; 1043 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1044 1045 sindex = nxge->rx_hio_groups[group].sindex; 1046 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 1047 1048 /* 1049 * Remove the mac address for the group/share. 1050 */ 1051 nxge_hio_hostinfo_uninit(nxge, vr); 1052 1053 return (0); 1054 } 1055 1056 /* ARGSUSED */ 1057 void 1058 nxge_hio_group_get(void *arg, mac_ring_type_t type, int group, 1059 mac_group_info_t *infop, mac_group_handle_t ghdl) 1060 { 1061 p_nxge_t nxgep = (p_nxge_t)arg; 1062 nxge_rx_ring_group_t *rxgroup; 1063 1064 switch (type) { 1065 case MAC_RING_TYPE_RX: 1066 rxgroup = &nxgep->rx_hio_groups[group]; 1067 rxgroup->gindex = group; 1068 1069 infop->mrg_driver = (mac_group_driver_t)rxgroup; 1070 infop->mrg_start = NULL; 1071 infop->mrg_stop = NULL; 1072 infop->mrg_addmac = nxge_hio_add_mac; 1073 infop->mrg_remmac = nxge_hio_rem_mac; 1074 infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS; 1075 break; 1076 1077 case MAC_RING_TYPE_TX: 1078 break; 1079 } 1080 } 1081 1082 int 1083 nxge_hio_share_assign( 1084 nxge_t *nxge, 1085 uint64_t cookie, 1086 res_map_t *tmap, 1087 res_map_t *rmap, 1088 nxge_hio_vr_t *vr) 1089 { 1090 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1091 uint64_t slot, hv_rv; 1092 nxge_hio_dc_t *dc; 1093 nxhv_vr_fp_t *fp; 1094 int i; 1095 1096 /* 1097 * Ask the Hypervisor to set up the VR for us 1098 */ 1099 fp = &nhd->hio.vr; 1100 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 1101 NXGE_ERROR_MSG((nxge, HIO_CTL, 1102 "nx_hio_share_assign: " 1103 "vr->assign() returned %d", hv_rv)); 1104 nxge_hio_unshare(vr); 1105 return (-EIO); 1106 } 1107 1108 /* 1109 * For each shared TDC, ask the HV to find us an empty slot. 1110 * ----------------------------------------------------- 1111 */ 1112 dc = vr->tx_group.dc; 1113 for (i = 0; i < NXGE_MAX_TDCS; i++) { 1114 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1115 while (dc) { 1116 hv_rv = (*tx->assign) 1117 (vr->cookie, dc->channel, &slot); 1118 if (hv_rv != 0) { 1119 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1120 "nx_hio_share_assign: " 1121 "tx->assign(%x, %d) failed: %ld", 1122 vr->cookie, dc->channel, hv_rv)); 1123 return (-EIO); 1124 } 1125 1126 dc->cookie = vr->cookie; 1127 dc->page = (vp_channel_t)slot; 1128 1129 /* Inform the caller about the slot chosen. */ 1130 (*tmap) |= 1 << slot; 1131 1132 dc = dc->next; 1133 } 1134 } 1135 1136 /* 1137 * For each shared RDC, ask the HV to find us an empty slot. 1138 * ----------------------------------------------------- 1139 */ 1140 dc = vr->rx_group.dc; 1141 for (i = 0; i < NXGE_MAX_RDCS; i++) { 1142 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1143 while (dc) { 1144 hv_rv = (*rx->assign) 1145 (vr->cookie, dc->channel, &slot); 1146 if (hv_rv != 0) { 1147 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1148 "nx_hio_share_assign: " 1149 "rx->assign(%x, %d) failed: %ld", 1150 vr->cookie, dc->channel, hv_rv)); 1151 return (-EIO); 1152 } 1153 1154 dc->cookie = vr->cookie; 1155 dc->page = (vp_channel_t)slot; 1156 1157 /* Inform the caller about the slot chosen. */ 1158 (*rmap) |= 1 << slot; 1159 1160 dc = dc->next; 1161 } 1162 } 1163 1164 return (0); 1165 } 1166 1167 int 1168 nxge_hio_share_unassign( 1169 nxge_hio_vr_t *vr) 1170 { 1171 nxge_t *nxge = (nxge_t *)vr->nxge; 1172 nxge_hio_data_t *nhd; 1173 nxge_hio_dc_t *dc; 1174 nxhv_vr_fp_t *fp; 1175 uint64_t hv_rv; 1176 1177 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1178 1179 dc = vr->tx_group.dc; 1180 while (dc) { 1181 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1182 hv_rv = (*tx->unassign)(vr->cookie, dc->page); 1183 if (hv_rv != 0) { 1184 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1185 "nx_hio_dc_unshare: " 1186 "tx->unassign(%x, %d) failed: %ld", 1187 vr->cookie, dc->page, hv_rv)); 1188 } 1189 dc = dc->next; 1190 } 1191 1192 dc = vr->rx_group.dc; 1193 while (dc) { 1194 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1195 hv_rv = (*rx->unassign)(vr->cookie, dc->page); 1196 if (hv_rv != 0) { 1197 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1198 "nx_hio_dc_unshare: " 1199 "rx->unassign(%x, %d) failed: %ld", 1200 vr->cookie, dc->page, hv_rv)); 1201 } 1202 dc = dc->next; 1203 } 1204 1205 fp = &nhd->hio.vr; 1206 if (fp->unassign) { 1207 hv_rv = (*fp->unassign)(vr->cookie); 1208 if (hv_rv != 0) { 1209 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1210 "vr->assign(%x) failed: %ld", 1211 vr->cookie, hv_rv)); 1212 } 1213 } 1214 1215 return (0); 1216 } 1217 1218 int 1219 nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie, 1220 mac_share_handle_t *shandle) 1221 { 1222 p_nxge_t nxge = (p_nxge_t)arg; 1223 nxge_rx_ring_group_t *rxgroup; 1224 nxge_share_handle_t *shp; 1225 1226 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1227 uint64_t rmap, tmap; 1228 int rv; 1229 1230 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1231 1232 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 1233 1234 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 1235 nhd->hio.rx.assign == 0) { 1236 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 1237 return (EIO); 1238 } 1239 1240 /* 1241 * Get a VR. 1242 */ 1243 if ((vr = nxge_hio_vr_share(nxge)) == 0) 1244 return (EAGAIN); 1245 1246 /* 1247 * Get an RDC group for us to use. 1248 */ 1249 if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) { 1250 nxge_hio_unshare(vr); 1251 return (EBUSY); 1252 } 1253 1254 /* 1255 * Add resources to the share. 1256 */ 1257 tmap = 0; 1258 rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, 1259 NXGE_HIO_SHARE_MAX_CHANNELS); 1260 if (rv != 0) { 1261 nxge_hio_unshare(vr); 1262 return (rv); 1263 } 1264 1265 rmap = 0; 1266 rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, 1267 NXGE_HIO_SHARE_MAX_CHANNELS); 1268 if (rv != 0) { 1269 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 1270 nxge_hio_unshare(vr); 1271 return (rv); 1272 } 1273 1274 if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) { 1275 nxge_hio_remres(vr, MAC_RING_TYPE_RX, tmap); 1276 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 1277 nxge_hio_unshare(vr); 1278 return (rv); 1279 } 1280 1281 rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl]; 1282 rxgroup->gindex = vr->rdc_tbl; 1283 rxgroup->sindex = vr->region; 1284 1285 shp = &nxge->shares[vr->region]; 1286 shp->index = vr->region; 1287 shp->vrp = (void *)vr; 1288 shp->tmap = tmap; 1289 shp->rmap = rmap; 1290 shp->rxgroup = vr->rdc_tbl; 1291 shp->active = B_TRUE; 1292 1293 /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 1294 *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 1295 1296 *shandle = (mac_share_handle_t)shp; 1297 1298 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 1299 return (0); 1300 } 1301 1302 void 1303 nxge_hio_share_free(mac_share_handle_t shandle) 1304 { 1305 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1306 1307 /* 1308 * First, unassign the VR (take it back), 1309 * so we can enable interrupts again. 1310 */ 1311 (void) nxge_hio_share_unassign(shp->vrp); 1312 1313 /* 1314 * Free Ring Resources for TX and RX 1315 */ 1316 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 1317 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 1318 1319 /* 1320 * Free VR resource. 1321 */ 1322 nxge_hio_unshare(shp->vrp); 1323 1324 /* 1325 * Clear internal handle state. 1326 */ 1327 shp->index = 0; 1328 shp->vrp = (void *)NULL; 1329 shp->tmap = 0; 1330 shp->rmap = 0; 1331 shp->rxgroup = 0; 1332 shp->active = B_FALSE; 1333 } 1334 1335 void 1336 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 1337 uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum) 1338 { 1339 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1340 1341 switch (type) { 1342 case MAC_RING_TYPE_RX: 1343 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1344 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1345 *rmap = shp->rmap; 1346 *gnum = shp->rxgroup; 1347 break; 1348 1349 case MAC_RING_TYPE_TX: 1350 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1351 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1352 *rmap = shp->tmap; 1353 *gnum = 0; 1354 break; 1355 } 1356 } 1357 1358 /* 1359 * nxge_hio_vr_share 1360 * 1361 * Find an unused Virtualization Region (VR). 1362 * 1363 * Arguments: 1364 * nxge 1365 * 1366 * Notes: 1367 * 1368 * Context: 1369 * Service domain 1370 */ 1371 nxge_hio_vr_t * 1372 nxge_hio_vr_share( 1373 nxge_t *nxge) 1374 { 1375 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1376 nxge_hio_vr_t *vr; 1377 1378 int first, limit, region; 1379 1380 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 1381 1382 MUTEX_ENTER(&nhd->lock); 1383 1384 if (nhd->vrs == 0) { 1385 MUTEX_EXIT(&nhd->lock); 1386 return (0); 1387 } 1388 1389 /* Find an empty virtual region (VR). */ 1390 if (nxge->function_num == 0) { 1391 // FUNC0_VIR0 'belongs' to NIU port 0. 1392 first = FUNC0_VIR1; 1393 limit = FUNC2_VIR0; 1394 } else if (nxge->function_num == 1) { 1395 // FUNC2_VIR0 'belongs' to NIU port 1. 1396 first = FUNC2_VIR1; 1397 limit = FUNC_VIR_MAX; 1398 } else { 1399 cmn_err(CE_WARN, 1400 "Shares not supported on function(%d) at this time.\n", 1401 nxge->function_num); 1402 } 1403 1404 for (region = first; region < limit; region++) { 1405 if (nhd->vr[region].nxge == 0) 1406 break; 1407 } 1408 1409 if (region == limit) { 1410 MUTEX_EXIT(&nhd->lock); 1411 return (0); 1412 } 1413 1414 vr = &nhd->vr[region]; 1415 vr->nxge = (uintptr_t)nxge; 1416 vr->region = (uintptr_t)region; 1417 1418 nhd->vrs--; 1419 1420 MUTEX_EXIT(&nhd->lock); 1421 1422 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 1423 1424 return (vr); 1425 } 1426 1427 void 1428 nxge_hio_unshare( 1429 nxge_hio_vr_t *vr) 1430 { 1431 nxge_t *nxge = (nxge_t *)vr->nxge; 1432 nxge_hio_data_t *nhd; 1433 1434 vr_region_t region; 1435 1436 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 1437 1438 if (!nxge) { 1439 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1440 "vr->nxge is NULL")); 1441 return; 1442 } 1443 1444 /* 1445 * This function is no longer called, but I will keep it 1446 * here in case we want to revisit this topic in the future. 1447 * 1448 * nxge_hio_hostinfo_uninit(nxge, vr); 1449 */ 1450 (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl); 1451 1452 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1453 1454 MUTEX_ENTER(&nhd->lock); 1455 1456 region = vr->region; 1457 (void) memset(vr, 0, sizeof (*vr)); 1458 vr->region = region; 1459 1460 nhd->vrs++; 1461 1462 MUTEX_EXIT(&nhd->lock); 1463 1464 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare")); 1465 } 1466 1467 int 1468 nxge_hio_addres( 1469 nxge_hio_vr_t *vr, 1470 mac_ring_type_t type, 1471 int count) 1472 { 1473 nxge_t *nxge = (nxge_t *)vr->nxge; 1474 int i; 1475 1476 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres")); 1477 1478 if (!nxge) 1479 return (EINVAL); 1480 1481 for (i = 0; i < count; i++) { 1482 int rv; 1483 if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) { 1484 if (i == 0) /* Couldn't get even one DC. */ 1485 return (-rv); 1486 else 1487 break; 1488 } 1489 } 1490 1491 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres")); 1492 1493 return (0); 1494 } 1495 1496 /* ARGSUSED */ 1497 void 1498 nxge_hio_remres( 1499 nxge_hio_vr_t *vr, 1500 mac_ring_type_t type, 1501 res_map_t res_map) 1502 { 1503 nxge_t *nxge = (nxge_t *)vr->nxge; 1504 nxge_grp_t *group; 1505 1506 if (!nxge) { 1507 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: " 1508 "vr->nxge is NULL")); 1509 return; 1510 } 1511 1512 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map)); 1513 1514 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 1515 while (group->dc) { 1516 nxge_hio_dc_t *dc = group->dc; 1517 NXGE_DC_RESET(res_map, dc->page); 1518 nxge_hio_dc_unshare(nxge, vr, type, dc->channel); 1519 } 1520 1521 if (res_map) { 1522 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 1523 "res_map %lx", res_map)); 1524 } 1525 1526 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres")); 1527 } 1528 1529 /* 1530 * nxge_hio_tdc_share 1531 * 1532 * Share an unused TDC channel. 1533 * 1534 * Arguments: 1535 * nxge 1536 * 1537 * Notes: 1538 * 1539 * A.7.3 Reconfigure Tx DMA channel 1540 * Disable TxDMA A.9.6.10 1541 * [Rebind TxDMA channel to Port A.9.6.7] 1542 * 1543 * We don't have to Rebind the TDC to the port - it always already bound. 1544 * 1545 * Soft Reset TxDMA A.9.6.2 1546 * 1547 * This procedure will be executed by nxge_init_txdma_channel() in the 1548 * guest domain: 1549 * 1550 * Re-initialize TxDMA A.9.6.8 1551 * Reconfigure TxDMA 1552 * Enable TxDMA A.9.6.9 1553 * 1554 * Context: 1555 * Service domain 1556 */ 1557 int 1558 nxge_hio_tdc_share( 1559 nxge_t *nxge, 1560 int channel) 1561 { 1562 nxge_grp_set_t *set = &nxge->tx_set; 1563 tx_ring_t *ring; 1564 int count; 1565 1566 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 1567 1568 /* 1569 * Wait until this channel is idle. 1570 */ 1571 ring = nxge->tx_rings->rings[channel]; 1572 1573 (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING); 1574 if (ring->tx_ring_busy) { 1575 /* 1576 * Wait for 30 seconds. 1577 */ 1578 for (count = 30 * 1000; count; count--) { 1579 if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) { 1580 break; 1581 } 1582 1583 drv_usecwait(1000); 1584 } 1585 1586 if (count == 0) { 1587 (void) atomic_swap_32(&ring->tx_ring_offline, 1588 NXGE_TX_RING_ONLINE); 1589 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 1590 "Tx ring %d was always BUSY", channel)); 1591 return (-EIO); 1592 } 1593 } else { 1594 (void) atomic_swap_32(&ring->tx_ring_offline, 1595 NXGE_TX_RING_OFFLINED); 1596 } 1597 1598 if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 1599 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 1600 "Failed to remove interrupt for TxDMA channel %d", 1601 channel)); 1602 return (NXGE_ERROR); 1603 } 1604 1605 /* Disable TxDMA A.9.6.10 */ 1606 (void) nxge_txdma_channel_disable(nxge, channel); 1607 1608 /* The SD is sharing this channel. */ 1609 NXGE_DC_SET(set->shared.map, channel); 1610 set->shared.count++; 1611 1612 /* Soft Reset TxDMA A.9.6.2 */ 1613 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 1614 1615 /* 1616 * Initialize the DC-specific FZC control registers. 1617 * ----------------------------------------------------- 1618 */ 1619 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 1620 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1621 "nx_hio_dc_share: FZC TDC failed: %d", channel)); 1622 return (-EIO); 1623 } 1624 1625 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 1626 1627 return (0); 1628 } 1629 1630 /* 1631 * nxge_hio_rdc_share 1632 * 1633 * Share an unused RDC channel. 1634 * 1635 * Arguments: 1636 * nxge 1637 * 1638 * Notes: 1639 * 1640 * This is the latest version of the procedure to 1641 * Reconfigure an Rx DMA channel: 1642 * 1643 * A.6.3 Reconfigure Rx DMA channel 1644 * Stop RxMAC A.9.2.6 1645 * Drain IPP Port A.9.3.6 1646 * Stop and reset RxDMA A.9.5.3 1647 * 1648 * This procedure will be executed by nxge_init_rxdma_channel() in the 1649 * guest domain: 1650 * 1651 * Initialize RxDMA A.9.5.4 1652 * Reconfigure RxDMA 1653 * Enable RxDMA A.9.5.5 1654 * 1655 * We will do this here, since the RDC is a canalis non grata: 1656 * Enable RxMAC A.9.2.10 1657 * 1658 * Context: 1659 * Service domain 1660 */ 1661 int 1662 nxge_hio_rdc_share( 1663 nxge_t *nxge, 1664 nxge_hio_vr_t *vr, 1665 int channel) 1666 { 1667 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1668 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1669 nxge_grp_set_t *set = &nxge->rx_set; 1670 nxge_rdc_grp_t *rdc_grp; 1671 1672 int current, last; 1673 1674 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 1675 1676 /* Disable interrupts. */ 1677 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1678 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 1679 "Failed to remove interrupt for RxDMA channel %d", 1680 channel)); 1681 return (NXGE_ERROR); 1682 } 1683 1684 /* Stop RxMAC = A.9.2.6 */ 1685 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1686 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1687 "Failed to disable RxMAC")); 1688 } 1689 1690 /* Drain IPP Port = A.9.3.6 */ 1691 (void) nxge_ipp_drain(nxge); 1692 1693 /* Stop and reset RxDMA = A.9.5.3 */ 1694 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1695 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1696 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1697 "Failed to disable RxDMA channel %d", channel)); 1698 } 1699 1700 /* The SD is sharing this channel. */ 1701 NXGE_DC_SET(set->shared.map, channel); 1702 set->shared.count++; 1703 1704 // Assert RST: RXDMA_CFIG1[30] = 1 1705 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 1706 1707 /* 1708 * We have to reconfigure the RDC table(s) 1709 * to which this channel belongs. 1710 */ 1711 current = hardware->def_mac_rxdma_grpid; 1712 last = current + hardware->max_rdc_grpids; 1713 for (; current < last; current++) { 1714 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1715 rdc_grp = &nxge->pt_config.rdc_grps[current]; 1716 rdc_grp->map = set->owned.map; 1717 rdc_grp->max_rdcs--; 1718 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1719 } 1720 } 1721 1722 /* 1723 * The guest domain will reconfigure the RDC later. 1724 * 1725 * But in the meantime, we must re-enable the Rx MAC so 1726 * that we can start receiving packets again on the 1727 * remaining RDCs: 1728 * 1729 * Enable RxMAC = A.9.2.10 1730 */ 1731 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1732 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1733 "nx_hio_rdc_share: Rx MAC still disabled")); 1734 } 1735 1736 /* 1737 * Initialize the DC-specific FZC control registers. 1738 * ----------------------------------------------------- 1739 */ 1740 if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 1741 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1742 "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 1743 return (-EIO); 1744 } 1745 1746 /* 1747 * We have to initialize the guest's RDC table, too. 1748 * ----------------------------------------------------- 1749 */ 1750 rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 1751 if (rdc_grp->max_rdcs == 0) { 1752 rdc_grp->start_rdc = (uint8_t)channel; 1753 rdc_grp->def_rdc = (uint8_t)channel; 1754 rdc_grp->max_rdcs = 1; 1755 } else { 1756 rdc_grp->max_rdcs++; 1757 } 1758 NXGE_DC_SET(rdc_grp->map, channel); 1759 1760 if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 1761 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1762 "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 1763 return (-EIO); 1764 } 1765 1766 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 1767 1768 return (0); 1769 } 1770 1771 /* 1772 * nxge_hio_dc_share 1773 * 1774 * Share a DMA channel with a guest domain. 1775 * 1776 * Arguments: 1777 * nxge 1778 * vr The VR that <channel> will belong to. 1779 * type Tx or Rx. 1780 * res_map The resource map used by the caller, which we will 1781 * update if successful. 1782 * 1783 * Notes: 1784 * 1785 * Context: 1786 * Service domain 1787 */ 1788 int 1789 nxge_hio_dc_share( 1790 nxge_t *nxge, 1791 nxge_hio_vr_t *vr, 1792 mac_ring_type_t type) 1793 { 1794 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1795 nxge_hw_pt_cfg_t *hardware; 1796 nxge_hio_dc_t *dc; 1797 int channel, limit; 1798 1799 nxge_grp_set_t *set; 1800 nxge_grp_t *group; 1801 1802 int slot; 1803 1804 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 1805 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 1806 1807 /* 1808 * In version 1.0, we may only give a VR 2 RDCs or TDCs. 1809 * Not only that, but the HV has statically assigned the 1810 * channels like so: 1811 * VR0: RDC0 & RDC1 1812 * VR1: RDC2 & RDC3, etc. 1813 * The TDCs are assigned in exactly the same way. 1814 * 1815 * So, for example 1816 * hardware->start_rdc + vr->region * 2; 1817 * VR1: hardware->start_rdc + 1 * 2; 1818 * VR3: hardware->start_rdc + 3 * 2; 1819 * If start_rdc is 0, we end up with 2 or 6. 1820 * If start_rdc is 8, we end up with 10 or 14. 1821 */ 1822 1823 set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set); 1824 hardware = &nxge->pt_config.hw_config; 1825 1826 // This code is still NIU-specific (assuming only 2 ports) 1827 channel = hardware->start_rdc + (vr->region % 4) * 2; 1828 limit = channel + 2; 1829 1830 MUTEX_ENTER(&nhd->lock); 1831 for (; channel < limit; channel++) { 1832 if ((1 << channel) & set->owned.map) { 1833 break; 1834 } 1835 } 1836 1837 if (channel == limit) { 1838 MUTEX_EXIT(&nhd->lock); 1839 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1840 "nx_hio_dc_share: there are no channels to share")); 1841 return (-EIO); 1842 } 1843 1844 MUTEX_EXIT(&nhd->lock); 1845 1846 /* -------------------------------------------------- */ 1847 slot = (type == MAC_RING_TYPE_TX) ? 1848 nxge_hio_tdc_share(nxge, channel) : 1849 nxge_hio_rdc_share(nxge, vr, channel); 1850 1851 if (slot < 0) { 1852 if (type == MAC_RING_TYPE_RX) { 1853 nxge_hio_rdc_unshare(nxge, channel); 1854 } else { 1855 nxge_hio_tdc_unshare(nxge, channel); 1856 } 1857 return (slot); 1858 } 1859 1860 MUTEX_ENTER(&nhd->lock); 1861 1862 /* 1863 * Tag this channel. 1864 * -------------------------------------------------- 1865 */ 1866 dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 1867 1868 dc->vr = vr; 1869 dc->channel = (nxge_channel_t)channel; 1870 1871 MUTEX_EXIT(&nhd->lock); 1872 1873 /* 1874 * vr->[t|r]x_group is used by the service domain to 1875 * keep track of its shared DMA channels. 1876 */ 1877 MUTEX_ENTER(&nxge->group_lock); 1878 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 1879 1880 dc->group = group; 1881 1882 /* Initialize <group>, if necessary */ 1883 if (group->count == 0) { 1884 group->nxge = nxge; 1885 group->type = (type == MAC_RING_TYPE_TX) ? 1886 VP_BOUND_TX : VP_BOUND_RX; 1887 group->sequence = nhd->sequence++; 1888 group->active = B_TRUE; 1889 } 1890 1891 MUTEX_EXIT(&nxge->group_lock); 1892 1893 NXGE_ERROR_MSG((nxge, HIO_CTL, 1894 "DC share: %cDC %d was assigned to slot %d", 1895 type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 1896 1897 nxge_grp_dc_append(nxge, group, dc); 1898 1899 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 1900 1901 return (0); 1902 } 1903 1904 /* 1905 * nxge_hio_tdc_unshare 1906 * 1907 * Unshare a TDC. 1908 * 1909 * Arguments: 1910 * nxge 1911 * channel The channel to unshare (add again). 1912 * 1913 * Notes: 1914 * 1915 * Context: 1916 * Service domain 1917 */ 1918 void 1919 nxge_hio_tdc_unshare( 1920 nxge_t *nxge, 1921 int channel) 1922 { 1923 nxge_grp_set_t *set = &nxge->tx_set; 1924 nxge_grp_t *group = set->group[0]; 1925 1926 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 1927 1928 NXGE_DC_RESET(set->shared.map, channel); 1929 set->shared.count--; 1930 1931 if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 1932 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 1933 "Failed to initialize TxDMA channel %d", channel)); 1934 return; 1935 } 1936 1937 /* Re-add this interrupt. */ 1938 if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 1939 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 1940 "Failed to add interrupt for TxDMA channel %d", channel)); 1941 } 1942 1943 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 1944 } 1945 1946 /* 1947 * nxge_hio_rdc_unshare 1948 * 1949 * Unshare an RDC: add it to the SD's RDC groups (tables). 1950 * 1951 * Arguments: 1952 * nxge 1953 * channel The channel to unshare (add again). 1954 * 1955 * Notes: 1956 * 1957 * Context: 1958 * Service domain 1959 */ 1960 void 1961 nxge_hio_rdc_unshare( 1962 nxge_t *nxge, 1963 int channel) 1964 { 1965 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1966 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1967 1968 nxge_grp_set_t *set = &nxge->rx_set; 1969 nxge_grp_t *group = set->group[0]; 1970 int current, last; 1971 1972 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 1973 1974 /* Stop RxMAC = A.9.2.6 */ 1975 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1976 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1977 "Failed to disable RxMAC")); 1978 } 1979 1980 /* Drain IPP Port = A.9.3.6 */ 1981 (void) nxge_ipp_drain(nxge); 1982 1983 /* Stop and reset RxDMA = A.9.5.3 */ 1984 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1985 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1986 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1987 "Failed to disable RxDMA channel %d", channel)); 1988 } 1989 1990 NXGE_DC_RESET(set->shared.map, channel); 1991 set->shared.count--; 1992 1993 /* 1994 * Assert RST: RXDMA_CFIG1[30] = 1 1995 * 1996 * Initialize RxDMA A.9.5.4 1997 * Reconfigure RxDMA 1998 * Enable RxDMA A.9.5.5 1999 */ 2000 if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 2001 /* Be sure to re-enable the RX MAC. */ 2002 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 2003 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2004 "nx_hio_rdc_share: Rx MAC still disabled")); 2005 } 2006 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 2007 "Failed to initialize RxDMA channel %d", channel)); 2008 return; 2009 } 2010 2011 /* 2012 * We have to reconfigure the RDC table(s) 2013 * to which this channel once again belongs. 2014 */ 2015 current = hardware->def_mac_rxdma_grpid; 2016 last = current + hardware->max_rdc_grpids; 2017 for (; current < last; current++) { 2018 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 2019 nxge_rdc_grp_t *group; 2020 group = &nxge->pt_config.rdc_grps[current]; 2021 group->map = set->owned.map; 2022 group->max_rdcs++; 2023 (void) nxge_init_fzc_rdc_tbl(nxge, current); 2024 } 2025 } 2026 2027 /* 2028 * Enable RxMAC = A.9.2.10 2029 */ 2030 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 2031 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2032 "nx_hio_rdc_share: Rx MAC still disabled")); 2033 return; 2034 } 2035 2036 /* Re-add this interrupt. */ 2037 if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 2038 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2039 "nx_hio_rdc_unshare: Failed to add interrupt for " 2040 "RxDMA CHANNEL %d", channel)); 2041 } 2042 2043 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 2044 } 2045 2046 /* 2047 * nxge_hio_dc_unshare 2048 * 2049 * Unshare (reuse) a DMA channel. 2050 * 2051 * Arguments: 2052 * nxge 2053 * vr The VR that <channel> belongs to. 2054 * type Tx or Rx. 2055 * channel The DMA channel to reuse. 2056 * 2057 * Notes: 2058 * 2059 * Context: 2060 * Service domain 2061 */ 2062 void 2063 nxge_hio_dc_unshare( 2064 nxge_t *nxge, 2065 nxge_hio_vr_t *vr, 2066 mac_ring_type_t type, 2067 int channel) 2068 { 2069 nxge_grp_t *group; 2070 nxge_hio_dc_t *dc; 2071 2072 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 2073 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 2074 2075 /* Unlink the channel from its group. */ 2076 /* -------------------------------------------------- */ 2077 group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 2078 NXGE_DC_RESET(group->map, channel); 2079 if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 2080 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2081 "nx_hio_dc_unshare(%d) failed", channel)); 2082 return; 2083 } 2084 2085 dc->vr = 0; 2086 dc->cookie = 0; 2087 2088 if (type == MAC_RING_TYPE_RX) { 2089 nxge_hio_rdc_unshare(nxge, channel); 2090 } else { 2091 nxge_hio_tdc_unshare(nxge, channel); 2092 } 2093 2094 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 2095 } 2096 2097 #endif /* if defined(sun4v) */ 2098