/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define NXGE_HIO_SHARE_MIN_CHANNELS 2
#define NXGE_HIO_SHARE_MAX_CHANNELS 2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 * Figure out if we are in a guest domain or not.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
void
nxge_get_environs(
    nxge_t *nxge)
{
    char *string;

    /*
     * In the beginning, assume that we are running sans LDOMs/XEN.
     */
    nxge->environs = SOLARIS_DOMAIN;

    /*
     * Are we a hybrid I/O (HIO) guest domain driver?
     */
    if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        "niutype", &string)) == DDI_PROP_SUCCESS) {
        if (strcmp(string, "n2niu") == 0) {
            nxge->environs = SOLARIS_GUEST_DOMAIN;
            /* So we can allocate properly-aligned memory. */
            nxge->niu_type = N2_NIU;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "Hybrid IO-capable guest domain"));
        }
        ddi_prop_free(string);
    }
}
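
/*
 * Illustrative summary (not part of the driver logic): the values that
 * nxge_get_environs() may leave in nxge->environs, and where they come
 * from:
 *
 *    SOLARIS_DOMAIN          no LDOMs/XEN; the default set above
 *    SOLARIS_SERVICE_DOMAIN  set later, by nxge_hio_init(), when the
 *                            NIU hypervisor service (v1.1) is present
 *    SOLARIS_GUEST_DOMAIN    set here, when "niutype" == "n2niu"
 */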

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *    This is the non-hybrid I/O version of this function.
 *
 * Context:
 *    Any domain
 */
int
nxge_hio_init(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    nhd->hio.ldoms = B_FALSE;

    return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    ASSERT(nhd != NULL);
    ASSERT(nxge->nxge_hw_p->ndevs == 0);

    MUTEX_DESTROY(&nhd->lock);

    KMEM_FREE(nhd, sizeof (*nhd));

    nxge->nxge_hw_p->hio = 0;
}

/*
 * nxge_dci_map
 *
 * Map a DMA channel index to a channel number.
 *
 * Arguments:
 *    nxge
 *    type    The type of channel this is: Tx or Rx.
 *    index   The index to convert to a channel number
 *
 * Notes:
 *    This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *    Any domain
 */
int
nxge_dci_map(
    nxge_t *nxge,
    vpc_type_t type,
    int index)
{
    nxge_grp_set_t *set;
    int dc;

    switch (type) {
    case VP_BOUND_TX:
        set = &nxge->tx_set;
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        break;
    default:
        /* Don't let <set> be used uninitialized below. */
        return (-1);
    }

    for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
        if ((1 << dc) & set->owned.map) {
            if (index == 0)
                return (dc);
            else
                index--;
        }
    }

    return (-1);
}
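
/*
 * Worked example (illustrative only): if set->owned.map == 0x2C, i.e.
 * this instance owns channels 2, 3 and 5, then
 *
 *    nxge_dci_map(nxge, VP_BOUND_TX, 0) == 2
 *    nxge_dci_map(nxge, VP_BOUND_TX, 1) == 3
 *    nxge_dci_map(nxge, VP_BOUND_TX, 2) == 5
 *    nxge_dci_map(nxge, VP_BOUND_TX, 3) == -1 (only three channels owned)
 */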

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions.  That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to manage
 * Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_add
 *
 * Add a group to an instance of NXGE.
 *
 * Arguments:
 *    nxge
 *    type    Tx or Rx
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
vr_handle_t
nxge_grp_add(
    nxge_t *nxge,
    nxge_grp_type_t type)
{
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    int i;

    group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
    group->nxge = nxge;

    MUTEX_ENTER(&nxge->group_lock);
    switch (type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    group->type = type;
    group->active = B_TRUE;
    group->sequence = set->sequence++;

    /* Find an empty slot for this logical group. */
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] == 0) {
            group->index = i;
            set->group[i] = group;
            NXGE_DC_SET(set->lg.map, i);
            set->lg.count++;
            break;
        }
    }
    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_add: %cgroup = %d.%d",
        type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    return ((vr_handle_t)group);
}

void
nxge_grp_remove(
    nxge_t *nxge,
    vr_handle_t handle)    /* The group to remove. */
{
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    vpc_type_t type;

    group = (nxge_grp_t *)handle;

    MUTEX_ENTER(&nxge->group_lock);
    switch (group->type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    if (set->group[group->index] != group) {
        MUTEX_EXIT(&nxge->group_lock);
        return;
    }

    set->group[group->index] = 0;
    NXGE_DC_RESET(set->lg.map, group->index);
    set->lg.count--;

    /* While inside the mutex, deactivate <group>. */
    group->active = B_FALSE;

    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_remove(%c.%d.%d) called",
        group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    /* Now, remove any DCs which are still active. */
    switch (group->type) {
    default:
        type = VP_BOUND_TX;
        break;
    case NXGE_RECEIVE_GROUP:
    case EXT_RECEIVE_GROUP:
        type = VP_BOUND_RX;
    }

    while (group->dc) {
        nxge_grp_dc_remove(nxge, type, group->dc->channel);
    }

    KMEM_FREE(group, sizeof (*group));
}
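
/*
 * Usage sketch (illustrative only; error handling elided): create a
 * transmit group, add a physical channel to it, then tear it down.
 *
 *    vr_handle_t grp = nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
 *    if (nxge_grp_dc_add(nxge, grp, VP_BOUND_TX, 0) != NXGE_OK)
 *        ...recover...
 *    nxge_grp_remove(nxge, grp);  (removes any DCs still in the group)
 */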

/*
 * nxge_grp_dc_add
 *
 * Add a DMA channel to a VR/Group.
 *
 * Arguments:
 *    nxge
 *    handle    The group to add <channel> to.
 *    type      Tx or Rx.
 *    channel   The channel to add.
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
    nxge_t *nxge,
    vr_handle_t handle,    /* The group to add <channel> to. */
    vpc_type_t type,       /* Rx or Tx */
    int channel)           /* A physical/logical channel number */
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

    if (handle == 0)
        return (0);

    switch (type) {
    default:
        set = &nxge->tx_set;
        if (channel > NXGE_MAX_TDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: TDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        if (channel > NXGE_MAX_RDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: RDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    }

    group = (nxge_grp_t *)handle;
    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
        type == VP_BOUND_TX ? 't' : 'r',
        nxge->mac.portnum, group->sequence, group->count, channel));

    MUTEX_ENTER(&nxge->group_lock);
    if (group->active != B_TRUE) {
        /* We may be in the process of removing this group. */
        MUTEX_EXIT(&nxge->group_lock);
        return (NXGE_ERROR);
    }
    MUTEX_EXIT(&nxge->group_lock);

    if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): DC FIND failed", channel));
        return (NXGE_ERROR);
    }

    MUTEX_ENTER(&nhd->lock);

    if (dc->group) {
        MUTEX_EXIT(&nhd->lock);
        /* This channel is already in use! */
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel already in group", channel));
        return (NXGE_ERROR);
    }

    dc->next = 0;
    dc->page = channel;
    dc->channel = (nxge_channel_t)channel;

    dc->type = type;
    if (type == VP_BOUND_RX) {
        dc->init = nxge_init_rxdma_channel;
        dc->uninit = nxge_uninit_rxdma_channel;
    } else {
        dc->init = nxge_init_txdma_channel;
        dc->uninit = nxge_uninit_txdma_channel;
    }

    dc->group = handle;

    if (isLDOMguest(nxge))
        (void) nxge_hio_ldsv_add(nxge, dc);

    NXGE_DC_SET(set->owned.map, channel);
    set->owned.count++;

    MUTEX_EXIT(&nhd->lock);

    if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel init failed", channel));
        return (NXGE_ERROR);
    }

    nxge_grp_dc_append(nxge, group, dc);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

    return ((int)status);
}

void
nxge_grp_dc_remove(
    nxge_t *nxge,
    vpc_type_t type,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_grp_t *group;

    dc_uninit_t uninit;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

    if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_remove: find(%d) failed", channel));
        return;
    }
    group = (nxge_grp_t *)dc->group;

    if (isLDOMguest(nxge)) {
        (void) nxge_hio_intr_remove(nxge, type, channel);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "DC remove: group = %d.%d.%d, %cdc %d",
        nxge->mac.portnum, group->sequence, group->count,
        type == VP_BOUND_TX ? 't' : 'r', dc->channel));

    MUTEX_ENTER(&nhd->lock);

    set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
    if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
        NXGE_DC_RESET(group->map, channel);
    }

    /* Remove the DC from its group. */
    if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
        MUTEX_EXIT(&nhd->lock);
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_remove(%d) failed", channel));
        return;
    }

    uninit = dc->uninit;
    channel = dc->channel;

    NXGE_DC_RESET(set->owned.map, channel);
    set->owned.count--;

    (void) memset(dc, 0, sizeof (*dc));

    MUTEX_EXIT(&nhd->lock);

    (*uninit)(nxge, channel);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
    nxge_t *nxge,
    vpc_type_t type,    /* Rx or Tx */
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *current;

    current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

    if (!isLDOMguest(nxge)) {
        return (&current[channel]);
    } else {
        /* We're in a guest domain. */
        int i, limit = (type == VP_BOUND_TX) ?
            NXGE_MAX_TDCS : NXGE_MAX_RDCS;

        MUTEX_ENTER(&nhd->lock);
        for (i = 0; i < limit; i++, current++) {
            if (current->channel == channel) {
                if (current->vr && current->vr->nxge ==
                    (uintptr_t)nxge) {
                    MUTEX_EXIT(&nhd->lock);
                    return (current);
                }
            }
        }
        MUTEX_EXIT(&nhd->lock);
    }

    return (0);
}
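
/*
 * Example of the two lookup modes above (illustrative only): in the
 * service domain, nxge_grp_dc_find(nxge, VP_BOUND_RX, 5) simply
 * returns &nhd->rdc[5].  In a guest domain, the array is searched for
 * an entry whose ->channel is 5 *and* whose VR belongs to this nxge
 * instance, since a guest may only see the DCs shared with it.
 */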

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
 *
 * Arguments:
 *    nxge
 *    group   The group to append to
 *    dc      The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
static
void
nxge_grp_dc_append(
    nxge_t *nxge,
    nxge_grp_t *group,
    nxge_hio_dc_t *dc)
{
    MUTEX_ENTER(&nxge->group_lock);

    if (group->dc == 0) {
        group->dc = dc;
    } else {
        nxge_hio_dc_t *current = group->dc;
        do {
            if (current->next == 0) {
                current->next = dc;
                break;
            }
            current = current->next;
        } while (current);
    }

    NXGE_DC_SET(group->map, dc->channel);

    nxge_grp_dc_map(group);
    group->count++;

    MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 *    nxge
 *    group     The group (linked list) to unlink from
 *    channel   The channel to unlink
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
    nxge_t *nxge,
    nxge_grp_t *group,
    int channel)
{
    nxge_hio_dc_t *current, *previous;

    MUTEX_ENTER(&nxge->group_lock);

    if ((current = group->dc) == 0) {
        MUTEX_EXIT(&nxge->group_lock);
        return (0);
    }

    previous = 0;
    do {
        if (current->channel == channel) {
            if (previous)
                previous->next = current->next;
            else
                group->dc = current->next;
            break;
        }
        previous = current;
        current = current->next;
    } while (current);

    if (current == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "DC unlink: DC %d not found", channel));
    } else {
        current->next = 0;
        current->group = 0;

        group->count--;
    }

    nxge_grp_dc_map(group);

    MUTEX_EXIT(&nxge->group_lock);

    return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 *    nxge
 *    group   The group to remap.
 *
 * Notes:
 *    It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *    Service domain
 */
void
nxge_grp_dc_map(
    nxge_grp_t *group)
{
    nxge_channel_t *legend;
    nxge_hio_dc_t *dc;

    (void) memset(group->legend, 0, sizeof (group->legend));

    legend = group->legend;
    dc = group->dc;
    while (dc) {
        *legend = dc->channel;
        legend++;
        dc = dc->next;
    }
}
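
/*
 * Worked example (illustrative only): if a group's linked list holds
 * DCs for channels 3 and 5, in that order, then after
 * nxge_grp_dc_map() runs,
 *
 *    group->legend[0] == 3
 *    group->legend[1] == 5
 *
 * and the rest of the legend is zero, courtesy of the memset().
 */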

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay <seconds> number of seconds.
 *
 * Arguments:
 *    seconds   The number of seconds to delay.
 *
 * Notes:
 *    This is a developer-only function.
 *
 * Context:
 *    Any domain
 */
void
nxge_delay(
    int seconds)
{
    delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
    { "RXDMA_CFIG1",        0 },
    { "RXDMA_CFIG2",        8 },
    { "RBR_CFIG_A",         0x10 },
    { "RBR_CFIG_B",         0x18 },
    { "RBR_KICK",           0x20 },
    { "RBR_STAT",           0x28 },
    { "RBR_HDH",            0x30 },
    { "RBR_HDL",            0x38 },
    { "RCRCFIG_A",          0x40 },
    { "RCRCFIG_B",          0x48 },
    { "RCRSTAT_A",          0x50 },
    { "RCRSTAT_B",          0x58 },
    { "RCRSTAT_C",          0x60 },
    { "RX_DMA_ENT_MSK",     0x68 },
    { "RX_DMA_CTL_STAT",    0x70 },
    { "RCR_FLSH",           0x78 },
    { "RXMISC",             0x90 },
    { "RX_DMA_CTL_STAT_DBG", 0x98 },
    { 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
    { "Tx_RNG_CFIG",        0 },
    { "Tx_RNG_HDL",         0x10 },
    { "Tx_RNG_KICK",        0x18 },
    { "Tx_ENT_MASK",        0x20 },
    { "Tx_CS",              0x28 },
    { "TxDMA_MBH",          0x30 },
    { "TxDMA_MBL",          0x38 },
    { "TxDMA_PRE_ST",       0x40 },
    { "Tx_RNG_ERR_LOGH",    0x48 },
    { "Tx_RNG_ERR_LOGL",    0x50 },
    { "TDMC_INTR_DBG",      0x60 },
    { "Tx_CS_DBG",          0x68 },
    { 0, -1 }
};

/*
 * nxge_xx2str
 *
 * Translate a register address into a string.
 *
 * Arguments:
 *    offset   The address of the register to translate.
 *
 * Notes:
 *    These are developer-only functions.
 *
 * Context:
 *    Any domain
 */
const char *
nxge_rx2str(
    int offset)
{
    dmc_reg_name_t *reg = &rx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}

const char *
nxge_tx2str(
    int offset)
{
    dmc_reg_name_t *reg = &tx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}
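
/*
 * Example (illustrative only): since the offset is masked with
 * DMA_CSR_MASK, a channel-relative CSR offset maps directly to its
 * register name:
 *
 *    nxge_rx2str(0x70) returns "RX_DMA_CTL_STAT"
 *    nxge_tx2str(0x28) returns "Tx_CS"
 *
 * An offset with no table entry yields a null pointer, so callers
 * should check the result before printing it.
 */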

/*
 * nxge_ddi_perror
 *
 * Map a DDI error number to a string.
 *
 * Arguments:
 *    ddi_error   The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
const char *
nxge_ddi_perror(
    int ddi_error)
{
    switch (ddi_error) {
    case DDI_SUCCESS:
        return ("DDI_SUCCESS");
    case DDI_FAILURE:
        return ("DDI_FAILURE");
    case DDI_NOT_WELL_FORMED:
        return ("DDI_NOT_WELL_FORMED");
    case DDI_EAGAIN:
        return ("DDI_EAGAIN");
    case DDI_EINVAL:
        return ("DDI_EINVAL");
    case DDI_ENOTSUP:
        return ("DDI_ENOTSUP");
    case DDI_EPENDING:
        return ("DDI_EPENDING");
    case DDI_ENOMEM:
        return ("DDI_ENOMEM");
    case DDI_EBUSY:
        return ("DDI_EBUSY");
    case DDI_ETRANSPORT:
        return ("DDI_ETRANSPORT");
    case DDI_ECONTEXT:
        return ("DDI_ECONTEXT");
    default:
        return ("Unknown error");
    }
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static vr_handle_t nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(vr_handle_t);

static int nxge_hio_addres(vr_handle_t, mac_ring_type_t, int);
static void nxge_hio_remres(vr_handle_t, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * Context:
 *    Any domain
 */
int
nxge_hio_init(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd;
    int i, region;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    if (nxge->environs == SOLARIS_DOMAIN) {
        if (nxge->niu_hsvc_available == B_TRUE) {
            hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
            if (niu_hsvc->hsvc_major == 1 &&
                niu_hsvc->hsvc_minor == 1)
                nxge->environs = SOLARIS_SERVICE_DOMAIN;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "nxge_hio_init: hypervisor services "
                "version %d.%d",
                niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
        }
    }

    if (!isLDOMs(nxge)) {
        nhd->hio.ldoms = B_FALSE;
        return (NXGE_OK);
    }

    nhd->hio.ldoms = B_TRUE;

    /*
     * Fill in what we can.
     */
    for (region = 0; region < NXGE_VR_SR_MAX; region++) {
        nhd->vr[region].region = region;
    }
    nhd->available.vrs = NXGE_VR_SR_MAX - 2;
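
    /*
     * Example of the accounting above (a sketch, assuming the eight
     * regions FUNC0_VIR0 .. FUNC3_VIR1 declared in nxge_hio.h): two
     * regions are permanently bound to the two NIU ports (FUNC0_VIR0
     * to port 0, FUNC2_VIR0 to port 1 -- see nxge_hio_vr_share()),
     * which is why only NXGE_VR_SR_MAX - 2 VRs are marked available.
     */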

    /*
     * Initialize share and ring group structures.
     */
    for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
        nxge->rx_hio_groups[i].ghandle = NULL;
        nxge->rx_hio_groups[i].nxgep = nxge;
        nxge->rx_hio_groups[i].gindex = 0;
        nxge->rx_hio_groups[i].sindex = 0;
    }

    for (i = 0; i < NXGE_VR_SR_MAX; i++) {
        nxge->shares[i].nxgep = nxge;
        nxge->shares[i].index = 0;
        nxge->shares[i].vrp = (void *)NULL;
        nxge->shares[i].tmap = 0;
        nxge->shares[i].rmap = 0;
        nxge->shares[i].rxgroup = 0;
        nxge->shares[i].active = B_FALSE;
    }

    /* Fill in the HV HIO function pointers. */
    nxge_hio_hv_init(nxge);

    if (isLDOMservice(nxge)) {
        NXGE_DEBUG_MSG((nxge, HIO_CTL,
            "Hybrid IO-capable service domain"));
        return (NXGE_OK);
    } else {
        /*
         * isLDOMguest(nxge) == B_TRUE
         */
        nx_vio_fp_t *vio;
        nhd->type = NXGE_HIO_TYPE_GUEST;

        vio = &nhd->hio.vio;
        vio->__register = (vio_net_resource_reg_t)
            modgetsymvalue("vio_net_resource_reg", 0);
        vio->unregister = (vio_net_resource_unreg_t)
            modgetsymvalue("vio_net_resource_unreg", 0);

        if (vio->__register == 0 || vio->unregister == 0) {
            NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
            return (NXGE_ERROR);
        }
    }

    return (0);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int rv, sindex;
    nxge_hio_vr_t *vr;    /* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Program the mac address for the group/share.
     */
    if ((rv = nxge_hio_hostinfo_init(nxge, vr,
        (ether_addr_t *)mac_addr)) != 0) {
        return (rv);
    }

    return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int sindex;
    nxge_hio_vr_t *vr;    /* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Remove the mac address for the group/share.
     */
    nxge_hio_hostinfo_uninit(nxge, vr);

    return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
    p_nxge_t nxgep = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;

    switch (type) {
    case MAC_RING_TYPE_RX:
        rxgroup = &nxgep->rx_hio_groups[group];
        rxgroup->gindex = group;

        infop->mrg_driver = (mac_group_driver_t)rxgroup;
        infop->mrg_start = NULL;
        infop->mrg_stop = NULL;
        infop->mrg_addmac = nxge_hio_add_mac;
        infop->mrg_remmac = nxge_hio_rem_mac;
        infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
        break;

    case MAC_RING_TYPE_TX:
        break;
    }
}

int
nxge_hio_share_assign(
    nxge_t *nxge,
    uint64_t cookie,
    res_map_t *tmap,
    res_map_t *rmap,
    nxge_hio_vr_t *vr)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    uint64_t slot, hv_rv;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    int i;

    /*
     * Ask the Hypervisor to set up the VR for us
     */
    fp = &nhd->hio.vr;
    if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
        NXGE_ERROR_MSG((nxge, HIO_CTL,
            "nx_hio_share_assign: "
            "vr->assign() returned %d", hv_rv));
        nxge_hio_unshare((vr_handle_t)vr);
        return (-EIO);
    }
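
    /*
     * Worked example (illustrative only): if this VR was granted TDCs
     * 4 and 5 and the HV places them in slots 0 and 1 of the VR, then
     * dc->page becomes 0 and 1 respectively and *tmap ends up as 0x3.
     * Within the VR, the guest addresses channels by those slot (page)
     * numbers, not by the physical channel numbers.
     */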

    /*
     * For each shared TDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->tx_group.dc;
    for (i = 0; i < NXGE_MAX_TDCS; i++) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        while (dc) {
            hv_rv = (*tx->assign)
                (vr->cookie, dc->channel, &slot);
            cmn_err(CE_CONT, "tx->assign(%d, %d)",
                dc->channel, dc->page);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nx_hio_share_assign: "
                    "tx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*tmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    /*
     * For each shared RDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->rx_group.dc;
    for (i = 0; i < NXGE_MAX_RDCS; i++) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        while (dc) {
            hv_rv = (*rx->assign)
                (vr->cookie, dc->channel, &slot);
            cmn_err(CE_CONT, "rx->assign(%d, %d)",
                dc->channel, dc->page);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nx_hio_share_assign: "
                    "rx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*rmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    cmn_err(CE_CONT, "tmap %lx, rmap %lx", *tmap, *rmap);
    return (0);
}

int
nxge_hio_share_unassign(
    nxge_hio_vr_t *vr)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    uint64_t hv_rv;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    dc = vr->tx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        hv_rv = (*tx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_dc_unshare: "
                "tx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    dc = vr->rx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        hv_rv = (*rx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_dc_unshare: "
                "rx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    fp = &nhd->hio.vr;
    if (fp->unassign) {
        hv_rv = (*fp->unassign)(vr->cookie);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
                "vr->unassign(%x) failed: %ld",
                vr->cookie, hv_rv));
        }
    }

    return (0);
}

int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
    mac_share_handle_t *shandle)
{
    p_nxge_t nxge = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;
    nxge_share_handle_t *shp;

    vr_handle_t shared;    /* The VR being shared */
    nxge_hio_vr_t *vr;     /* The Virtualization Region */
    uint64_t rmap, tmap;
    int rv;

    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

    if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
        nhd->hio.rx.assign == 0) {
        NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
        return (EIO);
    }

    /*
     * Get a VR.
     */
    if ((shared = nxge_hio_vr_share(nxge)) == 0)
        return (EAGAIN);
    vr = (nxge_hio_vr_t *)shared;

    /*
     * Get an RDC group for us to use.
     */
    if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
        nxge_hio_unshare(shared);
        return (EBUSY);
    }

    /*
     * Add resources to the share.
     */
    tmap = 0;
    rv = nxge_hio_addres(shared, MAC_RING_TYPE_TX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_unshare(shared);
        return (rv);
    }

    rmap = 0;
    rv = nxge_hio_addres(shared, MAC_RING_TYPE_RX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(shared);
        return (rv);
    }

    if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
        nxge_hio_remres(shared, MAC_RING_TYPE_RX, rmap);
        nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(shared);
        return (rv);
    }

    rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
    rxgroup->gindex = vr->rdc_tbl;
    rxgroup->sindex = vr->region;

    shp = &nxge->shares[vr->region];
    shp->index = vr->region;
    shp->vrp = (void *)vr;
    shp->tmap = tmap;
    shp->rmap = rmap;
    shp->rxgroup = vr->rdc_tbl;
    shp->active = B_TRUE;

    /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
    *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

    *shandle = (mac_share_handle_t)shp;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
    return (0);
}

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    /*
     * First, unassign the VR (take it back),
     * so we can enable interrupts again.
     */
    (void) nxge_hio_share_unassign(shp->vrp);

    /*
     * Free Ring Resources for TX and RX
     */
    nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
    nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

    /*
     * Free VR resource.
     */
    nxge_hio_unshare((vr_handle_t)shp->vrp);

    /*
     * Clear internal handle state.
     */
    shp->index = 0;
    shp->vrp = (void *)NULL;
    shp->tmap = 0;
    shp->rmap = 0;
    shp->rxgroup = 0;
    shp->active = B_FALSE;
}

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    switch (type) {
    case MAC_RING_TYPE_RX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->rmap;
        *gnum = shp->rxgroup;
        break;

    case MAC_RING_TYPE_TX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->tmap;
        *gnum = 0;
        break;
    }
}
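
/*
 * Flow sketch (illustrative only): the alloc/free pair above is driven
 * by the MAC layer roughly as follows.
 *
 *    mac_share_handle_t sh;
 *    uint64_t rcookie;
 *
 *    if (nxge_hio_share_alloc(nxge, hv_cookie, &rcookie, &sh) == 0) {
 *        ...the guest domain binds to the share...
 *        nxge_hio_share_free(sh);
 *    }
 *
 * As noted above, rcookie packs nxge->niu_cfg_hdl into its high 32
 * bits and the HV cookie into its low 32 bits.
 */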

/*
 * nxge_hio_vr_share
 *
 * Find an unused Virtualization Region (VR).
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * Context:
 *    Service domain
 */
vr_handle_t
nxge_hio_vr_share(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_vr_t *vr;

    int first, limit, region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

    MUTEX_ENTER(&nhd->lock);

    if (nhd->available.vrs == 0) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    /* Find an empty virtual region (VR). */
    if (nxge->function_num == 0) {
        // FUNC0_VIR0 'belongs' to NIU port 0.
        first = FUNC0_VIR1;
        limit = FUNC2_VIR0;
    } else if (nxge->function_num == 1) {
        // FUNC2_VIR0 'belongs' to NIU port 1.
        first = FUNC2_VIR1;
        limit = FUNC_VIR_MAX;
    } else {
        cmn_err(CE_WARN,
            "Shares not supported on function(%d) at this time.\n",
            nxge->function_num);
        /* Don't fall through with <first>/<limit> unset. */
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    for (region = first; region < limit; region++) {
        if (nhd->vr[region].nxge == 0)
            break;
    }

    if (region == limit) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    vr = &nhd->vr[region];
    vr->nxge = (uintptr_t)nxge;
    vr->region = (uintptr_t)region;

    nhd->available.vrs--;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

    return ((vr_handle_t)vr);
}

void
nxge_hio_unshare(
    vr_handle_t shared)
{
    nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;

    vr_region_t region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
            "vr->nxge is NULL"));
        return;
    }

    /*
     * This function is no longer called, but I will keep it
     * here in case we want to revisit this topic in the future.
     *
     * nxge_hio_hostinfo_uninit(nxge, vr);
     */
    (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    MUTEX_ENTER(&nhd->lock);

    region = vr->region;
    (void) memset(vr, 0, sizeof (*vr));
    vr->region = region;

    nhd->available.vrs++;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}
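
/*
 * Example for nxge_hio_vr_share() above (illustrative only, assuming
 * the region order declared in nxge_hio.h): port 0 (function 0) may
 * hand out FUNC0_VIR1, FUNC1_VIR0 and FUNC1_VIR1, while port 1
 * (function 1) may hand out FUNC2_VIR1, FUNC3_VIR0 and FUNC3_VIR1;
 * the first free region in the range is chosen.
 */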

int
nxge_hio_addres(
    vr_handle_t shared,
    mac_ring_type_t type,
    int count)
{
    nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
    nxge_t *nxge = (nxge_t *)vr->nxge;
    int i;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

    if (!nxge)
        return (EINVAL);

    for (i = 0; i < count; i++) {
        int rv;
        if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
            if (i == 0)    /* Couldn't get even one DC. */
                return (-rv);
            else
                break;
        }
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

    return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
    vr_handle_t shared,
    mac_ring_type_t type,
    res_map_t res_map)
{
    nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_grp_t *group;

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
            "vr->nxge is NULL"));
        return;
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

    group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
    while (group->dc) {
        nxge_hio_dc_t *dc = group->dc;
        NXGE_DC_RESET(res_map, dc->page);
        nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
    }

    if (res_map) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
            "res_map %lx", res_map));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 * Share an unused TDC channel.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *    Disable TxDMA                    A.9.6.10
 *    [Rebind TxDMA channel to Port    A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is already bound.
 *
 *    Soft Reset TxDMA                 A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *    Re-initialize TxDMA              A.9.6.8
 *    Reconfigure TxDMA
 *    Enable TxDMA                     A.9.6.9
 *
 * Context:
 *    Service domain
 */
int
nxge_hio_tdc_share(
    nxge_t *nxge,
    int channel)
{
    nxge_grp_set_t *set = &nxge->tx_set;
    tx_ring_t *ring;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

    /*
     * Wait until this channel is idle.
     */
    ring = nxge->tx_rings->rings[channel];
    MUTEX_ENTER(&ring->lock);
    switch (ring->tx_ring_state) {
        int count;
    case TX_RING_STATE_OFFLINE:
        break;
    case TX_RING_STATE_IDLE:
        ring->tx_ring_state = TX_RING_STATE_OFFLINE;
        break;
    case TX_RING_STATE_BUSY:
        /* 30 seconds */
        for (count = 30 * 1000; count; count--) {
            MUTEX_EXIT(&ring->lock);
            drv_usecwait(1000); /* 1 millisecond */
            MUTEX_ENTER(&ring->lock);
            if (ring->tx_ring_state == TX_RING_STATE_IDLE) {
                ring->tx_ring_state = TX_RING_STATE_OFFLINE;
                break;
            }
        }
        if (count == 0) {
            MUTEX_EXIT(&ring->lock);
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
                "Tx ring %d was always BUSY", channel));
            return (-EIO);
        }
        break;
    default:
        MUTEX_EXIT(&ring->lock);
        return (-EIO);
    }
    MUTEX_EXIT(&ring->lock);

    if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
            "Failed to remove interrupt for TxDMA channel %d",
            channel));
        return (NXGE_ERROR);
    }

    /* Disable TxDMA			A.9.6.10 */
    (void) nxge_txdma_channel_disable(nxge, channel);

    /* The SD is sharing this channel. */
    NXGE_DC_SET(set->shared.map, channel);
    set->shared.count++;

    /* Soft Reset TxDMA			A.9.6.2 */
    nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

    /*
     * Initialize the DC-specific FZC control registers.
     * -----------------------------------------------------
     */
    if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_share: FZC TDC failed: %d", channel));
        return (-EIO);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share"));

    return (0);
}

/*
 * nxge_hio_rdc_share
 *
 * Share an unused RDC channel.
 *
 * Arguments:
 *    nxge
 *
 * Notes:
 *
 * This is the latest version of the procedure to
 * Reconfigure an Rx DMA channel:
 *
 * A.6.3 Reconfigure Rx DMA channel
 *    Stop RxMAC              A.9.2.6
 *    Drain IPP Port          A.9.3.6
 *    Stop and reset RxDMA    A.9.5.3
 *
 * This procedure will be executed by nxge_init_rxdma_channel() in the
 * guest domain:
 *
 *    Initialize RxDMA        A.9.5.4
 *    Reconfigure RxDMA
 *    Enable RxDMA            A.9.5.5
 *
 * We will do this here, since the RDC is a canalis non grata:
 *    Enable RxMAC            A.9.2.10
 *
 * Context:
 *    Service domain
 */
int
nxge_hio_rdc_share(
    nxge_t *nxge,
    nxge_hio_vr_t *vr,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;
    nxge_grp_set_t *set = &nxge->rx_set;
    nxge_rdc_grp_t *rdc_grp;

    int current, last;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share"));

    /* Disable interrupts. */
    if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: "
            "Failed to remove interrupt for RxDMA channel %d",
            channel));
        return (NXGE_ERROR);
    }

    /* Stop RxMAC = A.9.2.6 */
    if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
            "Failed to disable RxMAC"));
    }

    /* Drain IPP Port = A.9.3.6 */
    (void) nxge_ipp_drain(nxge);

    /* Stop and reset RxDMA = A.9.5.3 */
    // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
    if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: "
            "Failed to disable RxDMA channel %d", channel));
    }

    /* The SD is sharing this channel. */
    NXGE_DC_SET(set->shared.map, channel);
    set->shared.count++;

    // Assert RST: RXDMA_CFIG1[30] = 1
    nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);

    /*
     * We have to reconfigure the RDC table(s)
     * to which this channel belongs.
     */
    current = hardware->def_mac_rxdma_grpid;
    last = current + hardware->max_rdc_grpids;
    for (; current < last; current++) {
        if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
            rdc_grp = &nxge->pt_config.rdc_grps[current];
            rdc_grp->map = set->owned.map;
            rdc_grp->max_rdcs--;
            (void) nxge_init_fzc_rdc_tbl(nxge, current);
        }
    }

    /*
     * The guest domain will reconfigure the RDC later.
     *
     * But in the meantime, we must re-enable the Rx MAC so
     * that we can start receiving packets again on the
     * remaining RDCs:
     *
     *    Enable RxMAC = A.9.2.10
     */
    if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_share: Rx MAC still disabled"));
    }
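
    /*
     * Worked example (illustrative only): if this port owned RDCs 0-3
     * (owned.map == 0xF) and channel 2 was just shared, then
     * nxge_grp_dc_remove() above has already cleared bit 2, so each of
     * our RDC tables was rewritten with map == 0xB and one fewer
     * usable RDC.
     */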

    /*
     * Initialize the DC-specific FZC control registers.
     * -----------------------------------------------------
     */
    if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_share: FZC RDC failed: %d", channel));
        return (-EIO);
    }

    /*
     * We have to initialize the guest's RDC table, too.
     * -----------------------------------------------------
     */
    rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
    if (rdc_grp->max_rdcs == 0) {
        rdc_grp->start_rdc = (uint8_t)channel;
        rdc_grp->def_rdc = (uint8_t)channel;
        rdc_grp->max_rdcs = 1;
    } else {
        rdc_grp->max_rdcs++;
    }
    NXGE_DC_SET(rdc_grp->map, channel);

    if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed"));
        return (-EIO);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));

    return (0);
}

/*
 * nxge_hio_dc_share
 *
 * Share a DMA channel with a guest domain.
 *
 * Arguments:
 *    nxge
 *    vr      The VR that <channel> will belong to.
 *    type    Tx or Rx.
 *
 * Notes:
 *
 * Context:
 *    Service domain
 */
int
nxge_hio_dc_share(
    nxge_t *nxge,
    nxge_hio_vr_t *vr,
    mac_ring_type_t type)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hw_pt_cfg_t *hardware;
    nxge_hio_dc_t *dc;
    int channel, limit;

    nxge_grp_set_t *set;
    nxge_grp_t *group;

    int slot;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc)",
        type == MAC_RING_TYPE_TX ? 't' : 'r'));

    /*
     * In version 1.0, we may only give a VR 2 RDCs or TDCs.
     * Not only that, but the HV has statically assigned the
     * channels like so:
     * VR0: RDC0 & RDC1
     * VR1: RDC2 & RDC3, etc.
     * The TDCs are assigned in exactly the same way.
     *
     * So, for example
     *    hardware->start_rdc + vr->region * 2;
     *    VR1: hardware->start_rdc + 1 * 2;
     *    VR3: hardware->start_rdc + 3 * 2;
     *    If start_rdc is 0, we end up with 2 or 6.
     *    If start_rdc is 8, we end up with 10 or 14.
     */

    set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set);
    hardware = &nxge->pt_config.hw_config;

    // This code is still NIU-specific (assuming only 2 ports)
    channel = hardware->start_rdc + (vr->region % 4) * 2;
    limit = channel + 2;
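
    /*
     * Worked example (illustrative only, assuming FUNC1_VIR1
     * enumerates to 3): with start_rdc == 8 and vr->region == 3,
     * channel = 8 + (3 % 4) * 2 = 14 and limit = 16, so RDCs 14 and
     * 15 are the candidates for this VR.
     */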

    MUTEX_ENTER(&nhd->lock);
    for (; channel < limit; channel++) {
        if ((1 << channel) & set->owned.map) {
            break;
        }
    }

    if (channel == limit) {
        MUTEX_EXIT(&nhd->lock);
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_share: there are no channels to share"));
        return (-EIO);
    }

    MUTEX_EXIT(&nhd->lock);

    /* -------------------------------------------------- */
    slot = (type == MAC_RING_TYPE_TX) ?
        nxge_hio_tdc_share(nxge, channel) :
        nxge_hio_rdc_share(nxge, vr, channel);

    if (slot < 0) {
        if (type == MAC_RING_TYPE_RX) {
            nxge_hio_rdc_unshare(nxge, channel);
        } else {
            nxge_hio_tdc_unshare(nxge, channel);
        }
        return (slot);
    }

    MUTEX_ENTER(&nhd->lock);

    /*
     * Tag this channel.
     * --------------------------------------------------
     */
    dc = type == MAC_RING_TYPE_TX ?
        &nhd->tdc[channel] : &nhd->rdc[channel];

    dc->vr = vr;
    dc->channel = (nxge_channel_t)channel;

    MUTEX_EXIT(&nhd->lock);

    /*
     * vr->[t|r]x_group is used by the service domain to
     * keep track of its shared DMA channels.
     */
    MUTEX_ENTER(&nxge->group_lock);
    group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

    dc->group = (vr_handle_t)group;

    /* Initialize <group>, if necessary */
    if (group->count == 0) {
        group->nxge = nxge;
        group->type = (type == MAC_RING_TYPE_TX) ?
            VP_BOUND_TX : VP_BOUND_RX;
        group->sequence = nhd->sequence++;
        group->active = B_TRUE;
    }

    MUTEX_EXIT(&nxge->group_lock);

    NXGE_ERROR_MSG((nxge, HIO_CTL,
        "DC share: %cDC %d was assigned to slot %d",
        type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

    nxge_grp_dc_append(nxge, group, dc);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

    return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 * Unshare a TDC.
 *
 * Arguments:
 *    nxge
 *    channel   The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *    Service domain
 */
void
nxge_hio_tdc_unshare(
    nxge_t *nxge,
    int channel)
{
    nxge_grp_set_t *set = &nxge->tx_set;
    vr_handle_t handle = (vr_handle_t)set->group[0];

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

    NXGE_DC_RESET(set->shared.map, channel);
    set->shared.count--;

    if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel))) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
            "Failed to initialize TxDMA channel %d", channel));
        return;
    }

    /* Re-add this interrupt. */
    if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
            "Failed to add interrupt for TxDMA channel %d", channel));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}

/*
 * nxge_hio_rdc_unshare
 *
 * Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 *    nxge
 *    channel   The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *    Service domain
 */
void
nxge_hio_rdc_unshare(
    nxge_t *nxge,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;

    nxge_grp_set_t *set = &nxge->rx_set;
    vr_handle_t handle = (vr_handle_t)set->group[0];
    int current, last;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

    /* Stop RxMAC = A.9.2.6 */
    if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
            "Failed to disable RxMAC"));
    }

    /* Drain IPP Port = A.9.3.6 */
    (void) nxge_ipp_drain(nxge);

    /* Stop and reset RxDMA = A.9.5.3 */
    // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
    if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
            "Failed to disable RxDMA channel %d", channel));
    }

    NXGE_DC_RESET(set->shared.map, channel);
    set->shared.count--;

    /*
     * Assert RST: RXDMA_CFIG1[30] = 1
     *
     * Initialize RxDMA     A.9.5.4
     * Reconfigure RxDMA
     * Enable RxDMA         A.9.5.5
     */
    if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_RX, channel))) {
        /* Be sure to re-enable the RX MAC. */
        if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_rdc_unshare: Rx MAC still disabled"));
        }
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
            "Failed to initialize RxDMA channel %d", channel));
        return;
    }

    /*
     * We have to reconfigure the RDC table(s)
     * to which this channel once again belongs.
     */
    current = hardware->def_mac_rxdma_grpid;
    last = current + hardware->max_rdc_grpids;
    for (; current < last; current++) {
        if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
            nxge_rdc_grp_t *group;
            group = &nxge->pt_config.rdc_grps[current];
            group->map = set->owned.map;
            group->max_rdcs++;
            (void) nxge_init_fzc_rdc_tbl(nxge, current);
        }
    }

    /*
     * Enable RxMAC = A.9.2.10
     */
    if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_unshare: Rx MAC still disabled"));
        return;
    }

    /* Re-add this interrupt. */
    if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_unshare: Failed to add interrupt for "
            "RxDMA CHANNEL %d", channel));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
}

/*
 * nxge_hio_dc_unshare
 *
 * Unshare (reuse) a DMA channel.
 *
 * Arguments:
 *    nxge
 *    vr        The VR that <channel> belongs to.
 *    type      Tx or Rx.
 *    channel   The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 *    Service domain
 */
void
nxge_hio_dc_unshare(
    nxge_t *nxge,
    nxge_hio_vr_t *vr,
    mac_ring_type_t type,
    int channel)
{
    nxge_grp_t *group;
    nxge_hio_dc_t *dc;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
        type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

    /* Unlink the channel from its group. */
    /* -------------------------------------------------- */
    group = (type == MAC_RING_TYPE_TX) ?
        &vr->tx_group : &vr->rx_group;
    NXGE_DC_RESET(group->map, channel);
    if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_unshare(%d) failed", channel));
        return;
    }

    dc->vr = 0;
    dc->cookie = 0;

    if (type == MAC_RING_TYPE_RX) {
        nxge_hio_rdc_unshare(nxge, channel);
    } else {
        nxge_hio_tdc_unshare(nxge, channel);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}

#endif	/* if defined(sun4v) */