1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * nxge_hio.c 31 * 32 * This file manages the virtualization resources for Neptune 33 * devices. That is, it implements a hybrid I/O (HIO) approach in the 34 * Solaris kernel, whereby a guest domain on an LDOMs server may 35 * request & use hardware resources from the service domain. 36 * 37 */ 38 39 #include <sys/nxge/nxge_impl.h> 40 #include <sys/nxge/nxge_fzc.h> 41 #include <sys/nxge/nxge_rxdma.h> 42 #include <sys/nxge/nxge_txdma.h> 43 #include <sys/nxge/nxge_hio.h> 44 45 #define NXGE_HIO_SHARE_MIN_CHANNELS 2 46 #define NXGE_HIO_SHARE_MAX_CHANNELS 2 47 48 /* 49 * External prototypes 50 */ 51 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 52 53 /* The following function may be found in nxge_main.c */ 54 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 55 56 /* The following function may be found in nxge_[t|r]xdma.c */ 57 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 58 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 59 60 /* 61 * Local prototypes 62 */ 63 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 64 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 65 static void nxge_grp_dc_map(nxge_grp_t *group); 66 67 /* 68 * These functions are used by both service & guest domains to 69 * decide whether they're running in an LDOMs/XEN environment 70 * or not. If so, then the Hybrid I/O (HIO) module is initialized. 71 */ 72 73 /* 74 * nxge_get_environs 75 * 76 * Figure out if we are in a guest domain or not. 77 * 78 * Arguments: 79 * nxge 80 * 81 * Notes: 82 * 83 * Context: 84 * Any domain 85 */ 86 void 87 nxge_get_environs( 88 nxge_t *nxge) 89 { 90 char *string; 91 92 /* 93 * In the beginning, assume that we are running sans LDOMs/XEN. 94 */ 95 nxge->environs = SOLARIS_DOMAIN; 96 97 /* 98 * Are we a hybrid I/O (HIO) guest domain driver? 99 */ 100 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 101 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 102 "niutype", &string)) == DDI_PROP_SUCCESS) { 103 if (strcmp(string, "n2niu") == 0) { 104 nxge->environs = SOLARIS_GUEST_DOMAIN; 105 /* So we can allocate properly-aligned memory. */ 106 nxge->niu_type = N2_NIU; 107 NXGE_DEBUG_MSG((nxge, HIO_CTL, 108 "Hybrid IO-capable guest domain")); 109 } 110 ddi_prop_free(string); 111 } 112 } 113 114 #if !defined(sun4v) 115 116 /* 117 * nxge_hio_init 118 * 119 * Initialize the HIO module of the NXGE driver. 120 * 121 * Arguments: 122 * nxge 123 * 124 * Notes: 125 * This is the non-hybrid I/O version of this function. 126 * 127 * Context: 128 * Any domain 129 */ 130 int 131 nxge_hio_init( 132 nxge_t *nxge) 133 { 134 nxge_hio_data_t *nhd; 135 136 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 137 if (nhd == 0) { 138 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 139 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 140 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 141 } 142 143 nhd->hio.ldoms = B_FALSE; 144 145 return (NXGE_OK); 146 } 147 148 #endif 149 150 void 151 nxge_hio_uninit( 152 nxge_t *nxge) 153 { 154 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 155 156 ASSERT(nhd != NULL); 157 ASSERT(nxge->nxge_hw_p->ndevs == 0); 158 159 MUTEX_DESTROY(&nhd->lock); 160 161 KMEM_FREE(nhd, sizeof (*nhd)); 162 163 nxge->nxge_hw_p->hio = 0; 164 } 165 166 /* 167 * nxge_dci_map 168 * 169 * Map a DMA channel index to a channel number. 170 * 171 * Arguments: 172 * instance The instance number of the driver. 173 * type The type of channel this is: Tx or Rx. 174 * index The index to convert to a channel number 175 * 176 * Notes: 177 * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 178 * 179 * Context: 180 * Any domain 181 */ 182 int 183 nxge_dci_map( 184 nxge_t *nxge, 185 vpc_type_t type, 186 int index) 187 { 188 nxge_grp_set_t *set; 189 int dc; 190 191 switch (type) { 192 case VP_BOUND_TX: 193 set = &nxge->tx_set; 194 break; 195 case VP_BOUND_RX: 196 set = &nxge->rx_set; 197 break; 198 } 199 200 for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 201 if ((1 << dc) & set->owned.map) { 202 if (index == 0) 203 return (dc); 204 else 205 index--; 206 } 207 } 208 209 return (-1); 210 } 211 212 /* 213 * --------------------------------------------------------------------- 214 * These are the general-purpose DMA channel group functions. That is, 215 * these functions are used to manage groups of TDCs or RDCs in an HIO 216 * environment. 217 * 218 * But is also expected that in the future they will be able to manage 219 * Crossbow groups. 220 * --------------------------------------------------------------------- 221 */ 222 223 /* 224 * nxge_grp_add 225 * 226 * Add a group to an instance of NXGE. 227 * 228 * Arguments: 229 * nxge 230 * type Tx or Rx 231 * 232 * Notes: 233 * 234 * Context: 235 * Any domain 236 */ 237 vr_handle_t 238 nxge_grp_add( 239 nxge_t *nxge, 240 nxge_grp_type_t type) 241 { 242 nxge_grp_set_t *set; 243 nxge_grp_t *group; 244 int i; 245 246 group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 247 group->nxge = nxge; 248 249 MUTEX_ENTER(&nxge->group_lock); 250 switch (type) { 251 case NXGE_TRANSMIT_GROUP: 252 case EXT_TRANSMIT_GROUP: 253 set = &nxge->tx_set; 254 break; 255 default: 256 set = &nxge->rx_set; 257 break; 258 } 259 260 group->type = type; 261 group->active = B_TRUE; 262 group->sequence = set->sequence++; 263 264 /* Find an empty slot for this logical group. */ 265 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 266 if (set->group[i] == 0) { 267 group->index = i; 268 set->group[i] = group; 269 NXGE_DC_SET(set->lg.map, i); 270 set->lg.count++; 271 break; 272 } 273 } 274 MUTEX_EXIT(&nxge->group_lock); 275 276 NXGE_DEBUG_MSG((nxge, HIO_CTL, 277 "nxge_grp_add: %cgroup = %d.%d", 278 type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 279 nxge->mac.portnum, group->sequence)); 280 281 return ((vr_handle_t)group); 282 } 283 284 void 285 nxge_grp_remove( 286 nxge_t *nxge, 287 vr_handle_t handle) /* The group to remove. */ 288 { 289 nxge_grp_set_t *set; 290 nxge_grp_t *group; 291 vpc_type_t type; 292 293 group = (nxge_grp_t *)handle; 294 295 MUTEX_ENTER(&nxge->group_lock); 296 switch (group->type) { 297 case NXGE_TRANSMIT_GROUP: 298 case EXT_TRANSMIT_GROUP: 299 set = &nxge->tx_set; 300 break; 301 default: 302 set = &nxge->rx_set; 303 break; 304 } 305 306 if (set->group[group->index] != group) { 307 MUTEX_EXIT(&nxge->group_lock); 308 return; 309 } 310 311 set->group[group->index] = 0; 312 NXGE_DC_RESET(set->lg.map, group->index); 313 set->lg.count--; 314 315 /* While inside the mutex, deactivate <group>. */ 316 group->active = B_FALSE; 317 318 MUTEX_EXIT(&nxge->group_lock); 319 320 NXGE_DEBUG_MSG((nxge, HIO_CTL, 321 "nxge_grp_remove(%c.%d.%d) called", 322 group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 323 nxge->mac.portnum, group->sequence)); 324 325 /* Now, remove any DCs which are still active. */ 326 switch (group->type) { 327 default: 328 type = VP_BOUND_TX; 329 break; 330 case NXGE_RECEIVE_GROUP: 331 case EXT_RECEIVE_GROUP: 332 type = VP_BOUND_RX; 333 } 334 335 while (group->dc) { 336 nxge_grp_dc_remove(nxge, type, group->dc->channel); 337 } 338 339 KMEM_FREE(group, sizeof (*group)); 340 } 341 342 /* 343 * nx_hio_dc_add 344 * 345 * Add a DMA channel to a VR/Group. 346 * 347 * Arguments: 348 * nxge 349 * channel The channel to add. 350 * Notes: 351 * 352 * Context: 353 * Any domain 354 */ 355 /* ARGSUSED */ 356 int 357 nxge_grp_dc_add( 358 nxge_t *nxge, 359 vr_handle_t handle, /* The group to add <channel> to. */ 360 vpc_type_t type, /* Rx or Tx */ 361 int channel) /* A physical/logical channel number */ 362 { 363 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 364 nxge_hio_dc_t *dc; 365 nxge_grp_set_t *set; 366 nxge_grp_t *group; 367 368 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 369 370 if (handle == 0) 371 return (0); 372 373 switch (type) { 374 default: 375 set = &nxge->tx_set; 376 if (channel > NXGE_MAX_TDCS) { 377 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 378 "nxge_grp_dc_add: TDC = %d", channel)); 379 return (NXGE_ERROR); 380 } 381 break; 382 case VP_BOUND_RX: 383 set = &nxge->rx_set; 384 if (channel > NXGE_MAX_RDCS) { 385 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 386 "nxge_grp_dc_add: RDC = %d", channel)); 387 return (NXGE_ERROR); 388 } 389 break; 390 } 391 392 group = (nxge_grp_t *)handle; 393 NXGE_DEBUG_MSG((nxge, HIO_CTL, 394 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 395 type == VP_BOUND_TX ? 't' : 'r', 396 nxge->mac.portnum, group->sequence, group->count, channel)); 397 398 MUTEX_ENTER(&nxge->group_lock); 399 if (group->active != B_TRUE) { 400 /* We may be in the process of removing this group. */ 401 MUTEX_EXIT(&nxge->group_lock); 402 return (NXGE_ERROR); 403 } 404 MUTEX_EXIT(&nxge->group_lock); 405 406 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 407 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 408 "nxge_grp_dc_add(%d): DC FIND failed", channel)); 409 return (NXGE_ERROR); 410 } 411 412 MUTEX_ENTER(&nhd->lock); 413 414 if (dc->group) { 415 MUTEX_EXIT(&nhd->lock); 416 /* This channel is already in use! */ 417 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 418 "nxge_grp_dc_add(%d): channel already in group", channel)); 419 return (NXGE_ERROR); 420 } 421 422 dc->next = 0; 423 dc->page = channel; 424 dc->channel = (nxge_channel_t)channel; 425 426 dc->type = type; 427 if (type == VP_BOUND_RX) { 428 dc->init = nxge_init_rxdma_channel; 429 dc->uninit = nxge_uninit_rxdma_channel; 430 } else { 431 dc->init = nxge_init_txdma_channel; 432 dc->uninit = nxge_uninit_txdma_channel; 433 } 434 435 dc->group = handle; 436 437 if (isLDOMguest(nxge)) 438 (void) nxge_hio_ldsv_add(nxge, dc); 439 440 NXGE_DC_SET(set->owned.map, channel); 441 set->owned.count++; 442 443 MUTEX_EXIT(&nhd->lock); 444 445 nxge_grp_dc_append(nxge, group, dc); 446 447 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add")); 448 449 return ((*dc->init)(nxge, channel)); 450 } 451 452 void 453 nxge_grp_dc_remove( 454 nxge_t *nxge, 455 vpc_type_t type, 456 int channel) 457 { 458 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 459 nxge_hio_dc_t *dc; 460 nxge_grp_set_t *set; 461 nxge_grp_t *group; 462 463 dc_uninit_t uninit; 464 465 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove")); 466 467 if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) { 468 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 469 "nx_hio_dc_remove: find(%d) failed", channel)); 470 return; 471 } 472 group = (nxge_grp_t *)dc->group; 473 474 if (isLDOMguest(nxge)) { 475 (void) nxge_hio_intr_remove(nxge, type, channel); 476 } 477 478 NXGE_DEBUG_MSG((nxge, HIO_CTL, 479 "DC remove: group = %d.%d.%d, %cdc %d", 480 nxge->mac.portnum, group->sequence, group->count, 481 type == VP_BOUND_TX ? 't' : 'r', dc->channel)); 482 483 MUTEX_ENTER(&nhd->lock); 484 485 /* Remove the DC from its group. */ 486 if (nxge_grp_dc_unlink(nxge, group, channel) != dc) { 487 MUTEX_EXIT(&nhd->lock); 488 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 489 "nx_hio_dc_remove(%d) failed", channel)); 490 return; 491 } 492 493 uninit = dc->uninit; 494 channel = dc->channel; 495 496 set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set; 497 NXGE_DC_RESET(set->owned.map, channel); 498 set->owned.count--; 499 500 (void) memset(dc, 0, sizeof (*dc)); 501 502 MUTEX_EXIT(&nhd->lock); 503 504 (*uninit)(nxge, channel); 505 506 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove")); 507 } 508 509 nxge_hio_dc_t * 510 nxge_grp_dc_find( 511 nxge_t *nxge, 512 vpc_type_t type, /* Rx or Tx */ 513 int channel) 514 { 515 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 516 nxge_hio_dc_t *current; 517 518 current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0]; 519 520 if (!isLDOMguest(nxge)) { 521 return (¤t[channel]); 522 } else { 523 /* We're in a guest domain. */ 524 int i, limit = (type == VP_BOUND_TX) ? 525 NXGE_MAX_TDCS : NXGE_MAX_RDCS; 526 527 MUTEX_ENTER(&nhd->lock); 528 for (i = 0; i < limit; i++, current++) { 529 if (current->channel == channel) { 530 if (current->vr && current->vr->nxge == 531 (uintptr_t)nxge) { 532 MUTEX_EXIT(&nhd->lock); 533 return (current); 534 } 535 } 536 } 537 MUTEX_EXIT(&nhd->lock); 538 } 539 540 return (0); 541 } 542 543 /* 544 * nxge_grp_dc_append 545 * 546 * Append a DMA channel to a group. 547 * 548 * Arguments: 549 * nxge 550 * group The group to append to 551 * dc The DMA channel to append 552 * 553 * Notes: 554 * 555 * Context: 556 * Any domain 557 */ 558 static 559 void 560 nxge_grp_dc_append( 561 nxge_t *nxge, 562 nxge_grp_t *group, 563 nxge_hio_dc_t *dc) 564 { 565 MUTEX_ENTER(&nxge->group_lock); 566 567 if (group->dc == 0) { 568 group->dc = dc; 569 } else { 570 nxge_hio_dc_t *current = group->dc; 571 do { 572 if (current->next == 0) { 573 current->next = dc; 574 break; 575 } 576 current = current->next; 577 } while (current); 578 } 579 580 NXGE_DC_SET(group->map, dc->channel); 581 group->count++; 582 583 nxge_grp_dc_map(group); 584 585 MUTEX_EXIT(&nxge->group_lock); 586 } 587 588 /* 589 * nxge_grp_dc_unlink 590 * 591 * Unlink a DMA channel fromits linked list (group). 592 * 593 * Arguments: 594 * nxge 595 * group The group (linked list) to unlink from 596 * dc The DMA channel to append 597 * 598 * Notes: 599 * 600 * Context: 601 * Any domain 602 */ 603 nxge_hio_dc_t * 604 nxge_grp_dc_unlink( 605 nxge_t *nxge, 606 nxge_grp_t *group, 607 int channel) 608 { 609 nxge_hio_dc_t *current, *previous; 610 611 MUTEX_ENTER(&nxge->group_lock); 612 613 if ((current = group->dc) == 0) { 614 MUTEX_EXIT(&nxge->group_lock); 615 return (0); 616 } 617 618 previous = 0; 619 do { 620 if (current->channel == channel) { 621 if (previous) 622 previous->next = current->next; 623 else 624 group->dc = current->next; 625 break; 626 } 627 previous = current; 628 current = current->next; 629 } while (current); 630 631 if (current == 0) { 632 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 633 "DC unlink: DC %d not found", channel)); 634 } else { 635 current->next = 0; 636 current->group = 0; 637 638 group->count--; 639 if (isLDOMs(nxge)) 640 NXGE_DC_RESET(group->map, channel); 641 } 642 643 nxge_grp_dc_map(group); 644 645 MUTEX_EXIT(&nxge->group_lock); 646 647 return (current); 648 } 649 650 /* 651 * nxge_grp_dc_map 652 * 653 * Map a linked list to an array of channel numbers. 654 * 655 * Arguments: 656 * nxge 657 * group The group to remap. 658 * 659 * Notes: 660 * It is expected that the caller will hold the correct mutex. 661 * 662 * Context: 663 * Service domain 664 */ 665 void 666 nxge_grp_dc_map( 667 nxge_grp_t *group) 668 { 669 nxge_channel_t *legend; 670 nxge_hio_dc_t *dc; 671 672 (void) memset(group->legend, 0, sizeof (group->legend)); 673 674 legend = group->legend; 675 dc = group->dc; 676 while (dc) { 677 *legend = dc->channel; 678 legend++; 679 dc = dc->next; 680 } 681 } 682 683 /* 684 * --------------------------------------------------------------------- 685 * These are HIO debugging functions. 686 * --------------------------------------------------------------------- 687 */ 688 689 /* 690 * nxge_delay 691 * 692 * Delay <seconds> number of seconds. 693 * 694 * Arguments: 695 * nxge 696 * group The group to append to 697 * dc The DMA channel to append 698 * 699 * Notes: 700 * This is a developer-only function. 701 * 702 * Context: 703 * Any domain 704 */ 705 void 706 nxge_delay( 707 int seconds) 708 { 709 delay(drv_usectohz(seconds * 1000000)); 710 } 711 712 static dmc_reg_name_t rx_names[] = { 713 { "RXDMA_CFIG1", 0 }, 714 { "RXDMA_CFIG2", 8 }, 715 { "RBR_CFIG_A", 0x10 }, 716 { "RBR_CFIG_B", 0x18 }, 717 { "RBR_KICK", 0x20 }, 718 { "RBR_STAT", 0x28 }, 719 { "RBR_HDH", 0x30 }, 720 { "RBR_HDL", 0x38 }, 721 { "RCRCFIG_A", 0x40 }, 722 { "RCRCFIG_B", 0x48 }, 723 { "RCRSTAT_A", 0x50 }, 724 { "RCRSTAT_B", 0x58 }, 725 { "RCRSTAT_C", 0x60 }, 726 { "RX_DMA_ENT_MSK", 0x68 }, 727 { "RX_DMA_CTL_STAT", 0x70 }, 728 { "RCR_FLSH", 0x78 }, 729 { "RXMISC", 0x90 }, 730 { "RX_DMA_CTL_STAT_DBG", 0x98 }, 731 { 0, -1 } 732 }; 733 734 static dmc_reg_name_t tx_names[] = { 735 { "Tx_RNG_CFIG", 0 }, 736 { "Tx_RNG_HDL", 0x10 }, 737 { "Tx_RNG_KICK", 0x18 }, 738 { "Tx_ENT_MASK", 0x20 }, 739 { "Tx_CS", 0x28 }, 740 { "TxDMA_MBH", 0x30 }, 741 { "TxDMA_MBL", 0x38 }, 742 { "TxDMA_PRE_ST", 0x40 }, 743 { "Tx_RNG_ERR_LOGH", 0x48 }, 744 { "Tx_RNG_ERR_LOGL", 0x50 }, 745 { "TDMC_INTR_DBG", 0x60 }, 746 { "Tx_CS_DBG", 0x68 }, 747 { 0, -1 } 748 }; 749 750 /* 751 * nxge_xx2str 752 * 753 * Translate a register address into a string. 754 * 755 * Arguments: 756 * offset The address of the register to translate. 757 * 758 * Notes: 759 * These are developer-only function. 760 * 761 * Context: 762 * Any domain 763 */ 764 const char * 765 nxge_rx2str( 766 int offset) 767 { 768 dmc_reg_name_t *reg = &rx_names[0]; 769 770 offset &= DMA_CSR_MASK; 771 772 while (reg->name) { 773 if (offset == reg->offset) 774 return (reg->name); 775 reg++; 776 } 777 778 return (0); 779 } 780 781 const char * 782 nxge_tx2str( 783 int offset) 784 { 785 dmc_reg_name_t *reg = &tx_names[0]; 786 787 offset &= DMA_CSR_MASK; 788 789 while (reg->name) { 790 if (offset == reg->offset) 791 return (reg->name); 792 reg++; 793 } 794 795 return (0); 796 } 797 798 /* 799 * nxge_ddi_perror 800 * 801 * Map a DDI error number to a string. 802 * 803 * Arguments: 804 * ddi_error The DDI error number to map. 805 * 806 * Notes: 807 * 808 * Context: 809 * Any domain 810 */ 811 const char * 812 nxge_ddi_perror( 813 int ddi_error) 814 { 815 switch (ddi_error) { 816 case DDI_SUCCESS: 817 return ("DDI_SUCCESS"); 818 case DDI_FAILURE: 819 return ("DDI_FAILURE"); 820 case DDI_NOT_WELL_FORMED: 821 return ("DDI_NOT_WELL_FORMED"); 822 case DDI_EAGAIN: 823 return ("DDI_EAGAIN"); 824 case DDI_EINVAL: 825 return ("DDI_EINVAL"); 826 case DDI_ENOTSUP: 827 return ("DDI_ENOTSUP"); 828 case DDI_EPENDING: 829 return ("DDI_EPENDING"); 830 case DDI_ENOMEM: 831 return ("DDI_ENOMEM"); 832 case DDI_EBUSY: 833 return ("DDI_EBUSY"); 834 case DDI_ETRANSPORT: 835 return ("DDI_ETRANSPORT"); 836 case DDI_ECONTEXT: 837 return ("DDI_ECONTEXT"); 838 default: 839 return ("Unknown error"); 840 } 841 } 842 843 /* 844 * --------------------------------------------------------------------- 845 * These are Sun4v HIO function definitions 846 * --------------------------------------------------------------------- 847 */ 848 849 #if defined(sun4v) 850 851 /* 852 * Local prototypes 853 */ 854 static vr_handle_t nxge_hio_vr_share(nxge_t *); 855 856 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t); 857 static void nxge_hio_unshare(vr_handle_t); 858 859 static int nxge_hio_addres(vr_handle_t, mac_ring_type_t, int); 860 static void nxge_hio_remres(vr_handle_t, mac_ring_type_t, res_map_t); 861 862 static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel); 863 static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel); 864 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 865 mac_ring_type_t, int); 866 867 /* 868 * nxge_hio_init 869 * 870 * Initialize the HIO module of the NXGE driver. 871 * 872 * Arguments: 873 * nxge 874 * 875 * Notes: 876 * 877 * Context: 878 * Any domain 879 */ 880 int 881 nxge_hio_init( 882 nxge_t *nxge) 883 { 884 nxge_hio_data_t *nhd; 885 int i, region; 886 887 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 888 if (nhd == 0) { 889 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 890 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 891 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 892 } 893 894 if (nxge->environs == SOLARIS_DOMAIN) { 895 if (nxge->niu_hsvc_available == B_TRUE) { 896 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 897 if (niu_hsvc->hsvc_major == 1 && 898 niu_hsvc->hsvc_minor == 1) 899 nxge->environs = SOLARIS_SERVICE_DOMAIN; 900 NXGE_DEBUG_MSG((nxge, HIO_CTL, 901 "nxge_hio_init: hypervisor services " 902 "version %d.%d", 903 niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 904 } 905 } 906 907 if (!isLDOMs(nxge)) { 908 nhd->hio.ldoms = B_FALSE; 909 return (NXGE_OK); 910 } 911 912 nhd->hio.ldoms = B_TRUE; 913 914 /* 915 * Fill in what we can. 916 */ 917 for (region = 0; region < NXGE_VR_SR_MAX; region++) { 918 nhd->vr[region].region = region; 919 } 920 nhd->available.vrs = NXGE_VR_SR_MAX - 2; 921 922 /* 923 * Initialize share and ring group structures. 924 */ 925 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 926 nxge->rx_hio_groups[i].ghandle = NULL; 927 nxge->rx_hio_groups[i].nxgep = nxge; 928 nxge->rx_hio_groups[i].gindex = 0; 929 nxge->rx_hio_groups[i].sindex = 0; 930 } 931 932 for (i = 0; i < NXGE_VR_SR_MAX; i++) { 933 nxge->shares[i].nxgep = nxge; 934 nxge->shares[i].index = 0; 935 nxge->shares[i].vrp = (void *)NULL; 936 nxge->shares[i].tmap = 0; 937 nxge->shares[i].rmap = 0; 938 nxge->shares[i].rxgroup = 0; 939 nxge->shares[i].active = B_FALSE; 940 } 941 942 /* Fill in the HV HIO function pointers. */ 943 nxge_hio_hv_init(nxge); 944 945 if (isLDOMservice(nxge)) { 946 NXGE_DEBUG_MSG((nxge, HIO_CTL, 947 "Hybrid IO-capable service domain")); 948 return (NXGE_OK); 949 } else { 950 /* 951 * isLDOMguest(nxge) == B_TRUE 952 */ 953 nx_vio_fp_t *vio; 954 nhd->type = NXGE_HIO_TYPE_GUEST; 955 956 vio = &nhd->hio.vio; 957 vio->__register = (vio_net_resource_reg_t) 958 modgetsymvalue("vio_net_resource_reg", 0); 959 vio->unregister = (vio_net_resource_unreg_t) 960 modgetsymvalue("vio_net_resource_unreg", 0); 961 962 if (vio->__register == 0 || vio->unregister == 0) { 963 NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!")); 964 return (NXGE_ERROR); 965 } 966 } 967 968 return (0); 969 } 970 971 static int 972 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) 973 { 974 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 975 p_nxge_t nxge = rxgroup->nxgep; 976 int group = rxgroup->gindex; 977 int rv, sindex; 978 nxge_hio_vr_t *vr; /* The Virtualization Region */ 979 980 sindex = nxge->rx_hio_groups[group].sindex; 981 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 982 983 /* 984 * Program the mac address for the group/share. 985 */ 986 if ((rv = nxge_hio_hostinfo_init(nxge, vr, 987 (ether_addr_t *)mac_addr)) != 0) { 988 return (rv); 989 } 990 991 return (0); 992 } 993 994 /* ARGSUSED */ 995 static int 996 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 997 { 998 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 999 p_nxge_t nxge = rxgroup->nxgep; 1000 int group = rxgroup->gindex; 1001 int sindex; 1002 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1003 1004 sindex = nxge->rx_hio_groups[group].sindex; 1005 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 1006 1007 /* 1008 * Remove the mac address for the group/share. 1009 */ 1010 nxge_hio_hostinfo_uninit(nxge, vr); 1011 1012 return (0); 1013 } 1014 1015 /* ARGSUSED */ 1016 void 1017 nxge_hio_group_get(void *arg, mac_ring_type_t type, int group, 1018 mac_group_info_t *infop, mac_group_handle_t ghdl) 1019 { 1020 p_nxge_t nxgep = (p_nxge_t)arg; 1021 nxge_rx_ring_group_t *rxgroup; 1022 1023 switch (type) { 1024 case MAC_RING_TYPE_RX: 1025 rxgroup = &nxgep->rx_hio_groups[group]; 1026 rxgroup->gindex = group; 1027 1028 infop->mrg_driver = (mac_group_driver_t)rxgroup; 1029 infop->mrg_start = NULL; 1030 infop->mrg_stop = NULL; 1031 infop->mrg_addmac = nxge_hio_add_mac; 1032 infop->mrg_remmac = nxge_hio_rem_mac; 1033 infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS; 1034 break; 1035 1036 case MAC_RING_TYPE_TX: 1037 break; 1038 } 1039 } 1040 1041 int 1042 nxge_hio_share_assign( 1043 nxge_t *nxge, 1044 uint64_t cookie, 1045 res_map_t *tmap, 1046 res_map_t *rmap, 1047 nxge_hio_vr_t *vr) 1048 { 1049 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1050 uint64_t slot, hv_rv; 1051 nxge_hio_dc_t *dc; 1052 nxhv_vr_fp_t *fp; 1053 int i; 1054 1055 /* 1056 * Ask the Hypervisor to set up the VR for us 1057 */ 1058 fp = &nhd->hio.vr; 1059 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 1060 NXGE_ERROR_MSG((nxge, HIO_CTL, 1061 "nx_hio_share_assign: " 1062 "vr->assign() returned %d", hv_rv)); 1063 nxge_hio_unshare((vr_handle_t)vr); 1064 return (-EIO); 1065 } 1066 1067 /* 1068 * For each shared TDC, ask the HV to find us an empty slot. 1069 * ----------------------------------------------------- 1070 */ 1071 dc = vr->tx_group.dc; 1072 for (i = 0; i < NXGE_MAX_TDCS; i++) { 1073 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1074 while (dc) { 1075 hv_rv = (*tx->assign) 1076 (vr->cookie, dc->channel, &slot); 1077 cmn_err(CE_CONT, "tx->assign(%d, %d)", dc->channel, dc->page); 1078 if (hv_rv != 0) { 1079 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1080 "nx_hio_share_assign: " 1081 "tx->assign(%x, %d) failed: %ld", 1082 vr->cookie, dc->channel, hv_rv)); 1083 return (-EIO); 1084 } 1085 1086 dc->cookie = vr->cookie; 1087 dc->page = (vp_channel_t)slot; 1088 1089 /* Inform the caller about the slot chosen. */ 1090 (*tmap) |= 1 << slot; 1091 1092 dc = dc->next; 1093 } 1094 } 1095 1096 /* 1097 * For each shared RDC, ask the HV to find us an empty slot. 1098 * ----------------------------------------------------- 1099 */ 1100 dc = vr->rx_group.dc; 1101 for (i = 0; i < NXGE_MAX_RDCS; i++) { 1102 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1103 while (dc) { 1104 hv_rv = (*rx->assign) 1105 (vr->cookie, dc->channel, &slot); 1106 cmn_err(CE_CONT, "rx->assign(%d, %d)", dc->channel, dc->page); 1107 if (hv_rv != 0) { 1108 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1109 "nx_hio_share_assign: " 1110 "rx->assign(%x, %d) failed: %ld", 1111 vr->cookie, dc->channel, hv_rv)); 1112 return (-EIO); 1113 } 1114 1115 dc->cookie = vr->cookie; 1116 dc->page = (vp_channel_t)slot; 1117 1118 /* Inform the caller about the slot chosen. */ 1119 (*rmap) |= 1 << slot; 1120 1121 dc = dc->next; 1122 } 1123 } 1124 1125 cmn_err(CE_CONT, "tmap %lx, rmap %lx", *tmap, *rmap); 1126 return (0); 1127 } 1128 1129 int 1130 nxge_hio_share_unassign( 1131 nxge_hio_vr_t *vr) 1132 { 1133 nxge_t *nxge = (nxge_t *)vr->nxge; 1134 nxge_hio_data_t *nhd; 1135 nxge_hio_dc_t *dc; 1136 nxhv_vr_fp_t *fp; 1137 uint64_t hv_rv; 1138 1139 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1140 1141 dc = vr->tx_group.dc; 1142 while (dc) { 1143 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1144 hv_rv = (*tx->unassign)(vr->cookie, dc->page); 1145 if (hv_rv != 0) { 1146 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1147 "nx_hio_dc_unshare: " 1148 "tx->unassign(%x, %d) failed: %ld", 1149 vr->cookie, dc->page, hv_rv)); 1150 } 1151 dc = dc->next; 1152 } 1153 1154 dc = vr->rx_group.dc; 1155 while (dc) { 1156 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1157 hv_rv = (*rx->unassign)(vr->cookie, dc->page); 1158 if (hv_rv != 0) { 1159 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1160 "nx_hio_dc_unshare: " 1161 "rx->unassign(%x, %d) failed: %ld", 1162 vr->cookie, dc->page, hv_rv)); 1163 } 1164 dc = dc->next; 1165 } 1166 1167 fp = &nhd->hio.vr; 1168 if (fp->unassign) { 1169 hv_rv = (*fp->unassign)(vr->cookie); 1170 if (hv_rv != 0) { 1171 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1172 "vr->assign(%x) failed: %ld", 1173 vr->cookie, hv_rv)); 1174 } 1175 } 1176 1177 return (0); 1178 } 1179 1180 int 1181 nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie, 1182 mac_share_handle_t *shandle) 1183 { 1184 p_nxge_t nxge = (p_nxge_t)arg; 1185 nxge_rx_ring_group_t *rxgroup; 1186 nxge_share_handle_t *shp; 1187 1188 vr_handle_t shared; /* The VR being shared */ 1189 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1190 uint64_t rmap, tmap; 1191 int rv; 1192 1193 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1194 1195 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 1196 1197 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 1198 nhd->hio.rx.assign == 0) { 1199 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 1200 return (EIO); 1201 } 1202 1203 /* 1204 * Get a VR. 1205 */ 1206 if ((shared = nxge_hio_vr_share(nxge)) == 0) 1207 return (EAGAIN); 1208 vr = (nxge_hio_vr_t *)shared; 1209 1210 /* 1211 * Get an RDC group for us to use. 1212 */ 1213 if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) { 1214 nxge_hio_unshare(shared); 1215 return (EBUSY); 1216 } 1217 1218 /* 1219 * Add resources to the share. 1220 */ 1221 tmap = 0; 1222 rv = nxge_hio_addres(shared, MAC_RING_TYPE_TX, 1223 NXGE_HIO_SHARE_MAX_CHANNELS); 1224 if (rv != 0) { 1225 nxge_hio_unshare(shared); 1226 return (rv); 1227 } 1228 1229 rmap = 0; 1230 rv = nxge_hio_addres(shared, MAC_RING_TYPE_RX, 1231 NXGE_HIO_SHARE_MAX_CHANNELS); 1232 if (rv != 0) { 1233 nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap); 1234 nxge_hio_unshare(shared); 1235 return (rv); 1236 } 1237 1238 if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) { 1239 nxge_hio_remres(shared, MAC_RING_TYPE_RX, tmap); 1240 nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap); 1241 nxge_hio_unshare(shared); 1242 return (rv); 1243 } 1244 1245 rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl]; 1246 rxgroup->gindex = vr->rdc_tbl; 1247 rxgroup->sindex = vr->region; 1248 1249 shp = &nxge->shares[vr->region]; 1250 shp->index = vr->region; 1251 shp->vrp = (void *)vr; 1252 shp->tmap = tmap; 1253 shp->rmap = rmap; 1254 shp->rxgroup = vr->rdc_tbl; 1255 shp->active = B_TRUE; 1256 1257 /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 1258 *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 1259 1260 *shandle = (mac_share_handle_t)shp; 1261 1262 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 1263 return (0); 1264 } 1265 1266 void 1267 nxge_hio_share_free(mac_share_handle_t shandle) 1268 { 1269 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1270 1271 /* 1272 * First, unassign the VR (take it back), 1273 * so we can enable interrupts again. 1274 */ 1275 (void) nxge_hio_share_unassign(shp->vrp); 1276 1277 /* 1278 * Free Ring Resources for TX and RX 1279 */ 1280 nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 1281 nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 1282 1283 /* 1284 * Free VR resource. 1285 */ 1286 nxge_hio_unshare((vr_handle_t)shp->vrp); 1287 1288 /* 1289 * Clear internal handle state. 1290 */ 1291 shp->index = 0; 1292 shp->vrp = (void *)NULL; 1293 shp->tmap = 0; 1294 shp->rmap = 0; 1295 shp->rxgroup = 0; 1296 shp->active = B_FALSE; 1297 } 1298 1299 void 1300 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 1301 uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum) 1302 { 1303 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1304 1305 switch (type) { 1306 case MAC_RING_TYPE_RX: 1307 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1308 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1309 *rmap = shp->rmap; 1310 *gnum = shp->rxgroup; 1311 break; 1312 1313 case MAC_RING_TYPE_TX: 1314 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1315 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1316 *rmap = shp->tmap; 1317 *gnum = 0; 1318 break; 1319 } 1320 } 1321 1322 /* 1323 * nxge_hio_vr_share 1324 * 1325 * Find an unused Virtualization Region (VR). 1326 * 1327 * Arguments: 1328 * nxge 1329 * 1330 * Notes: 1331 * 1332 * Context: 1333 * Service domain 1334 */ 1335 vr_handle_t 1336 nxge_hio_vr_share( 1337 nxge_t *nxge) 1338 { 1339 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1340 nxge_hio_vr_t *vr; 1341 1342 int first, limit, region; 1343 1344 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 1345 1346 MUTEX_ENTER(&nhd->lock); 1347 1348 if (nhd->available.vrs == 0) { 1349 MUTEX_EXIT(&nhd->lock); 1350 return (0); 1351 } 1352 1353 /* Find an empty virtual region (VR). */ 1354 if (nxge->function_num == 0) { 1355 // FUNC0_VIR0 'belongs' to NIU port 0. 1356 first = FUNC0_VIR1; 1357 limit = FUNC2_VIR0; 1358 } else if (nxge->function_num == 1) { 1359 // FUNC2_VIR0 'belongs' to NIU port 1. 1360 first = FUNC2_VIR1; 1361 limit = FUNC_VIR_MAX; 1362 } else { 1363 cmn_err(CE_WARN, 1364 "Shares not supported on function(%d) at this time.\n", 1365 nxge->function_num); 1366 } 1367 1368 for (region = first; region < limit; region++) { 1369 if (nhd->vr[region].nxge == 0) 1370 break; 1371 } 1372 1373 if (region == limit) { 1374 MUTEX_EXIT(&nhd->lock); 1375 return (0); 1376 } 1377 1378 vr = &nhd->vr[region]; 1379 vr->nxge = (uintptr_t)nxge; 1380 vr->region = (uintptr_t)region; 1381 1382 nhd->available.vrs--; 1383 1384 MUTEX_EXIT(&nhd->lock); 1385 1386 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share")); 1387 1388 return ((vr_handle_t)vr); 1389 } 1390 1391 void 1392 nxge_hio_unshare( 1393 vr_handle_t shared) 1394 { 1395 nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 1396 nxge_t *nxge = (nxge_t *)vr->nxge; 1397 nxge_hio_data_t *nhd; 1398 1399 vr_region_t region; 1400 1401 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare")); 1402 1403 if (!nxge) { 1404 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1405 "vr->nxge is NULL")); 1406 return; 1407 } 1408 1409 /* 1410 * This function is no longer called, but I will keep it 1411 * here in case we want to revisit this topic in the future. 1412 * 1413 * nxge_hio_hostinfo_uninit(nxge, vr); 1414 */ 1415 (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl); 1416 1417 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1418 1419 MUTEX_ENTER(&nhd->lock); 1420 1421 region = vr->region; 1422 (void) memset(vr, 0, sizeof (*vr)); 1423 vr->region = region; 1424 1425 nhd->available.vrs++; 1426 1427 MUTEX_EXIT(&nhd->lock); 1428 1429 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare")); 1430 } 1431 1432 int 1433 nxge_hio_addres( 1434 vr_handle_t shared, 1435 mac_ring_type_t type, 1436 int count) 1437 { 1438 nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 1439 nxge_t *nxge = (nxge_t *)vr->nxge; 1440 int i; 1441 1442 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres")); 1443 1444 if (!nxge) 1445 return (EINVAL); 1446 1447 for (i = 0; i < count; i++) { 1448 int rv; 1449 if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) { 1450 if (i == 0) /* Couldn't get even one DC. */ 1451 return (-rv); 1452 else 1453 break; 1454 } 1455 } 1456 1457 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres")); 1458 1459 return (0); 1460 } 1461 1462 /* ARGSUSED */ 1463 void 1464 nxge_hio_remres( 1465 vr_handle_t shared, 1466 mac_ring_type_t type, 1467 res_map_t res_map) 1468 { 1469 nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared; 1470 nxge_t *nxge = (nxge_t *)vr->nxge; 1471 nxge_grp_t *group; 1472 1473 if (!nxge) { 1474 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: " 1475 "vr->nxge is NULL")); 1476 return; 1477 } 1478 1479 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map)); 1480 1481 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 1482 while (group->dc) { 1483 nxge_hio_dc_t *dc = group->dc; 1484 NXGE_DC_RESET(res_map, dc->page); 1485 nxge_hio_dc_unshare(nxge, vr, type, dc->channel); 1486 } 1487 1488 if (res_map) { 1489 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: " 1490 "res_map %lx", res_map)); 1491 } 1492 1493 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres")); 1494 } 1495 1496 /* 1497 * nxge_hio_tdc_share 1498 * 1499 * Share an unused TDC channel. 1500 * 1501 * Arguments: 1502 * nxge 1503 * 1504 * Notes: 1505 * 1506 * A.7.3 Reconfigure Tx DMA channel 1507 * Disable TxDMA A.9.6.10 1508 * [Rebind TxDMA channel to Port A.9.6.7] 1509 * 1510 * We don't have to Rebind the TDC to the port - it always already bound. 1511 * 1512 * Soft Reset TxDMA A.9.6.2 1513 * 1514 * This procedure will be executed by nxge_init_txdma_channel() in the 1515 * guest domain: 1516 * 1517 * Re-initialize TxDMA A.9.6.8 1518 * Reconfigure TxDMA 1519 * Enable TxDMA A.9.6.9 1520 * 1521 * Context: 1522 * Service domain 1523 */ 1524 int 1525 nxge_hio_tdc_share( 1526 nxge_t *nxge, 1527 int channel) 1528 { 1529 nxge_grp_set_t *set = &nxge->tx_set; 1530 tx_ring_t *ring; 1531 1532 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 1533 1534 /* 1535 * Wait until this channel is idle. 1536 */ 1537 ring = nxge->tx_rings->rings[channel]; 1538 MUTEX_ENTER(&ring->lock); 1539 switch (ring->tx_ring_state) { 1540 int count; 1541 case TX_RING_STATE_OFFLINE: 1542 MUTEX_EXIT(&ring->lock); 1543 break; 1544 case TX_RING_STATE_IDLE: 1545 ring->tx_ring_state = TX_RING_STATE_OFFLINE; 1546 MUTEX_EXIT(&ring->lock); 1547 break; 1548 case TX_RING_STATE_BUSY: 1549 /* 30 seconds */ 1550 for (count = 30 * 1000; count; count--) { 1551 MUTEX_EXIT(&ring->lock); 1552 drv_usecwait(1000); /* 1 millisecond */ 1553 MUTEX_ENTER(&ring->lock); 1554 if (ring->tx_ring_state == TX_RING_STATE_IDLE) { 1555 ring->tx_ring_state = TX_RING_STATE_OFFLINE; 1556 MUTEX_EXIT(&ring->lock); 1557 break; 1558 } 1559 } 1560 if (count == 0) { 1561 MUTEX_EXIT(&ring->lock); 1562 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 1563 "Tx ring %d was always BUSY", channel)); 1564 return (-EIO); 1565 } 1566 break; 1567 } 1568 1569 if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 1570 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 1571 "Failed to remove interrupt for TxDMA channel %d", 1572 channel)); 1573 return (NXGE_ERROR); 1574 } 1575 1576 /* Disable TxDMA A.9.6.10 */ 1577 (void) nxge_txdma_channel_disable(nxge, channel); 1578 1579 /* Soft Reset TxDMA A.9.6.2 */ 1580 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 1581 1582 /* The SD is sharing this channel. */ 1583 NXGE_DC_SET(set->shared.map, channel); 1584 set->shared.count++; 1585 1586 /* 1587 * Initialize the DC-specific FZC control registers. 1588 * ----------------------------------------------------- 1589 */ 1590 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 1591 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1592 "nx_hio_dc_share: FZC TDC failed: %d", channel)); 1593 return (-EIO); 1594 } 1595 1596 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 1597 1598 return (0); 1599 } 1600 1601 /* 1602 * nxge_hio_rdc_share 1603 * 1604 * Share an unused RDC channel. 1605 * 1606 * Arguments: 1607 * nxge 1608 * 1609 * Notes: 1610 * 1611 * This is the latest version of the procedure to 1612 * Reconfigure an Rx DMA channel: 1613 * 1614 * A.6.3 Reconfigure Rx DMA channel 1615 * Stop RxMAC A.9.2.6 1616 * Drain IPP Port A.9.3.6 1617 * Stop and reset RxDMA A.9.5.3 1618 * 1619 * This procedure will be executed by nxge_init_rxdma_channel() in the 1620 * guest domain: 1621 * 1622 * Initialize RxDMA A.9.5.4 1623 * Reconfigure RxDMA 1624 * Enable RxDMA A.9.5.5 1625 * 1626 * We will do this here, since the RDC is a canalis non grata: 1627 * Enable RxMAC A.9.2.10 1628 * 1629 * Context: 1630 * Service domain 1631 */ 1632 int 1633 nxge_hio_rdc_share( 1634 nxge_t *nxge, 1635 nxge_hio_vr_t *vr, 1636 int channel) 1637 { 1638 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1639 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1640 nxge_grp_set_t *set = &nxge->rx_set; 1641 nxge_rdc_grp_t *rdc_grp; 1642 1643 int current, last; 1644 1645 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 1646 1647 /* Disable interrupts. */ 1648 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1649 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 1650 "Failed to remove interrupt for RxDMA channel %d", 1651 channel)); 1652 return (NXGE_ERROR); 1653 } 1654 1655 /* Stop RxMAC = A.9.2.6 */ 1656 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1657 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1658 "Failed to disable RxMAC")); 1659 } 1660 1661 /* Drain IPP Port = A.9.3.6 */ 1662 (void) nxge_ipp_drain(nxge); 1663 1664 /* Stop and reset RxDMA = A.9.5.3 */ 1665 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1666 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1667 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1668 "Failed to disable RxDMA channel %d", channel)); 1669 } 1670 1671 // Assert RST: RXDMA_CFIG1[30] = 1 1672 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 1673 1674 /* The SD is sharing this channel. */ 1675 NXGE_DC_SET(set->shared.map, channel); 1676 set->shared.count++; 1677 1678 /* 1679 * We have to reconfigure the RDC table(s) 1680 * to which this channel belongs. 1681 */ 1682 current = hardware->def_mac_rxdma_grpid; 1683 last = current + hardware->max_rdc_grpids; 1684 for (; current < last; current++) { 1685 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1686 rdc_grp = &nxge->pt_config.rdc_grps[current]; 1687 rdc_grp->map = set->owned.map; 1688 rdc_grp->max_rdcs--; 1689 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1690 } 1691 } 1692 1693 /* 1694 * The guest domain will reconfigure the RDC later. 1695 * 1696 * But in the meantime, we must re-enable the Rx MAC so 1697 * that we can start receiving packets again on the 1698 * remaining RDCs: 1699 * 1700 * Enable RxMAC = A.9.2.10 1701 */ 1702 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1703 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1704 "nx_hio_rdc_share: Rx MAC still disabled")); 1705 } 1706 1707 /* 1708 * Initialize the DC-specific FZC control registers. 1709 * ----------------------------------------------------- 1710 */ 1711 if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 1712 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1713 "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 1714 return (-EIO); 1715 } 1716 1717 /* 1718 * We have to initialize the guest's RDC table, too. 1719 * ----------------------------------------------------- 1720 */ 1721 rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 1722 if (rdc_grp->max_rdcs == 0) { 1723 rdc_grp->start_rdc = (uint8_t)channel; 1724 rdc_grp->def_rdc = (uint8_t)channel; 1725 rdc_grp->max_rdcs = 1; 1726 } else { 1727 rdc_grp->max_rdcs++; 1728 } 1729 NXGE_DC_SET(rdc_grp->map, channel); 1730 1731 if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 1732 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1733 "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 1734 return (-EIO); 1735 } 1736 1737 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 1738 1739 return (0); 1740 } 1741 1742 /* 1743 * nxge_hio_dc_share 1744 * 1745 * Share a DMA channel with a guest domain. 1746 * 1747 * Arguments: 1748 * nxge 1749 * vr The VR that <channel> will belong to. 1750 * type Tx or Rx. 1751 * res_map The resource map used by the caller, which we will 1752 * update if successful. 1753 * 1754 * Notes: 1755 * 1756 * Context: 1757 * Service domain 1758 */ 1759 int 1760 nxge_hio_dc_share( 1761 nxge_t *nxge, 1762 nxge_hio_vr_t *vr, 1763 mac_ring_type_t type) 1764 { 1765 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1766 nxge_hw_pt_cfg_t *hardware; 1767 nxge_hio_dc_t *dc; 1768 int channel, limit; 1769 1770 nxge_grp_set_t *set; 1771 nxge_grp_t *group; 1772 1773 int slot; 1774 1775 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 1776 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 1777 1778 /* 1779 * In version 1.0, we may only give a VR 2 RDCs or TDCs. 1780 * Not only that, but the HV has statically assigned the 1781 * channels like so: 1782 * VR0: RDC0 & RDC1 1783 * VR1: RDC2 & RDC3, etc. 1784 * The TDCs are assigned in exactly the same way. 1785 * 1786 * So, for example 1787 * hardware->start_rdc + vr->region * 2; 1788 * VR1: hardware->start_rdc + 1 * 2; 1789 * VR3: hardware->start_rdc + 3 * 2; 1790 * If start_rdc is 0, we end up with 2 or 6. 1791 * If start_rdc is 8, we end up with 10 or 14. 1792 */ 1793 1794 set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set); 1795 hardware = &nxge->pt_config.hw_config; 1796 1797 // This code is still NIU-specific (assuming only 2 ports) 1798 channel = hardware->start_rdc + (vr->region % 4) * 2; 1799 limit = channel + 2; 1800 1801 MUTEX_ENTER(&nhd->lock); 1802 for (; channel < limit; channel++) { 1803 if ((1 << channel) & set->owned.map) { 1804 break; 1805 } 1806 } 1807 1808 if (channel == limit) { 1809 MUTEX_EXIT(&nhd->lock); 1810 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1811 "nx_hio_dc_share: there are no channels to share")); 1812 return (-EIO); 1813 } 1814 1815 MUTEX_EXIT(&nhd->lock); 1816 1817 /* -------------------------------------------------- */ 1818 slot = (type == MAC_RING_TYPE_TX) ? 1819 nxge_hio_tdc_share(nxge, channel) : 1820 nxge_hio_rdc_share(nxge, vr, channel); 1821 1822 if (slot < 0) { 1823 if (type == MAC_RING_TYPE_RX) { 1824 nxge_hio_rdc_unshare(nxge, channel); 1825 } else { 1826 nxge_hio_tdc_unshare(nxge, channel); 1827 } 1828 return (slot); 1829 } 1830 1831 MUTEX_ENTER(&nhd->lock); 1832 1833 /* 1834 * Tag this channel. 1835 * -------------------------------------------------- 1836 */ 1837 dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 1838 1839 dc->vr = vr; 1840 dc->channel = (nxge_channel_t)channel; 1841 1842 MUTEX_EXIT(&nhd->lock); 1843 1844 /* 1845 * vr->[t|r]x_group is used by the service domain to 1846 * keep track of its shared DMA channels. 1847 */ 1848 MUTEX_ENTER(&nxge->group_lock); 1849 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 1850 1851 dc->group = (vr_handle_t)group; 1852 1853 /* Initialize <group>, if necessary */ 1854 if (group->count == 0) { 1855 group->nxge = nxge; 1856 group->type = (type == MAC_RING_TYPE_TX) ? 1857 VP_BOUND_TX : VP_BOUND_RX; 1858 group->sequence = nhd->sequence++; 1859 group->active = B_TRUE; 1860 } 1861 1862 MUTEX_EXIT(&nxge->group_lock); 1863 1864 NXGE_ERROR_MSG((nxge, HIO_CTL, 1865 "DC share: %cDC %d was assigned to slot %d", 1866 type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 1867 1868 nxge_grp_dc_append(nxge, group, dc); 1869 1870 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 1871 1872 return (0); 1873 } 1874 1875 /* 1876 * nxge_hio_tdc_unshare 1877 * 1878 * Unshare a TDC. 1879 * 1880 * Arguments: 1881 * nxge 1882 * channel The channel to unshare (add again). 1883 * 1884 * Notes: 1885 * 1886 * Context: 1887 * Service domain 1888 */ 1889 void 1890 nxge_hio_tdc_unshare( 1891 nxge_t *nxge, 1892 int channel) 1893 { 1894 nxge_grp_set_t *set = &nxge->tx_set; 1895 vr_handle_t handle = (vr_handle_t)set->group[0]; 1896 1897 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 1898 1899 NXGE_DC_RESET(set->shared.map, channel); 1900 set->shared.count--; 1901 1902 if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel))) { 1903 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 1904 "Failed to initialize TxDMA channel %d", channel)); 1905 return; 1906 } 1907 1908 /* Re-add this interrupt. */ 1909 if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 1910 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 1911 "Failed to add interrupt for TxDMA channel %d", channel)); 1912 } 1913 1914 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 1915 } 1916 1917 /* 1918 * nxge_hio_rdc_unshare 1919 * 1920 * Unshare an RDC: add it to the SD's RDC groups (tables). 1921 * 1922 * Arguments: 1923 * nxge 1924 * channel The channel to unshare (add again). 1925 * 1926 * Notes: 1927 * 1928 * Context: 1929 * Service domain 1930 */ 1931 void 1932 nxge_hio_rdc_unshare( 1933 nxge_t *nxge, 1934 int channel) 1935 { 1936 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1937 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1938 1939 nxge_grp_set_t *set = &nxge->rx_set; 1940 vr_handle_t handle = (vr_handle_t)set->group[0]; 1941 int current, last; 1942 1943 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 1944 1945 /* Stop RxMAC = A.9.2.6 */ 1946 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1947 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1948 "Failed to disable RxMAC")); 1949 } 1950 1951 /* Drain IPP Port = A.9.3.6 */ 1952 (void) nxge_ipp_drain(nxge); 1953 1954 /* Stop and reset RxDMA = A.9.5.3 */ 1955 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1956 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1957 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1958 "Failed to disable RxDMA channel %d", channel)); 1959 } 1960 1961 NXGE_DC_RESET(set->shared.map, channel); 1962 set->shared.count--; 1963 1964 /* 1965 * Assert RST: RXDMA_CFIG1[30] = 1 1966 * 1967 * Initialize RxDMA A.9.5.4 1968 * Reconfigure RxDMA 1969 * Enable RxDMA A.9.5.5 1970 */ 1971 if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_RX, channel))) { 1972 /* Be sure to re-enable the RX MAC. */ 1973 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1974 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1975 "nx_hio_rdc_share: Rx MAC still disabled")); 1976 } 1977 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1978 "Failed to initialize RxDMA channel %d", channel)); 1979 return; 1980 } 1981 1982 /* 1983 * We have to reconfigure the RDC table(s) 1984 * to which this channel once again belongs. 1985 */ 1986 current = hardware->def_mac_rxdma_grpid; 1987 last = current + hardware->max_rdc_grpids; 1988 for (; current < last; current++) { 1989 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1990 nxge_rdc_grp_t *group; 1991 group = &nxge->pt_config.rdc_grps[current]; 1992 group->map = set->owned.map; 1993 group->max_rdcs++; 1994 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1995 } 1996 } 1997 1998 /* 1999 * Enable RxMAC = A.9.2.10 2000 */ 2001 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 2002 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2003 "nx_hio_rdc_share: Rx MAC still disabled")); 2004 return; 2005 } 2006 2007 /* Re-add this interrupt. */ 2008 if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 2009 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2010 "nx_hio_rdc_unshare: Failed to add interrupt for " 2011 "RxDMA CHANNEL %d", channel)); 2012 } 2013 2014 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 2015 } 2016 2017 /* 2018 * nxge_hio_dc_unshare 2019 * 2020 * Unshare (reuse) a DMA channel. 2021 * 2022 * Arguments: 2023 * nxge 2024 * vr The VR that <channel> belongs to. 2025 * type Tx or Rx. 2026 * channel The DMA channel to reuse. 2027 * 2028 * Notes: 2029 * 2030 * Context: 2031 * Service domain 2032 */ 2033 void 2034 nxge_hio_dc_unshare( 2035 nxge_t *nxge, 2036 nxge_hio_vr_t *vr, 2037 mac_ring_type_t type, 2038 int channel) 2039 { 2040 nxge_grp_t *group; 2041 nxge_hio_dc_t *dc; 2042 2043 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 2044 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 2045 2046 /* Unlink the channel from its group. */ 2047 /* -------------------------------------------------- */ 2048 group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 2049 if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 2050 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2051 "nx_hio_dc_unshare(%d) failed", channel)); 2052 return; 2053 } 2054 2055 dc->vr = 0; 2056 dc->cookie = 0; 2057 2058 if (type == MAC_RING_TYPE_RX) { 2059 nxge_hio_rdc_unshare(nxge, channel); 2060 } else { 2061 nxge_hio_tdc_unshare(nxge, channel); 2062 } 2063 2064 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 2065 } 2066 2067 #endif /* if defined(sun4v) */ 2068