1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * nxge_hio.c 29 * 30 * This file manages the virtualization resources for Neptune 31 * devices. That is, it implements a hybrid I/O (HIO) approach in the 32 * Solaris kernel, whereby a guest domain on an LDOMs server may 33 * request & use hardware resources from the service domain. 34 * 35 */ 36 37 #include <sys/nxge/nxge_impl.h> 38 #include <sys/nxge/nxge_fzc.h> 39 #include <sys/nxge/nxge_rxdma.h> 40 #include <sys/nxge/nxge_txdma.h> 41 #include <sys/nxge/nxge_hio.h> 42 43 #define NXGE_HIO_SHARE_MIN_CHANNELS 2 44 #define NXGE_HIO_SHARE_MAX_CHANNELS 2 45 46 /* 47 * External prototypes 48 */ 49 extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t); 50 51 /* The following function may be found in nxge_main.c */ 52 extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot); 53 54 /* The following function may be found in nxge_[t|r]xdma.c */ 55 extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int); 56 extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t); 57 58 /* 59 * Local prototypes 60 */ 61 static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *); 62 static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int); 63 static void nxge_grp_dc_map(nxge_grp_t *group); 64 65 /* 66 * These functions are used by both service & guest domains to 67 * decide whether they're running in an LDOMs/XEN environment 68 * or not. If so, then the Hybrid I/O (HIO) module is initialized. 69 */ 70 71 /* 72 * nxge_get_environs 73 * 74 * Figure out if we are in a guest domain or not. 75 * 76 * Arguments: 77 * nxge 78 * 79 * Notes: 80 * 81 * Context: 82 * Any domain 83 */ 84 void 85 nxge_get_environs( 86 nxge_t *nxge) 87 { 88 char *string; 89 90 /* 91 * In the beginning, assume that we are running sans LDOMs/XEN. 92 */ 93 nxge->environs = SOLARIS_DOMAIN; 94 95 /* 96 * Are we a hybrid I/O (HIO) guest domain driver? 97 */ 98 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip, 99 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 100 "niutype", &string)) == DDI_PROP_SUCCESS) { 101 if (strcmp(string, "n2niu") == 0) { 102 nxge->environs = SOLARIS_GUEST_DOMAIN; 103 /* So we can allocate properly-aligned memory. */ 104 nxge->niu_type = N2_NIU; 105 NXGE_DEBUG_MSG((nxge, HIO_CTL, 106 "Hybrid IO-capable guest domain")); 107 } 108 ddi_prop_free(string); 109 } 110 } 111 112 #if !defined(sun4v) 113 114 /* 115 * nxge_hio_init 116 * 117 * Initialize the HIO module of the NXGE driver. 118 * 119 * Arguments: 120 * nxge 121 * 122 * Notes: 123 * This is the non-hybrid I/O version of this function. 
124 * 125 * Context: 126 * Any domain 127 */ 128 int 129 nxge_hio_init(nxge_t *nxge) 130 { 131 nxge_hio_data_t *nhd; 132 133 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 134 if (nhd == 0) { 135 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 136 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 137 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 138 } 139 140 nhd->hio.ldoms = B_FALSE; 141 142 return (NXGE_OK); 143 } 144 145 #endif 146 147 void 148 nxge_hio_uninit(nxge_t *nxge) 149 { 150 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 151 152 ASSERT(nxge->nxge_hw_p->ndevs == 0); 153 154 if (nhd != NULL) { 155 MUTEX_DESTROY(&nhd->lock); 156 KMEM_FREE(nhd, sizeof (*nhd)); 157 nxge->nxge_hw_p->hio = 0; 158 } 159 } 160 161 /* 162 * nxge_dci_map 163 * 164 * Map a DMA channel index to a channel number. 165 * 166 * Arguments: 167 * instance The instance number of the driver. 168 * type The type of channel this is: Tx or Rx. 169 * index The index to convert to a channel number 170 * 171 * Notes: 172 * This function is called by nxge_ndd.c:nxge_param_set_port_rdc() 173 * 174 * Context: 175 * Any domain 176 */ 177 int 178 nxge_dci_map( 179 nxge_t *nxge, 180 vpc_type_t type, 181 int index) 182 { 183 nxge_grp_set_t *set; 184 int dc; 185 186 switch (type) { 187 case VP_BOUND_TX: 188 set = &nxge->tx_set; 189 break; 190 case VP_BOUND_RX: 191 set = &nxge->rx_set; 192 break; 193 } 194 195 for (dc = 0; dc < NXGE_MAX_TDCS; dc++) { 196 if ((1 << dc) & set->owned.map) { 197 if (index == 0) 198 return (dc); 199 else 200 index--; 201 } 202 } 203 204 return (-1); 205 } 206 207 /* 208 * --------------------------------------------------------------------- 209 * These are the general-purpose DMA channel group functions. That is, 210 * these functions are used to manage groups of TDCs or RDCs in an HIO 211 * environment. 212 * 213 * But is also expected that in the future they will be able to manage 214 * Crossbow groups. 215 * --------------------------------------------------------------------- 216 */ 217 218 /* 219 * nxge_grp_add 220 * 221 * Add a group to an instance of NXGE. 222 * 223 * Arguments: 224 * nxge 225 * type Tx or Rx 226 * 227 * Notes: 228 * 229 * Context: 230 * Any domain 231 */ 232 nxge_grp_t * 233 nxge_grp_add( 234 nxge_t *nxge, 235 nxge_grp_type_t type) 236 { 237 nxge_grp_set_t *set; 238 nxge_grp_t *group; 239 int i; 240 241 group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP); 242 group->nxge = nxge; 243 244 MUTEX_ENTER(&nxge->group_lock); 245 switch (type) { 246 case NXGE_TRANSMIT_GROUP: 247 case EXT_TRANSMIT_GROUP: 248 set = &nxge->tx_set; 249 break; 250 default: 251 set = &nxge->rx_set; 252 break; 253 } 254 255 group->type = type; 256 group->active = B_TRUE; 257 group->sequence = set->sequence++; 258 259 /* Find an empty slot for this logical group. */ 260 for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 261 if (set->group[i] == 0) { 262 group->index = i; 263 set->group[i] = group; 264 NXGE_DC_SET(set->lg.map, i); 265 set->lg.count++; 266 break; 267 } 268 } 269 MUTEX_EXIT(&nxge->group_lock); 270 271 NXGE_DEBUG_MSG((nxge, HIO_CTL, 272 "nxge_grp_add: %cgroup = %d.%d", 273 type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 274 nxge->mac.portnum, group->sequence)); 275 276 return (group); 277 } 278 279 void 280 nxge_grp_remove( 281 nxge_t *nxge, 282 nxge_grp_t *group) /* The group to remove. 
*/ 283 { 284 nxge_grp_set_t *set; 285 vpc_type_t type; 286 287 MUTEX_ENTER(&nxge->group_lock); 288 switch (group->type) { 289 case NXGE_TRANSMIT_GROUP: 290 case EXT_TRANSMIT_GROUP: 291 set = &nxge->tx_set; 292 break; 293 default: 294 set = &nxge->rx_set; 295 break; 296 } 297 298 if (set->group[group->index] != group) { 299 MUTEX_EXIT(&nxge->group_lock); 300 return; 301 } 302 303 set->group[group->index] = 0; 304 NXGE_DC_RESET(set->lg.map, group->index); 305 set->lg.count--; 306 307 /* While inside the mutex, deactivate <group>. */ 308 group->active = B_FALSE; 309 310 MUTEX_EXIT(&nxge->group_lock); 311 312 NXGE_DEBUG_MSG((nxge, HIO_CTL, 313 "nxge_grp_remove(%c.%d.%d) called", 314 group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r', 315 nxge->mac.portnum, group->sequence)); 316 317 /* Now, remove any DCs which are still active. */ 318 switch (group->type) { 319 default: 320 type = VP_BOUND_TX; 321 break; 322 case NXGE_RECEIVE_GROUP: 323 case EXT_RECEIVE_GROUP: 324 type = VP_BOUND_RX; 325 } 326 327 while (group->dc) { 328 nxge_grp_dc_remove(nxge, type, group->dc->channel); 329 } 330 331 KMEM_FREE(group, sizeof (*group)); 332 } 333 334 /* 335 * nx_hio_dc_add 336 * 337 * Add a DMA channel to a VR/Group. 338 * 339 * Arguments: 340 * nxge 341 * channel The channel to add. 342 * Notes: 343 * 344 * Context: 345 * Any domain 346 */ 347 /* ARGSUSED */ 348 int 349 nxge_grp_dc_add( 350 nxge_t *nxge, 351 nxge_grp_t *group, /* The group to add <channel> to. */ 352 vpc_type_t type, /* Rx or Tx */ 353 int channel) /* A physical/logical channel number */ 354 { 355 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 356 nxge_hio_dc_t *dc; 357 nxge_grp_set_t *set; 358 nxge_status_t status = NXGE_OK; 359 360 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add")); 361 362 if (group == NULL) 363 return (0); 364 365 switch (type) { 366 default: 367 set = &nxge->tx_set; 368 if (channel > NXGE_MAX_TDCS) { 369 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 370 "nxge_grp_dc_add: TDC = %d", channel)); 371 return (NXGE_ERROR); 372 } 373 break; 374 case VP_BOUND_RX: 375 set = &nxge->rx_set; 376 if (channel > NXGE_MAX_RDCS) { 377 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 378 "nxge_grp_dc_add: RDC = %d", channel)); 379 return (NXGE_ERROR); 380 } 381 break; 382 } 383 384 NXGE_DEBUG_MSG((nxge, HIO_CTL, 385 "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d", 386 type == VP_BOUND_TX ? 't' : 'r', 387 nxge->mac.portnum, group->sequence, group->count, channel)); 388 389 MUTEX_ENTER(&nxge->group_lock); 390 if (group->active != B_TRUE) { 391 /* We may be in the process of removing this group. */ 392 MUTEX_EXIT(&nxge->group_lock); 393 return (NXGE_ERROR); 394 } 395 MUTEX_EXIT(&nxge->group_lock); 396 397 if (!(dc = nxge_grp_dc_find(nxge, type, channel))) { 398 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 399 "nxge_grp_dc_add(%d): DC FIND failed", channel)); 400 return (NXGE_ERROR); 401 } 402 403 MUTEX_ENTER(&nhd->lock); 404 405 if (dc->group) { 406 MUTEX_EXIT(&nhd->lock); 407 /* This channel is already in use! 
 */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = group;

	if (isLDOMguest(nxge))
		(void) nxge_hio_ldsv_add(nxge, dc);

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove: find(%d) failed", channel));
		return;
	}
	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
	if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
		NXGE_DC_RESET(group->map, channel);
	}

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nx_hio_dc_remove(%d) failed", channel));
		return;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	channel	The DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
	nxge_t *nxge,
	nxge_grp_t *group,
	int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay for <seconds> seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
700 * 701 * Context: 702 * Any domain 703 */ 704 void 705 nxge_delay( 706 int seconds) 707 { 708 delay(drv_usectohz(seconds * 1000000)); 709 } 710 711 static dmc_reg_name_t rx_names[] = { 712 { "RXDMA_CFIG1", 0 }, 713 { "RXDMA_CFIG2", 8 }, 714 { "RBR_CFIG_A", 0x10 }, 715 { "RBR_CFIG_B", 0x18 }, 716 { "RBR_KICK", 0x20 }, 717 { "RBR_STAT", 0x28 }, 718 { "RBR_HDH", 0x30 }, 719 { "RBR_HDL", 0x38 }, 720 { "RCRCFIG_A", 0x40 }, 721 { "RCRCFIG_B", 0x48 }, 722 { "RCRSTAT_A", 0x50 }, 723 { "RCRSTAT_B", 0x58 }, 724 { "RCRSTAT_C", 0x60 }, 725 { "RX_DMA_ENT_MSK", 0x68 }, 726 { "RX_DMA_CTL_STAT", 0x70 }, 727 { "RCR_FLSH", 0x78 }, 728 { "RXMISC", 0x90 }, 729 { "RX_DMA_CTL_STAT_DBG", 0x98 }, 730 { 0, -1 } 731 }; 732 733 static dmc_reg_name_t tx_names[] = { 734 { "Tx_RNG_CFIG", 0 }, 735 { "Tx_RNG_HDL", 0x10 }, 736 { "Tx_RNG_KICK", 0x18 }, 737 { "Tx_ENT_MASK", 0x20 }, 738 { "Tx_CS", 0x28 }, 739 { "TxDMA_MBH", 0x30 }, 740 { "TxDMA_MBL", 0x38 }, 741 { "TxDMA_PRE_ST", 0x40 }, 742 { "Tx_RNG_ERR_LOGH", 0x48 }, 743 { "Tx_RNG_ERR_LOGL", 0x50 }, 744 { "TDMC_INTR_DBG", 0x60 }, 745 { "Tx_CS_DBG", 0x68 }, 746 { 0, -1 } 747 }; 748 749 /* 750 * nxge_xx2str 751 * 752 * Translate a register address into a string. 753 * 754 * Arguments: 755 * offset The address of the register to translate. 756 * 757 * Notes: 758 * These are developer-only function. 759 * 760 * Context: 761 * Any domain 762 */ 763 const char * 764 nxge_rx2str( 765 int offset) 766 { 767 dmc_reg_name_t *reg = &rx_names[0]; 768 769 offset &= DMA_CSR_MASK; 770 771 while (reg->name) { 772 if (offset == reg->offset) 773 return (reg->name); 774 reg++; 775 } 776 777 return (0); 778 } 779 780 const char * 781 nxge_tx2str( 782 int offset) 783 { 784 dmc_reg_name_t *reg = &tx_names[0]; 785 786 offset &= DMA_CSR_MASK; 787 788 while (reg->name) { 789 if (offset == reg->offset) 790 return (reg->name); 791 reg++; 792 } 793 794 return (0); 795 } 796 797 /* 798 * nxge_ddi_perror 799 * 800 * Map a DDI error number to a string. 801 * 802 * Arguments: 803 * ddi_error The DDI error number to map. 
804 * 805 * Notes: 806 * 807 * Context: 808 * Any domain 809 */ 810 const char * 811 nxge_ddi_perror( 812 int ddi_error) 813 { 814 switch (ddi_error) { 815 case DDI_SUCCESS: 816 return ("DDI_SUCCESS"); 817 case DDI_FAILURE: 818 return ("DDI_FAILURE"); 819 case DDI_NOT_WELL_FORMED: 820 return ("DDI_NOT_WELL_FORMED"); 821 case DDI_EAGAIN: 822 return ("DDI_EAGAIN"); 823 case DDI_EINVAL: 824 return ("DDI_EINVAL"); 825 case DDI_ENOTSUP: 826 return ("DDI_ENOTSUP"); 827 case DDI_EPENDING: 828 return ("DDI_EPENDING"); 829 case DDI_ENOMEM: 830 return ("DDI_ENOMEM"); 831 case DDI_EBUSY: 832 return ("DDI_EBUSY"); 833 case DDI_ETRANSPORT: 834 return ("DDI_ETRANSPORT"); 835 case DDI_ECONTEXT: 836 return ("DDI_ECONTEXT"); 837 default: 838 return ("Unknown error"); 839 } 840 } 841 842 /* 843 * --------------------------------------------------------------------- 844 * These are Sun4v HIO function definitions 845 * --------------------------------------------------------------------- 846 */ 847 848 #if defined(sun4v) 849 850 /* 851 * Local prototypes 852 */ 853 static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *); 854 855 static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t); 856 static void nxge_hio_unshare(nxge_hio_vr_t *); 857 858 static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, int); 859 static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t); 860 861 static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel); 862 static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel); 863 static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *, 864 mac_ring_type_t, int); 865 866 /* 867 * nxge_hio_init 868 * 869 * Initialize the HIO module of the NXGE driver. 870 * 871 * Arguments: 872 * nxge 873 * 874 * Notes: 875 * 876 * Context: 877 * Any domain 878 */ 879 int 880 nxge_hio_init( 881 nxge_t *nxge) 882 { 883 nxge_hio_data_t *nhd; 884 int i, region; 885 886 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 887 if (nhd == 0) { 888 nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP); 889 MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL); 890 nxge->nxge_hw_p->hio = (uintptr_t)nhd; 891 } 892 893 if ((nxge->environs == SOLARIS_DOMAIN) && 894 (nxge->niu_type == N2_NIU)) { 895 if (nxge->niu_hsvc_available == B_TRUE) { 896 hsvc_info_t *niu_hsvc = &nxge->niu_hsvc; 897 if (niu_hsvc->hsvc_major == 1 && 898 niu_hsvc->hsvc_minor == 1) 899 nxge->environs = SOLARIS_SERVICE_DOMAIN; 900 NXGE_DEBUG_MSG((nxge, HIO_CTL, 901 "nxge_hio_init: hypervisor services " 902 "version %d.%d", 903 niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor)); 904 } 905 } 906 907 if (!isLDOMs(nxge)) { 908 nhd->hio.ldoms = B_FALSE; 909 return (NXGE_OK); 910 } 911 912 nhd->hio.ldoms = B_TRUE; 913 914 /* 915 * Fill in what we can. 916 */ 917 for (region = 0; region < NXGE_VR_SR_MAX; region++) { 918 nhd->vr[region].region = region; 919 } 920 nhd->vrs = NXGE_VR_SR_MAX - 2; 921 922 /* 923 * Initialize share and ring group structures. 924 */ 925 for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) { 926 nxge->rx_hio_groups[i].ghandle = NULL; 927 nxge->rx_hio_groups[i].nxgep = nxge; 928 nxge->rx_hio_groups[i].gindex = 0; 929 nxge->rx_hio_groups[i].sindex = 0; 930 } 931 932 for (i = 0; i < NXGE_VR_SR_MAX; i++) { 933 nxge->shares[i].nxgep = nxge; 934 nxge->shares[i].index = 0; 935 nxge->shares[i].vrp = (void *)NULL; 936 nxge->shares[i].tmap = 0; 937 nxge->shares[i].rmap = 0; 938 nxge->shares[i].rxgroup = 0; 939 nxge->shares[i].active = B_FALSE; 940 } 941 942 /* Fill in the HV HIO function pointers. 
*/ 943 nxge_hio_hv_init(nxge); 944 945 if (isLDOMservice(nxge)) { 946 NXGE_DEBUG_MSG((nxge, HIO_CTL, 947 "Hybrid IO-capable service domain")); 948 return (NXGE_OK); 949 } else { 950 /* 951 * isLDOMguest(nxge) == B_TRUE 952 */ 953 nx_vio_fp_t *vio; 954 nhd->type = NXGE_HIO_TYPE_GUEST; 955 956 vio = &nhd->hio.vio; 957 vio->__register = (vio_net_resource_reg_t) 958 modgetsymvalue("vio_net_resource_reg", 0); 959 vio->unregister = (vio_net_resource_unreg_t) 960 modgetsymvalue("vio_net_resource_unreg", 0); 961 962 if (vio->__register == 0 || vio->unregister == 0) { 963 NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!")); 964 return (NXGE_ERROR); 965 } 966 } 967 968 return (0); 969 } 970 971 static int 972 nxge_hio_add_mac(void *arg, const uint8_t *mac_addr) 973 { 974 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 975 p_nxge_t nxge = rxgroup->nxgep; 976 int group = rxgroup->gindex; 977 int rv, sindex; 978 nxge_hio_vr_t *vr; /* The Virtualization Region */ 979 980 sindex = nxge->rx_hio_groups[group].sindex; 981 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 982 983 /* 984 * Program the mac address for the group/share. 985 */ 986 if ((rv = nxge_hio_hostinfo_init(nxge, vr, 987 (ether_addr_t *)mac_addr)) != 0) { 988 return (rv); 989 } 990 991 return (0); 992 } 993 994 /* ARGSUSED */ 995 static int 996 nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr) 997 { 998 nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg; 999 p_nxge_t nxge = rxgroup->nxgep; 1000 int group = rxgroup->gindex; 1001 int sindex; 1002 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1003 1004 sindex = nxge->rx_hio_groups[group].sindex; 1005 vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp; 1006 1007 /* 1008 * Remove the mac address for the group/share. 1009 */ 1010 nxge_hio_hostinfo_uninit(nxge, vr); 1011 1012 return (0); 1013 } 1014 1015 /* ARGSUSED */ 1016 void 1017 nxge_hio_group_get(void *arg, mac_ring_type_t type, int group, 1018 mac_group_info_t *infop, mac_group_handle_t ghdl) 1019 { 1020 p_nxge_t nxgep = (p_nxge_t)arg; 1021 nxge_rx_ring_group_t *rxgroup; 1022 1023 switch (type) { 1024 case MAC_RING_TYPE_RX: 1025 rxgroup = &nxgep->rx_hio_groups[group]; 1026 rxgroup->gindex = group; 1027 1028 infop->mrg_driver = (mac_group_driver_t)rxgroup; 1029 infop->mrg_start = NULL; 1030 infop->mrg_stop = NULL; 1031 infop->mrg_addmac = nxge_hio_add_mac; 1032 infop->mrg_remmac = nxge_hio_rem_mac; 1033 infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS; 1034 break; 1035 1036 case MAC_RING_TYPE_TX: 1037 break; 1038 } 1039 } 1040 1041 int 1042 nxge_hio_share_assign( 1043 nxge_t *nxge, 1044 uint64_t cookie, 1045 res_map_t *tmap, 1046 res_map_t *rmap, 1047 nxge_hio_vr_t *vr) 1048 { 1049 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1050 uint64_t slot, hv_rv; 1051 nxge_hio_dc_t *dc; 1052 nxhv_vr_fp_t *fp; 1053 int i; 1054 1055 /* 1056 * Ask the Hypervisor to set up the VR for us 1057 */ 1058 fp = &nhd->hio.vr; 1059 if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) { 1060 NXGE_ERROR_MSG((nxge, HIO_CTL, 1061 "nx_hio_share_assign: " 1062 "vr->assign() returned %d", hv_rv)); 1063 nxge_hio_unshare(vr); 1064 return (-EIO); 1065 } 1066 1067 /* 1068 * For each shared TDC, ask the HV to find us an empty slot. 
1069 * ----------------------------------------------------- 1070 */ 1071 dc = vr->tx_group.dc; 1072 for (i = 0; i < NXGE_MAX_TDCS; i++) { 1073 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1074 while (dc) { 1075 hv_rv = (*tx->assign) 1076 (vr->cookie, dc->channel, &slot); 1077 if (hv_rv != 0) { 1078 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1079 "nx_hio_share_assign: " 1080 "tx->assign(%x, %d) failed: %ld", 1081 vr->cookie, dc->channel, hv_rv)); 1082 return (-EIO); 1083 } 1084 1085 dc->cookie = vr->cookie; 1086 dc->page = (vp_channel_t)slot; 1087 1088 /* Inform the caller about the slot chosen. */ 1089 (*tmap) |= 1 << slot; 1090 1091 dc = dc->next; 1092 } 1093 } 1094 1095 /* 1096 * For each shared RDC, ask the HV to find us an empty slot. 1097 * ----------------------------------------------------- 1098 */ 1099 dc = vr->rx_group.dc; 1100 for (i = 0; i < NXGE_MAX_RDCS; i++) { 1101 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1102 while (dc) { 1103 hv_rv = (*rx->assign) 1104 (vr->cookie, dc->channel, &slot); 1105 if (hv_rv != 0) { 1106 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1107 "nx_hio_share_assign: " 1108 "rx->assign(%x, %d) failed: %ld", 1109 vr->cookie, dc->channel, hv_rv)); 1110 return (-EIO); 1111 } 1112 1113 dc->cookie = vr->cookie; 1114 dc->page = (vp_channel_t)slot; 1115 1116 /* Inform the caller about the slot chosen. */ 1117 (*rmap) |= 1 << slot; 1118 1119 dc = dc->next; 1120 } 1121 } 1122 1123 return (0); 1124 } 1125 1126 int 1127 nxge_hio_share_unassign( 1128 nxge_hio_vr_t *vr) 1129 { 1130 nxge_t *nxge = (nxge_t *)vr->nxge; 1131 nxge_hio_data_t *nhd; 1132 nxge_hio_dc_t *dc; 1133 nxhv_vr_fp_t *fp; 1134 uint64_t hv_rv; 1135 1136 nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1137 1138 dc = vr->tx_group.dc; 1139 while (dc) { 1140 nxhv_dc_fp_t *tx = &nhd->hio.tx; 1141 hv_rv = (*tx->unassign)(vr->cookie, dc->page); 1142 if (hv_rv != 0) { 1143 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1144 "nx_hio_dc_unshare: " 1145 "tx->unassign(%x, %d) failed: %ld", 1146 vr->cookie, dc->page, hv_rv)); 1147 } 1148 dc = dc->next; 1149 } 1150 1151 dc = vr->rx_group.dc; 1152 while (dc) { 1153 nxhv_dc_fp_t *rx = &nhd->hio.rx; 1154 hv_rv = (*rx->unassign)(vr->cookie, dc->page); 1155 if (hv_rv != 0) { 1156 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1157 "nx_hio_dc_unshare: " 1158 "rx->unassign(%x, %d) failed: %ld", 1159 vr->cookie, dc->page, hv_rv)); 1160 } 1161 dc = dc->next; 1162 } 1163 1164 fp = &nhd->hio.vr; 1165 if (fp->unassign) { 1166 hv_rv = (*fp->unassign)(vr->cookie); 1167 if (hv_rv != 0) { 1168 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: " 1169 "vr->assign(%x) failed: %ld", 1170 vr->cookie, hv_rv)); 1171 } 1172 } 1173 1174 return (0); 1175 } 1176 1177 int 1178 nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie, 1179 mac_share_handle_t *shandle) 1180 { 1181 p_nxge_t nxge = (p_nxge_t)arg; 1182 nxge_rx_ring_group_t *rxgroup; 1183 nxge_share_handle_t *shp; 1184 1185 nxge_hio_vr_t *vr; /* The Virtualization Region */ 1186 uint64_t rmap, tmap; 1187 int rv; 1188 1189 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1190 1191 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share")); 1192 1193 if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 || 1194 nhd->hio.rx.assign == 0) { 1195 NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 1196 return (EIO); 1197 } 1198 1199 /* 1200 * Get a VR. 1201 */ 1202 if ((vr = nxge_hio_vr_share(nxge)) == 0) 1203 return (EAGAIN); 1204 1205 /* 1206 * Get an RDC group for us to use. 
1207 */ 1208 if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) { 1209 nxge_hio_unshare(vr); 1210 return (EBUSY); 1211 } 1212 1213 /* 1214 * Add resources to the share. 1215 */ 1216 tmap = 0; 1217 rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, 1218 NXGE_HIO_SHARE_MAX_CHANNELS); 1219 if (rv != 0) { 1220 nxge_hio_unshare(vr); 1221 return (rv); 1222 } 1223 1224 rmap = 0; 1225 rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, 1226 NXGE_HIO_SHARE_MAX_CHANNELS); 1227 if (rv != 0) { 1228 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 1229 nxge_hio_unshare(vr); 1230 return (rv); 1231 } 1232 1233 if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) { 1234 nxge_hio_remres(vr, MAC_RING_TYPE_RX, tmap); 1235 nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap); 1236 nxge_hio_unshare(vr); 1237 return (rv); 1238 } 1239 1240 rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl]; 1241 rxgroup->gindex = vr->rdc_tbl; 1242 rxgroup->sindex = vr->region; 1243 1244 shp = &nxge->shares[vr->region]; 1245 shp->index = vr->region; 1246 shp->vrp = (void *)vr; 1247 shp->tmap = tmap; 1248 shp->rmap = rmap; 1249 shp->rxgroup = vr->rdc_tbl; 1250 shp->active = B_TRUE; 1251 1252 /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */ 1253 *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie; 1254 1255 *shandle = (mac_share_handle_t)shp; 1256 1257 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 1258 return (0); 1259 } 1260 1261 void 1262 nxge_hio_share_free(mac_share_handle_t shandle) 1263 { 1264 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1265 1266 /* 1267 * First, unassign the VR (take it back), 1268 * so we can enable interrupts again. 1269 */ 1270 (void) nxge_hio_share_unassign(shp->vrp); 1271 1272 /* 1273 * Free Ring Resources for TX and RX 1274 */ 1275 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap); 1276 nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap); 1277 1278 /* 1279 * Free VR resource. 1280 */ 1281 nxge_hio_unshare(shp->vrp); 1282 1283 /* 1284 * Clear internal handle state. 1285 */ 1286 shp->index = 0; 1287 shp->vrp = (void *)NULL; 1288 shp->tmap = 0; 1289 shp->rmap = 0; 1290 shp->rxgroup = 0; 1291 shp->active = B_FALSE; 1292 } 1293 1294 void 1295 nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 1296 uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum) 1297 { 1298 nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1299 1300 switch (type) { 1301 case MAC_RING_TYPE_RX: 1302 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1303 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1304 *rmap = shp->rmap; 1305 *gnum = shp->rxgroup; 1306 break; 1307 1308 case MAC_RING_TYPE_TX: 1309 *rmin = NXGE_HIO_SHARE_MIN_CHANNELS; 1310 *rmax = NXGE_HIO_SHARE_MAX_CHANNELS; 1311 *rmap = shp->tmap; 1312 *gnum = 0; 1313 break; 1314 } 1315 } 1316 1317 /* 1318 * nxge_hio_vr_share 1319 * 1320 * Find an unused Virtualization Region (VR). 1321 * 1322 * Arguments: 1323 * nxge 1324 * 1325 * Notes: 1326 * 1327 * Context: 1328 * Service domain 1329 */ 1330 nxge_hio_vr_t * 1331 nxge_hio_vr_share( 1332 nxge_t *nxge) 1333 { 1334 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1335 nxge_hio_vr_t *vr; 1336 1337 int first, limit, region; 1338 1339 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share")); 1340 1341 MUTEX_ENTER(&nhd->lock); 1342 1343 if (nhd->vrs == 0) { 1344 MUTEX_EXIT(&nhd->lock); 1345 return (0); 1346 } 1347 1348 /* Find an empty virtual region (VR). */ 1349 if (nxge->function_num == 0) { 1350 // FUNC0_VIR0 'belongs' to NIU port 0. 
		first = FUNC0_VIR1;
		limit = FUNC2_VIR0;
	} else if (nxge->function_num == 1) {
		// FUNC2_VIR0 'belongs' to NIU port 1.
		first = FUNC2_VIR1;
		limit = FUNC_VIR_MAX;
	} else {
		cmn_err(CE_WARN,
		    "Shares not supported on function(%d) at this time.\n",
		    nxge->function_num);
		/* <first> and <limit> are not set in this case. */
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	for (region = first; region < limit; region++) {
		if (nhd->vr[region].nxge == 0)
			break;
	}

	if (region == limit) {
		MUTEX_EXIT(&nhd->lock);
		return (0);
	}

	vr = &nhd->vr[region];
	vr->nxge = (uintptr_t)nxge;
	vr->region = (uintptr_t)region;

	nhd->vrs--;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

	return (vr);
}

void
nxge_hio_unshare(
	nxge_hio_vr_t *vr)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_hio_data_t *nhd;

	vr_region_t region;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
		    "vr->nxge is NULL"));
		return;
	}

	/*
	 * This function is no longer called, but I will keep it
	 * here in case we want to revisit this topic in the future.
	 *
	 * nxge_hio_hostinfo_uninit(nxge, vr);
	 */
	(void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	MUTEX_ENTER(&nhd->lock);

	region = vr->region;
	(void) memset(vr, 0, sizeof (*vr));
	vr->region = region;

	nhd->vrs++;

	MUTEX_EXIT(&nhd->lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}

int
nxge_hio_addres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	int count)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

	if (!nxge)
		return (EINVAL);

	for (i = 0; i < count; i++) {
		int rv;
		if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
			if (i == 0) /* Couldn't get even one DC. */
				return (-rv);
			else
				break;
		}
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

	return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
	nxge_hio_vr_t *vr,
	mac_ring_type_t type,
	res_map_t res_map)
{
	nxge_t *nxge = (nxge_t *)vr->nxge;
	nxge_grp_t *group;

	if (!nxge) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
		    "vr->nxge is NULL"));
		return;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
	while (group->dc) {
		nxge_hio_dc_t *dc = group->dc;
		NXGE_DC_RESET(res_map, dc->page);
		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
	}

	if (res_map) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
		    "res_map %lx", res_map));
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 * Share an unused TDC channel.
 *
 * Arguments:
 * 	nxge
 * 	channel	The channel to share.
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *	Disable TxDMA			A.9.6.10
 *	[Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to rebind the TDC to the port - it is already bound.
1503 * 1504 * Soft Reset TxDMA A.9.6.2 1505 * 1506 * This procedure will be executed by nxge_init_txdma_channel() in the 1507 * guest domain: 1508 * 1509 * Re-initialize TxDMA A.9.6.8 1510 * Reconfigure TxDMA 1511 * Enable TxDMA A.9.6.9 1512 * 1513 * Context: 1514 * Service domain 1515 */ 1516 int 1517 nxge_hio_tdc_share( 1518 nxge_t *nxge, 1519 int channel) 1520 { 1521 nxge_grp_set_t *set = &nxge->tx_set; 1522 tx_ring_t *ring; 1523 int count; 1524 1525 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 1526 1527 /* 1528 * Wait until this channel is idle. 1529 */ 1530 ring = nxge->tx_rings->rings[channel]; 1531 1532 (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING); 1533 if (ring->tx_ring_busy) { 1534 /* 1535 * Wait for 30 seconds. 1536 */ 1537 for (count = 30 * 1000; count; count--) { 1538 if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) { 1539 break; 1540 } 1541 1542 drv_usecwait(1000); 1543 } 1544 1545 if (count == 0) { 1546 (void) atomic_swap_32(&ring->tx_ring_offline, 1547 NXGE_TX_RING_ONLINE); 1548 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 1549 "Tx ring %d was always BUSY", channel)); 1550 return (-EIO); 1551 } 1552 } else { 1553 (void) atomic_swap_32(&ring->tx_ring_offline, 1554 NXGE_TX_RING_OFFLINED); 1555 } 1556 1557 if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 1558 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: " 1559 "Failed to remove interrupt for TxDMA channel %d", 1560 channel)); 1561 return (NXGE_ERROR); 1562 } 1563 1564 /* Disable TxDMA A.9.6.10 */ 1565 (void) nxge_txdma_channel_disable(nxge, channel); 1566 1567 /* The SD is sharing this channel. */ 1568 NXGE_DC_SET(set->shared.map, channel); 1569 set->shared.count++; 1570 1571 /* Soft Reset TxDMA A.9.6.2 */ 1572 nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 1573 1574 /* 1575 * Initialize the DC-specific FZC control registers. 1576 * ----------------------------------------------------- 1577 */ 1578 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 1579 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1580 "nx_hio_dc_share: FZC TDC failed: %d", channel)); 1581 return (-EIO); 1582 } 1583 1584 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 1585 1586 return (0); 1587 } 1588 1589 /* 1590 * nxge_hio_rdc_share 1591 * 1592 * Share an unused RDC channel. 1593 * 1594 * Arguments: 1595 * nxge 1596 * 1597 * Notes: 1598 * 1599 * This is the latest version of the procedure to 1600 * Reconfigure an Rx DMA channel: 1601 * 1602 * A.6.3 Reconfigure Rx DMA channel 1603 * Stop RxMAC A.9.2.6 1604 * Drain IPP Port A.9.3.6 1605 * Stop and reset RxDMA A.9.5.3 1606 * 1607 * This procedure will be executed by nxge_init_rxdma_channel() in the 1608 * guest domain: 1609 * 1610 * Initialize RxDMA A.9.5.4 1611 * Reconfigure RxDMA 1612 * Enable RxDMA A.9.5.5 1613 * 1614 * We will do this here, since the RDC is a canalis non grata: 1615 * Enable RxMAC A.9.2.10 1616 * 1617 * Context: 1618 * Service domain 1619 */ 1620 int 1621 nxge_hio_rdc_share( 1622 nxge_t *nxge, 1623 nxge_hio_vr_t *vr, 1624 int channel) 1625 { 1626 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1627 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1628 nxge_grp_set_t *set = &nxge->rx_set; 1629 nxge_rdc_grp_t *rdc_grp; 1630 1631 int current, last; 1632 1633 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 1634 1635 /* Disable interrupts. 
*/ 1636 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1637 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 1638 "Failed to remove interrupt for RxDMA channel %d", 1639 channel)); 1640 return (NXGE_ERROR); 1641 } 1642 1643 /* Stop RxMAC = A.9.2.6 */ 1644 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1645 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1646 "Failed to disable RxMAC")); 1647 } 1648 1649 /* Drain IPP Port = A.9.3.6 */ 1650 (void) nxge_ipp_drain(nxge); 1651 1652 /* Stop and reset RxDMA = A.9.5.3 */ 1653 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1654 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1655 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1656 "Failed to disable RxDMA channel %d", channel)); 1657 } 1658 1659 /* The SD is sharing this channel. */ 1660 NXGE_DC_SET(set->shared.map, channel); 1661 set->shared.count++; 1662 1663 // Assert RST: RXDMA_CFIG1[30] = 1 1664 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 1665 1666 /* 1667 * We have to reconfigure the RDC table(s) 1668 * to which this channel belongs. 1669 */ 1670 current = hardware->def_mac_rxdma_grpid; 1671 last = current + hardware->max_rdc_grpids; 1672 for (; current < last; current++) { 1673 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1674 rdc_grp = &nxge->pt_config.rdc_grps[current]; 1675 rdc_grp->map = set->owned.map; 1676 rdc_grp->max_rdcs--; 1677 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1678 } 1679 } 1680 1681 /* 1682 * The guest domain will reconfigure the RDC later. 1683 * 1684 * But in the meantime, we must re-enable the Rx MAC so 1685 * that we can start receiving packets again on the 1686 * remaining RDCs: 1687 * 1688 * Enable RxMAC = A.9.2.10 1689 */ 1690 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1691 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1692 "nx_hio_rdc_share: Rx MAC still disabled")); 1693 } 1694 1695 /* 1696 * Initialize the DC-specific FZC control registers. 1697 * ----------------------------------------------------- 1698 */ 1699 if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 1700 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1701 "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 1702 return (-EIO); 1703 } 1704 1705 /* 1706 * We have to initialize the guest's RDC table, too. 1707 * ----------------------------------------------------- 1708 */ 1709 rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 1710 if (rdc_grp->max_rdcs == 0) { 1711 rdc_grp->start_rdc = (uint8_t)channel; 1712 rdc_grp->def_rdc = (uint8_t)channel; 1713 rdc_grp->max_rdcs = 1; 1714 } else { 1715 rdc_grp->max_rdcs++; 1716 } 1717 NXGE_DC_SET(rdc_grp->map, channel); 1718 1719 if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 1720 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1721 "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 1722 return (-EIO); 1723 } 1724 1725 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 1726 1727 return (0); 1728 } 1729 1730 /* 1731 * nxge_hio_dc_share 1732 * 1733 * Share a DMA channel with a guest domain. 1734 * 1735 * Arguments: 1736 * nxge 1737 * vr The VR that <channel> will belong to. 1738 * type Tx or Rx. 1739 * res_map The resource map used by the caller, which we will 1740 * update if successful. 
1741 * 1742 * Notes: 1743 * 1744 * Context: 1745 * Service domain 1746 */ 1747 int 1748 nxge_hio_dc_share( 1749 nxge_t *nxge, 1750 nxge_hio_vr_t *vr, 1751 mac_ring_type_t type) 1752 { 1753 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1754 nxge_hw_pt_cfg_t *hardware; 1755 nxge_hio_dc_t *dc; 1756 int channel, limit; 1757 1758 nxge_grp_set_t *set; 1759 nxge_grp_t *group; 1760 1761 int slot; 1762 1763 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 1764 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 1765 1766 /* 1767 * In version 1.0, we may only give a VR 2 RDCs or TDCs. 1768 * Not only that, but the HV has statically assigned the 1769 * channels like so: 1770 * VR0: RDC0 & RDC1 1771 * VR1: RDC2 & RDC3, etc. 1772 * The TDCs are assigned in exactly the same way. 1773 * 1774 * So, for example 1775 * hardware->start_rdc + vr->region * 2; 1776 * VR1: hardware->start_rdc + 1 * 2; 1777 * VR3: hardware->start_rdc + 3 * 2; 1778 * If start_rdc is 0, we end up with 2 or 6. 1779 * If start_rdc is 8, we end up with 10 or 14. 1780 */ 1781 1782 set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set); 1783 hardware = &nxge->pt_config.hw_config; 1784 1785 // This code is still NIU-specific (assuming only 2 ports) 1786 channel = hardware->start_rdc + (vr->region % 4) * 2; 1787 limit = channel + 2; 1788 1789 MUTEX_ENTER(&nhd->lock); 1790 for (; channel < limit; channel++) { 1791 if ((1 << channel) & set->owned.map) { 1792 break; 1793 } 1794 } 1795 1796 if (channel == limit) { 1797 MUTEX_EXIT(&nhd->lock); 1798 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1799 "nx_hio_dc_share: there are no channels to share")); 1800 return (-EIO); 1801 } 1802 1803 MUTEX_EXIT(&nhd->lock); 1804 1805 /* -------------------------------------------------- */ 1806 slot = (type == MAC_RING_TYPE_TX) ? 1807 nxge_hio_tdc_share(nxge, channel) : 1808 nxge_hio_rdc_share(nxge, vr, channel); 1809 1810 if (slot < 0) { 1811 if (type == MAC_RING_TYPE_RX) { 1812 nxge_hio_rdc_unshare(nxge, channel); 1813 } else { 1814 nxge_hio_tdc_unshare(nxge, channel); 1815 } 1816 return (slot); 1817 } 1818 1819 MUTEX_ENTER(&nhd->lock); 1820 1821 /* 1822 * Tag this channel. 1823 * -------------------------------------------------- 1824 */ 1825 dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 1826 1827 dc->vr = vr; 1828 dc->channel = (nxge_channel_t)channel; 1829 1830 MUTEX_EXIT(&nhd->lock); 1831 1832 /* 1833 * vr->[t|r]x_group is used by the service domain to 1834 * keep track of its shared DMA channels. 1835 */ 1836 MUTEX_ENTER(&nxge->group_lock); 1837 group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 1838 1839 dc->group = group; 1840 1841 /* Initialize <group>, if necessary */ 1842 if (group->count == 0) { 1843 group->nxge = nxge; 1844 group->type = (type == MAC_RING_TYPE_TX) ? 1845 VP_BOUND_TX : VP_BOUND_RX; 1846 group->sequence = nhd->sequence++; 1847 group->active = B_TRUE; 1848 } 1849 1850 MUTEX_EXIT(&nxge->group_lock); 1851 1852 NXGE_ERROR_MSG((nxge, HIO_CTL, 1853 "DC share: %cDC %d was assigned to slot %d", 1854 type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 1855 1856 nxge_grp_dc_append(nxge, group, dc); 1857 1858 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 1859 1860 return (0); 1861 } 1862 1863 /* 1864 * nxge_hio_tdc_unshare 1865 * 1866 * Unshare a TDC. 1867 * 1868 * Arguments: 1869 * nxge 1870 * channel The channel to unshare (add again). 
1871 * 1872 * Notes: 1873 * 1874 * Context: 1875 * Service domain 1876 */ 1877 void 1878 nxge_hio_tdc_unshare( 1879 nxge_t *nxge, 1880 int channel) 1881 { 1882 nxge_grp_set_t *set = &nxge->tx_set; 1883 nxge_grp_t *group = set->group[0]; 1884 1885 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 1886 1887 NXGE_DC_RESET(set->shared.map, channel); 1888 set->shared.count--; 1889 1890 if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 1891 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 1892 "Failed to initialize TxDMA channel %d", channel)); 1893 return; 1894 } 1895 1896 /* Re-add this interrupt. */ 1897 if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 1898 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 1899 "Failed to add interrupt for TxDMA channel %d", channel)); 1900 } 1901 1902 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 1903 } 1904 1905 /* 1906 * nxge_hio_rdc_unshare 1907 * 1908 * Unshare an RDC: add it to the SD's RDC groups (tables). 1909 * 1910 * Arguments: 1911 * nxge 1912 * channel The channel to unshare (add again). 1913 * 1914 * Notes: 1915 * 1916 * Context: 1917 * Service domain 1918 */ 1919 void 1920 nxge_hio_rdc_unshare( 1921 nxge_t *nxge, 1922 int channel) 1923 { 1924 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1925 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1926 1927 nxge_grp_set_t *set = &nxge->rx_set; 1928 nxge_grp_t *group = set->group[0]; 1929 int current, last; 1930 1931 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare")); 1932 1933 /* Stop RxMAC = A.9.2.6 */ 1934 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1935 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1936 "Failed to disable RxMAC")); 1937 } 1938 1939 /* Drain IPP Port = A.9.3.6 */ 1940 (void) nxge_ipp_drain(nxge); 1941 1942 /* Stop and reset RxDMA = A.9.5.3 */ 1943 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1944 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1945 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1946 "Failed to disable RxDMA channel %d", channel)); 1947 } 1948 1949 NXGE_DC_RESET(set->shared.map, channel); 1950 set->shared.count--; 1951 1952 /* 1953 * Assert RST: RXDMA_CFIG1[30] = 1 1954 * 1955 * Initialize RxDMA A.9.5.4 1956 * Reconfigure RxDMA 1957 * Enable RxDMA A.9.5.5 1958 */ 1959 if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) { 1960 /* Be sure to re-enable the RX MAC. */ 1961 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1962 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1963 "nx_hio_rdc_share: Rx MAC still disabled")); 1964 } 1965 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: " 1966 "Failed to initialize RxDMA channel %d", channel)); 1967 return; 1968 } 1969 1970 /* 1971 * We have to reconfigure the RDC table(s) 1972 * to which this channel once again belongs. 1973 */ 1974 current = hardware->def_mac_rxdma_grpid; 1975 last = current + hardware->max_rdc_grpids; 1976 for (; current < last; current++) { 1977 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1978 nxge_rdc_grp_t *group; 1979 group = &nxge->pt_config.rdc_grps[current]; 1980 group->map = set->owned.map; 1981 group->max_rdcs++; 1982 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1983 } 1984 } 1985 1986 /* 1987 * Enable RxMAC = A.9.2.10 1988 */ 1989 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1990 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1991 "nx_hio_rdc_share: Rx MAC still disabled")); 1992 return; 1993 } 1994 1995 /* Re-add this interrupt. 
*/ 1996 if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1997 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1998 "nx_hio_rdc_unshare: Failed to add interrupt for " 1999 "RxDMA CHANNEL %d", channel)); 2000 } 2001 2002 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 2003 } 2004 2005 /* 2006 * nxge_hio_dc_unshare 2007 * 2008 * Unshare (reuse) a DMA channel. 2009 * 2010 * Arguments: 2011 * nxge 2012 * vr The VR that <channel> belongs to. 2013 * type Tx or Rx. 2014 * channel The DMA channel to reuse. 2015 * 2016 * Notes: 2017 * 2018 * Context: 2019 * Service domain 2020 */ 2021 void 2022 nxge_hio_dc_unshare( 2023 nxge_t *nxge, 2024 nxge_hio_vr_t *vr, 2025 mac_ring_type_t type, 2026 int channel) 2027 { 2028 nxge_grp_t *group; 2029 nxge_hio_dc_t *dc; 2030 2031 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 2032 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 2033 2034 /* Unlink the channel from its group. */ 2035 /* -------------------------------------------------- */ 2036 group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 2037 NXGE_DC_RESET(group->map, channel); 2038 if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 2039 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2040 "nx_hio_dc_unshare(%d) failed", channel)); 2041 return; 2042 } 2043 2044 dc->vr = 0; 2045 dc->cookie = 0; 2046 2047 if (type == MAC_RING_TYPE_RX) { 2048 nxge_hio_rdc_unshare(nxge, channel); 2049 } else { 2050 nxge_hio_tdc_unshare(nxge, channel); 2051 } 2052 2053 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 2054 } 2055 2056 #endif /* if defined(sun4v) */ 2057