/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices. That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

#define	NXGE_HIO_SHARE_MIN_CHANNELS	2
#define	NXGE_HIO_SHARE_MAX_CHANNELS	2

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following function may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not. If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 * Figure out if we are in a guest domain or not.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
void
nxge_get_environs(
    nxge_t *nxge)
{
    char *string;

    /*
     * In the beginning, assume that we are running sans LDOMs/XEN.
     */
    nxge->environs = SOLARIS_DOMAIN;

    /*
     * Are we a hybrid I/O (HIO) guest domain driver?
     */
    if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        "niutype", &string)) == DDI_PROP_SUCCESS) {
        if (strcmp(string, "n2niu") == 0) {
            nxge->environs = SOLARIS_GUEST_DOMAIN;
            /* So we can allocate properly-aligned memory. */
            nxge->niu_type = N2_NIU;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "Hybrid IO-capable guest domain"));
        }
        ddi_prop_free(string);
    }
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *     This is the non-hybrid I/O version of this function.
 *
 * Context:
 *     Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
    nxge_hio_data_t *nhd;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    nhd->hio.ldoms = B_FALSE;

    return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    ASSERT(nxge->nxge_hw_p->ndevs == 0);

    if (nhd != NULL) {
        MUTEX_DESTROY(&nhd->lock);
        KMEM_FREE(nhd, sizeof (*nhd));
        nxge->nxge_hw_p->hio = 0;
    }
}

/*
 * nxge_dci_map
 *
 * Map a DMA channel index to a channel number.
 *
 * Arguments:
 *     nxge
 *     type	The type of channel this is: Tx or Rx.
 *     index	The index to convert to a channel number.
 *
 * Notes:
 *     This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *     Any domain
 */
int
nxge_dci_map(
    nxge_t *nxge,
    vpc_type_t type,
    int index)
{
    nxge_grp_set_t *set;
    int dc;

    switch (type) {
    case VP_BOUND_TX:
        set = &nxge->tx_set;
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        break;
    default:
        return (-1);
    }

    for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
        if ((1 << dc) & set->owned.map) {
            if (index == 0)
                return (dc);
            else
                index--;
        }
    }

    return (-1);
}
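
/*
 * Illustrative example (explanatory note, not part of the original
 * driver): nxge_dci_map() returns the channel whose bit is the
 * index'th-lowest bit set in the owned map. If this instance owns
 * channels 2, 3 & 5, then set->owned.map == 0x2c, and
 * nxge_dci_map(nxge, VP_BOUND_RX, 1) returns 3.
 */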

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions. That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to
 * manage Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_add
 *
 * Add a group to an instance of NXGE.
 *
 * Arguments:
 *     nxge
 *     type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
vr_handle_t
nxge_grp_add(
    nxge_t *nxge,
    nxge_grp_type_t type)
{
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    int i;

    group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
    group->nxge = nxge;

    MUTEX_ENTER(&nxge->group_lock);
    switch (type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    group->type = type;
    group->active = B_TRUE;
    group->sequence = set->sequence++;

    /* Find an empty slot for this logical group. */
    for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if (set->group[i] == 0) {
            group->index = i;
            set->group[i] = group;
            NXGE_DC_SET(set->lg.map, i);
            set->lg.count++;
            break;
        }
    }
    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_add: %cgroup = %d.%d",
        type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    return ((vr_handle_t)group);
}

void
nxge_grp_remove(
    nxge_t *nxge,
    vr_handle_t handle)	/* The group to remove. */
{
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    vpc_type_t type;

    group = (nxge_grp_t *)handle;

    MUTEX_ENTER(&nxge->group_lock);
    switch (group->type) {
    case NXGE_TRANSMIT_GROUP:
    case EXT_TRANSMIT_GROUP:
        set = &nxge->tx_set;
        break;
    default:
        set = &nxge->rx_set;
        break;
    }

    if (set->group[group->index] != group) {
        MUTEX_EXIT(&nxge->group_lock);
        return;
    }

    set->group[group->index] = 0;
    NXGE_DC_RESET(set->lg.map, group->index);
    set->lg.count--;

    /* While inside the mutex, deactivate <group>. */
    group->active = B_FALSE;

    MUTEX_EXIT(&nxge->group_lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_remove(%c.%d.%d) called",
        group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
        nxge->mac.portnum, group->sequence));

    /* Now, remove any DCs which are still active. */
    switch (group->type) {
    default:
        type = VP_BOUND_TX;
        break;
    case NXGE_RECEIVE_GROUP:
    case EXT_RECEIVE_GROUP:
        type = VP_BOUND_RX;
    }

    while (group->dc) {
        nxge_grp_dc_remove(nxge, type, group->dc->channel);
    }

    KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 * Add a DMA channel to a VR/Group.
 *
 * Arguments:
 *     nxge
 *     handle	The group to add <channel> to.
 *     type	Rx or Tx.
 *     channel	The channel to add.
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
    nxge_t *nxge,
    vr_handle_t handle,	/* The group to add <channel> to. */
    vpc_type_t type,	/* Rx or Tx */
    int channel)	/* A physical/logical channel number */
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_grp_t *group;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

    if (handle == 0)
        return (0);

    switch (type) {
    default:
        set = &nxge->tx_set;
        if (channel >= NXGE_MAX_TDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: TDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    case VP_BOUND_RX:
        set = &nxge->rx_set;
        if (channel >= NXGE_MAX_RDCS) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nxge_grp_dc_add: RDC = %d", channel));
            return (NXGE_ERROR);
        }
        break;
    }

    group = (nxge_grp_t *)handle;
    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
        type == VP_BOUND_TX ? 't' : 'r',
        nxge->mac.portnum, group->sequence, group->count, channel));

    MUTEX_ENTER(&nxge->group_lock);
    if (group->active != B_TRUE) {
        /* We may be in the process of removing this group. */
        MUTEX_EXIT(&nxge->group_lock);
        return (NXGE_ERROR);
    }
    MUTEX_EXIT(&nxge->group_lock);

    if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): DC FIND failed", channel));
        return (NXGE_ERROR);
    }

    MUTEX_ENTER(&nhd->lock);

    if (dc->group) {
        MUTEX_EXIT(&nhd->lock);
        /* This channel is already in use! */
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel already in group", channel));
        return (NXGE_ERROR);
    }

    dc->next = 0;
    dc->page = channel;
    dc->channel = (nxge_channel_t)channel;

    dc->type = type;
    if (type == VP_BOUND_RX) {
        dc->init = nxge_init_rxdma_channel;
        dc->uninit = nxge_uninit_rxdma_channel;
    } else {
        dc->init = nxge_init_txdma_channel;
        dc->uninit = nxge_uninit_txdma_channel;
    }

    dc->group = handle;

    if (isLDOMguest(nxge))
        (void) nxge_hio_ldsv_add(nxge, dc);

    NXGE_DC_SET(set->owned.map, channel);
    set->owned.count++;

    MUTEX_EXIT(&nhd->lock);

    if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nxge_grp_dc_add(%d): channel init failed", channel));
        return (NXGE_ERROR);
    }

    nxge_grp_dc_append(nxge, group, dc);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

    return ((int)status);
}

void
nxge_grp_dc_remove(
    nxge_t *nxge,
    vpc_type_t type,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *dc;
    nxge_grp_set_t *set;
    nxge_grp_t *group;

    dc_uninit_t uninit;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

    if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_remove: find(%d) failed", channel));
        return;
    }
    group = (nxge_grp_t *)dc->group;

    if (isLDOMguest(nxge)) {
        (void) nxge_hio_intr_remove(nxge, type, channel);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL,
        "DC remove: group = %d.%d.%d, %cdc %d",
        nxge->mac.portnum, group->sequence, group->count,
        type == VP_BOUND_TX ? 't' : 'r', dc->channel));

    MUTEX_ENTER(&nhd->lock);

    set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;
    if (isLDOMs(nxge) && ((1 << channel) & set->shared.map)) {
        NXGE_DC_RESET(group->map, channel);
    }

    /* Remove the DC from its group. */
    if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
        MUTEX_EXIT(&nhd->lock);
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_remove(%d) failed", channel));
        return;
    }

    uninit = dc->uninit;
    channel = dc->channel;

    NXGE_DC_RESET(set->owned.map, channel);
    set->owned.count--;

    (void) memset(dc, 0, sizeof (*dc));

    MUTEX_EXIT(&nhd->lock);

    (*uninit)(nxge, channel);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
    nxge_t *nxge,
    vpc_type_t type,	/* Rx or Tx */
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_dc_t *current;

    current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

    if (!isLDOMguest(nxge)) {
        return (&current[channel]);
    } else {
        /* We're in a guest domain. */
        int i, limit = (type == VP_BOUND_TX) ?
            NXGE_MAX_TDCS : NXGE_MAX_RDCS;

        MUTEX_ENTER(&nhd->lock);
        for (i = 0; i < limit; i++, current++) {
            if (current->channel == channel) {
                if (current->vr && current->vr->nxge ==
                    (uintptr_t)nxge) {
                    MUTEX_EXIT(&nhd->lock);
                    return (current);
                }
            }
        }
        MUTEX_EXIT(&nhd->lock);
    }

    return (0);
}
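
/*
 * A note on locking (inferred from the code paths in this file, not
 * from any official documentation): when both locks are needed,
 * nhd->lock is acquired before nxge->group_lock -- for example,
 * nxge_grp_dc_remove() holds nhd->lock while nxge_grp_dc_unlink()
 * takes nxge->group_lock. New callers should preserve that ordering
 * to avoid deadlock.
 */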

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
 *
 * Arguments:
 *     nxge
 *     group	The group to append to
 *     dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
static
void
nxge_grp_dc_append(
    nxge_t *nxge,
    nxge_grp_t *group,
    nxge_hio_dc_t *dc)
{
    MUTEX_ENTER(&nxge->group_lock);

    if (group->dc == 0) {
        group->dc = dc;
    } else {
        nxge_hio_dc_t *current = group->dc;
        do {
            if (current->next == 0) {
                current->next = dc;
                break;
            }
            current = current->next;
        } while (current);
    }

    NXGE_DC_SET(group->map, dc->channel);

    nxge_grp_dc_map(group);
    group->count++;

    MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 *     nxge
 *     group	The group (linked list) to unlink from
 *     channel	The channel to unlink
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
    nxge_t *nxge,
    nxge_grp_t *group,
    int channel)
{
    nxge_hio_dc_t *current, *previous;

    MUTEX_ENTER(&nxge->group_lock);

    if ((current = group->dc) == 0) {
        MUTEX_EXIT(&nxge->group_lock);
        return (0);
    }

    previous = 0;
    do {
        if (current->channel == channel) {
            if (previous)
                previous->next = current->next;
            else
                group->dc = current->next;
            break;
        }
        previous = current;
        current = current->next;
    } while (current);

    if (current == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "DC unlink: DC %d not found", channel));
    } else {
        current->next = 0;
        current->group = 0;

        group->count--;
    }

    nxge_grp_dc_map(group);

    MUTEX_EXIT(&nxge->group_lock);

    return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 *     nxge
 *     group	The group to remap.
 *
 * Notes:
 *     It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *     Service domain
 */
void
nxge_grp_dc_map(
    nxge_grp_t *group)
{
    nxge_channel_t *legend;
    nxge_hio_dc_t *dc;

    (void) memset(group->legend, 0, sizeof (group->legend));

    legend = group->legend;
    dc = group->dc;
    while (dc) {
        *legend = dc->channel;
        legend++;
        dc = dc->next;
    }
}
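
/*
 * Illustrative example (explanatory note, not part of the original
 * driver): after channels 3 and 5 have been appended to a group,
 * group->map == 0x28 and group->legend[] == { 3, 5, 0, ... }; that
 * is, the legend is simply the linked list flattened into an array,
 * in list order.
 */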

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay <seconds> number of seconds.
 *
 * Arguments:
 *     seconds	The number of seconds to delay.
 *
 * Notes:
 *     This is a developer-only function.
 *
 * Context:
 *     Any domain
 */
void
nxge_delay(
    int seconds)
{
    delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
    { "RXDMA_CFIG1",	0 },
    { "RXDMA_CFIG2",	8 },
    { "RBR_CFIG_A",	0x10 },
    { "RBR_CFIG_B",	0x18 },
    { "RBR_KICK",	0x20 },
    { "RBR_STAT",	0x28 },
    { "RBR_HDH",	0x30 },
    { "RBR_HDL",	0x38 },
    { "RCRCFIG_A",	0x40 },
    { "RCRCFIG_B",	0x48 },
    { "RCRSTAT_A",	0x50 },
    { "RCRSTAT_B",	0x58 },
    { "RCRSTAT_C",	0x60 },
    { "RX_DMA_ENT_MSK",	0x68 },
    { "RX_DMA_CTL_STAT", 0x70 },
    { "RCR_FLSH",	0x78 },
    { "RXMISC",		0x90 },
    { "RX_DMA_CTL_STAT_DBG", 0x98 },
    { 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
    { "Tx_RNG_CFIG",	0 },
    { "Tx_RNG_HDL",	0x10 },
    { "Tx_RNG_KICK",	0x18 },
    { "Tx_ENT_MASK",	0x20 },
    { "Tx_CS",		0x28 },
    { "TxDMA_MBH",	0x30 },
    { "TxDMA_MBL",	0x38 },
    { "TxDMA_PRE_ST",	0x40 },
    { "Tx_RNG_ERR_LOGH", 0x48 },
    { "Tx_RNG_ERR_LOGL", 0x50 },
    { "TDMC_INTR_DBG",	0x60 },
    { "Tx_CS_DBG",	0x68 },
    { 0, -1 }
};

/*
 * nxge_xx2str
 *
 * Translate a register address into a string.
 *
 * Arguments:
 *     offset	The address of the register to translate.
 *
 * Notes:
 *     These are developer-only functions.
 *
 * Context:
 *     Any domain
 */
const char *
nxge_rx2str(
    int offset)
{
    dmc_reg_name_t *reg = &rx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}

const char *
nxge_tx2str(
    int offset)
{
    dmc_reg_name_t *reg = &tx_names[0];

    offset &= DMA_CSR_MASK;

    while (reg->name) {
        if (offset == reg->offset)
            return (reg->name);
        reg++;
    }

    return (0);
}
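
/*
 * Illustrative usage (explanatory note, not part of the original
 * driver): the register address is masked with DMA_CSR_MASK first,
 * so any channel's copy of a register maps to the same name. For
 * example, nxge_rx2str(0x70) returns "RX_DMA_CTL_STAT"; an offset
 * with no table entry yields a NULL pointer.
 */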

/*
 * nxge_ddi_perror
 *
 * Map a DDI error number to a string.
 *
 * Arguments:
 *     ddi_error	The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
const char *
nxge_ddi_perror(
    int ddi_error)
{
    switch (ddi_error) {
    case DDI_SUCCESS:
        return ("DDI_SUCCESS");
    case DDI_FAILURE:
        return ("DDI_FAILURE");
    case DDI_NOT_WELL_FORMED:
        return ("DDI_NOT_WELL_FORMED");
    case DDI_EAGAIN:
        return ("DDI_EAGAIN");
    case DDI_EINVAL:
        return ("DDI_EINVAL");
    case DDI_ENOTSUP:
        return ("DDI_ENOTSUP");
    case DDI_EPENDING:
        return ("DDI_EPENDING");
    case DDI_ENOMEM:
        return ("DDI_ENOMEM");
    case DDI_EBUSY:
        return ("DDI_EBUSY");
    case DDI_ETRANSPORT:
        return ("DDI_ETRANSPORT");
    case DDI_ECONTEXT:
        return ("DDI_ECONTEXT");
    default:
        return ("Unknown error");
    }
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static vr_handle_t nxge_hio_vr_share(nxge_t *);

static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t);
static void nxge_hio_unshare(vr_handle_t);

static int nxge_hio_addres(vr_handle_t, mac_ring_type_t, int);
static void nxge_hio_remres(vr_handle_t, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int channel);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * Context:
 *     Any domain
 */
int
nxge_hio_init(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd;
    int i, region;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    if (nhd == 0) {
        nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
        MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
        nxge->nxge_hw_p->hio = (uintptr_t)nhd;
    }

    if ((nxge->environs == SOLARIS_DOMAIN) &&
        (nxge->niu_type == N2_NIU)) {
        if (nxge->niu_hsvc_available == B_TRUE) {
            hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;
            if (niu_hsvc->hsvc_major == 1 &&
                niu_hsvc->hsvc_minor == 1)
                nxge->environs = SOLARIS_SERVICE_DOMAIN;
            NXGE_DEBUG_MSG((nxge, HIO_CTL,
                "nxge_hio_init: hypervisor services "
                "version %d.%d",
                niu_hsvc->hsvc_major, niu_hsvc->hsvc_minor));
        }
    }

    if (!isLDOMs(nxge)) {
        nhd->hio.ldoms = B_FALSE;
        return (NXGE_OK);
    }

    nhd->hio.ldoms = B_TRUE;

    /*
     * Fill in what we can.
     */
    for (region = 0; region < NXGE_VR_SR_MAX; region++) {
        nhd->vr[region].region = region;
    }
    nhd->available.vrs = NXGE_VR_SR_MAX - 2;
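    /*
     * Why "- 2"? Two of the regions are never shareable:
     * FUNC0_VIR0 'belongs' to NIU port 0 and FUNC2_VIR0 'belongs'
     * to NIU port 1 (see nxge_hio_vr_share() below), so only the
     * remaining regions are available to guests. (Explanatory note
     * added here; inferred from nxge_hio_vr_share().)
     */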

    /*
     * Initialize share and ring group structures.
     */
    for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
        nxge->rx_hio_groups[i].ghandle = NULL;
        nxge->rx_hio_groups[i].nxgep = nxge;
        nxge->rx_hio_groups[i].gindex = 0;
        nxge->rx_hio_groups[i].sindex = 0;
    }

    for (i = 0; i < NXGE_VR_SR_MAX; i++) {
        nxge->shares[i].nxgep = nxge;
        nxge->shares[i].index = 0;
        nxge->shares[i].vrp = (void *)NULL;
        nxge->shares[i].tmap = 0;
        nxge->shares[i].rmap = 0;
        nxge->shares[i].rxgroup = 0;
        nxge->shares[i].active = B_FALSE;
    }

    /* Fill in the HV HIO function pointers. */
    nxge_hio_hv_init(nxge);

    if (isLDOMservice(nxge)) {
        NXGE_DEBUG_MSG((nxge, HIO_CTL,
            "Hybrid IO-capable service domain"));
        return (NXGE_OK);
    } else {
        /*
         * isLDOMguest(nxge) == B_TRUE
         */
        nx_vio_fp_t *vio;
        nhd->type = NXGE_HIO_TYPE_GUEST;

        vio = &nhd->hio.vio;
        vio->__register = (vio_net_resource_reg_t)
            modgetsymvalue("vio_net_resource_reg", 0);
        vio->unregister = (vio_net_resource_unreg_t)
            modgetsymvalue("vio_net_resource_unreg", 0);

        if (vio->__register == 0 || vio->unregister == 0) {
            NXGE_ERROR_MSG((nxge, VIR_CTL, "vio_net is absent!"));
            return (NXGE_ERROR);
        }
    }

    return (NXGE_OK);
}

static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int rv, sindex;
    nxge_hio_vr_t *vr;	/* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Program the mac address for the group/share.
     */
    if ((rv = nxge_hio_hostinfo_init(nxge, vr,
        (ether_addr_t *)mac_addr)) != 0) {
        return (rv);
    }

    return (0);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
    nxge_rx_ring_group_t *rxgroup = (nxge_rx_ring_group_t *)arg;
    p_nxge_t nxge = rxgroup->nxgep;
    int group = rxgroup->gindex;
    int sindex;
    nxge_hio_vr_t *vr;	/* The Virtualization Region */

    sindex = nxge->rx_hio_groups[group].sindex;
    vr = (nxge_hio_vr_t *)nxge->shares[sindex].vrp;

    /*
     * Remove the mac address for the group/share.
     */
    nxge_hio_hostinfo_uninit(nxge, vr);

    return (0);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int group,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
    p_nxge_t nxgep = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;

    switch (type) {
    case MAC_RING_TYPE_RX:
        rxgroup = &nxgep->rx_hio_groups[group];
        rxgroup->gindex = group;

        infop->mrg_driver = (mac_group_driver_t)rxgroup;
        infop->mrg_start = NULL;
        infop->mrg_stop = NULL;
        infop->mrg_addmac = nxge_hio_add_mac;
        infop->mrg_remmac = nxge_hio_rem_mac;
        infop->mrg_count = NXGE_HIO_SHARE_MAX_CHANNELS;
        break;

    case MAC_RING_TYPE_TX:
        break;
    }
}

int
nxge_hio_share_assign(
    nxge_t *nxge,
    uint64_t cookie,
    res_map_t *tmap,
    res_map_t *rmap,
    nxge_hio_vr_t *vr)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    uint64_t slot, hv_rv;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    int i;

    /*
     * Ask the Hypervisor to set up the VR for us
     */
    fp = &nhd->hio.vr;
    if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
        NXGE_ERROR_MSG((nxge, HIO_CTL,
            "nx_hio_share_assign: "
            "vr->assign() returned %d", hv_rv));
        nxge_hio_unshare((vr_handle_t)vr);
        return (-EIO);
    }

    /*
     * For each shared TDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->tx_group.dc;
    for (i = 0; i < NXGE_MAX_TDCS; i++) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        while (dc) {
            hv_rv = (*tx->assign)
                (vr->cookie, dc->channel, &slot);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nx_hio_share_assign: "
                    "tx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*tmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    /*
     * For each shared RDC, ask the HV to find us an empty slot.
     * -----------------------------------------------------
     */
    dc = vr->rx_group.dc;
    for (i = 0; i < NXGE_MAX_RDCS; i++) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        while (dc) {
            hv_rv = (*rx->assign)
                (vr->cookie, dc->channel, &slot);
            if (hv_rv != 0) {
                NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                    "nx_hio_share_assign: "
                    "rx->assign(%x, %d) failed: %ld",
                    vr->cookie, dc->channel, hv_rv));
                return (-EIO);
            }

            dc->cookie = vr->cookie;
            dc->page = (vp_channel_t)slot;

            /* Inform the caller about the slot chosen. */
            (*rmap) |= 1 << slot;

            dc = dc->next;
        }
    }

    return (0);
}

int
nxge_hio_share_unassign(
    nxge_hio_vr_t *vr)
{
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;
    nxge_hio_dc_t *dc;
    nxhv_vr_fp_t *fp;
    uint64_t hv_rv;

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    dc = vr->tx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *tx = &nhd->hio.tx;
        hv_rv = (*tx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_dc_unshare: "
                "tx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    dc = vr->rx_group.dc;
    while (dc) {
        nxhv_dc_fp_t *rx = &nhd->hio.rx;
        hv_rv = (*rx->unassign)(vr->cookie, dc->page);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_dc_unshare: "
                "rx->unassign(%x, %d) failed: %ld",
                vr->cookie, dc->page, hv_rv));
        }
        dc = dc->next;
    }

    fp = &nhd->hio.vr;
    if (fp->unassign) {
        hv_rv = (*fp->unassign)(vr->cookie);
        if (hv_rv != 0) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
                "vr->unassign(%x) failed: %ld",
                vr->cookie, hv_rv));
        }
    }

    return (0);
}
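
/*
 * nxge_hio_share_alloc
 *
 * MAC-layer entry point: allocate a share for a guest domain.
 *
 * (Header comment added for symmetry with the other functions in
 * this file; the description is inferred from the code.) Finds a
 * free VR and an RDC table, adds NXGE_HIO_SHARE_MAX_CHANNELS TDCs
 * and RDCs to the share, and asks the hypervisor to bind them.
 *
 * Context:
 *     Service domain
 */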
int
nxge_hio_share_alloc(void *arg, uint64_t cookie, uint64_t *rcookie,
    mac_share_handle_t *shandle)
{
    p_nxge_t nxge = (p_nxge_t)arg;
    nxge_rx_ring_group_t *rxgroup;
    nxge_share_handle_t *shp;

    vr_handle_t shared;	/* The VR being shared */
    nxge_hio_vr_t *vr;	/* The Virtualization Region */
    uint64_t rmap, tmap;
    int rv;

    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));

    if (nhd->hio.vr.assign == 0 || nhd->hio.tx.assign == 0 ||
        nhd->hio.rx.assign == 0) {
        NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL"));
        return (EIO);
    }

    /*
     * Get a VR.
     */
    if ((shared = nxge_hio_vr_share(nxge)) == 0)
        return (EAGAIN);
    vr = (nxge_hio_vr_t *)shared;

    /*
     * Get an RDC group for us to use.
     */
    if ((vr->rdc_tbl = nxge_hio_hostinfo_get_rdc_table(nxge)) < 0) {
        nxge_hio_unshare(shared);
        return (EBUSY);
    }

    /*
     * Add resources to the share.
     */
    tmap = 0;
    rv = nxge_hio_addres(shared, MAC_RING_TYPE_TX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_unshare(shared);
        return (rv);
    }

    rmap = 0;
    rv = nxge_hio_addres(shared, MAC_RING_TYPE_RX,
        NXGE_HIO_SHARE_MAX_CHANNELS);
    if (rv != 0) {
        nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(shared);
        return (rv);
    }

    if ((rv = nxge_hio_share_assign(nxge, cookie, &tmap, &rmap, vr))) {
        nxge_hio_remres(shared, MAC_RING_TYPE_RX, rmap);
        nxge_hio_remres(shared, MAC_RING_TYPE_TX, tmap);
        nxge_hio_unshare(shared);
        return (rv);
    }

    rxgroup = &nxge->rx_hio_groups[vr->rdc_tbl];
    rxgroup->gindex = vr->rdc_tbl;
    rxgroup->sindex = vr->region;

    shp = &nxge->shares[vr->region];
    shp->index = vr->region;
    shp->vrp = (void *)vr;
    shp->tmap = tmap;
    shp->rmap = rmap;
    shp->rxgroup = vr->rdc_tbl;
    shp->active = B_TRUE;

    /* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
    *rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;

    *shandle = (mac_share_handle_t)shp;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share"));
    return (0);
}

void
nxge_hio_share_free(mac_share_handle_t shandle)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    /*
     * First, unassign the VR (take it back),
     * so we can enable interrupts again.
     */
    (void) nxge_hio_share_unassign(shp->vrp);

    /*
     * Free Ring Resources for TX and RX
     */
    nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
    nxge_hio_remres((vr_handle_t)shp->vrp, MAC_RING_TYPE_RX, shp->rmap);

    /*
     * Free VR resource.
     */
    nxge_hio_unshare((vr_handle_t)shp->vrp);

    /*
     * Clear internal handle state.
     */
    shp->index = 0;
    shp->vrp = (void *)NULL;
    shp->tmap = 0;
    shp->rmap = 0;
    shp->rxgroup = 0;
    shp->active = B_FALSE;
}

void
nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type,
    uint32_t *rmin, uint32_t *rmax, uint64_t *rmap, uint64_t *gnum)
{
    nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;

    switch (type) {
    case MAC_RING_TYPE_RX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->rmap;
        *gnum = shp->rxgroup;
        break;

    case MAC_RING_TYPE_TX:
        *rmin = NXGE_HIO_SHARE_MIN_CHANNELS;
        *rmax = NXGE_HIO_SHARE_MAX_CHANNELS;
        *rmap = shp->tmap;
        *gnum = 0;
        break;
    }
}

/*
 * nxge_hio_vr_share
 *
 * Find an unused Virtualization Region (VR).
 *
 * Arguments:
 *     nxge
 *
 * Notes:
 *
 * Context:
 *     Service domain
 */
vr_handle_t
nxge_hio_vr_share(
    nxge_t *nxge)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hio_vr_t *vr;

    int first, limit, region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));

    MUTEX_ENTER(&nhd->lock);

    if (nhd->available.vrs == 0) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    /* Find an empty virtual region (VR). */
    if (nxge->function_num == 0) {
        // FUNC0_VIR0 'belongs' to NIU port 0.
        first = FUNC0_VIR1;
        limit = FUNC2_VIR0;
    } else if (nxge->function_num == 1) {
        // FUNC2_VIR0 'belongs' to NIU port 1.
        first = FUNC2_VIR1;
        limit = FUNC_VIR_MAX;
    } else {
        cmn_err(CE_WARN,
            "Shares not supported on function(%d) at this time.\n",
            nxge->function_num);
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    for (region = first; region < limit; region++) {
        if (nhd->vr[region].nxge == 0)
            break;
    }

    if (region == limit) {
        MUTEX_EXIT(&nhd->lock);
        return (0);
    }

    vr = &nhd->vr[region];
    vr->nxge = (uintptr_t)nxge;
    vr->region = (uintptr_t)region;

    nhd->available.vrs--;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));

    return ((vr_handle_t)vr);
}

void
nxge_hio_unshare(
    vr_handle_t shared)
{
    nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_hio_data_t *nhd;

    vr_region_t region;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_unshare: "
            "vr->nxge is NULL"));
        return;
    }

    /*
     * This function is no longer called, but I will keep it
     * here in case we want to revisit this topic in the future.
     *
     * nxge_hio_hostinfo_uninit(nxge, vr);
     */
    (void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);

    nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

    MUTEX_ENTER(&nhd->lock);

    region = vr->region;
    (void) memset(vr, 0, sizeof (*vr));
    vr->region = region;

    nhd->available.vrs++;

    MUTEX_EXIT(&nhd->lock);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
}
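
/*
 * nxge_hio_addres
 *
 * Add <count> DMA channels of type <type> to a VR.
 *
 * (Header comment added for symmetry with the other functions in
 * this file; the description is inferred from the code.)
 *
 * Notes:
 *     Partial success is tolerated: if at least one channel was
 *     shared, the function still returns 0.
 *
 * Context:
 *     Service domain
 */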
int
nxge_hio_addres(
    vr_handle_t shared,
    mac_ring_type_t type,
    int count)
{
    nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
    nxge_t *nxge = (nxge_t *)vr->nxge;
    int i;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));

    if (!nxge)
        return (EINVAL);

    for (i = 0; i < count; i++) {
        int rv;
        if ((rv = nxge_hio_dc_share(nxge, vr, type)) < 0) {
            if (i == 0)	/* Couldn't get even one DC. */
                return (-rv);
            else
                break;
        }
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));

    return (0);
}

/* ARGSUSED */
void
nxge_hio_remres(
    vr_handle_t shared,
    mac_ring_type_t type,
    res_map_t res_map)
{
    nxge_hio_vr_t *vr = (nxge_hio_vr_t *)shared;
    nxge_t *nxge = (nxge_t *)vr->nxge;
    nxge_grp_t *group;

    if (!nxge) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_remres: "
            "vr->nxge is NULL"));
        return;
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));

    group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
    while (group->dc) {
        nxge_hio_dc_t *dc = group->dc;
        NXGE_DC_RESET(res_map, dc->page);
        nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
    }

    if (res_map) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
            "res_map %lx", res_map));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
}

/*
 * nxge_hio_tdc_share
 *
 * Share an unused TDC channel.
 *
 * Arguments:
 *     nxge
 *     channel	The channel to share.
 *
 * Notes:
 *
 * A.7.3 Reconfigure Tx DMA channel
 *     Disable TxDMA		A.9.6.10
 *     [Rebind TxDMA channel to Port	A.9.6.7]
 *
 * We don't have to Rebind the TDC to the port - it is always already
 * bound.
 *
 *     Soft Reset TxDMA		A.9.6.2
 *
 * This procedure will be executed by nxge_init_txdma_channel() in the
 * guest domain:
 *
 *     Re-initialize TxDMA	A.9.6.8
 *     Reconfigure TxDMA
 *     Enable TxDMA		A.9.6.9
 *
 * Context:
 *     Service domain
 */
int
nxge_hio_tdc_share(
    nxge_t *nxge,
    int channel)
{
    nxge_grp_set_t *set = &nxge->tx_set;
    tx_ring_t *ring;
    int count;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share"));

    /*
     * Wait until this channel is idle.
     */
    ring = nxge->tx_rings->rings[channel];

    (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING);
    if (ring->tx_ring_busy) {
        /*
         * Wait for 30 seconds.
         */
        for (count = 30 * 1000; count; count--) {
            if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) {
                break;
            }

            drv_usecwait(1000);
        }

        if (count == 0) {
            (void) atomic_swap_32(&ring->tx_ring_offline,
                NXGE_TX_RING_ONLINE);
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
                "Tx ring %d was always BUSY", channel));
            return (-EIO);
        }
    } else {
        (void) atomic_swap_32(&ring->tx_ring_offline,
            NXGE_TX_RING_OFFLINED);
    }

    if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_tdc_share: "
            "Failed to remove interrupt for TxDMA channel %d",
            channel));
        return (NXGE_ERROR);
    }

    /* Disable TxDMA	A.9.6.10 */
    (void) nxge_txdma_channel_disable(nxge, channel);

    /* The SD is sharing this channel. */
    NXGE_DC_SET(set->shared.map, channel);
    set->shared.count++;

    /* Soft Reset TxDMA	A.9.6.2 */
    nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel);

    /*
     * Initialize the DC-specific FZC control registers.
1586 * ----------------------------------------------------- 1587 */ 1588 if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 1589 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1590 "nx_hio_dc_share: FZC TDC failed: %d", channel)); 1591 return (-EIO); 1592 } 1593 1594 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 1595 1596 return (0); 1597 } 1598 1599 /* 1600 * nxge_hio_rdc_share 1601 * 1602 * Share an unused RDC channel. 1603 * 1604 * Arguments: 1605 * nxge 1606 * 1607 * Notes: 1608 * 1609 * This is the latest version of the procedure to 1610 * Reconfigure an Rx DMA channel: 1611 * 1612 * A.6.3 Reconfigure Rx DMA channel 1613 * Stop RxMAC A.9.2.6 1614 * Drain IPP Port A.9.3.6 1615 * Stop and reset RxDMA A.9.5.3 1616 * 1617 * This procedure will be executed by nxge_init_rxdma_channel() in the 1618 * guest domain: 1619 * 1620 * Initialize RxDMA A.9.5.4 1621 * Reconfigure RxDMA 1622 * Enable RxDMA A.9.5.5 1623 * 1624 * We will do this here, since the RDC is a canalis non grata: 1625 * Enable RxMAC A.9.2.10 1626 * 1627 * Context: 1628 * Service domain 1629 */ 1630 int 1631 nxge_hio_rdc_share( 1632 nxge_t *nxge, 1633 nxge_hio_vr_t *vr, 1634 int channel) 1635 { 1636 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1637 nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config; 1638 nxge_grp_set_t *set = &nxge->rx_set; 1639 nxge_rdc_grp_t *rdc_grp; 1640 1641 int current, last; 1642 1643 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 1644 1645 /* Disable interrupts. */ 1646 if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 1647 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nx_hio_rdc_share: " 1648 "Failed to remove interrupt for RxDMA channel %d", 1649 channel)); 1650 return (NXGE_ERROR); 1651 } 1652 1653 /* Stop RxMAC = A.9.2.6 */ 1654 if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 1655 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1656 "Failed to disable RxMAC")); 1657 } 1658 1659 /* Drain IPP Port = A.9.3.6 */ 1660 (void) nxge_ipp_drain(nxge); 1661 1662 /* Stop and reset RxDMA = A.9.5.3 */ 1663 // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 1664 if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 1665 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 1666 "Failed to disable RxDMA channel %d", channel)); 1667 } 1668 1669 /* The SD is sharing this channel. */ 1670 NXGE_DC_SET(set->shared.map, channel); 1671 set->shared.count++; 1672 1673 // Assert RST: RXDMA_CFIG1[30] = 1 1674 nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel); 1675 1676 /* 1677 * We have to reconfigure the RDC table(s) 1678 * to which this channel belongs. 1679 */ 1680 current = hardware->def_mac_rxdma_grpid; 1681 last = current + hardware->max_rdc_grpids; 1682 for (; current < last; current++) { 1683 if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) { 1684 rdc_grp = &nxge->pt_config.rdc_grps[current]; 1685 rdc_grp->map = set->owned.map; 1686 rdc_grp->max_rdcs--; 1687 (void) nxge_init_fzc_rdc_tbl(nxge, current); 1688 } 1689 } 1690 1691 /* 1692 * The guest domain will reconfigure the RDC later. 1693 * 1694 * But in the meantime, we must re-enable the Rx MAC so 1695 * that we can start receiving packets again on the 1696 * remaining RDCs: 1697 * 1698 * Enable RxMAC = A.9.2.10 1699 */ 1700 if (nxge_rx_mac_enable(nxge) != NXGE_OK) { 1701 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1702 "nx_hio_rdc_share: Rx MAC still disabled")); 1703 } 1704 1705 /* 1706 * Initialize the DC-specific FZC control registers. 
1707 * ----------------------------------------------------- 1708 */ 1709 if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) { 1710 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1711 "nx_hio_rdc_share: RZC RDC failed: %ld", channel)); 1712 return (-EIO); 1713 } 1714 1715 /* 1716 * We have to initialize the guest's RDC table, too. 1717 * ----------------------------------------------------- 1718 */ 1719 rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl]; 1720 if (rdc_grp->max_rdcs == 0) { 1721 rdc_grp->start_rdc = (uint8_t)channel; 1722 rdc_grp->def_rdc = (uint8_t)channel; 1723 rdc_grp->max_rdcs = 1; 1724 } else { 1725 rdc_grp->max_rdcs++; 1726 } 1727 NXGE_DC_SET(rdc_grp->map, channel); 1728 1729 if (nxge_init_fzc_rdc_tbl(nxge, vr->rdc_tbl) != NXGE_OK) { 1730 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1731 "nx_hio_rdc_share: nxge_init_fzc_rdc_tbl failed")); 1732 return (-EIO); 1733 } 1734 1735 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share")); 1736 1737 return (0); 1738 } 1739 1740 /* 1741 * nxge_hio_dc_share 1742 * 1743 * Share a DMA channel with a guest domain. 1744 * 1745 * Arguments: 1746 * nxge 1747 * vr The VR that <channel> will belong to. 1748 * type Tx or Rx. 1749 * res_map The resource map used by the caller, which we will 1750 * update if successful. 1751 * 1752 * Notes: 1753 * 1754 * Context: 1755 * Service domain 1756 */ 1757 int 1758 nxge_hio_dc_share( 1759 nxge_t *nxge, 1760 nxge_hio_vr_t *vr, 1761 mac_ring_type_t type) 1762 { 1763 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 1764 nxge_hw_pt_cfg_t *hardware; 1765 nxge_hio_dc_t *dc; 1766 int channel, limit; 1767 1768 nxge_grp_set_t *set; 1769 nxge_grp_t *group; 1770 1771 int slot; 1772 1773 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d", 1774 type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 1775 1776 /* 1777 * In version 1.0, we may only give a VR 2 RDCs or TDCs. 1778 * Not only that, but the HV has statically assigned the 1779 * channels like so: 1780 * VR0: RDC0 & RDC1 1781 * VR1: RDC2 & RDC3, etc. 1782 * The TDCs are assigned in exactly the same way. 1783 * 1784 * So, for example 1785 * hardware->start_rdc + vr->region * 2; 1786 * VR1: hardware->start_rdc + 1 * 2; 1787 * VR3: hardware->start_rdc + 3 * 2; 1788 * If start_rdc is 0, we end up with 2 or 6. 1789 * If start_rdc is 8, we end up with 10 or 14. 1790 */ 1791 1792 set = (type == MAC_RING_TYPE_TX ? &nxge->tx_set : &nxge->rx_set); 1793 hardware = &nxge->pt_config.hw_config; 1794 1795 // This code is still NIU-specific (assuming only 2 ports) 1796 channel = hardware->start_rdc + (vr->region % 4) * 2; 1797 limit = channel + 2; 1798 1799 MUTEX_ENTER(&nhd->lock); 1800 for (; channel < limit; channel++) { 1801 if ((1 << channel) & set->owned.map) { 1802 break; 1803 } 1804 } 1805 1806 if (channel == limit) { 1807 MUTEX_EXIT(&nhd->lock); 1808 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1809 "nx_hio_dc_share: there are no channels to share")); 1810 return (-EIO); 1811 } 1812 1813 MUTEX_EXIT(&nhd->lock); 1814 1815 /* -------------------------------------------------- */ 1816 slot = (type == MAC_RING_TYPE_TX) ? 1817 nxge_hio_tdc_share(nxge, channel) : 1818 nxge_hio_rdc_share(nxge, vr, channel); 1819 1820 if (slot < 0) { 1821 if (type == MAC_RING_TYPE_RX) { 1822 nxge_hio_rdc_unshare(nxge, channel); 1823 } else { 1824 nxge_hio_tdc_unshare(nxge, channel); 1825 } 1826 return (slot); 1827 } 1828 1829 MUTEX_ENTER(&nhd->lock); 1830 1831 /* 1832 * Tag this channel. 1833 * -------------------------------------------------- 1834 */ 1835 dc = type == MAC_RING_TYPE_TX ? 

    dc->vr = vr;
    dc->channel = (nxge_channel_t)channel;

    MUTEX_EXIT(&nhd->lock);

    /*
     * vr->[t|r]x_group is used by the service domain to
     * keep track of its shared DMA channels.
     */
    MUTEX_ENTER(&nxge->group_lock);
    group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);

    dc->group = (vr_handle_t)group;

    /* Initialize <group>, if necessary */
    if (group->count == 0) {
        group->nxge = nxge;
        group->type = (type == MAC_RING_TYPE_TX) ?
            VP_BOUND_TX : VP_BOUND_RX;
        group->sequence = nhd->sequence++;
        group->active = B_TRUE;
    }

    MUTEX_EXIT(&nxge->group_lock);

    NXGE_ERROR_MSG((nxge, HIO_CTL,
        "DC share: %cDC %d was assigned to slot %d",
        type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot));

    nxge_grp_dc_append(nxge, group, dc);

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share"));

    return (0);
}

/*
 * nxge_hio_tdc_unshare
 *
 * Unshare a TDC.
 *
 * Arguments:
 *     nxge
 *     channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *     Service domain
 */
void
nxge_hio_tdc_unshare(
    nxge_t *nxge,
    int channel)
{
    nxge_grp_set_t *set = &nxge->tx_set;
    vr_handle_t handle = (vr_handle_t)set->group[0];

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare"));

    NXGE_DC_RESET(set->shared.map, channel);
    set->shared.count--;

    if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_TX, channel))) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
            "Failed to initialize TxDMA channel %d", channel));
        return;
    }

    /* Re-add this interrupt. */
    if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: "
            "Failed to add interrupt for TxDMA channel %d", channel));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare"));
}
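
/*
 * Note (added for clarity; based on the code above and below): both
 * unshare paths hand the channel back to set->group[0], i.e., the
 * first logical group created by nxge_grp_add(), which acts as the
 * port's default group in the service domain.
 */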

/*
 * nxge_hio_rdc_unshare
 *
 * Unshare an RDC: add it to the SD's RDC groups (tables).
 *
 * Arguments:
 *     nxge
 *     channel	The channel to unshare (add again).
 *
 * Notes:
 *
 * Context:
 *     Service domain
 */
void
nxge_hio_rdc_unshare(
    nxge_t *nxge,
    int channel)
{
    nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
    nxge_hw_pt_cfg_t *hardware = &nxge->pt_config.hw_config;

    nxge_grp_set_t *set = &nxge->rx_set;
    vr_handle_t handle = (vr_handle_t)set->group[0];
    int current, last;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));

    /* Stop RxMAC = A.9.2.6 */
    if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
            "Failed to disable RxMAC"));
    }

    /* Drain IPP Port = A.9.3.6 */
    (void) nxge_ipp_drain(nxge);

    /* Stop and reset RxDMA = A.9.5.3 */
    // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
    if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
            "Failed to disable RxDMA channel %d", channel));
    }

    NXGE_DC_RESET(set->shared.map, channel);
    set->shared.count--;

    /*
     * Assert RST: RXDMA_CFIG1[30] = 1
     *
     * Initialize RxDMA	A.9.5.4
     * Reconfigure RxDMA
     * Enable RxDMA	A.9.5.5
     */
    if ((nxge_grp_dc_add(nxge, handle, VP_BOUND_RX, channel))) {
        /* Be sure to re-enable the RX MAC. */
        if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
            NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
                "nx_hio_rdc_unshare: Rx MAC still disabled"));
        }
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
            "Failed to initialize RxDMA channel %d", channel));
        return;
    }

    /*
     * We have to reconfigure the RDC table(s)
     * to which this channel once again belongs.
     */
    current = hardware->def_mac_rxdma_grpid;
    last = current + hardware->max_rdc_grpids;
    for (; current < last; current++) {
        if (nhd->rdc_tbl[current].nxge == (uintptr_t)nxge) {
            nxge_rdc_grp_t *group;
            group = &nxge->pt_config.rdc_grps[current];
            group->map = set->owned.map;
            group->max_rdcs++;
            (void) nxge_init_fzc_rdc_tbl(nxge, current);
        }
    }

    /*
     * Enable RxMAC = A.9.2.10
     */
    if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_unshare: Rx MAC still disabled"));
        return;
    }

    /* Re-add this interrupt. */
    if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_rdc_unshare: Failed to add interrupt for "
            "RxDMA channel %d", channel));
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare"));
}

/*
 * nxge_hio_dc_unshare
 *
 * Unshare (reuse) a DMA channel.
 *
 * Arguments:
 *     nxge
 *     vr	The VR that <channel> belongs to.
 *     type	Tx or Rx.
 *     channel	The DMA channel to reuse.
 *
 * Notes:
 *
 * Context:
 *     Service domain
 */
void
nxge_hio_dc_unshare(
    nxge_t *nxge,
    nxge_hio_vr_t *vr,
    mac_ring_type_t type,
    int channel)
{
    nxge_grp_t *group;
    nxge_hio_dc_t *dc;

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)",
        type == MAC_RING_TYPE_TX ? 't' : 'r', channel));

    /* Unlink the channel from its group. */
    /* -------------------------------------------------- */
    group = (type == MAC_RING_TYPE_TX) ?
        &vr->tx_group : &vr->rx_group;
    NXGE_DC_RESET(group->map, channel);
    if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "nx_hio_dc_unshare(%d) failed", channel));
        return;
    }

    dc->vr = 0;
    dc->cookie = 0;

    if (type == MAC_RING_TYPE_RX) {
        nxge_hio_rdc_unshare(nxge, channel);
    } else {
        nxge_hio_tdc_unshare(nxge, channel);
    }

    NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare"));
}

#endif	/* if defined(sun4v) */