/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio_guest.c
 *
 * This file manages the virtualization resources for a guest domain.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>

#include <sys/nxge/nxge_hio.h>

/*
 * nxge_hio_unregister
 *
 *	Unregister with the VNET module.
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	We're assuming that the channels will be disabled & unassigned
 *	in the service domain, after we're done here.
 *
 * Context:
 *	Guest domain
 */
void
nxge_hio_unregister(
	nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	if (nhd == 0) {
		return;
	}

#if defined(sun4v)
	/* Unregister with vNet. */
	if (nhd->hio.vio.unregister) {
		if (nxge->hio_vr)
			(*nhd->hio.vio.unregister)(nxge->hio_vr->vhp);
	}
#endif
}

/*
 * nxge_guest_regs_map
 *
 *	Map in a guest domain's register set(s).
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	Note that we set <is_vraddr> to TRUE.
 *
 * Context:
 *	Guest domain
 */
static ddi_device_acc_attr_t nxge_guest_register_access_attributes = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

int
nxge_guest_regs_map(
	nxge_t *nxge)
{
	dev_regs_t	*regs;
	off_t		regsize;
	int		rv;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map"));

	/* So we can allocate properly-aligned memory. */
	nxge->niu_type = N2_NIU;		/* Version 1.0 only */
	nxge->function_num = nxge->instance;	/* HIOXXX Looking for ideas. */
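
	/*
	 * The code below maps register set 0 of the guest device and
	 * points all three NPI handles (npi_handle, npi_reg_handle and
	 * npi_vreg_handle) at that single mapping.  <is_vraddr> is set
	 * to B_TRUE on each handle; as the header note above suggests,
	 * this marks the mapped addresses as virtualization-region
	 * (guest) addresses for the NPI layer.
	 */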

	nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	regs = nxge->dev_regs;

	if ((rv = ddi_dev_regsize(nxge->dip, 0, &regsize)) != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed"));
		return (NXGE_ERROR);
	}

	rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)&regs->nxge_regp, 0, 0,
	    &nxge_guest_register_access_attributes, &regs->nxge_regh);

	if (rv != DDI_SUCCESS) {
		NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed"));
		return (NXGE_ERROR);
	}

	nxge->npi_handle.regh = regs->nxge_regh;
	nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_handle.is_vraddr = B_TRUE;
	nxge->npi_handle.function.instance = nxge->instance;
	nxge->npi_handle.function.function = nxge->function_num;
	nxge->npi_handle.nxgep = (void *)nxge;

	/* NPI_REG_ADD_HANDLE_SET() */
	nxge->npi_reg_handle.regh = regs->nxge_regh;
	nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_reg_handle.is_vraddr = B_TRUE;
	nxge->npi_reg_handle.function.instance = nxge->instance;
	nxge->npi_reg_handle.function.function = nxge->function_num;
	nxge->npi_reg_handle.nxgep = (void *)nxge;

	/* NPI_VREG_ADD_HANDLE_SET() */
	nxge->npi_vreg_handle.regh = regs->nxge_regh;
	nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp;
	nxge->npi_vreg_handle.is_vraddr = B_TRUE;
	nxge->npi_vreg_handle.function.instance = nxge->instance;
	nxge->npi_vreg_handle.function.function = nxge->function_num;
	nxge->npi_vreg_handle.nxgep = (void *)nxge;

	regs->nxge_vir_regp = regs->nxge_regp;
	regs->nxge_vir_regh = regs->nxge_regh;

	/*
	 * We do NOT set the PCI, MSI-X, 2nd Virtualization,
	 * or FCODE reg variables.
	 */

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map"));

	return (NXGE_OK);
}

void
nxge_guest_regs_map_free(
	nxge_t *nxge)
{
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free"));

	if (nxge->dev_regs) {
		if (nxge->dev_regs->nxge_regh) {
			NXGE_DEBUG_MSG((nxge, DDI_CTL,
			    "==> nxge_unmap_regs: device registers"));
			ddi_regs_map_free(&nxge->dev_regs->nxge_regh);
			nxge->dev_regs->nxge_regh = NULL;
		}
		kmem_free(nxge->dev_regs, sizeof (dev_regs_t));
		nxge->dev_regs = 0;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free"));
}

#if defined(sun4v)

/*
 * -------------------------------------------------------------
 * Local prototypes
 * -------------------------------------------------------------
 */
static nxge_hio_dc_t *nxge_guest_dc_alloc(
	nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t);

static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t);
static void nxge_check_guest_state(nxge_hio_vr_t *);

/*
 * nxge_hio_vr_add
 *
 *	If we have been given a virtualization region (VR),
 *	then initialize it.
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *
 * Context:
 *	Guest domain
 */
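/*
 * In outline: the guest reads its hypervisor cookie from the first word
 * of its "reg" property, asks the hypervisor for the VR's address and
 * size, claims a free slot in <nhd->vr[]>, creates its (single) transmit
 * and receive groups, initializes interrupts, fetches the bitmaps of
 * virtual Tx and Rx DMA channels it owns, allocates a DC structure for
 * each of them, and finally registers with vio_net as a hybrid resource.
 */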
/* ARGSUSED */
int
nxge_hio_vr_add(nxge_t *nxge)
{
	extern mac_callbacks_t nxge_m_callbacks;

	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_vr_t *vr;
	nxge_hio_dc_t *dc;

	int *reg_val;
	uint_t reg_len;
	uint8_t vr_index;

	nxhv_vr_fp_t *fp;
	uint64_t vr_address, vr_size;
	uint32_t cookie;

	nxhv_dc_fp_t *tx, *rx;
	uint64_t tx_map, rx_map;

	uint64_t hv_rv;

	/* Variables needed to register with vnet. */
	mac_register_t *mac_info;
	ether_addr_t mac_addr;
	nx_vio_fp_t *vio;

	int i;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add"));

	/*
	 * Get our HV cookie.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip,
	    0, "reg", &reg_val, &reg_len) != DDI_PROP_SUCCESS) {
		NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found"));
		return (NXGE_ERROR);
	}

	cookie = (uint32_t)(reg_val[0]);
	ddi_prop_free(reg_val);

	fp = &nhd->hio.vr;
	hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size);
	if (hv_rv != 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "vr->getinfo() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * In the guest domain, we can use any VR data structure
	 * we want, because we're not supposed to know which VR
	 * the service domain has allocated to us.
	 *
	 * In the current version, the least significant nybble of
	 * the cookie is the VR region, but that could change
	 * very easily.
	 *
	 * In the future, a guest may have more than one VR allocated
	 * to it, which is why we go through this exercise.
	 */
	MUTEX_ENTER(&nhd->lock);
	for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) {
		if (nhd->vr[vr_index].nxge == 0) {
			nhd->vr[vr_index].nxge = (uintptr_t)nxge;
			break;
		}
	}
	MUTEX_EXIT(&nhd->lock);

	if (vr_index == FUNC_VIR_MAX) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add "
		    "no VRs available"));
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_vr_add(%d): cookie(0x%x)\n",
		    nxge->instance, cookie));
		return (NXGE_ERROR);
	}

	vr = &nhd->vr[vr_index];

	vr->nxge = (uintptr_t)nxge;
	vr->cookie = (uint32_t)cookie;
	vr->address = vr_address;
	vr->size = vr_size;
	vr->region = vr_index;

	/*
	 * This is redundant data, but useful nonetheless.  It helps
	 * us to keep track of which RDCs & TDCs belong to us.
	 */
	if (nxge->tx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP);
	if (nxge->rx_set.lg.count == 0)
		(void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP);

	/*
	 * See nxge_intr.c.
	 */
	if (nxge_hio_intr_init(nxge) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_hio_intr_init() failed"));
		return (NXGE_ERROR);
	}

	/*
	 * Now we find out which RDCs & TDCs have been allocated to us.
	 */
	tx = &nhd->hio.tx;
	if (tx->get_map) {
		/*
		 * The map we get back is a bitmap of the
		 * virtual Tx DMA channels we own -
		 * they are NOT real channel numbers.
		 */
		hv_rv = (*tx->get_map)(vr->cookie, &tx_map);
		if (hv_rv != 0) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "tx->get_map() failed"));
			return (NXGE_ERROR);
		}
		res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map);

		/*
		 * For each channel, mark these two fields
		 * while we have the VR data structure.
		 */
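		/*
		 * For example, a <tx_map> of 0x3 means that virtual
		 * channels 0 and 1 belong to this VR: a DC structure
		 * is reserved for each set bit, and the *virtual*
		 * channel number is recorded in dc->channel.
		 */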
339 */ 340 for (i = 0; i < VP_CHANNEL_MAX; i++) { 341 if ((1 << i) & tx_map) { 342 dc = nxge_guest_dc_alloc(nxge, vr, 343 NXGE_TRANSMIT_GROUP); 344 if (dc == 0) { 345 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 346 "DC add failed")); 347 return (NXGE_ERROR); 348 } 349 dc->channel = (nxge_channel_t)i; 350 } 351 } 352 } 353 354 rx = &nhd->hio.rx; 355 if (rx->get_map) { 356 /* 357 * I repeat, the map we get back is a bitmap of 358 * the virtual Rx DMA channels we own - 359 * they are NOT real channel numbers. 360 */ 361 hv_rv = (*rx->get_map)(vr->cookie, &rx_map); 362 if (hv_rv != 0) { 363 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 364 "rx->get_map() failed")); 365 return (NXGE_ERROR); 366 } 367 res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map); 368 369 /* 370 * For each channel, mark these two fields 371 * while we have the VR data structure. 372 */ 373 for (i = 0; i < VP_CHANNEL_MAX; i++) { 374 if ((1 << i) & rx_map) { 375 dc = nxge_guest_dc_alloc(nxge, vr, 376 NXGE_RECEIVE_GROUP); 377 if (dc == 0) { 378 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 379 "DC add failed")); 380 return (NXGE_ERROR); 381 } 382 dc->channel = (nxge_channel_t)i; 383 } 384 } 385 } 386 387 /* 388 * Register with vnet. 389 */ 390 if ((mac_info = mac_alloc(MAC_VERSION)) == NULL) 391 return (NXGE_ERROR); 392 393 mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 394 mac_info->m_driver = nxge; 395 mac_info->m_dip = nxge->dip; 396 mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 397 mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 398 (void) memset(mac_info->m_src_addr, 0xff, sizeof (MAXMACADDRLEN)); 399 mac_info->m_callbacks = &nxge_m_callbacks; 400 mac_info->m_min_sdu = 0; 401 mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX - 402 sizeof (struct ether_header) - ETHERFCSL - 4; 403 404 (void) memset(&mac_addr, 0xff, sizeof (mac_addr)); 405 406 /* Register with vio_net. */ 407 vio = &nhd->hio.vio; 408 if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID, 409 nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) { 410 NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration() failed")); 411 return (NXGE_ERROR); 412 } 413 414 nxge->hio_vr = vr; /* For faster lookups. */ 415 416 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add")); 417 418 return (NXGE_OK); 419 } 420 421 /* 422 * nxge_guest_dc_alloc 423 * 424 * Find a free nxge_hio_dc_t data structure. 425 * 426 * Arguments: 427 * nxge 428 * type TRANSMIT or RECEIVE. 429 * 430 * Notes: 431 * 432 * Context: 433 * Guest domain 434 */ 435 nxge_hio_dc_t * 436 nxge_guest_dc_alloc( 437 nxge_t *nxge, 438 nxge_hio_vr_t *vr, 439 nxge_grp_type_t type) 440 { 441 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 442 nxge_hio_dc_t *dc; 443 int limit, i; 444 445 /* 446 * In the guest domain, there may be more than one VR. 447 * each one of which will be using the same slots, or 448 * virtual channel numbers. So the <nhd>'s rdc & tdc 449 * tables must be shared. 450 */ 451 if (type == NXGE_TRANSMIT_GROUP) { 452 dc = &nhd->tdc[0]; 453 limit = NXGE_MAX_TDCS; 454 } else { 455 dc = &nhd->rdc[0]; 456 limit = NXGE_MAX_RDCS; 457 } 458 459 MUTEX_ENTER(&nhd->lock); 460 for (i = 0; i < limit; i++, dc++) { 461 if (dc->vr == 0) { 462 dc->vr = vr; 463 dc->cookie = vr->cookie; 464 MUTEX_EXIT(&nhd->lock); 465 return (dc); 466 } 467 } 468 MUTEX_EXIT(&nhd->lock); 469 470 return (0); 471 } 472 473 /* 474 * res_map_parse 475 * 476 * Parse a resource map. The resources are DMA channels, receive 477 * or transmit, depending on <type>. 478 * 479 * Arguments: 480 * nxge 481 * type Transmit or receive. 
void
res_map_parse(
	nxge_t *nxge,
	nxge_grp_type_t type,
	uint64_t res_map)
{
	uint8_t slots, mask, slot;
	int first, count;

	nxge_hw_pt_cfg_t *hardware;
	nxge_grp_t *group;

	/* Slots are numbered 0 - 7. */
	slots = (uint8_t)(res_map & 0xff);

	/* Count the number of bits in the bitmap. */
	for (slot = 0, count = 0, mask = 1; slot < 8; slot++) {
		if (slots & mask)
			count++;
		if (count == 1)
			first = slot;
		mask <<= 1;
	}

	hardware = &nxge->pt_config.hw_config;
	group = (type == NXGE_TRANSMIT_GROUP) ?
	    nxge->tx_set.group[0] : nxge->rx_set.group[0];

	/*
	 * A guest domain has one Tx & one Rx group, so far.
	 * In the future, there may be more than one.
	 */
	if (type == NXGE_TRANSMIT_GROUP) {
		nxge_dma_pt_cfg_t *port = &nxge->pt_config;
		nxge_tdc_grp_t *tdc_grp = &nxge->pt_config.tdc_grps[0];

		hardware->tdc.start = first;
		hardware->tdc.count = count;
		hardware->tdc.owned = count;

		tdc_grp->start_tdc = first;
		tdc_grp->max_tdcs = (uint8_t)count;
		tdc_grp->grp_index = group->index;
		tdc_grp->map = slots;

		group->map = slots;

		/*
		 * Pointless in a guest domain.  This bitmap is used
		 * in only one place: nxge_txc_init(),
		 * a service-domain-only function.
		 */
		port->tx_dma_map = slots;

		nxge->tx_set.owned.map |= slots;
	} else {
		nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0];

		hardware->start_rdc = first;
		hardware->max_rdcs = count;

		rdc_grp->start_rdc = (uint8_t)first;
		rdc_grp->max_rdcs = (uint8_t)count;
		rdc_grp->def_rdc = (uint8_t)first;

		rdc_grp->map = slots;
		group->map = slots;

		nxge->rx_set.owned.map |= slots;
	}
}

/*
 * nxge_hio_vr_release
 *
 *	Release a virtualization region (VR).
 *
 * Arguments:
 *	nxge
 *
 * Notes:
 *	We must uninitialize all DMA channels associated with the VR, too.
 *
 *	The service domain will re-initialize these DMA channels later.
 *	See nxge_hio.c:nxge_hio_share_free() for details.
 *
 * Context:
 *	Guest domain
 */
int
nxge_hio_vr_release(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	int vr_index;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release"));

	if (nxge->hio_vr == NULL) {
		return (NXGE_OK);
	}

	/*
	 * Uninitialize interrupts.
	 */
	nxge_hio_intr_uninit(nxge);

	/*
	 * Uninitialize the receive DMA channels.
	 */
	nxge_uninit_rxdma_channels(nxge);

	/*
	 * Uninitialize the transmit DMA channels.
	 */
	nxge_uninit_txdma_channels(nxge);

	/*
	 * Remove both groups.  Assumption: only two groups!
	 */
	if (nxge->rx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->rx_set.group[0]);
	if (nxge->tx_set.group[0] != NULL)
		nxge_grp_remove(nxge, nxge->tx_set.group[0]);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release"));

	/*
	 * Clean up.
	 */
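	/*
	 * Release the <nhd->vr[]> slot that nxge_hio_vr_add() claimed,
	 * so that it can be reused later.
	 */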
617 */ 618 MUTEX_ENTER(&nhd->lock); 619 for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) { 620 if (nhd->vr[vr_index].nxge == (uintptr_t)nxge) { 621 nhd->vr[vr_index].nxge = NULL; 622 break; 623 } 624 } 625 MUTEX_EXIT(&nhd->lock); 626 627 return (NXGE_OK); 628 } 629 630 #if defined(NIU_LP_WORKAROUND) 631 /* 632 * nxge_tdc_lp_conf 633 * 634 * Configure the logical pages for a TDC. 635 * 636 * Arguments: 637 * nxge 638 * channel The TDC to configure. 639 * 640 * Notes: 641 * 642 * Context: 643 * Guest domain 644 */ 645 nxge_status_t 646 nxge_tdc_lp_conf( 647 p_nxge_t nxge, 648 int channel) 649 { 650 nxge_hio_dc_t *dc; 651 nxge_dma_common_t *data; 652 nxge_dma_common_t *control; 653 tx_ring_t *ring; 654 655 uint64_t hv_rv; 656 uint64_t ra, size; 657 658 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf")); 659 660 ring = nxge->tx_rings->rings[channel]; 661 662 if (ring->hv_set) { 663 /* This shouldn't happen. */ 664 return (NXGE_OK); 665 } 666 667 if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel))) 668 return (NXGE_ERROR); 669 670 /* 671 * Initialize logical page 0 for data buffers. 672 * 673 * <orig_ioaddr_pp> & <orig_alength> are initialized in 674 * nxge_main.c:nxge_dma_mem_alloc(). 675 */ 676 data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel]; 677 ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp; 678 ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength; 679 680 hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie, 681 (uint64_t)channel, 0, 682 ring->hv_tx_buf_base_ioaddr_pp, 683 ring->hv_tx_buf_ioaddr_size); 684 685 if (hv_rv != 0) { 686 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 687 "<== nxge_tdc_lp_conf: channel %d " 688 "(page 0 data buf) hv: %d " 689 "ioaddr_pp $%p size 0x%llx ", 690 channel, hv_rv, 691 ring->hv_tx_buf_base_ioaddr_pp, 692 ring->hv_tx_buf_ioaddr_size)); 693 return (NXGE_ERROR | hv_rv); 694 } 695 696 ra = size = 0; 697 hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie, 698 (uint64_t)channel, 0, &ra, &size); 699 700 NXGE_DEBUG_MSG((nxge, HIO_CTL, 701 "==> nxge_tdc_lp_conf: channel %d " 702 "(page 0 data buf) hv_rv 0x%llx " 703 "set ioaddr_pp $%p set size 0x%llx " 704 "get ra ioaddr_pp $%p get size 0x%llx ", 705 channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp, 706 ring->hv_tx_buf_ioaddr_size, ra, size)); 707 708 /* 709 * Initialize logical page 1 for control buffers. 
710 */ 711 control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel]; 712 ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp; 713 ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength; 714 715 hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie, 716 (uint64_t)channel, (uint64_t)1, 717 ring->hv_tx_cntl_base_ioaddr_pp, 718 ring->hv_tx_cntl_ioaddr_size); 719 720 if (hv_rv != 0) { 721 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 722 "<== nxge_tdc_lp_conf: channel %d " 723 "(page 1 cntl buf) hv_rv 0x%llx " 724 "ioaddr_pp $%p size 0x%llx ", 725 channel, hv_rv, 726 ring->hv_tx_cntl_base_ioaddr_pp, 727 ring->hv_tx_cntl_ioaddr_size)); 728 return (NXGE_ERROR | hv_rv); 729 } 730 731 ra = size = 0; 732 hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie, 733 (uint64_t)channel, (uint64_t)1, &ra, &size); 734 735 NXGE_DEBUG_MSG((nxge, HIO_CTL, 736 "==> nxge_tdc_lp_conf: channel %d " 737 "(page 1 cntl buf) hv_rv 0x%llx " 738 "set ioaddr_pp $%p set size 0x%llx " 739 "get ra ioaddr_pp $%p get size 0x%llx ", 740 channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp, 741 ring->hv_tx_cntl_ioaddr_size, ra, size)); 742 743 ring->hv_set = B_TRUE; 744 745 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf")); 746 747 return (NXGE_OK); 748 } 749 750 /* 751 * nxge_rdc_lp_conf 752 * 753 * Configure an RDC's logical pages. 754 * 755 * Arguments: 756 * nxge 757 * channel The RDC to configure. 758 * 759 * Notes: 760 * 761 * Context: 762 * Guest domain 763 */ 764 nxge_status_t 765 nxge_rdc_lp_conf( 766 p_nxge_t nxge, 767 int channel) 768 { 769 nxge_hio_dc_t *dc; 770 nxge_dma_common_t *data; 771 nxge_dma_common_t *control; 772 rx_rbr_ring_t *ring; 773 774 uint64_t hv_rv; 775 uint64_t ra, size; 776 777 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf")); 778 779 ring = nxge->rx_rbr_rings->rbr_rings[channel]; 780 781 if (ring->hv_set) { 782 return (NXGE_OK); 783 } 784 785 if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) 786 return (NXGE_ERROR); 787 788 /* 789 * Initialize logical page 0 for data buffers. 790 * 791 * <orig_ioaddr_pp> & <orig_alength> are initialized in 792 * nxge_main.c:nxge_dma_mem_alloc(). 793 */ 794 data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel]; 795 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp; 796 ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength; 797 798 hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie, 799 (uint64_t)channel, 0, 800 ring->hv_rx_buf_base_ioaddr_pp, 801 ring->hv_rx_buf_ioaddr_size); 802 803 if (hv_rv != 0) { 804 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 805 "<== nxge_rdc_lp_conf: channel %d " 806 "(page 0 data buf) hv_rv 0x%llx " 807 "ioaddr_pp $%p size 0x%llx ", 808 channel, hv_rv, 809 ring->hv_rx_buf_base_ioaddr_pp, 810 ring->hv_rx_buf_ioaddr_size)); 811 return (NXGE_ERROR | hv_rv); 812 } 813 814 ra = size = 0; 815 hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie, 816 (uint64_t)channel, 0, &ra, &size); 817 818 NXGE_DEBUG_MSG((nxge, HIO_CTL, 819 "==> nxge_rdc_lp_conf: channel %d " 820 "(page 0 data buf) hv_rv 0x%llx " 821 "set ioaddr_pp $%p set size 0x%llx " 822 "get ra ioaddr_pp $%p get size 0x%llx ", 823 channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp, 824 ring->hv_rx_buf_ioaddr_size, ra, size)); 825 826 /* 827 * Initialize logical page 1 for control buffers. 
828 */ 829 control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel]; 830 ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp; 831 ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength; 832 833 hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie, 834 (uint64_t)channel, (uint64_t)1, 835 ring->hv_rx_cntl_base_ioaddr_pp, 836 ring->hv_rx_cntl_ioaddr_size); 837 838 if (hv_rv != 0) { 839 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 840 "<== nxge_rdc_lp_conf: channel %d " 841 "(page 1 cntl buf) hv_rv 0x%llx " 842 "ioaddr_pp $%p size 0x%llx ", 843 channel, hv_rv, 844 ring->hv_rx_cntl_base_ioaddr_pp, 845 ring->hv_rx_cntl_ioaddr_size)); 846 return (NXGE_ERROR | hv_rv); 847 } 848 849 ra = size = 0; 850 hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie, 851 (uint64_t)channel, (uint64_t)1, &ra, &size); 852 853 NXGE_DEBUG_MSG((nxge, HIO_CTL, 854 "==> nxge_rdc_lp_conf: channel %d " 855 "(page 1 cntl buf) hv_rv 0x%llx " 856 "set ioaddr_pp $%p set size 0x%llx " 857 "get ra ioaddr_pp $%p get size 0x%llx ", 858 channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp, 859 ring->hv_rx_cntl_ioaddr_size, ra, size)); 860 861 ring->hv_set = B_TRUE; 862 863 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf")); 864 865 return (NXGE_OK); 866 } 867 #endif /* defined(NIU_LP_WORKAROUND) */ 868 869 /* 870 * This value is in milliseconds. 871 */ 872 #define NXGE_GUEST_TIMER 500 /* 1/2 second, for now */ 873 874 /* 875 * nxge_hio_start_timer 876 * 877 * Start the timer which checks for Tx hangs. 878 * 879 * Arguments: 880 * nxge 881 * 882 * Notes: 883 * This function is called from nxge_attach(). 884 * 885 * This function kicks off the guest domain equivalent of 886 * nxge_check_hw_state(). It is called only once, from attach. 887 * 888 * Context: 889 * Guest domain 890 */ 891 void 892 nxge_hio_start_timer( 893 nxge_t *nxge) 894 { 895 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 896 nxge_hio_vr_t *vr; 897 int region; 898 899 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start")); 900 901 MUTEX_ENTER(&nhd->lock); 902 903 /* 904 * Find our VR data structure. (We are currently assuming 905 * one VR per guest domain. That may change in the future.) 906 */ 907 for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) { 908 if (nhd->vr[region].nxge == (uintptr_t)nxge) 909 break; 910 } 911 912 MUTEX_EXIT(&nhd->lock); 913 914 if (region == NXGE_VR_SR_MAX) { 915 return; 916 } 917 918 vr = (nxge_hio_vr_t *)&nhd->vr[region]; 919 920 nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state, 921 (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER)); 922 923 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start")); 924 } 925 926 /* 927 * nxge_check_guest_state 928 * 929 * Essentially, check for Tx hangs. In the future, if we are 930 * polling the hardware, we may do so here. 931 * 932 * Arguments: 933 * vr The virtualization region (VR) data structure. 934 * 935 * Notes: 936 * This function is the guest domain equivalent of 937 * nxge_check_hw_state(). Since we have no hardware to 938 * check, we simply call nxge_check_tx_hang(). 
939 * 940 * Context: 941 * Guest domain 942 */ 943 void 944 nxge_check_guest_state( 945 nxge_hio_vr_t *vr) 946 { 947 nxge_t *nxge = (nxge_t *)vr->nxge; 948 949 NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state")); 950 951 MUTEX_ENTER(nxge->genlock); 952 nxge->nxge_timerid = 0; 953 954 if (nxge->nxge_mac_state == NXGE_MAC_STARTED) { 955 nxge_check_tx_hang(nxge); 956 957 nxge->nxge_timerid = timeout((void(*)(void *)) 958 nxge_check_guest_state, (caddr_t)vr, 959 drv_usectohz(1000 * NXGE_GUEST_TIMER)); 960 } 961 962 nxge_check_guest_state_exit: 963 MUTEX_EXIT(nxge->genlock); 964 NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state")); 965 } 966 967 nxge_status_t 968 nxge_hio_rdc_intr_arm(p_nxge_t nxge, boolean_t arm) 969 { 970 nxge_grp_t *group; 971 uint32_t channel; 972 nxge_hio_dc_t *dc; 973 nxge_ldg_t *ldgp; 974 975 /* 976 * Validate state of guest interface before 977 * proceeeding. 978 */ 979 if (!isLDOMguest(nxge)) 980 return (NXGE_ERROR); 981 if (nxge->nxge_mac_state != NXGE_MAC_STARTED) 982 return (NXGE_ERROR); 983 984 /* 985 * In guest domain, always and only dealing with 986 * group 0 for an instance of nxge. 987 */ 988 group = nxge->rx_set.group[0]; 989 990 /* 991 * Look to arm the the RDCs for the group. 992 */ 993 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 994 if ((1 << channel) & group->map) { 995 /* 996 * Get the RDC. 997 */ 998 dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel); 999 if (dc == NULL) 1000 return (NXGE_ERROR); 1001 1002 /* 1003 * Get the RDC's ldg group. 1004 */ 1005 ldgp = &nxge->ldgvp->ldgp[dc->ldg.vector]; 1006 if (ldgp == NULL) 1007 return (NXGE_ERROR); 1008 1009 /* 1010 * Set the state of the group. 1011 */ 1012 ldgp->arm = arm; 1013 1014 nxge_hio_ldgimgn(nxge, ldgp); 1015 } 1016 } 1017 1018 return (NXGE_OK); 1019 } 1020 1021 nxge_status_t 1022 nxge_hio_rdc_enable(p_nxge_t nxge) 1023 { 1024 nxge_grp_t *group; 1025 npi_handle_t handle; 1026 uint32_t channel; 1027 npi_status_t rval; 1028 1029 /* 1030 * Validate state of guest interface before 1031 * proceeeding. 1032 */ 1033 if (!isLDOMguest(nxge)) 1034 return (NXGE_ERROR); 1035 if (nxge->nxge_mac_state != NXGE_MAC_STARTED) 1036 return (NXGE_ERROR); 1037 1038 /* 1039 * In guest domain, always and only dealing with 1040 * group 0 for an instance of nxge. 1041 */ 1042 group = nxge->rx_set.group[0]; 1043 1044 /* 1045 * Get the PIO handle. 1046 */ 1047 handle = NXGE_DEV_NPI_HANDLE(nxge); 1048 1049 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 1050 /* 1051 * If this channel is in the map, then enable 1052 * it. 1053 */ 1054 if ((1 << channel) & group->map) { 1055 /* 1056 * Enable the RDC and clear the empty bit. 1057 */ 1058 rval = npi_rxdma_cfg_rdc_enable(handle, channel); 1059 if (rval != NPI_SUCCESS) 1060 return (NXGE_ERROR); 1061 1062 (void) npi_rxdma_channel_rbr_empty_clear(handle, 1063 channel); 1064 } 1065 } 1066 1067 return (NXGE_OK); 1068 } 1069 #endif /* defined(sun4v) */ 1070