1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * nxge_hio_guest.c 29 * 30 * This file manages the virtualization resources for a guest domain. 31 * 32 */ 33 34 #include <sys/nxge/nxge_impl.h> 35 #include <sys/nxge/nxge_fzc.h> 36 #include <sys/nxge/nxge_rxdma.h> 37 #include <sys/nxge/nxge_txdma.h> 38 39 #include <sys/nxge/nxge_hio.h> 40 41 /* 42 * nxge_hio_unregister 43 * 44 * Unregister with the VNET module. 45 * 46 * Arguments: 47 * nxge 48 * 49 * Notes: 50 * We must uninitialize all DMA channels associated with the VR, too. 51 * 52 * We're assuming that the channels will be disabled & unassigned 53 * in the service domain, after we're done here. 54 * 55 * Context: 56 * Guest domain 57 */ 58 void 59 nxge_hio_unregister( 60 nxge_t *nxge) 61 { 62 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 63 64 if (nhd == 0) { 65 return; 66 } 67 68 #if defined(sun4v) 69 /* Unregister with vNet. */ 70 if (nhd->hio.vio.unregister) { 71 if (nxge->hio_vr) 72 (*nhd->hio.vio.unregister)(nxge->hio_vr->vhp); 73 } 74 #endif 75 } 76 77 /* 78 * nxge_guest_regs_map 79 * 80 * Map in a guest domain's register set(s). 81 * 82 * Arguments: 83 * nxge 84 * 85 * Notes: 86 * Note that we set <is_vraddr> to TRUE. 87 * 88 * Context: 89 * Guest domain 90 */ 91 static ddi_device_acc_attr_t nxge_guest_register_access_attributes = { 92 DDI_DEVICE_ATTR_V0, 93 DDI_STRUCTURE_LE_ACC, 94 DDI_STRICTORDER_ACC, 95 }; 96 97 int 98 nxge_guest_regs_map( 99 nxge_t *nxge) 100 { 101 dev_regs_t *regs; 102 off_t regsize; 103 int rv; 104 105 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map")); 106 107 /* So we can allocate properly-aligned memory. */ 108 nxge->niu_type = N2_NIU; /* Version 1.0 only */ 109 nxge->function_num = nxge->instance; /* HIOXXX Looking for ideas. */ 110 111 nxge->dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP); 112 regs = nxge->dev_regs; 113 114 if ((rv = ddi_dev_regsize(nxge->dip, 0, ®size)) != DDI_SUCCESS) { 115 NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_dev_regsize() failed")); 116 return (NXGE_ERROR); 117 } 118 119 rv = ddi_regs_map_setup(nxge->dip, 0, (caddr_t *)®s->nxge_regp, 0, 0, 120 &nxge_guest_register_access_attributes, ®s->nxge_regh); 121 122 if (rv != DDI_SUCCESS) { 123 NXGE_ERROR_MSG((nxge, HIO_CTL, "ddi_regs_map_setup() failed")); 124 return (NXGE_ERROR); 125 } 126 127 nxge->npi_handle.regh = regs->nxge_regh; 128 nxge->npi_handle.regp = (npi_reg_ptr_t)regs->nxge_regp; 129 nxge->npi_handle.is_vraddr = B_TRUE; 130 nxge->npi_handle.function.instance = nxge->instance; 131 nxge->npi_handle.function.function = nxge->function_num; 132 nxge->npi_handle.nxgep = (void *)nxge; 133 134 /* NPI_REG_ADD_HANDLE_SET() */ 135 nxge->npi_reg_handle.regh = regs->nxge_regh; 136 nxge->npi_reg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp; 137 nxge->npi_reg_handle.is_vraddr = B_TRUE; 138 nxge->npi_reg_handle.function.instance = nxge->instance; 139 nxge->npi_reg_handle.function.function = nxge->function_num; 140 nxge->npi_reg_handle.nxgep = (void *)nxge; 141 142 /* NPI_VREG_ADD_HANDLE_SET() */ 143 nxge->npi_vreg_handle.regh = regs->nxge_regh; 144 nxge->npi_vreg_handle.regp = (npi_reg_ptr_t)regs->nxge_regp; 145 nxge->npi_vreg_handle.is_vraddr = B_TRUE; 146 nxge->npi_vreg_handle.function.instance = nxge->instance; 147 nxge->npi_vreg_handle.function.function = nxge->function_num; 148 nxge->npi_vreg_handle.nxgep = (void *)nxge; 149 150 regs->nxge_vir_regp = regs->nxge_regp; 151 regs->nxge_vir_regh = regs->nxge_regh; 152 153 /* 154 * We do NOT set the PCI, MSI-X, 2nd Virtualization, 155 * or FCODE reg variables. 156 */ 157 158 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map")); 159 160 return (NXGE_OK); 161 } 162 163 void 164 nxge_guest_regs_map_free( 165 nxge_t *nxge) 166 { 167 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_guest_regs_map_free")); 168 169 if (nxge->dev_regs) { 170 if (nxge->dev_regs->nxge_regh) { 171 NXGE_DEBUG_MSG((nxge, DDI_CTL, 172 "==> nxge_unmap_regs: device registers")); 173 ddi_regs_map_free(&nxge->dev_regs->nxge_regh); 174 nxge->dev_regs->nxge_regh = NULL; 175 } 176 kmem_free(nxge->dev_regs, sizeof (dev_regs_t)); 177 nxge->dev_regs = 0; 178 } 179 180 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_guest_regs_map_free")); 181 } 182 183 #if defined(sun4v) 184 185 /* 186 * ------------------------------------------------------------- 187 * Local prototypes 188 * ------------------------------------------------------------- 189 */ 190 static nxge_hio_dc_t *nxge_guest_dc_alloc( 191 nxge_t *, nxge_hio_vr_t *, nxge_grp_type_t); 192 193 static void res_map_parse(nxge_t *, nxge_grp_type_t, uint64_t); 194 static void nxge_check_guest_state(nxge_hio_vr_t *); 195 196 /* 197 * nxge_hio_vr_add 198 * 199 * If we have been given a virtualization region (VR), 200 * then initialize it. 201 * 202 * Arguments: 203 * nxge 204 * 205 * Notes: 206 * 207 * Context: 208 * Guest domain 209 */ 210 /* ARGSUSED */ 211 int 212 nxge_hio_vr_add( 213 nxge_t *nxge) 214 { 215 extern mac_callbacks_t nxge_m_callbacks; 216 217 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 218 nxge_hio_vr_t *vr; 219 nxge_hio_dc_t *dc; 220 221 int *reg_val; 222 uint_t reg_len; 223 uint8_t vr_index; 224 225 nxhv_vr_fp_t *fp; 226 uint64_t cookie, vr_address, vr_size; 227 228 nxhv_dc_fp_t *tx, *rx; 229 uint64_t tx_map, rx_map; 230 231 uint64_t hv_rv; 232 233 /* Variables needed to register with vnet. */ 234 mac_register_t *mac_info; 235 ether_addr_t mac_addr; 236 nx_vio_fp_t *vio; 237 238 int i; 239 240 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_add")); 241 242 /* 243 * Get our HV cookie. 244 */ 245 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, nxge->dip, 246 0, "reg", ®_val, ®_len) != DDI_PROP_SUCCESS) { 247 NXGE_DEBUG_MSG((nxge, VPD_CTL, "`reg' property not found")); 248 return (NXGE_ERROR); 249 } 250 251 cookie = (uint64_t)(reg_val[0]); 252 ddi_prop_free(reg_val); 253 254 fp = &nhd->hio.vr; 255 hv_rv = (*fp->getinfo)(cookie, &vr_address, &vr_size); 256 if (hv_rv != 0) { 257 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 258 "vr->getinfo() failed")); 259 return (NXGE_ERROR); 260 } 261 262 /* 263 * In the guest domain, we can use any VR data structure 264 * we want, because we're not supposed to know which VR 265 * the service domain has allocated to us. 266 * 267 * In the current version, the least significant nybble of 268 * the cookie is the VR region, but that could change 269 * very easily. 270 * 271 * In the future, a guest may have more than one VR allocated 272 * to it, which is why we go through this exercise. 273 */ 274 MUTEX_ENTER(&nhd->lock); 275 for (vr_index = 0; vr_index < FUNC_VIR_MAX; vr_index++) { 276 if (nhd->vr[vr_index].nxge == 0) { 277 nhd->vr[vr_index].nxge = (uintptr_t)nxge; 278 break; 279 } 280 } 281 MUTEX_EXIT(&nhd->lock); 282 283 if (vr_index == FUNC_VIR_MAX) { 284 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_vr_add " 285 "no VRs available")); 286 return (NXGE_ERROR); 287 } 288 289 vr = &nhd->vr[vr_index]; 290 291 vr->nxge = (uintptr_t)nxge; 292 vr->cookie = (uint32_t)cookie; 293 vr->address = vr_address; 294 vr->size = vr_size; 295 vr->region = vr_index; 296 297 /* 298 * This is redundant data, but useful nonetheless. It helps 299 * us to keep track of which RDCs & TDCs belong to us. 300 */ 301 if (nxge->tx_set.lg.count == 0) 302 (void) nxge_grp_add(nxge, NXGE_TRANSMIT_GROUP); 303 if (nxge->rx_set.lg.count == 0) 304 (void) nxge_grp_add(nxge, NXGE_RECEIVE_GROUP); 305 306 /* 307 * See nxge_intr.c. 308 */ 309 if (nxge_hio_intr_init(nxge) != NXGE_OK) { 310 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 311 "nxge_hio_intr_init() failed")); 312 return (NXGE_ERROR); 313 } 314 315 /* 316 * Now we find out which RDCs & TDCs have been allocated to us. 317 */ 318 tx = &nhd->hio.tx; 319 if (tx->get_map) { 320 /* 321 * The map we get back is a bitmap of the 322 * virtual Tx DMA channels we own - 323 * they are NOT real channel numbers. 324 */ 325 hv_rv = (*tx->get_map)(vr->cookie, &tx_map); 326 if (hv_rv != 0) { 327 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 328 "tx->get_map() failed")); 329 return (NXGE_ERROR); 330 } 331 res_map_parse(nxge, NXGE_TRANSMIT_GROUP, tx_map); 332 333 /* 334 * For each channel, mark these two fields 335 * while we have the VR data structure. 336 */ 337 for (i = 0; i < VP_CHANNEL_MAX; i++) { 338 if ((1 << i) & tx_map) { 339 dc = nxge_guest_dc_alloc(nxge, vr, 340 NXGE_TRANSMIT_GROUP); 341 if (dc == 0) { 342 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 343 "DC add failed")); 344 return (NXGE_ERROR); 345 } 346 dc->channel = (nxge_channel_t)i; 347 } 348 } 349 } 350 351 rx = &nhd->hio.rx; 352 if (rx->get_map) { 353 /* 354 * I repeat, the map we get back is a bitmap of 355 * the virtual Rx DMA channels we own - 356 * they are NOT real channel numbers. 357 */ 358 hv_rv = (*rx->get_map)(vr->cookie, &rx_map); 359 if (hv_rv != 0) { 360 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 361 "rx->get_map() failed")); 362 return (NXGE_ERROR); 363 } 364 res_map_parse(nxge, NXGE_RECEIVE_GROUP, rx_map); 365 366 /* 367 * For each channel, mark these two fields 368 * while we have the VR data structure. 369 */ 370 for (i = 0; i < VP_CHANNEL_MAX; i++) { 371 if ((1 << i) & rx_map) { 372 dc = nxge_guest_dc_alloc(nxge, vr, 373 NXGE_RECEIVE_GROUP); 374 if (dc == 0) { 375 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 376 "DC add failed")); 377 return (NXGE_ERROR); 378 } 379 dc->channel = (nxge_channel_t)i; 380 } 381 } 382 } 383 384 /* 385 * Register with vnet. 386 */ 387 if ((mac_info = mac_alloc(MAC_VERSION)) == NULL) 388 return (NXGE_ERROR); 389 390 mac_info->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 391 mac_info->m_driver = nxge; 392 mac_info->m_dip = nxge->dip; 393 mac_info->m_src_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 394 mac_info->m_dst_addr = KMEM_ZALLOC(MAXMACADDRLEN, KM_SLEEP); 395 (void) memset(mac_info->m_src_addr, 0xff, sizeof (MAXMACADDRLEN)); 396 mac_info->m_callbacks = &nxge_m_callbacks; 397 mac_info->m_min_sdu = 0; 398 mac_info->m_max_sdu = NXGE_MTU_DEFAULT_MAX - 399 sizeof (struct ether_header) - ETHERFCSL - 4; 400 401 (void) memset(&mac_addr, 0xff, sizeof (mac_addr)); 402 403 /* Register with vio_net. */ 404 vio = &nhd->hio.vio; 405 if ((*vio->__register)(mac_info, VIO_NET_RES_HYBRID, 406 nxge->hio_mac_addr, mac_addr, &vr->vhp, &vio->cb)) { 407 NXGE_DEBUG_MSG((nxge, HIO_CTL, "HIO registration() failed")); 408 return (NXGE_ERROR); 409 } 410 411 nxge->hio_vr = vr; /* For faster lookups. */ 412 413 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_add")); 414 415 return (NXGE_OK); 416 } 417 418 /* 419 * nxge_guest_dc_alloc 420 * 421 * Find a free nxge_hio_dc_t data structure. 422 * 423 * Arguments: 424 * nxge 425 * type TRANSMIT or RECEIVE. 426 * 427 * Notes: 428 * 429 * Context: 430 * Guest domain 431 */ 432 nxge_hio_dc_t * 433 nxge_guest_dc_alloc( 434 nxge_t *nxge, 435 nxge_hio_vr_t *vr, 436 nxge_grp_type_t type) 437 { 438 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 439 nxge_hio_dc_t *dc; 440 int limit, i; 441 442 /* 443 * In the guest domain, there may be more than one VR. 444 * each one of which will be using the same slots, or 445 * virtual channel numbers. So the <nhd>'s rdc & tdc 446 * tables must be shared. 447 */ 448 if (type == NXGE_TRANSMIT_GROUP) { 449 dc = &nhd->tdc[0]; 450 limit = NXGE_MAX_TDCS; 451 } else { 452 dc = &nhd->rdc[0]; 453 limit = NXGE_MAX_RDCS; 454 } 455 456 MUTEX_ENTER(&nhd->lock); 457 for (i = 0; i < limit; i++, dc++) { 458 if (dc->vr == 0) { 459 dc->vr = vr; 460 dc->cookie = vr->cookie; 461 MUTEX_EXIT(&nhd->lock); 462 return (dc); 463 } 464 } 465 MUTEX_EXIT(&nhd->lock); 466 467 return (0); 468 } 469 470 /* 471 * res_map_parse 472 * 473 * Parse a resource map. The resources are DMA channels, receive 474 * or transmit, depending on <type>. 475 * 476 * Arguments: 477 * nxge 478 * type Transmit or receive. 479 * res_map The resource map to parse. 480 * 481 * Notes: 482 * 483 * Context: 484 * Guest domain 485 */ 486 void 487 res_map_parse( 488 nxge_t *nxge, 489 nxge_grp_type_t type, 490 uint64_t res_map) 491 { 492 uint8_t slots, mask, slot; 493 int first, count; 494 495 nxge_hw_pt_cfg_t *hardware; 496 nxge_grp_t *group; 497 498 /* Slots are numbered 0 - 7. */ 499 slots = (uint8_t)(res_map & 0xff); 500 501 /* Count the number of bits in the bitmap. */ 502 for (slot = 0, count = 0, mask = 1; slot < 8; slot++) { 503 if (slots & mask) 504 count++; 505 if (count == 1) 506 first = slot; 507 mask <<= 1; 508 } 509 510 hardware = &nxge->pt_config.hw_config; 511 group = (type == NXGE_TRANSMIT_GROUP) ? 512 nxge->tx_set.group[0] : nxge->rx_set.group[0]; 513 514 /* 515 * A guest domain has one Tx & one Rx group, so far. 516 * In the future, there may be more than one. 517 */ 518 if (type == NXGE_TRANSMIT_GROUP) { 519 nxge_dma_pt_cfg_t *port = &nxge->pt_config; 520 521 hardware->tdc.start = first; 522 hardware->tdc.count = count; 523 hardware->tdc.owned = count; 524 525 group->map = slots; 526 527 /* 528 * Pointless in a guest domain. This bitmap is used 529 * in only one place: nxge_txc_init(), 530 * a service-domain-only function. 531 */ 532 port->tx_dma_map = slots; 533 534 nxge->tx_set.owned.map |= slots; 535 } else { 536 nxge_rdc_grp_t *rdc_grp = &nxge->pt_config.rdc_grps[0]; 537 538 hardware->start_rdc = first; 539 hardware->max_rdcs = count; 540 541 rdc_grp->start_rdc = (uint8_t)first; 542 rdc_grp->max_rdcs = (uint8_t)count; 543 rdc_grp->def_rdc = (uint8_t)first; 544 545 rdc_grp->map = slots; 546 group->map = slots; 547 548 nxge->rx_set.owned.map |= slots; 549 } 550 } 551 552 /* 553 * nxge_hio_vr_release 554 * 555 * Release a virtualization region (VR). 556 * 557 * Arguments: 558 * nxge 559 * 560 * Notes: 561 * We must uninitialize all DMA channels associated with the VR, too. 562 * 563 * The service domain will re-initialize these DMA channels later. 564 * See nxge_hio.c:nxge_hio_share_free() for details. 565 * 566 * Context: 567 * Guest domain 568 */ 569 int 570 nxge_hio_vr_release( 571 nxge_t *nxge) 572 { 573 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_hio_vr_release")); 574 575 /* 576 * Uninitialize interrupts. 577 */ 578 nxge_hio_intr_uninit(nxge); 579 580 /* 581 * Uninitialize the receive DMA channels. 582 */ 583 nxge_uninit_rxdma_channels(nxge); 584 585 /* 586 * Uninitialize the transmit DMA channels. 587 */ 588 nxge_uninit_txdma_channels(nxge); 589 590 /* 591 * Remove both groups. Assumption: only two groups! 592 */ 593 if (nxge->rx_set.group[0] != NULL) 594 nxge_grp_remove(nxge, nxge->rx_set.group[0]); 595 if (nxge->tx_set.group[0] != NULL) 596 nxge_grp_remove(nxge, nxge->tx_set.group[0]); 597 598 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_hio_vr_release")); 599 600 return (NXGE_OK); 601 } 602 603 #if defined(NIU_LP_WORKAROUND) 604 /* 605 * nxge_tdc_lp_conf 606 * 607 * Configure the logical pages for a TDC. 608 * 609 * Arguments: 610 * nxge 611 * channel The TDC to configure. 612 * 613 * Notes: 614 * 615 * Context: 616 * Guest domain 617 */ 618 nxge_status_t 619 nxge_tdc_lp_conf( 620 p_nxge_t nxge, 621 int channel) 622 { 623 nxge_hio_dc_t *dc; 624 nxge_dma_common_t *data; 625 nxge_dma_common_t *control; 626 tx_ring_t *ring; 627 628 uint64_t hv_rv; 629 uint64_t ra, size; 630 631 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_tdc_lp_conf")); 632 633 ring = nxge->tx_rings->rings[channel]; 634 635 if (ring->hv_set) { 636 /* This shouldn't happen. */ 637 return (NXGE_OK); 638 } 639 640 if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_TX, channel))) 641 return (NXGE_ERROR); 642 643 /* 644 * Initialize logical page 0 for data buffers. 645 * 646 * <orig_ioaddr_pp> & <orig_alength> are initialized in 647 * nxge_main.c:nxge_dma_mem_alloc(). 648 */ 649 data = nxge->tx_buf_pool_p->dma_buf_pool_p[channel]; 650 ring->hv_tx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp; 651 ring->hv_tx_buf_ioaddr_size = (uint64_t)data->orig_alength; 652 653 hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie, 654 (uint64_t)channel, 0, 655 ring->hv_tx_buf_base_ioaddr_pp, 656 ring->hv_tx_buf_ioaddr_size); 657 658 if (hv_rv != 0) { 659 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 660 "<== nxge_tdc_lp_conf: channel %d " 661 "(page 0 data buf) hv: %d " 662 "ioaddr_pp $%p size 0x%llx ", 663 channel, hv_rv, 664 ring->hv_tx_buf_base_ioaddr_pp, 665 ring->hv_tx_buf_ioaddr_size)); 666 return (NXGE_ERROR | hv_rv); 667 } 668 669 ra = size = 0; 670 hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie, 671 (uint64_t)channel, 0, &ra, &size); 672 673 NXGE_DEBUG_MSG((nxge, HIO_CTL, 674 "==> nxge_tdc_lp_conf: channel %d " 675 "(page 0 data buf) hv_rv 0x%llx " 676 "set ioaddr_pp $%p set size 0x%llx " 677 "get ra ioaddr_pp $%p get size 0x%llx ", 678 channel, hv_rv, ring->hv_tx_buf_base_ioaddr_pp, 679 ring->hv_tx_buf_ioaddr_size, ra, size)); 680 681 /* 682 * Initialize logical page 1 for control buffers. 683 */ 684 control = nxge->tx_cntl_pool_p->dma_buf_pool_p[channel]; 685 ring->hv_tx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp; 686 ring->hv_tx_cntl_ioaddr_size = (uint64_t)control->orig_alength; 687 688 hv_rv = hv_niu_vrtx_logical_page_conf(dc->cookie, 689 (uint64_t)channel, (uint64_t)1, 690 ring->hv_tx_cntl_base_ioaddr_pp, 691 ring->hv_tx_cntl_ioaddr_size); 692 693 if (hv_rv != 0) { 694 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 695 "<== nxge_tdc_lp_conf: channel %d " 696 "(page 1 cntl buf) hv_rv 0x%llx " 697 "ioaddr_pp $%p size 0x%llx ", 698 channel, hv_rv, 699 ring->hv_tx_cntl_base_ioaddr_pp, 700 ring->hv_tx_cntl_ioaddr_size)); 701 return (NXGE_ERROR | hv_rv); 702 } 703 704 ra = size = 0; 705 hv_rv = hv_niu_vrtx_logical_page_info(dc->cookie, 706 (uint64_t)channel, (uint64_t)1, &ra, &size); 707 708 NXGE_DEBUG_MSG((nxge, HIO_CTL, 709 "==> nxge_tdc_lp_conf: channel %d " 710 "(page 1 cntl buf) hv_rv 0x%llx " 711 "set ioaddr_pp $%p set size 0x%llx " 712 "get ra ioaddr_pp $%p get size 0x%llx ", 713 channel, hv_rv, ring->hv_tx_cntl_base_ioaddr_pp, 714 ring->hv_tx_cntl_ioaddr_size, ra, size)); 715 716 ring->hv_set = B_TRUE; 717 718 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_tdc_lp_conf")); 719 720 return (NXGE_OK); 721 } 722 723 /* 724 * nxge_rdc_lp_conf 725 * 726 * Configure an RDC's logical pages. 727 * 728 * Arguments: 729 * nxge 730 * channel The RDC to configure. 731 * 732 * Notes: 733 * 734 * Context: 735 * Guest domain 736 */ 737 nxge_status_t 738 nxge_rdc_lp_conf( 739 p_nxge_t nxge, 740 int channel) 741 { 742 nxge_hio_dc_t *dc; 743 nxge_dma_common_t *data; 744 nxge_dma_common_t *control; 745 rx_rbr_ring_t *ring; 746 747 uint64_t hv_rv; 748 uint64_t ra, size; 749 750 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_rdc_lp_conf")); 751 752 ring = nxge->rx_rbr_rings->rbr_rings[channel]; 753 754 if (ring->hv_set) { 755 return (NXGE_OK); 756 } 757 758 if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) 759 return (NXGE_ERROR); 760 761 /* 762 * Initialize logical page 0 for data buffers. 763 * 764 * <orig_ioaddr_pp> & <orig_alength> are initialized in 765 * nxge_main.c:nxge_dma_mem_alloc(). 766 */ 767 data = nxge->rx_buf_pool_p->dma_buf_pool_p[channel]; 768 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)data->orig_ioaddr_pp; 769 ring->hv_rx_buf_ioaddr_size = (uint64_t)data->orig_alength; 770 771 hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie, 772 (uint64_t)channel, 0, 773 ring->hv_rx_buf_base_ioaddr_pp, 774 ring->hv_rx_buf_ioaddr_size); 775 776 if (hv_rv != 0) { 777 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 778 "<== nxge_rdc_lp_conf: channel %d " 779 "(page 0 data buf) hv_rv 0x%llx " 780 "ioaddr_pp $%p size 0x%llx ", 781 channel, hv_rv, 782 ring->hv_rx_buf_base_ioaddr_pp, 783 ring->hv_rx_buf_ioaddr_size)); 784 return (NXGE_ERROR | hv_rv); 785 } 786 787 ra = size = 0; 788 hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie, 789 (uint64_t)channel, 0, &ra, &size); 790 791 NXGE_DEBUG_MSG((nxge, HIO_CTL, 792 "==> nxge_rdc_lp_conf: channel %d " 793 "(page 0 data buf) hv_rv 0x%llx " 794 "set ioaddr_pp $%p set size 0x%llx " 795 "get ra ioaddr_pp $%p get size 0x%llx ", 796 channel, hv_rv, ring->hv_rx_buf_base_ioaddr_pp, 797 ring->hv_rx_buf_ioaddr_size, ra, size)); 798 799 /* 800 * Initialize logical page 1 for control buffers. 801 */ 802 control = nxge->rx_cntl_pool_p->dma_buf_pool_p[channel]; 803 ring->hv_rx_cntl_base_ioaddr_pp = (uint64_t)control->orig_ioaddr_pp; 804 ring->hv_rx_cntl_ioaddr_size = (uint64_t)control->orig_alength; 805 806 hv_rv = hv_niu_vrrx_logical_page_conf(dc->cookie, 807 (uint64_t)channel, (uint64_t)1, 808 ring->hv_rx_cntl_base_ioaddr_pp, 809 ring->hv_rx_cntl_ioaddr_size); 810 811 if (hv_rv != 0) { 812 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 813 "<== nxge_rdc_lp_conf: channel %d " 814 "(page 1 cntl buf) hv_rv 0x%llx " 815 "ioaddr_pp $%p size 0x%llx ", 816 channel, hv_rv, 817 ring->hv_rx_cntl_base_ioaddr_pp, 818 ring->hv_rx_cntl_ioaddr_size)); 819 return (NXGE_ERROR | hv_rv); 820 } 821 822 ra = size = 0; 823 hv_rv = hv_niu_vrrx_logical_page_info(dc->cookie, 824 (uint64_t)channel, (uint64_t)1, &ra, &size); 825 826 NXGE_DEBUG_MSG((nxge, HIO_CTL, 827 "==> nxge_rdc_lp_conf: channel %d " 828 "(page 1 cntl buf) hv_rv 0x%llx " 829 "set ioaddr_pp $%p set size 0x%llx " 830 "get ra ioaddr_pp $%p get size 0x%llx ", 831 channel, hv_rv, ring->hv_rx_cntl_base_ioaddr_pp, 832 ring->hv_rx_cntl_ioaddr_size, ra, size)); 833 834 ring->hv_set = B_TRUE; 835 836 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_rdc_lp_conf")); 837 838 return (NXGE_OK); 839 } 840 #endif /* defined(NIU_LP_WORKAROUND) */ 841 842 /* 843 * This value is in milliseconds. 844 */ 845 #define NXGE_GUEST_TIMER 500 /* 1/2 second, for now */ 846 847 /* 848 * nxge_hio_start_timer 849 * 850 * Start the timer which checks for Tx hangs. 851 * 852 * Arguments: 853 * nxge 854 * 855 * Notes: 856 * This function is called from nxge_attach(). 857 * 858 * This function kicks off the guest domain equivalent of 859 * nxge_check_hw_state(). It is called only once, from attach. 860 * 861 * Context: 862 * Guest domain 863 */ 864 void 865 nxge_hio_start_timer( 866 nxge_t *nxge) 867 { 868 nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 869 nxge_hio_vr_t *vr; 870 int region; 871 872 NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_timer_start")); 873 874 MUTEX_ENTER(&nhd->lock); 875 876 /* 877 * Find our VR data structure. (We are currently assuming 878 * one VR per guest domain. That may change in the future.) 879 */ 880 for (region = FUNC0_VIR0; region < NXGE_VR_SR_MAX; region++) { 881 if (nhd->vr[region].nxge == (uintptr_t)nxge) 882 break; 883 } 884 885 MUTEX_EXIT(&nhd->lock); 886 887 if (region == NXGE_VR_SR_MAX) { 888 return; 889 } 890 891 vr = (nxge_hio_vr_t *)&nhd->vr[region]; 892 893 nxge->nxge_timerid = timeout((void(*)(void *))nxge_check_guest_state, 894 (void *)vr, drv_usectohz(1000 * NXGE_GUEST_TIMER)); 895 896 NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_timer_start")); 897 } 898 899 /* 900 * nxge_check_guest_state 901 * 902 * Essentially, check for Tx hangs. In the future, if we are 903 * polling the hardware, we may do so here. 904 * 905 * Arguments: 906 * vr The virtualization region (VR) data structure. 907 * 908 * Notes: 909 * This function is the guest domain equivalent of 910 * nxge_check_hw_state(). Since we have no hardware to 911 * check, we simply call nxge_check_tx_hang(). 912 * 913 * Context: 914 * Guest domain 915 */ 916 void 917 nxge_check_guest_state( 918 nxge_hio_vr_t *vr) 919 { 920 nxge_t *nxge = (nxge_t *)vr->nxge; 921 922 NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "==> nxge_check_guest_state")); 923 924 MUTEX_ENTER(nxge->genlock); 925 926 nxge->nxge_timerid = 0; 927 928 if (nxge->nxge_mac_state == NXGE_MAC_STARTED) { 929 nxge_check_tx_hang(nxge); 930 931 nxge->nxge_timerid = timeout((void(*)(void *)) 932 nxge_check_guest_state, (caddr_t)vr, 933 drv_usectohz(1000 * NXGE_GUEST_TIMER)); 934 } 935 936 nxge_check_guest_state_exit: 937 MUTEX_EXIT(nxge->genlock); 938 NXGE_DEBUG_MSG((nxge, SYSERR_CTL, "<== nxge_check_guest_state")); 939 } 940 941 #endif /* defined(sun4v) */ 942