/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_hio.c
 *
 * This file manages the virtualization resources for Neptune
 * devices.  That is, it implements a hybrid I/O (HIO) approach in the
 * Solaris kernel, whereby a guest domain on an LDOMs server may
 * request & use hardware resources from the service domain.
 */

#include <sys/mac_provider.h>
#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_fzc.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>

/*
 * External prototypes
 */
extern npi_status_t npi_rxdma_dump_rdc_table(npi_handle_t, uint8_t);

/* The following functions may be found in nxge_main.c */
extern int nxge_m_mmac_remove(void *arg, int slot);
extern int nxge_m_mmac_add_g(void *arg, const uint8_t *maddr, int rdctbl,
	boolean_t usetbl);
extern int nxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num);

/* The following functions may be found in nxge_[t|r]xdma.c */
extern npi_status_t nxge_txdma_channel_disable(nxge_t *, int);
extern nxge_status_t nxge_disable_rxdma_channel(nxge_t *, uint16_t);

/*
 * Local prototypes
 */
static void nxge_grp_dc_append(nxge_t *, nxge_grp_t *, nxge_hio_dc_t *);
static nxge_hio_dc_t *nxge_grp_dc_unlink(nxge_t *, nxge_grp_t *, int);
static void nxge_grp_dc_map(nxge_grp_t *group);

/*
 * These functions are used by both service & guest domains to
 * decide whether they're running in an LDOMs/XEN environment
 * or not.  If so, then the Hybrid I/O (HIO) module is initialized.
 */

/*
 * nxge_get_environs
 *
 * Figure out if we are in a guest domain or not.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
void
nxge_get_environs(
	nxge_t *nxge)
{
	char *string;

	/*
	 * In the beginning, assume that we are running sans LDOMs/XEN.
	 */
	nxge->environs = SOLARIS_DOMAIN;

	/*
	 * Are we a hybrid I/O (HIO) guest domain driver?
	 */
	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, nxge->dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "niutype", &string)) == DDI_PROP_SUCCESS) {
		if (strcmp(string, "n2niu") == 0) {
			nxge->environs = SOLARIS_GUEST_DOMAIN;
			/* So we can allocate properly-aligned memory. */
			nxge->niu_type = N2_NIU;
			NXGE_DEBUG_MSG((nxge, HIO_CTL,
			    "Hybrid IO-capable guest domain"));
		}
		ddi_prop_free(string);
	}
}

#if !defined(sun4v)

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the non-hybrid I/O version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == NULL) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		nhd->type = NXGE_HIO_TYPE_SERVICE;
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	/*
	 * Initialize share and ring group structures.
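	 * Even without LDOMs/XEN support compiled in, the MAC layer's
	 * ring group callbacks reference this state, so it is set up
	 * unconditionally.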
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		nxge->rx_hio_groups[i].rdctbl = -1;
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	nhd->hio.ldoms = B_FALSE;

	return (NXGE_OK);
}

#endif

void
nxge_hio_uninit(nxge_t *nxge)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;

	ASSERT(nxge->nxge_hw_p->ndevs == 0);

	if (nhd != NULL) {
		MUTEX_DESTROY(&nhd->lock);
		KMEM_FREE(nhd, sizeof (*nhd));
		nxge->nxge_hw_p->hio = 0;
	}
}

/*
 * nxge_dci_map
 *
 * Map a DMA channel index to a channel number.
 *
 * Arguments:
 * 	nxge
 * 	type		The type of channel this is: Tx or Rx.
 * 	index		The index to convert to a channel number.
 *
 * Notes:
 *	This function is called by nxge_ndd.c:nxge_param_set_port_rdc()
 *
 * Context:
 *	Any domain
 */
int
nxge_dci_map(
	nxge_t *nxge,
	vpc_type_t type,
	int index)
{
	nxge_grp_set_t *set;
	int dc;

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		break;
	default:
		/* Unknown type; don't walk an uninitialized set. */
		return (-1);
	}

	for (dc = 0; dc < NXGE_MAX_TDCS; dc++) {
		if ((1 << dc) & set->owned.map) {
			if (index == 0)
				return (dc);
			else
				index--;
		}
	}

	return (-1);
}
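
/*
 * A worked example of the index-to-channel walk above (illustrative
 * values only): if this instance owns RDCs 4 through 7, then
 * set->owned.map is 0xf0 and nxge_dci_map(nxge, VP_BOUND_RX, 1)
 * returns 5 -- the second owned channel, counting up from the least
 * significant bit set.
 */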

/*
 * ---------------------------------------------------------------------
 * These are the general-purpose DMA channel group functions. That is,
 * these functions are used to manage groups of TDCs or RDCs in an HIO
 * environment.
 *
 * But it is also expected that in the future they will be able to
 * manage Crossbow groups.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_grp_cleanup(p_nxge_t nxge)
 *
 * Remove all outstanding groups.
 *
 * Arguments:
 *	nxge
 */
void
nxge_grp_cleanup(p_nxge_t nxge)
{
	nxge_grp_set_t *set;
	int i;

	MUTEX_ENTER(&nxge->group_lock);

	/*
	 * Find RX groups that need to be cleaned up.
	 */
	set = &nxge->rx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}

	/*
	 * Find TX groups that need to be cleaned up.
	 */
	set = &nxge->tx_set;
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] != NULL) {
			KMEM_FREE(set->group[i], sizeof (nxge_grp_t));
			set->group[i] = NULL;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);
}

/*
 * nxge_grp_add
 *
 * Add a group to an instance of NXGE.
 *
 * Arguments:
 * 	nxge
 * 	type	Tx or Rx
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_grp_t *
nxge_grp_add(
	nxge_t *nxge,
	nxge_grp_type_t type)
{
	nxge_grp_set_t *set;
	nxge_grp_t *group;
	int i;

	group = KMEM_ZALLOC(sizeof (*group), KM_SLEEP);
	group->nxge = nxge;

	MUTEX_ENTER(&nxge->group_lock);
	switch (type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	group->type = type;
	group->active = B_TRUE;
	group->sequence = set->sequence++;

	/* Find an empty slot for this logical group. */
	for (i = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if (set->group[i] == 0) {
			group->index = i;
			set->group[i] = group;
			NXGE_DC_SET(set->lg.map, i);
			set->lg.count++;
			break;
		}
	}
	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_add: %cgroup = %d.%d",
	    type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	return (group);
}

void
nxge_grp_remove(
	nxge_t *nxge,
	nxge_grp_t *group)	/* The group to remove. */
{
	nxge_grp_set_t *set;
	vpc_type_t type;

	if (group == NULL)
		return;

	MUTEX_ENTER(&nxge->group_lock);
	switch (group->type) {
	case NXGE_TRANSMIT_GROUP:
	case EXT_TRANSMIT_GROUP:
		set = &nxge->tx_set;
		break;
	default:
		set = &nxge->rx_set;
		break;
	}

	if (set->group[group->index] != group) {
		MUTEX_EXIT(&nxge->group_lock);
		return;
	}

	set->group[group->index] = 0;
	NXGE_DC_RESET(set->lg.map, group->index);
	set->lg.count--;

	/* While inside the mutex, deactivate <group>. */
	group->active = B_FALSE;

	MUTEX_EXIT(&nxge->group_lock);

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_remove(%c.%d.%d) called",
	    group->type == NXGE_TRANSMIT_GROUP ? 't' : 'r',
	    nxge->mac.portnum, group->sequence));

	/* Now, remove any DCs which are still active. */
	switch (group->type) {
	default:
		type = VP_BOUND_TX;
		break;
	case NXGE_RECEIVE_GROUP:
	case EXT_RECEIVE_GROUP:
		type = VP_BOUND_RX;
		break;
	}

	while (group->dc) {
		nxge_grp_dc_remove(nxge, type, group->dc->channel);
	}

	KMEM_FREE(group, sizeof (*group));
}

/*
 * nxge_grp_dc_add
 *
 * Add a DMA channel to a VR/Group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to add <channel> to.
 * 	type	Rx or Tx.
 * 	channel	The channel to add.
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
/* ARGSUSED */
int
nxge_grp_dc_add(
	nxge_t *nxge,
	nxge_grp_t *group,	/* The group to add <channel> to. */
	vpc_type_t type,	/* Rx or Tx */
	int channel)		/* A physical/logical channel number */
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_status_t status = NXGE_OK;
	int error = 0;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_add"));

	if (group == 0)
		return (0);

	switch (type) {
	case VP_BOUND_TX:
		set = &nxge->tx_set;
		if (channel > NXGE_MAX_TDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: TDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;
	case VP_BOUND_RX:
		set = &nxge->rx_set;
		if (channel > NXGE_MAX_RDCS) {
			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
			    "nxge_grp_dc_add: RDC = %d", channel));
			return (NXGE_ERROR);
		}
		break;

	default:
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add: unknown type channel(%d)", channel));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_grp_dc_add: %cgroup = %d.%d.%d, channel = %d",
	    type == VP_BOUND_TX ? 't' : 'r',
	    nxge->mac.portnum, group->sequence, group->count, channel));

	MUTEX_ENTER(&nxge->group_lock);
	if (group->active != B_TRUE) {
		/* We may be in the process of removing this group. */
		MUTEX_EXIT(&nxge->group_lock);
		return (NXGE_ERROR);
	}
	MUTEX_EXIT(&nxge->group_lock);

	if (!(dc = nxge_grp_dc_find(nxge, type, channel))) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): DC FIND failed", channel));
		return (NXGE_ERROR);
	}

	MUTEX_ENTER(&nhd->lock);

	if (dc->group) {
		MUTEX_EXIT(&nhd->lock);
		/* This channel is already in use! */
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel already in group", channel));
		return (NXGE_ERROR);
	}

	dc->next = 0;
	dc->page = channel;
	dc->channel = (nxge_channel_t)channel;

	dc->type = type;
	if (type == VP_BOUND_RX) {
		dc->init = nxge_init_rxdma_channel;
		dc->uninit = nxge_uninit_rxdma_channel;
	} else {
		dc->init = nxge_init_txdma_channel;
		dc->uninit = nxge_uninit_txdma_channel;
	}

	dc->group = group;

	if (isLDOMguest(nxge)) {
		error = nxge_hio_ldsv_add(nxge, dc);
		if (error != 0) {
			MUTEX_EXIT(&nhd->lock);
			return (NXGE_ERROR);
		}
	}

	NXGE_DC_SET(set->owned.map, channel);
	set->owned.count++;

	MUTEX_EXIT(&nhd->lock);

	if ((status = (*dc->init)(nxge, channel)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_add(%d): channel init failed", channel));
		MUTEX_ENTER(&nhd->lock);
		(void) memset(dc, 0, sizeof (*dc));
		NXGE_DC_RESET(set->owned.map, channel);
		set->owned.count--;
		MUTEX_EXIT(&nhd->lock);
		return (NXGE_ERROR);
	}

	nxge_grp_dc_append(nxge, group, dc);

	if (type == VP_BOUND_TX) {
		MUTEX_ENTER(&nhd->lock);
		nxge->tdc_is_shared[channel] = B_FALSE;
		MUTEX_EXIT(&nhd->lock);
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_add"));

	return ((int)status);
}

void
nxge_grp_dc_remove(
	nxge_t *nxge,
	vpc_type_t type,
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *dc;
	nxge_grp_set_t *set;
	nxge_grp_t *group;

	dc_uninit_t uninit;

	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_grp_dc_remove"));

	if ((dc = nxge_grp_dc_find(nxge, type, channel)) == 0)
		goto nxge_grp_dc_remove_exit;

	if ((dc->group == NULL) && (dc->next == 0) &&
	    (dc->channel == 0) && (dc->page == 0) && (dc->type == 0)) {
		goto nxge_grp_dc_remove_exit;
	}

	group = (nxge_grp_t *)dc->group;

	if (isLDOMguest(nxge)) {
		(void) nxge_hio_intr_remove(nxge, type, channel);
	}
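
	/*
	 * Tear-down order: unlink the channel from its group and clear
	 * its bookkeeping under nhd->lock; only then call its uninit
	 * routine, with no locks held.
	 */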
	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "DC remove: group = %d.%d.%d, %cdc %d",
	    nxge->mac.portnum, group->sequence, group->count,
	    type == VP_BOUND_TX ? 't' : 'r', dc->channel));

	MUTEX_ENTER(&nhd->lock);

	set = dc->type == VP_BOUND_TX ? &nxge->tx_set : &nxge->rx_set;

	/* Remove the DC from its group. */
	if (nxge_grp_dc_unlink(nxge, group, channel) != dc) {
		MUTEX_EXIT(&nhd->lock);
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "nxge_grp_dc_remove(%d) failed", channel));
		goto nxge_grp_dc_remove_exit;
	}

	uninit = dc->uninit;
	channel = dc->channel;

	NXGE_DC_RESET(set->owned.map, channel);
	set->owned.count--;

	(void) memset(dc, 0, sizeof (*dc));

	MUTEX_EXIT(&nhd->lock);

	(*uninit)(nxge, channel);

nxge_grp_dc_remove_exit:
	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_grp_dc_remove"));
}

nxge_hio_dc_t *
nxge_grp_dc_find(
	nxge_t *nxge,
	vpc_type_t type,	/* Rx or Tx */
	int channel)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	nxge_hio_dc_t *current;

	current = (type == VP_BOUND_TX) ? &nhd->tdc[0] : &nhd->rdc[0];

	if (!isLDOMguest(nxge)) {
		return (&current[channel]);
	} else {
		/* We're in a guest domain. */
		int i, limit = (type == VP_BOUND_TX) ?
		    NXGE_MAX_TDCS : NXGE_MAX_RDCS;

		MUTEX_ENTER(&nhd->lock);
		for (i = 0; i < limit; i++, current++) {
			if (current->channel == channel) {
				if (current->vr && current->vr->nxge ==
				    (uintptr_t)nxge) {
					MUTEX_EXIT(&nhd->lock);
					return (current);
				}
			}
		}
		MUTEX_EXIT(&nhd->lock);
	}

	return (0);
}

/*
 * nxge_grp_dc_append
 *
 * Append a DMA channel to a group.
 *
 * Arguments:
 * 	nxge
 * 	group	The group to append to
 * 	dc	The DMA channel to append
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
static
void
nxge_grp_dc_append(
	nxge_t *nxge,
	nxge_grp_t *group,
	nxge_hio_dc_t *dc)
{
	MUTEX_ENTER(&nxge->group_lock);

	if (group->dc == 0) {
		group->dc = dc;
	} else {
		nxge_hio_dc_t *current = group->dc;
		do {
			if (current->next == 0) {
				current->next = dc;
				break;
			}
			current = current->next;
		} while (current);
	}

	NXGE_DC_SET(group->map, dc->channel);

	nxge_grp_dc_map(group);
	group->count++;

	MUTEX_EXIT(&nxge->group_lock);
}
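
/*
 * A small worked example of the bookkeeping above (illustrative
 * channel numbers only): appending channels 3 and 5 to an empty group
 * leaves the list as group->dc -> dc(3) -> dc(5), sets group->map to
 * 0x28 (bits 3 and 5), and nxge_grp_dc_map() rebuilds group->legend[]
 * as { 3, 5, 0, ... }.
 */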

/*
 * nxge_grp_dc_unlink
 *
 * Unlink a DMA channel from its linked list (group).
 *
 * Arguments:
 * 	nxge
 * 	group	The group (linked list) to unlink from
 * 	channel	The DMA channel to unlink
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
nxge_hio_dc_t *
nxge_grp_dc_unlink(
	nxge_t *nxge,
	nxge_grp_t *group,
	int channel)
{
	nxge_hio_dc_t *current, *previous;

	MUTEX_ENTER(&nxge->group_lock);

	if (group == NULL) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	if ((current = group->dc) == 0) {
		MUTEX_EXIT(&nxge->group_lock);
		return (0);
	}

	previous = 0;
	do {
		if (current->channel == channel) {
			if (previous)
				previous->next = current->next;
			else
				group->dc = current->next;
			break;
		}
		previous = current;
		current = current->next;
	} while (current);

	if (current == 0) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "DC unlink: DC %d not found", channel));
	} else {
		current->next = 0;
		current->group = 0;

		NXGE_DC_RESET(group->map, channel);
		group->count--;
	}

	nxge_grp_dc_map(group);

	MUTEX_EXIT(&nxge->group_lock);

	return (current);
}

/*
 * nxge_grp_dc_map
 *
 * Map a linked list to an array of channel numbers.
 *
 * Arguments:
 * 	group	The group to remap.
 *
 * Notes:
 *	It is expected that the caller will hold the correct mutex.
 *
 * Context:
 *	Service domain
 */
void
nxge_grp_dc_map(
	nxge_grp_t *group)
{
	nxge_channel_t *legend;
	nxge_hio_dc_t *dc;

	(void) memset(group->legend, 0, sizeof (group->legend));

	legend = group->legend;
	dc = group->dc;
	while (dc) {
		*legend = dc->channel;
		legend++;
		dc = dc->next;
	}
}

/*
 * ---------------------------------------------------------------------
 * These are HIO debugging functions.
 * ---------------------------------------------------------------------
 */

/*
 * nxge_delay
 *
 * Delay <seconds> number of seconds.
 *
 * Arguments:
 * 	seconds	The number of seconds to delay.
 *
 * Notes:
 *	This is a developer-only function.
 *
 * Context:
 *	Any domain
 */
void
nxge_delay(
	int seconds)
{
	delay(drv_usectohz(seconds * 1000000));
}

static dmc_reg_name_t rx_names[] = {
	{ "RXDMA_CFIG1", 0 },
	{ "RXDMA_CFIG2", 8 },
	{ "RBR_CFIG_A", 0x10 },
	{ "RBR_CFIG_B", 0x18 },
	{ "RBR_KICK", 0x20 },
	{ "RBR_STAT", 0x28 },
	{ "RBR_HDH", 0x30 },
	{ "RBR_HDL", 0x38 },
	{ "RCRCFIG_A", 0x40 },
	{ "RCRCFIG_B", 0x48 },
	{ "RCRSTAT_A", 0x50 },
	{ "RCRSTAT_B", 0x58 },
	{ "RCRSTAT_C", 0x60 },
	{ "RX_DMA_ENT_MSK", 0x68 },
	{ "RX_DMA_CTL_STAT", 0x70 },
	{ "RCR_FLSH", 0x78 },
	{ "RXMISC", 0x90 },
	{ "RX_DMA_CTL_STAT_DBG", 0x98 },
	{ 0, -1 }
};

static dmc_reg_name_t tx_names[] = {
	{ "Tx_RNG_CFIG", 0 },
	{ "Tx_RNG_HDL", 0x10 },
	{ "Tx_RNG_KICK", 0x18 },
	{ "Tx_ENT_MASK", 0x20 },
	{ "Tx_CS", 0x28 },
	{ "TxDMA_MBH", 0x30 },
	{ "TxDMA_MBL", 0x38 },
	{ "TxDMA_PRE_ST", 0x40 },
	{ "Tx_RNG_ERR_LOGH", 0x48 },
	{ "Tx_RNG_ERR_LOGL", 0x50 },
	{ "TDMC_INTR_DBG", 0x60 },
	{ "Tx_CS_DBG", 0x68 },
	{ 0, -1 }
};
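
/*
 * Usage sketch (illustrative only): the tables above let the
 * translators below turn a masked CSR offset into a register name,
 * e.g. nxge_rx2str(0x70) returns "RX_DMA_CTL_STAT" and
 * nxge_tx2str(0x28) returns "Tx_CS".  Offsets are masked with
 * DMA_CSR_MASK first, so a per-channel register address resolves to
 * the name of its base offset.
 */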

/*
 * nxge_xx2str
 *
 * Translate a register address into a string.
 *
 * Arguments:
 * 	offset	The address of the register to translate.
 *
 * Notes:
 *	These are developer-only functions.
 *
 * Context:
 *	Any domain
 */
const char *
nxge_rx2str(
	int offset)
{
	dmc_reg_name_t *reg = &rx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

const char *
nxge_tx2str(
	int offset)
{
	dmc_reg_name_t *reg = &tx_names[0];

	offset &= DMA_CSR_MASK;

	while (reg->name) {
		if (offset == reg->offset)
			return (reg->name);
		reg++;
	}

	return (0);
}

/*
 * nxge_ddi_perror
 *
 * Map a DDI error number to a string.
 *
 * Arguments:
 * 	ddi_error	The DDI error number to map.
 *
 * Notes:
 *
 * Context:
 *	Any domain
 */
const char *
nxge_ddi_perror(
	int ddi_error)
{
	switch (ddi_error) {
	case DDI_SUCCESS:
		return ("DDI_SUCCESS");
	case DDI_FAILURE:
		return ("DDI_FAILURE");
	case DDI_NOT_WELL_FORMED:
		return ("DDI_NOT_WELL_FORMED");
	case DDI_EAGAIN:
		return ("DDI_EAGAIN");
	case DDI_EINVAL:
		return ("DDI_EINVAL");
	case DDI_ENOTSUP:
		return ("DDI_ENOTSUP");
	case DDI_EPENDING:
		return ("DDI_EPENDING");
	case DDI_ENOMEM:
		return ("DDI_ENOMEM");
	case DDI_EBUSY:
		return ("DDI_EBUSY");
	case DDI_ETRANSPORT:
		return ("DDI_ETRANSPORT");
	case DDI_ECONTEXT:
		return ("DDI_ECONTEXT");
	default:
		return ("Unknown error");
	}
}

/*
 * ---------------------------------------------------------------------
 * These are Sun4v HIO function definitions
 * ---------------------------------------------------------------------
 */

#if defined(sun4v)

/*
 * Local prototypes
 */
static nxge_hio_vr_t *nxge_hio_vr_share(nxge_t *);
static void nxge_hio_unshare(nxge_hio_vr_t *);

static int nxge_hio_addres(nxge_hio_vr_t *, mac_ring_type_t, uint64_t *);
static void nxge_hio_remres(nxge_hio_vr_t *, mac_ring_type_t, res_map_t);

static void nxge_hio_tdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
static void nxge_hio_rdc_unshare(nxge_t *nxge, int dev_grpid, int channel);
static int nxge_hio_dc_share(nxge_t *, nxge_hio_vr_t *, mac_ring_type_t, int);
static void nxge_hio_dc_unshare(nxge_t *, nxge_hio_vr_t *,
    mac_ring_type_t, int);

/*
 * nxge_hio_init
 *
 * Initialize the HIO module of the NXGE driver.
 *
 * Arguments:
 * 	nxge
 *
 * Notes:
 *	This is the sun4v (hybrid I/O capable) version of this function.
 *
 * Context:
 *	Any domain
 */
int
nxge_hio_init(nxge_t *nxge)
{
	nxge_hio_data_t *nhd;
	int i, region;

	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	if (nhd == NULL) {
		nhd = KMEM_ZALLOC(sizeof (*nhd), KM_SLEEP);
		MUTEX_INIT(&nhd->lock, NULL, MUTEX_DRIVER, NULL);
		if (isLDOMguest(nxge))
			nhd->type = NXGE_HIO_TYPE_GUEST;
		else
			nhd->type = NXGE_HIO_TYPE_SERVICE;
		nxge->nxge_hw_p->hio = (uintptr_t)nhd;
	}

	if ((nxge->environs == SOLARIS_DOMAIN) &&
	    (nxge->niu_type == N2_NIU)) {
		if (nxge->niu_hsvc_available == B_TRUE) {
			hsvc_info_t *niu_hsvc = &nxge->niu_hsvc;

			/*
			 * Versions supported now are:
			 *	- major number >= 1 (NIU_MAJOR_VER).
			 */
			if ((niu_hsvc->hsvc_major >= NIU_MAJOR_VER) ||
			    (niu_hsvc->hsvc_major == 1 &&
			    niu_hsvc->hsvc_minor == 1)) {
				nxge->environs = SOLARIS_SERVICE_DOMAIN;
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_init: hypervisor services "
				    "version %d.%d",
				    niu_hsvc->hsvc_major,
				    niu_hsvc->hsvc_minor));
			}
		}
	}

	/*
	 * Initialize share and ring group structures.
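	 * A share pairs a virtualization region (VR) with the DMA
	 * channels loaned to a guest domain; the ring groups are the
	 * MAC layer's view of the same hardware.  Both begin unbound.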
	 */
	for (i = 0; i < NXGE_MAX_TDC_GROUPS; i++) {
		nxge->tx_hio_groups[i].ghandle = NULL;
		nxge->tx_hio_groups[i].nxgep = nxge;
		nxge->tx_hio_groups[i].type = MAC_RING_TYPE_TX;
		nxge->tx_hio_groups[i].gindex = 0;
		nxge->tx_hio_groups[i].sindex = 0;
	}

	for (i = 0; i < NXGE_MAX_RDC_GROUPS; i++) {
		nxge->rx_hio_groups[i].ghandle = NULL;
		nxge->rx_hio_groups[i].nxgep = nxge;
		nxge->rx_hio_groups[i].type = MAC_RING_TYPE_RX;
		nxge->rx_hio_groups[i].gindex = 0;
		nxge->rx_hio_groups[i].sindex = 0;
		nxge->rx_hio_groups[i].started = B_FALSE;
		nxge->rx_hio_groups[i].port_default_grp = B_FALSE;
		nxge->rx_hio_groups[i].rdctbl = -1;
		nxge->rx_hio_groups[i].n_mac_addrs = 0;
	}

	if (!isLDOMs(nxge)) {
		nhd->hio.ldoms = B_FALSE;
		return (NXGE_OK);
	}

	nhd->hio.ldoms = B_TRUE;

	/*
	 * Fill in what we can.
	 */
	for (region = 0; region < NXGE_VR_SR_MAX; region++) {
		nhd->vr[region].region = region;
	}
	nhd->vrs = NXGE_VR_SR_MAX - 2;

	/*
	 * Initialize the share structures.
	 */
	for (i = 0; i < NXGE_MAX_TDCS; i++)
		nxge->tdc_is_shared[i] = B_FALSE;

	for (i = 0; i < NXGE_VR_SR_MAX; i++) {
		nxge->shares[i].nxgep = nxge;
		nxge->shares[i].index = 0;
		nxge->shares[i].vrp = NULL;
		nxge->shares[i].tmap = 0;
		nxge->shares[i].rmap = 0;
		nxge->shares[i].rxgroup = 0;
		nxge->shares[i].active = B_FALSE;
	}

	/* Fill in the HV HIO function pointers. */
	nxge_hio_hv_init(nxge);

	if (isLDOMservice(nxge)) {
		NXGE_DEBUG_MSG((nxge, HIO_CTL,
		    "Hybrid IO-capable service domain"));
		return (NXGE_OK);
	}

	return (0);
}
#endif /* defined(sun4v) */
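
/*
 * nxge_hio_group_mac_add
 *
 * Program a unicast MAC address into the RDC table that backs a
 * ring group.
 *
 * Arguments:
 * 	nxge
 * 	g	The ring group to program.
 * 	macaddr	The address to add.
 *
 * Context:
 *	Service domain
 */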
static int
nxge_hio_group_mac_add(nxge_t *nxge, nxge_ring_group_t *g,
    const uint8_t *macaddr)
{
	int rv;
	nxge_rdc_grp_t *group;

	mutex_enter(nxge->genlock);

	/*
	 * Initialize the NXGE RDC table data structure.
	 */
	group = &nxge->pt_config.rdc_grps[g->rdctbl];
	if (!group->flag) {
		group->port = NXGE_GET_PORT_NUM(nxge->function_num);
		group->config_method = RDC_TABLE_ENTRY_METHOD_REP;
		group->flag = B_TRUE;	/* This group has been configured. */
	}

	mutex_exit(nxge->genlock);

	/*
	 * Add the MAC address.
	 */
	if ((rv = nxge_m_mmac_add_g((void *)nxge, macaddr,
	    g->rdctbl, B_TRUE)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	g->n_mac_addrs++;
	mutex_exit(nxge->genlock);
	return (0);
}

static int
nxge_hio_set_unicst(void *arg, const uint8_t *macaddr)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	struct ether_addr addrp;

	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
	if (nxge_set_mac_addr(nxgep, &addrp)) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_m_unicst: set unicast failed"));
		return (EINVAL);
	}

	nxgep->primary = B_TRUE;

	return (0);
}

/*ARGSUSED*/
static int
nxge_hio_clear_unicst(p_nxge_t nxgep, const uint8_t *mac_addr)
{
	nxgep->primary = B_FALSE;
	return (0);
}
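
/*
 * nxge_hio_add_mac
 *
 * The MAC group "addmac" callback for an RX group.
 *
 * Notes:
 *	In a guest domain this is a no-op: the service domain owns the
 *	MAC address filters.  A group bound to a VR may carry only one
 *	address.
 *
 * Context:
 *	Any domain
 */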
static int
nxge_hio_add_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	p_nxge_t nxge = group->nxgep;
	int rv;
	nxge_hio_vr_t *vr;	/* The Virtualization Region */

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	if (!nxge->primary && group->port_default_grp) {
		rv = nxge_hio_set_unicst((void *)nxge, mac_addr);
		mutex_exit(nxge->genlock);
		return (rv);
	}

	/*
	 * If the group is associated with a VR, then only one
	 * address may be assigned to the group.
	 */
	vr = (nxge_hio_vr_t *)nxge->shares[group->sindex].vrp;
	if ((vr != NULL) && (group->n_mac_addrs)) {
		mutex_exit(nxge->genlock);
		return (ENOSPC);
	}

	mutex_exit(nxge->genlock);

	/*
	 * Program the mac address for the group.
	 */
	if ((rv = nxge_hio_group_mac_add(nxge, group, mac_addr)) != 0) {
		return (rv);
	}

	return (0);
}

static int
find_mac_slot(nxge_mmac_t *mmac_info, const uint8_t *mac_addr)
{
	int i;

	for (i = 0; i <= mmac_info->num_mmac; i++) {
		if (memcmp(mmac_info->mac_pool[i].addr, mac_addr,
		    ETHERADDRL) == 0) {
			return (i);
		}
	}
	return (-1);
}

/* ARGSUSED */
static int
nxge_hio_rem_mac(void *arg, const uint8_t *mac_addr)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)arg;
	struct ether_addr addrp;
	p_nxge_t nxge = group->nxgep;
	nxge_mmac_t *mmac_info;
	int rv, slot;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	if (isLDOMguest(group->nxgep))
		return (0);

	mutex_enter(nxge->genlock);

	mmac_info = &nxge->nxge_mmac_info;
	slot = find_mac_slot(mmac_info, mac_addr);
	if (slot < 0) {
		/*
		 * The address is not in the multi-MAC pool.  If it is
		 * the port's primary address, clear it; otherwise there
		 * is nothing to remove.
		 */
		if (group->port_default_grp && nxge->primary) {
			bcopy(mac_addr, (uint8_t *)&addrp, ETHERADDRL);
			if (ether_cmp(&addrp, &nxge->ouraddr) == 0) {
				rv = nxge_hio_clear_unicst(nxge, mac_addr);
				mutex_exit(nxge->genlock);
				return (rv);
			} else {
				mutex_exit(nxge->genlock);
				return (EINVAL);
			}
		} else {
			mutex_exit(nxge->genlock);
			return (EINVAL);
		}
	}

	mutex_exit(nxge->genlock);

	/*
	 * Remove the mac address for the group.
	 */
	if ((rv = nxge_m_mmac_remove(nxge, slot)) != 0) {
		return (rv);
	}

	mutex_enter(nxge->genlock);
	group->n_mac_addrs--;
	mutex_exit(nxge->genlock);

	return (0);
}
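
/*
 * nxge_hio_group_start
 *
 * The MAC group "start" callback for an RX group: bind an RDC table
 * to the group and initialize its FZC state.
 *
 * Notes:
 *	Guest domains have no RDC table to bind; they simply mark the
 *	group as started.
 *
 * Context:
 *	Any domain
 */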
static int
nxge_hio_group_start(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;
	nxge_rdc_grp_t *rdc_grp_p;
	int rdctbl;
	int dev_gindex;

	ASSERT(group->type == MAC_RING_TYPE_RX);
	ASSERT(group->nxgep != NULL);

	ASSERT(group->nxgep->nxge_mac_state == NXGE_MAC_STARTED);
	if (group->nxgep->nxge_mac_state != NXGE_MAC_STARTED)
		return (ENXIO);

	mutex_enter(group->nxgep->genlock);
	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_start_exit;

	dev_gindex = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
	    group->gindex;
	rdc_grp_p = &group->nxgep->pt_config.rdc_grps[dev_gindex];

	/*
	 * Get an rdc table for this group.
	 * Group ID is given by the caller, and that's the group it needs
	 * to bind to.  The default group is already bound when the driver
	 * was attached.
	 *
	 * Group 0's RDC table was allocated at attach time, so there is
	 * no need to allocate a new table for it.
	 */
	if (group->gindex != 0) {
		rdctbl = nxge_fzc_rdc_tbl_bind(group->nxgep,
		    dev_gindex, B_TRUE);
		if (rdctbl < 0) {
			mutex_exit(group->nxgep->genlock);
			return (rdctbl);
		}
	} else {
		rdctbl = group->nxgep->pt_config.hw_config.def_mac_rxdma_grpid;
	}

	group->rdctbl = rdctbl;

	(void) nxge_init_fzc_rdc_tbl(group->nxgep, rdc_grp_p, rdctbl);

nxge_hio_group_start_exit:
	group->started = B_TRUE;
	mutex_exit(group->nxgep->genlock);
	return (0);
}

static void
nxge_hio_group_stop(mac_group_driver_t gdriver)
{
	nxge_ring_group_t *group = (nxge_ring_group_t *)gdriver;

	ASSERT(group->type == MAC_RING_TYPE_RX);

	mutex_enter(group->nxgep->genlock);
	group->started = B_FALSE;

	if (isLDOMguest(group->nxgep))
		goto nxge_hio_group_stop_exit;

	/*
	 * Unbind the RDC table previously bound for this group.
	 *
	 * Since the RDC table for group 0 was allocated at attach
	 * time, no need to unbind the table here.
	 */
	if (group->gindex != 0)
		(void) nxge_fzc_rdc_tbl_unbind(group->nxgep, group->rdctbl);

nxge_hio_group_stop_exit:
	mutex_exit(group->nxgep->genlock);
}

/* ARGSUSED */
void
nxge_hio_group_get(void *arg, mac_ring_type_t type, int groupid,
    mac_group_info_t *infop, mac_group_handle_t ghdl)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	nxge_ring_group_t *group;
	int dev_gindex;

	switch (type) {
	case MAC_RING_TYPE_RX:
		group = &nxgep->rx_hio_groups[groupid];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid;
		group->sindex = 0;	/* not yet bound to a share */

		if (!isLDOMguest(nxgep)) {
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid +
			    groupid;

			if (nxgep->pt_config.hw_config.def_mac_rxdma_grpid ==
			    dev_gindex)
				group->port_default_grp = B_TRUE;

			infop->mgi_count =
			    nxgep->pt_config.rdc_grps[dev_gindex].max_rdcs;
		} else {
			infop->mgi_count = NXGE_HIO_SHARE_MAX_CHANNELS;
		}

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = nxge_hio_group_start;
		infop->mgi_stop = nxge_hio_group_stop;
		infop->mgi_addmac = nxge_hio_add_mac;
		infop->mgi_remmac = nxge_hio_rem_mac;
		break;

	case MAC_RING_TYPE_TX:
		/*
		 * 'groupid' for TX should be incremented by one since
		 * the default group (groupid 0) is not known by the MAC layer
		 */
		group = &nxgep->tx_hio_groups[groupid + 1];
		group->nxgep = nxgep;
		group->ghandle = ghdl;
		group->gindex = groupid + 1;
		group->sindex = 0;	/* not yet bound to a share */

		infop->mgi_driver = (mac_group_driver_t)group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = NULL;	/* not needed */
		infop->mgi_remmac = NULL;	/* not needed */
		/* no rings associated with group initially */
		infop->mgi_count = 0;
		break;
	}
}
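
/*
 * A sketch of how nxge_hio_group_get() is reached (the actual wiring
 * lives in nxge_main.c's MAC_CAPAB_RINGS handling; shown here only
 * for orientation):
 *
 *	cap_rings->mr_gget = nxge_hio_group_get;
 *
 * The MAC layer then calls it once per RX/TX group so the driver can
 * fill in mac_group_info_t with the start/stop/addmac/remmac entry
 * points defined above.
 */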

#if defined(sun4v)

int
nxge_hio_share_assign(
	nxge_t *nxge,
	uint64_t cookie,
	res_map_t *tmap,
	res_map_t *rmap,
	nxge_hio_vr_t *vr)
{
	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
	uint64_t slot, hv_rv;
	nxge_hio_dc_t *dc;
	nxhv_vr_fp_t *fp;
	int i;
	uint64_t major;

	/*
	 * Ask the Hypervisor to set up the VR for us
	 */
	fp = &nhd->hio.vr;
	major = nxge->niu_hsvc.hsvc_major;
	switch (major) {
	case NIU_MAJOR_VER:	/* 1 */
		if ((hv_rv = (*fp->assign)(vr->region, cookie, &vr->cookie))) {
			NXGE_ERROR_MSG((nxge, HIO_CTL,
			    "nxge_hio_share_assign: major %d "
			    "vr->assign() returned %d", major, hv_rv));
			nxge_hio_unshare(vr);
			return (-EIO);
		}

		break;

	case NIU_MAJOR_VER_2:	/* 2 */
	default:
		if ((hv_rv = (*fp->cfgh_assign)
		    (nxge->niu_cfg_hdl, vr->region, cookie, &vr->cookie))) {
			NXGE_ERROR_MSG((nxge, HIO_CTL,
			    "nxge_hio_share_assign: major %d "
			    "vr->cfgh_assign() returned %d", major, hv_rv));
			nxge_hio_unshare(vr);
			return (-EIO);
		}

		break;
	}

	NXGE_DEBUG_MSG((nxge, HIO_CTL,
	    "nxge_hio_share_assign: major %d "
	    "vr->assign() success", major));

	/*
	 * For each shared TDC, ask the HV to find us an empty slot.
	 */
	dc = vr->tx_group.dc;
	for (i = 0; i < NXGE_MAX_TDCS; i++) {
		nxhv_dc_fp_t *tx = &nhd->hio.tx;

		while (dc) {
			hv_rv = (*tx->assign)
			    (vr->cookie, dc->channel, &slot);
			if (hv_rv != 0) {
				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
				    "nxge_hio_share_assign: "
				    "tx->assign(%x, %d) failed: %ld",
				    vr->cookie, dc->channel, hv_rv));
				return (-EIO);
			}

			dc->cookie = vr->cookie;
			dc->page = (vp_channel_t)slot;

			/* Inform the caller about the slot chosen. */
			(*tmap) |= 1 << slot;

			dc = dc->next;
		}
	}
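
	/*
	 * For example (illustrative only): if the HV placed two shared
	 * TDCs into pages 0 and 1 of the VR, *tmap now reads 0x3.
	 */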
1476678453a8Sspeer 	 */
1477678453a8Sspeer 	dc = vr->rx_group.dc;
1478678453a8Sspeer 	for (i = 0; i < NXGE_MAX_RDCS; i++) {
1479678453a8Sspeer 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1480678453a8Sspeer 		while (dc) {
1481678453a8Sspeer 			hv_rv = (*rx->assign)
1482678453a8Sspeer 			    (vr->cookie, dc->channel, &slot);
1483678453a8Sspeer 			if (hv_rv != 0) {
1484678453a8Sspeer 				NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1485e11f0814SMichael Speer 				    "nxge_hio_share_assign: "
1486678453a8Sspeer 				    "rx->assign(%x, %d) failed: %ld",
1487678453a8Sspeer 				    vr->cookie, dc->channel, hv_rv));
1488678453a8Sspeer 				return (-EIO);
1489678453a8Sspeer 			}
1490678453a8Sspeer 
1491678453a8Sspeer 			dc->cookie = vr->cookie;
1492678453a8Sspeer 			dc->page = (vp_channel_t)slot;
1493678453a8Sspeer 
1494678453a8Sspeer 			/* Inform the caller about the slot chosen. */
1495678453a8Sspeer 			(*rmap) |= 1 << slot;
1496678453a8Sspeer 
1497678453a8Sspeer 			dc = dc->next;
1498678453a8Sspeer 		}
1499678453a8Sspeer 	}
1500678453a8Sspeer 
1501678453a8Sspeer 	return (0);
1502678453a8Sspeer }
1503678453a8Sspeer 
1504da14cebeSEric Cheng void
1505678453a8Sspeer nxge_hio_share_unassign(
1506678453a8Sspeer 	nxge_hio_vr_t *vr)
1507678453a8Sspeer {
1508678453a8Sspeer 	nxge_t *nxge = (nxge_t *)vr->nxge;
1509678453a8Sspeer 	nxge_hio_data_t *nhd;
1510678453a8Sspeer 	nxge_hio_dc_t *dc;
1511678453a8Sspeer 	nxhv_vr_fp_t *fp;
1512678453a8Sspeer 	uint64_t hv_rv;
1513678453a8Sspeer 
1514678453a8Sspeer 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1515678453a8Sspeer 
1516678453a8Sspeer 	dc = vr->tx_group.dc;
1517678453a8Sspeer 	while (dc) {
1518678453a8Sspeer 		nxhv_dc_fp_t *tx = &nhd->hio.tx;
1519678453a8Sspeer 		hv_rv = (*tx->unassign)(vr->cookie, dc->page);
1520678453a8Sspeer 		if (hv_rv != 0) {
1521678453a8Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1522e11f0814SMichael Speer 			    "nxge_hio_share_unassign: "
1523678453a8Sspeer 			    "tx->unassign(%x, %d) failed: %ld",
1524678453a8Sspeer 			    vr->cookie, dc->page, hv_rv));
1525678453a8Sspeer 		}
1526678453a8Sspeer 		dc = dc->next;
1527678453a8Sspeer 	}
1528678453a8Sspeer 
1529678453a8Sspeer 	dc = vr->rx_group.dc;
1530678453a8Sspeer 	while (dc) {
1531678453a8Sspeer 		nxhv_dc_fp_t *rx = &nhd->hio.rx;
1532678453a8Sspeer 		hv_rv = (*rx->unassign)(vr->cookie, dc->page);
1533678453a8Sspeer 		if (hv_rv != 0) {
1534678453a8Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1535e11f0814SMichael Speer 			    "nxge_hio_share_unassign: "
1536678453a8Sspeer 			    "rx->unassign(%x, %d) failed: %ld",
1537678453a8Sspeer 			    vr->cookie, dc->page, hv_rv));
1538678453a8Sspeer 		}
1539678453a8Sspeer 		dc = dc->next;
1540678453a8Sspeer 	}
1541678453a8Sspeer 
1542678453a8Sspeer 	fp = &nhd->hio.vr;
1543678453a8Sspeer 	if (fp->unassign) {
1544678453a8Sspeer 		hv_rv = (*fp->unassign)(vr->cookie);
1545678453a8Sspeer 		if (hv_rv != 0) {
1546e11f0814SMichael Speer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1547e11f0814SMichael Speer 			    "nxge_hio_share_unassign: "
1548678453a8Sspeer 			    "vr->unassign(%x) failed: %ld",
1549678453a8Sspeer 			    vr->cookie, hv_rv));
1550678453a8Sspeer 		}
1551678453a8Sspeer 	}
1552678453a8Sspeer }
1553678453a8Sspeer 
1554678453a8Sspeer int
1555da14cebeSEric Cheng nxge_hio_share_alloc(void *arg, mac_share_handle_t *shandle)
1556678453a8Sspeer {
1557678453a8Sspeer 	p_nxge_t nxge = (p_nxge_t)arg;
1558678453a8Sspeer 	nxge_share_handle_t *shp;
1559678453a8Sspeer 	nxge_hio_vr_t *vr;	/* The Virtualization Region */
1560678453a8Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1561678453a8Sspeer 
1562678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_share"));
1563678453a8Sspeer 
1564678453a8Sspeer 	if (nhd->hio.vr.assign == 0 ||
nhd->hio.tx.assign == 0 || 1565678453a8Sspeer nhd->hio.rx.assign == 0) { 1566678453a8Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, "HV assign function(s) NULL")); 1567678453a8Sspeer return (EIO); 1568678453a8Sspeer } 1569678453a8Sspeer 1570678453a8Sspeer /* 1571678453a8Sspeer * Get a VR. 1572678453a8Sspeer */ 15736920a987SMisaki Miyashita if ((vr = nxge_hio_vr_share(nxge)) == 0) 1574678453a8Sspeer return (EAGAIN); 1575678453a8Sspeer 1576678453a8Sspeer shp = &nxge->shares[vr->region]; 1577da14cebeSEric Cheng shp->nxgep = nxge; 1578678453a8Sspeer shp->index = vr->region; 1579678453a8Sspeer shp->vrp = (void *)vr; 1580da14cebeSEric Cheng shp->tmap = shp->rmap = 0; /* to be assigned by ms_sbind */ 1581da14cebeSEric Cheng shp->rxgroup = 0; /* to be assigned by ms_sadd */ 1582da14cebeSEric Cheng shp->active = B_FALSE; /* not bound yet */ 1583678453a8Sspeer 1584678453a8Sspeer *shandle = (mac_share_handle_t)shp; 1585678453a8Sspeer 1586678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_share")); 1587678453a8Sspeer return (0); 1588678453a8Sspeer } 1589678453a8Sspeer 1590da14cebeSEric Cheng 1591678453a8Sspeer void 1592678453a8Sspeer nxge_hio_share_free(mac_share_handle_t shandle) 1593678453a8Sspeer { 1594678453a8Sspeer nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1595da14cebeSEric Cheng nxge_hio_vr_t *vr; 1596da14cebeSEric Cheng 1597da14cebeSEric Cheng /* 1598da14cebeSEric Cheng * Clear internal handle state. 1599da14cebeSEric Cheng */ 1600da14cebeSEric Cheng vr = shp->vrp; 1601da14cebeSEric Cheng shp->vrp = (void *)NULL; 1602da14cebeSEric Cheng shp->index = 0; 1603da14cebeSEric Cheng shp->tmap = 0; 1604da14cebeSEric Cheng shp->rmap = 0; 1605da14cebeSEric Cheng shp->rxgroup = 0; 1606da14cebeSEric Cheng shp->active = B_FALSE; 1607da14cebeSEric Cheng 1608da14cebeSEric Cheng /* 1609da14cebeSEric Cheng * Free VR resource. 1610da14cebeSEric Cheng */ 1611da14cebeSEric Cheng nxge_hio_unshare(vr); 1612da14cebeSEric Cheng } 1613da14cebeSEric Cheng 1614da14cebeSEric Cheng 1615da14cebeSEric Cheng void 1616da14cebeSEric Cheng nxge_hio_share_query(mac_share_handle_t shandle, mac_ring_type_t type, 1617da14cebeSEric Cheng mac_ring_handle_t *rings, uint_t *n_rings) 1618da14cebeSEric Cheng { 1619da14cebeSEric Cheng nxge_t *nxge; 1620da14cebeSEric Cheng nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle; 1621da14cebeSEric Cheng nxge_ring_handle_t *rh; 1622da14cebeSEric Cheng uint32_t offset; 1623da14cebeSEric Cheng 1624da14cebeSEric Cheng nxge = shp->nxgep; 1625da14cebeSEric Cheng 1626da14cebeSEric Cheng switch (type) { 1627da14cebeSEric Cheng case MAC_RING_TYPE_RX: 1628da14cebeSEric Cheng rh = nxge->rx_ring_handles; 1629da14cebeSEric Cheng offset = nxge->pt_config.hw_config.start_rdc; 1630da14cebeSEric Cheng break; 1631da14cebeSEric Cheng 1632da14cebeSEric Cheng case MAC_RING_TYPE_TX: 1633da14cebeSEric Cheng rh = nxge->tx_ring_handles; 1634da14cebeSEric Cheng offset = nxge->pt_config.hw_config.tdc.start; 1635da14cebeSEric Cheng break; 1636da14cebeSEric Cheng } 1637da14cebeSEric Cheng 1638da14cebeSEric Cheng /* 1639da14cebeSEric Cheng * In version 1.0, we may only give a VR 2 RDCs/TDCs. Not only that, 1640da14cebeSEric Cheng * but the HV has statically assigned the channels like so: 1641da14cebeSEric Cheng * VR0: RDC0 & RDC1 1642da14cebeSEric Cheng * VR1: RDC2 & RDC3, etc. 1643da14cebeSEric Cheng * The TDCs are assigned in exactly the same way. 
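 * A worked example (hypothetical values): with shp->index == 3 and
 * start_rdc == 4, the share's rings are rh[3 * 2 - 4] == rh[2] and
 * rh[3 * 2 + 1 - 4] == rh[3], i.e. the handles for RDC6 and RDC7.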
1644da14cebeSEric Cheng 	 */
1645da14cebeSEric Cheng 	if (rings != NULL) {
1646da14cebeSEric Cheng 		rings[0] = rh[(shp->index * 2) - offset].ring_handle;
1647da14cebeSEric Cheng 		rings[1] = rh[(shp->index * 2 + 1) - offset].ring_handle;
1648da14cebeSEric Cheng 	}
1649da14cebeSEric Cheng 	if (n_rings != NULL) {
1650da14cebeSEric Cheng 		*n_rings = 2;
1651da14cebeSEric Cheng 	}
1652da14cebeSEric Cheng }
1653da14cebeSEric Cheng 
1654da14cebeSEric Cheng int
1655da14cebeSEric Cheng nxge_hio_share_add_group(mac_share_handle_t shandle,
1656da14cebeSEric Cheng     mac_group_driver_t ghandle)
1657da14cebeSEric Cheng {
1658da14cebeSEric Cheng 	nxge_t *nxge;
1659da14cebeSEric Cheng 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1660da14cebeSEric Cheng 	nxge_ring_group_t *rg = (nxge_ring_group_t *)ghandle;
1661da14cebeSEric Cheng 	nxge_hio_vr_t *vr;	/* The Virtualization Region */
1662da14cebeSEric Cheng 	nxge_grp_t *group;
1663da14cebeSEric Cheng 	int i;
1664da14cebeSEric Cheng 
1665da14cebeSEric Cheng 	if (rg->sindex != 0) {
1666da14cebeSEric Cheng 		/* the group is already bound to a share */
1667da14cebeSEric Cheng 		return (EALREADY);
1668da14cebeSEric Cheng 	}
1669da14cebeSEric Cheng 
167008ac1c49SNicolas Droux 	/*
167108ac1c49SNicolas Droux 	 * If we are adding a group 0 to a share, this
167208ac1c49SNicolas Droux 	 * is not correct.
167308ac1c49SNicolas Droux 	 */
167408ac1c49SNicolas Droux 	ASSERT(rg->gindex != 0);
167508ac1c49SNicolas Droux 
1676da14cebeSEric Cheng 	nxge = rg->nxgep;
1677da14cebeSEric Cheng 	vr = shp->vrp;
1678da14cebeSEric Cheng 
1679da14cebeSEric Cheng 	switch (rg->type) {
1680da14cebeSEric Cheng 	case MAC_RING_TYPE_RX:
1681da14cebeSEric Cheng 		/*
1682da14cebeSEric Cheng 		 * Make sure that the group has the right rings associated
1683da14cebeSEric Cheng 		 * for the share. In version 1.0, we may only give a VR
1684da14cebeSEric Cheng 		 * 2 RDCs. Not only that, but the HV has statically
1685da14cebeSEric Cheng 		 * assigned the channels like so:
1686da14cebeSEric Cheng 		 * VR0: RDC0 & RDC1
1687da14cebeSEric Cheng 		 * VR1: RDC2 & RDC3, etc.
1688da14cebeSEric Cheng 		 */
1689da14cebeSEric Cheng 		group = nxge->rx_set.group[rg->gindex];
1690da14cebeSEric Cheng 
1691da14cebeSEric Cheng 		if (group->count > 2) {
1692da14cebeSEric Cheng 			/* a share can have at most 2 rings */
1693da14cebeSEric Cheng 			return (EINVAL);
1694da14cebeSEric Cheng 		}
1695da14cebeSEric Cheng 
1696da14cebeSEric Cheng 		for (i = 0; i < NXGE_MAX_RDCS; i++) {
1697da14cebeSEric Cheng 			if (group->map & (1 << i)) {
1698da14cebeSEric Cheng 				if ((i != shp->index * 2) &&
1699da14cebeSEric Cheng 				    (i != (shp->index * 2 + 1))) {
1700da14cebeSEric Cheng 					/*
1701da14cebeSEric Cheng 					 * An attempt was made to bind a
1702da14cebeSEric Cheng 					 * group with invalid rings to
1703da14cebeSEric Cheng 					 * this share.
1704da14cebeSEric Cheng 					 */
1705da14cebeSEric Cheng 					return (EINVAL);
1706da14cebeSEric Cheng 				}
1707da14cebeSEric Cheng 			}
1708da14cebeSEric Cheng 		}
1709da14cebeSEric Cheng 
1710da14cebeSEric Cheng 		rg->sindex = vr->region;
1711da14cebeSEric Cheng 		vr->rdc_tbl = rg->rdctbl;
1712da14cebeSEric Cheng 		shp->rxgroup = vr->rdc_tbl;
1713da14cebeSEric Cheng 		break;
1714da14cebeSEric Cheng 
1715da14cebeSEric Cheng 	case MAC_RING_TYPE_TX:
1716da14cebeSEric Cheng 		/*
1717da14cebeSEric Cheng 		 * Make sure that the group has the right rings associated
1718da14cebeSEric Cheng 		 * for the share. In version 1.0, we may only give a VR
1719da14cebeSEric Cheng 		 * 2 TDCs. Not only that, but the HV has statically
1720da14cebeSEric Cheng 		 * assigned the channels like so:
1721da14cebeSEric Cheng 		 * VR0: TDC0 & TDC1
1722da14cebeSEric Cheng 		 * VR1: TDC2 & TDC3, etc.
1723da14cebeSEric Cheng 		 */
1724da14cebeSEric Cheng 		group = nxge->tx_set.group[rg->gindex];
1725da14cebeSEric Cheng 
1726da14cebeSEric Cheng 		if (group->count > 2) {
1727da14cebeSEric Cheng 			/* a share can have at most 2 rings */
1728da14cebeSEric Cheng 			return (EINVAL);
1729da14cebeSEric Cheng 		}
1730da14cebeSEric Cheng 
1731da14cebeSEric Cheng 		for (i = 0; i < NXGE_MAX_TDCS; i++) {
1732da14cebeSEric Cheng 			if (group->map & (1 << i)) {
1733da14cebeSEric Cheng 				if ((i != shp->index * 2) &&
1734da14cebeSEric Cheng 				    (i != (shp->index * 2 + 1))) {
1735da14cebeSEric Cheng 					/*
1736da14cebeSEric Cheng 					 * An attempt was made to bind a
1737da14cebeSEric Cheng 					 * group with invalid rings to
1738da14cebeSEric Cheng 					 * this share.
1739da14cebeSEric Cheng 					 */
1740da14cebeSEric Cheng 					return (EINVAL);
1741da14cebeSEric Cheng 				}
1742da14cebeSEric Cheng 			}
1743da14cebeSEric Cheng 		}
1744da14cebeSEric Cheng 
1745da14cebeSEric Cheng 		vr->tdc_tbl = nxge->pt_config.hw_config.def_mac_txdma_grpid +
1746da14cebeSEric Cheng 		    rg->gindex;
1747da14cebeSEric Cheng 		rg->sindex = vr->region;
1748da14cebeSEric Cheng 		break;
1749da14cebeSEric Cheng 	}
1750da14cebeSEric Cheng 	return (0);
1751da14cebeSEric Cheng }
1752da14cebeSEric Cheng 
1753da14cebeSEric Cheng int
1754da14cebeSEric Cheng nxge_hio_share_rem_group(mac_share_handle_t shandle,
1755da14cebeSEric Cheng     mac_group_driver_t ghandle)
1756da14cebeSEric Cheng {
1757da14cebeSEric Cheng 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1758da14cebeSEric Cheng 	nxge_ring_group_t *group = (nxge_ring_group_t *)ghandle;
1759da14cebeSEric Cheng 	nxge_hio_vr_t *vr;	/* The Virtualization Region */
1760da14cebeSEric Cheng 	int rv = 0;
1761da14cebeSEric Cheng 
1762da14cebeSEric Cheng 	vr = shp->vrp;
1763da14cebeSEric Cheng 
1764da14cebeSEric Cheng 	switch (group->type) {
1765da14cebeSEric Cheng 	case MAC_RING_TYPE_RX:
1766da14cebeSEric Cheng 		group->sindex = 0;
1767da14cebeSEric Cheng 		vr->rdc_tbl = 0;
1768da14cebeSEric Cheng 		shp->rxgroup = 0;
1769da14cebeSEric Cheng 		break;
1770da14cebeSEric Cheng 
1771da14cebeSEric Cheng 	case MAC_RING_TYPE_TX:
1772da14cebeSEric Cheng 		group->sindex = 0;
1773da14cebeSEric Cheng 		vr->tdc_tbl = 0;
1774da14cebeSEric Cheng 		break;
1775da14cebeSEric Cheng 	}
1776da14cebeSEric Cheng 
1777da14cebeSEric Cheng 	return (rv);
1778da14cebeSEric Cheng }
1779da14cebeSEric Cheng 
1780da14cebeSEric Cheng int
1781da14cebeSEric Cheng nxge_hio_share_bind(mac_share_handle_t shandle, uint64_t cookie,
1782da14cebeSEric Cheng     uint64_t *rcookie)
1783da14cebeSEric Cheng {
1784da14cebeSEric Cheng 	nxge_t *nxge;
1785da14cebeSEric Cheng 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1786da14cebeSEric Cheng 	nxge_hio_vr_t *vr;
1787da14cebeSEric Cheng 	uint64_t rmap, tmap, hv_rmap, hv_tmap;
1788da14cebeSEric Cheng 	int rv;
1789da14cebeSEric Cheng 
17900dc2366fSVenugopal Iyer 	ASSERT(shp != NULL);
17910dc2366fSVenugopal Iyer 	ASSERT(shp->nxgep != NULL);
17920dc2366fSVenugopal Iyer 	ASSERT(shp->vrp != NULL);
17930dc2366fSVenugopal Iyer 
1794da14cebeSEric Cheng 	nxge = shp->nxgep;
1795da14cebeSEric Cheng 	vr = (nxge_hio_vr_t *)shp->vrp;
1796da14cebeSEric Cheng 
1797da14cebeSEric Cheng 	/*
1798da14cebeSEric Cheng 	 * Add resources to the share.
1799da14cebeSEric Cheng 	 * For each DMA channel associated with the VR, bind its resources
1800da14cebeSEric Cheng 	 * to the VR.
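 * TX resources are added first; if the RX add or the HV assignment
 * below fails, everything bound so far is unwound via
 * nxge_hio_remres() before returning.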
1799da14cebeSEric Cheng 	 */
1800da14cebeSEric Cheng 	tmap = 0;
1801da14cebeSEric Cheng 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_TX, &tmap);
1802da14cebeSEric Cheng 	if (rv != 0) {
1803da14cebeSEric Cheng 		return (rv);
1804da14cebeSEric Cheng 	}
1805da14cebeSEric Cheng 
1806da14cebeSEric Cheng 	rmap = 0;
1807da14cebeSEric Cheng 	rv = nxge_hio_addres(vr, MAC_RING_TYPE_RX, &rmap);
1808da14cebeSEric Cheng 	if (rv != 0) {
1809da14cebeSEric Cheng 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1810da14cebeSEric Cheng 		return (rv);
1811da14cebeSEric Cheng 	}
1812da14cebeSEric Cheng 
1813da14cebeSEric Cheng 	/*
1814da14cebeSEric Cheng 	 * Ask the Hypervisor to set up the VR and allocate slots for
1815da14cebeSEric Cheng 	 * each ring associated with the VR.
1816da14cebeSEric Cheng 	 */
1817da14cebeSEric Cheng 	hv_tmap = hv_rmap = 0;
1818da14cebeSEric Cheng 	if ((rv = nxge_hio_share_assign(nxge, cookie,
1819da14cebeSEric Cheng 	    &hv_tmap, &hv_rmap, vr))) {
1820da14cebeSEric Cheng 		nxge_hio_remres(vr, MAC_RING_TYPE_TX, tmap);
1821da14cebeSEric Cheng 		nxge_hio_remres(vr, MAC_RING_TYPE_RX, rmap);
1822da14cebeSEric Cheng 		return (rv);
1823da14cebeSEric Cheng 	}
1824da14cebeSEric Cheng 
1825da14cebeSEric Cheng 	shp->active = B_TRUE;
1826da14cebeSEric Cheng 	shp->tmap = hv_tmap;
1827da14cebeSEric Cheng 	shp->rmap = hv_rmap;
1828da14cebeSEric Cheng 
1829da14cebeSEric Cheng 	/* high 32 bits are cfg_hdl and low 32 bits are HV cookie */
1830da14cebeSEric Cheng 	*rcookie = (((uint64_t)nxge->niu_cfg_hdl) << 32) | vr->cookie;
1831da14cebeSEric Cheng 
1832da14cebeSEric Cheng 	return (0);
1833da14cebeSEric Cheng }
1834da14cebeSEric Cheng 
1835da14cebeSEric Cheng void
1836da14cebeSEric Cheng nxge_hio_share_unbind(mac_share_handle_t shandle)
1837da14cebeSEric Cheng {
1838da14cebeSEric Cheng 	nxge_share_handle_t *shp = (nxge_share_handle_t *)shandle;
1839678453a8Sspeer 
1840678453a8Sspeer 	/*
1841678453a8Sspeer 	 * First, unassign the VR (take it back),
1842678453a8Sspeer 	 * so we can enable interrupts again.
1843678453a8Sspeer 	 */
1844da14cebeSEric Cheng 	nxge_hio_share_unassign(shp->vrp);
1845678453a8Sspeer 
1846678453a8Sspeer 	/*
1847678453a8Sspeer 	 * Free Ring Resources for TX and RX
1848678453a8Sspeer 	 */
18496920a987SMisaki Miyashita 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_TX, shp->tmap);
18506920a987SMisaki Miyashita 	nxge_hio_remres(shp->vrp, MAC_RING_TYPE_RX, shp->rmap);
1851678453a8Sspeer }
1852678453a8Sspeer 
1853678453a8Sspeer 
1854678453a8Sspeer /*
1855678453a8Sspeer  * nxge_hio_vr_share
1856678453a8Sspeer  *
1857678453a8Sspeer  * Find an unused Virtualization Region (VR).
1858678453a8Sspeer  *
1859678453a8Sspeer  * Arguments:
1860678453a8Sspeer  * 	nxge
1861678453a8Sspeer  *
1862678453a8Sspeer  * Notes:
1863678453a8Sspeer  *
1864678453a8Sspeer  * Context:
1865678453a8Sspeer  *	Service domain
1866678453a8Sspeer  */
18676920a987SMisaki Miyashita nxge_hio_vr_t *
1868678453a8Sspeer nxge_hio_vr_share(
1869678453a8Sspeer 	nxge_t *nxge)
1870678453a8Sspeer {
1871678453a8Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1872678453a8Sspeer 	nxge_hio_vr_t *vr;
1873678453a8Sspeer 
1874678453a8Sspeer 	int first, limit, region;
1875678453a8Sspeer 
1876678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_vr_share"));
1877678453a8Sspeer 
1878678453a8Sspeer 	MUTEX_ENTER(&nhd->lock);
1879678453a8Sspeer 
18806920a987SMisaki Miyashita 	if (nhd->vrs == 0) {
1881678453a8Sspeer 		MUTEX_EXIT(&nhd->lock);
1882678453a8Sspeer 		return (0);
1883678453a8Sspeer 	}
1884678453a8Sspeer 
1885678453a8Sspeer 	/* Find an empty virtual region (VR).
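 * The search ranges below are per-port: NIU port 0 may use
 * FUNC0_VIR1 up to (but not including) FUNC2_VIR0, and port 1 may
 * use FUNC2_VIR1 up to FUNC_VIR_MAX, since each port's VIR0 belongs
 * to the port itself.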
 */
1886678453a8Sspeer 	if (nxge->function_num == 0) {
1887678453a8Sspeer 		// FUNC0_VIR0 'belongs' to NIU port 0.
1888678453a8Sspeer 		first = FUNC0_VIR1;
1889678453a8Sspeer 		limit = FUNC2_VIR0;
1890678453a8Sspeer 	} else if (nxge->function_num == 1) {
1891678453a8Sspeer 		// FUNC2_VIR0 'belongs' to NIU port 1.
1892678453a8Sspeer 		first = FUNC2_VIR1;
1893678453a8Sspeer 		limit = FUNC_VIR_MAX;
1894678453a8Sspeer 	} else {
1895678453a8Sspeer 		cmn_err(CE_WARN,
1896678453a8Sspeer 		    "Shares not supported on function(%d) at this time.\n",
1897678453a8Sspeer 		    nxge->function_num);
		/*
		 * No valid VR range exists for this function, so bail
		 * out before <first> and <limit> are used
		 * uninitialized below.
		 */
		MUTEX_EXIT(&nhd->lock);
		return (0);
1898678453a8Sspeer 	}
1899678453a8Sspeer 
1900678453a8Sspeer 	for (region = first; region < limit; region++) {
1901678453a8Sspeer 		if (nhd->vr[region].nxge == 0)
1902678453a8Sspeer 			break;
1903678453a8Sspeer 	}
1904678453a8Sspeer 
1905678453a8Sspeer 	if (region == limit) {
1906678453a8Sspeer 		MUTEX_EXIT(&nhd->lock);
1907678453a8Sspeer 		return (0);
1908678453a8Sspeer 	}
1909678453a8Sspeer 
1910678453a8Sspeer 	vr = &nhd->vr[region];
1911678453a8Sspeer 	vr->nxge = (uintptr_t)nxge;
1912678453a8Sspeer 	vr->region = (uintptr_t)region;
1913678453a8Sspeer 
19146920a987SMisaki Miyashita 	nhd->vrs--;
1915678453a8Sspeer 
1916678453a8Sspeer 	MUTEX_EXIT(&nhd->lock);
1917678453a8Sspeer 
1918678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_vr_share"));
1919678453a8Sspeer 
19206920a987SMisaki Miyashita 	return (vr);
1921678453a8Sspeer }
1922678453a8Sspeer 
1923678453a8Sspeer void
1924678453a8Sspeer nxge_hio_unshare(
19256920a987SMisaki Miyashita 	nxge_hio_vr_t *vr)
1926678453a8Sspeer {
1927678453a8Sspeer 	nxge_t *nxge = (nxge_t *)vr->nxge;
1928678453a8Sspeer 	nxge_hio_data_t *nhd;
1929678453a8Sspeer 
1930678453a8Sspeer 	vr_region_t region;
1931678453a8Sspeer 
1932678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_unshare"));
1933678453a8Sspeer 
1934678453a8Sspeer 	if (!nxge) {
1935e11f0814SMichael Speer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_unshare: "
1936678453a8Sspeer 		    "vr->nxge is NULL"));
1937678453a8Sspeer 		return;
1938678453a8Sspeer 	}
1939678453a8Sspeer 
1940678453a8Sspeer 	/*
1941678453a8Sspeer 	 * This function is no longer called, but I will keep it
1942678453a8Sspeer 	 * here in case we want to revisit this topic in the future.
1943678453a8Sspeer 	 *
1944678453a8Sspeer 	 * nxge_hio_hostinfo_uninit(nxge, vr);
1945678453a8Sspeer 	 */
1946da14cebeSEric Cheng 
1947da14cebeSEric Cheng 	/*
1948da14cebeSEric Cheng 	 * XXX: This is done by ms_sremove?
1949da14cebeSEric Cheng 	 *	(void) nxge_fzc_rdc_tbl_unbind(nxge, vr->rdc_tbl);
1950da14cebeSEric Cheng 	 */
1951678453a8Sspeer 
1952678453a8Sspeer 	nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
1953678453a8Sspeer 
1954678453a8Sspeer 	MUTEX_ENTER(&nhd->lock);
1955678453a8Sspeer 
1956678453a8Sspeer 	region = vr->region;
1957678453a8Sspeer 	(void) memset(vr, 0, sizeof (*vr));
1958678453a8Sspeer 	vr->region = region;
1959678453a8Sspeer 
19606920a987SMisaki Miyashita 	nhd->vrs++;
1961678453a8Sspeer 
1962678453a8Sspeer 	MUTEX_EXIT(&nhd->lock);
1963678453a8Sspeer 
1964678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_unshare"));
1965678453a8Sspeer }
1966678453a8Sspeer 
1967678453a8Sspeer int
1968ef523517SMichael Speer nxge_hio_addres(nxge_hio_vr_t *vr, mac_ring_type_t type, uint64_t *map)
1969678453a8Sspeer {
19700dc2366fSVenugopal Iyer 	nxge_t *nxge;
1971da14cebeSEric Cheng 	nxge_grp_t *group;
1972da14cebeSEric Cheng 	int groupid;
1973ef523517SMichael Speer 	int i, rv = 0;
1974da14cebeSEric Cheng 	int max_dcs;
1975678453a8Sspeer 
19760dc2366fSVenugopal Iyer 	ASSERT(vr != NULL);
19770dc2366fSVenugopal Iyer 	ASSERT(vr->nxge != NULL);
19780dc2366fSVenugopal Iyer 	nxge = (nxge_t *)vr->nxge;
1979678453a8Sspeer 
19800dc2366fSVenugopal Iyer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_addres"));
1981678453a8Sspeer 
1982da14cebeSEric Cheng 	/*
1983da14cebeSEric Cheng 	 * For each ring associated with the group, add the resources
1984da14cebeSEric Cheng 	 * to the group and bind.
1985da14cebeSEric Cheng 	 */
1986da14cebeSEric Cheng 	max_dcs = (type == MAC_RING_TYPE_TX) ? NXGE_MAX_TDCS : NXGE_MAX_RDCS;
1987da14cebeSEric Cheng 	if (type == MAC_RING_TYPE_TX) {
1988da14cebeSEric Cheng 		/* set->group is an array of groups indexed by port group id */
1989da14cebeSEric Cheng 		groupid = vr->tdc_tbl -
1990da14cebeSEric Cheng 		    nxge->pt_config.hw_config.def_mac_txdma_grpid;
1991da14cebeSEric Cheng 		group = nxge->tx_set.group[groupid];
1992da14cebeSEric Cheng 	} else {
1993da14cebeSEric Cheng 		/* set->group is an array of groups indexed by port group id */
1994da14cebeSEric Cheng 		groupid = vr->rdc_tbl -
1995da14cebeSEric Cheng 		    nxge->pt_config.hw_config.def_mac_rxdma_grpid;
1996da14cebeSEric Cheng 		group = nxge->rx_set.group[groupid];
1997da14cebeSEric Cheng 	}
1998da14cebeSEric Cheng 
19990dc2366fSVenugopal Iyer 	ASSERT(group != NULL);
20000dc2366fSVenugopal Iyer 
2001da14cebeSEric Cheng 	if (group->map == 0) {
2002da14cebeSEric Cheng 		NXGE_DEBUG_MSG((nxge, HIO_CTL, "There are no rings associated "
2003da14cebeSEric Cheng 		    "with this VR"));
2004da14cebeSEric Cheng 		return (EINVAL);
2005da14cebeSEric Cheng 	}
2006da14cebeSEric Cheng 
2007da14cebeSEric Cheng 	for (i = 0; i < max_dcs; i++) {
2008da14cebeSEric Cheng 		if (group->map & (1 << i)) {
2009da14cebeSEric Cheng 			if ((rv = nxge_hio_dc_share(nxge, vr, type, i)) < 0) {
2010da14cebeSEric Cheng 				if (*map == 0)	/* Couldn't get even one DC.
 */
2011678453a8Sspeer 					return (-rv);
2012678453a8Sspeer 				else
2013678453a8Sspeer 					break;
2014678453a8Sspeer 			}
2015da14cebeSEric Cheng 			*map |= (1 << i);
2016da14cebeSEric Cheng 		}
2017678453a8Sspeer 	}
2018678453a8Sspeer 
2019ef523517SMichael Speer 	if ((*map == 0) || (rv != 0)) {
2020ef523517SMichael Speer 		NXGE_DEBUG_MSG((nxge, HIO_CTL,
2021ef523517SMichael Speer 		    "<== nxge_hio_addres: rv(%x)", rv));
2022ef523517SMichael Speer 		return (EIO);
2023ef523517SMichael Speer 	}
2024678453a8Sspeer 
2025ef523517SMichael Speer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_addres"));
2026678453a8Sspeer 	return (0);
2027678453a8Sspeer }
2028678453a8Sspeer 
2029678453a8Sspeer /* ARGSUSED */
2030678453a8Sspeer void
2031678453a8Sspeer nxge_hio_remres(
20326920a987SMisaki Miyashita 	nxge_hio_vr_t *vr,
2033678453a8Sspeer 	mac_ring_type_t type,
2034678453a8Sspeer 	res_map_t res_map)
2035678453a8Sspeer {
2036678453a8Sspeer 	nxge_t *nxge = (nxge_t *)vr->nxge;
2037678453a8Sspeer 	nxge_grp_t *group;
2038678453a8Sspeer 
2039678453a8Sspeer 	if (!nxge) {
2040e11f0814SMichael Speer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2041678453a8Sspeer 		    "vr->nxge is NULL"));
2042678453a8Sspeer 		return;
2043678453a8Sspeer 	}
2044678453a8Sspeer 
2045678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_remres(%lx)", res_map));
2046678453a8Sspeer 
2047da14cebeSEric Cheng 	/*
2048da14cebeSEric Cheng 	 * For each ring bound to the group, remove the DMA resources
2049da14cebeSEric Cheng 	 * from the group and unbind.
2050da14cebeSEric Cheng 	 */
2051678453a8Sspeer 	group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group);
2052678453a8Sspeer 	while (group->dc) {
2053678453a8Sspeer 		nxge_hio_dc_t *dc = group->dc;
2054678453a8Sspeer 		NXGE_DC_RESET(res_map, dc->page);
2055678453a8Sspeer 		nxge_hio_dc_unshare(nxge, vr, type, dc->channel);
2056678453a8Sspeer 	}
2057678453a8Sspeer 
2058678453a8Sspeer 	if (res_map) {
2059678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_remres: "
2060678453a8Sspeer 		    "res_map %lx", res_map));
2061678453a8Sspeer 	}
2062678453a8Sspeer 
2063678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_remres"));
2064678453a8Sspeer }
2065678453a8Sspeer 
2066678453a8Sspeer /*
2067678453a8Sspeer  * nxge_hio_tdc_share
2068678453a8Sspeer  *
2069678453a8Sspeer  * Share an unused TDC channel.
2070678453a8Sspeer  *
2071678453a8Sspeer  * Arguments:
2072678453a8Sspeer  * 	nxge
2073678453a8Sspeer  *
2074678453a8Sspeer  * Notes:
2075678453a8Sspeer  *
2076678453a8Sspeer  * A.7.3 Reconfigure Tx DMA channel
2077678453a8Sspeer  *	Disable TxDMA		A.9.6.10
2078678453a8Sspeer  *	[Rebind TxDMA channel to Port	A.9.6.7]
2079678453a8Sspeer  *
2080678453a8Sspeer  * We don't have to Rebind the TDC to the port - it is always already bound.
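 *
 * Before the disable, the ring is first taken through the
 * NXGE_TX_RING_OFFLINING -> NXGE_TX_RING_OFFLINED handshake below, so
 * that a transmit in progress cannot touch the channel once it is
 * handed to the guest.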
2081678453a8Sspeer * 2082678453a8Sspeer * Soft Reset TxDMA A.9.6.2 2083678453a8Sspeer * 2084678453a8Sspeer * This procedure will be executed by nxge_init_txdma_channel() in the 2085678453a8Sspeer * guest domain: 2086678453a8Sspeer * 2087678453a8Sspeer * Re-initialize TxDMA A.9.6.8 2088678453a8Sspeer * Reconfigure TxDMA 2089678453a8Sspeer * Enable TxDMA A.9.6.9 2090678453a8Sspeer * 2091678453a8Sspeer * Context: 2092678453a8Sspeer * Service domain 2093678453a8Sspeer */ 2094678453a8Sspeer int 2095678453a8Sspeer nxge_hio_tdc_share( 2096678453a8Sspeer nxge_t *nxge, 2097678453a8Sspeer int channel) 2098678453a8Sspeer { 2099330cd344SMichael Speer nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio; 2100678453a8Sspeer nxge_grp_set_t *set = &nxge->tx_set; 2101678453a8Sspeer tx_ring_t *ring; 210222c0d73aSspeer int count; 2103678453a8Sspeer 2104678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_share")); 2105678453a8Sspeer 2106678453a8Sspeer /* 2107678453a8Sspeer * Wait until this channel is idle. 2108678453a8Sspeer */ 2109678453a8Sspeer ring = nxge->tx_rings->rings[channel]; 211048056c53SMichael Speer ASSERT(ring != NULL); 211122c0d73aSspeer 21126895688eSspeer (void) atomic_swap_32(&ring->tx_ring_offline, NXGE_TX_RING_OFFLINING); 21136895688eSspeer if (ring->tx_ring_busy) { 211422c0d73aSspeer /* 211522c0d73aSspeer * Wait for 30 seconds. 211622c0d73aSspeer */ 2117678453a8Sspeer for (count = 30 * 1000; count; count--) { 211822c0d73aSspeer if (ring->tx_ring_offline & NXGE_TX_RING_OFFLINED) { 2119678453a8Sspeer break; 2120678453a8Sspeer } 212122c0d73aSspeer 212222c0d73aSspeer drv_usecwait(1000); 2123678453a8Sspeer } 212422c0d73aSspeer 2125678453a8Sspeer if (count == 0) { 212622c0d73aSspeer (void) atomic_swap_32(&ring->tx_ring_offline, 212722c0d73aSspeer NXGE_TX_RING_ONLINE); 2128e11f0814SMichael Speer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2129e11f0814SMichael Speer "nxge_hio_tdc_share: " 2130678453a8Sspeer "Tx ring %d was always BUSY", channel)); 2131678453a8Sspeer return (-EIO); 2132678453a8Sspeer } 21336895688eSspeer } else { 21346895688eSspeer (void) atomic_swap_32(&ring->tx_ring_offline, 21356895688eSspeer NXGE_TX_RING_OFFLINED); 21366895688eSspeer } 2137678453a8Sspeer 2138330cd344SMichael Speer MUTEX_ENTER(&nhd->lock); 2139330cd344SMichael Speer nxge->tdc_is_shared[channel] = B_TRUE; 2140330cd344SMichael Speer MUTEX_EXIT(&nhd->lock); 2141330cd344SMichael Speer 2142678453a8Sspeer if (nxge_intr_remove(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 2143e11f0814SMichael Speer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_share: " 2144678453a8Sspeer "Failed to remove interrupt for TxDMA channel %d", 2145678453a8Sspeer channel)); 2146da14cebeSEric Cheng return (-EINVAL); 2147678453a8Sspeer } 2148678453a8Sspeer 2149678453a8Sspeer /* Disable TxDMA A.9.6.10 */ 2150678453a8Sspeer (void) nxge_txdma_channel_disable(nxge, channel); 2151678453a8Sspeer 2152678453a8Sspeer /* The SD is sharing this channel. */ 2153678453a8Sspeer NXGE_DC_SET(set->shared.map, channel); 2154678453a8Sspeer set->shared.count++; 2155678453a8Sspeer 21561d36aa9eSspeer /* Soft Reset TxDMA A.9.6.2 */ 21571d36aa9eSspeer nxge_grp_dc_remove(nxge, VP_BOUND_TX, channel); 21581d36aa9eSspeer 2159678453a8Sspeer /* 2160678453a8Sspeer * Initialize the DC-specific FZC control registers. 
2161678453a8Sspeer * ----------------------------------------------------- 2162678453a8Sspeer */ 2163678453a8Sspeer if (nxge_init_fzc_tdc(nxge, channel) != NXGE_OK) { 2164678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2165e11f0814SMichael Speer "nxge_hio_tdc_share: FZC TDC failed: %d", channel)); 2166678453a8Sspeer return (-EIO); 2167678453a8Sspeer } 2168678453a8Sspeer 2169678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_share")); 2170678453a8Sspeer 2171678453a8Sspeer return (0); 2172678453a8Sspeer } 2173678453a8Sspeer 2174678453a8Sspeer /* 2175678453a8Sspeer * nxge_hio_rdc_share 2176678453a8Sspeer * 2177678453a8Sspeer * Share an unused RDC channel. 2178678453a8Sspeer * 2179678453a8Sspeer * Arguments: 2180678453a8Sspeer * nxge 2181678453a8Sspeer * 2182678453a8Sspeer * Notes: 2183678453a8Sspeer * 2184678453a8Sspeer * This is the latest version of the procedure to 2185678453a8Sspeer * Reconfigure an Rx DMA channel: 2186678453a8Sspeer * 2187678453a8Sspeer * A.6.3 Reconfigure Rx DMA channel 2188678453a8Sspeer * Stop RxMAC A.9.2.6 2189678453a8Sspeer * Drain IPP Port A.9.3.6 2190678453a8Sspeer * Stop and reset RxDMA A.9.5.3 2191678453a8Sspeer * 2192678453a8Sspeer * This procedure will be executed by nxge_init_rxdma_channel() in the 2193678453a8Sspeer * guest domain: 2194678453a8Sspeer * 2195678453a8Sspeer * Initialize RxDMA A.9.5.4 2196678453a8Sspeer * Reconfigure RxDMA 2197678453a8Sspeer * Enable RxDMA A.9.5.5 2198678453a8Sspeer * 2199678453a8Sspeer * We will do this here, since the RDC is a canalis non grata: 2200678453a8Sspeer * Enable RxMAC A.9.2.10 2201678453a8Sspeer * 2202678453a8Sspeer * Context: 2203678453a8Sspeer * Service domain 2204678453a8Sspeer */ 2205678453a8Sspeer int 2206678453a8Sspeer nxge_hio_rdc_share( 2207678453a8Sspeer nxge_t *nxge, 2208678453a8Sspeer nxge_hio_vr_t *vr, 2209678453a8Sspeer int channel) 2210678453a8Sspeer { 2211678453a8Sspeer nxge_grp_set_t *set = &nxge->rx_set; 2212678453a8Sspeer nxge_rdc_grp_t *rdc_grp; 2213678453a8Sspeer 2214678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_share")); 2215678453a8Sspeer 2216678453a8Sspeer /* Disable interrupts. */ 2217678453a8Sspeer if (nxge_intr_remove(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 2218e11f0814SMichael Speer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 2219678453a8Sspeer "Failed to remove interrupt for RxDMA channel %d", 2220678453a8Sspeer channel)); 2221678453a8Sspeer return (NXGE_ERROR); 2222678453a8Sspeer } 2223678453a8Sspeer 2224678453a8Sspeer /* Stop RxMAC = A.9.2.6 */ 2225678453a8Sspeer if (nxge_rx_mac_disable(nxge) != NXGE_OK) { 2226678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 2227678453a8Sspeer "Failed to disable RxMAC")); 2228678453a8Sspeer } 2229678453a8Sspeer 2230678453a8Sspeer /* Drain IPP Port = A.9.3.6 */ 2231678453a8Sspeer (void) nxge_ipp_drain(nxge); 2232678453a8Sspeer 2233678453a8Sspeer /* Stop and reset RxDMA = A.9.5.3 */ 2234678453a8Sspeer // De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 ) 2235678453a8Sspeer if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) { 2236678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_share: " 2237678453a8Sspeer "Failed to disable RxDMA channel %d", channel)); 2238678453a8Sspeer } 2239678453a8Sspeer 2240678453a8Sspeer /* The SD is sharing this channel. 
 */
2241678453a8Sspeer 	NXGE_DC_SET(set->shared.map, channel);
2242678453a8Sspeer 	set->shared.count++;
2243678453a8Sspeer 
22441d36aa9eSspeer 	// Assert RST: RXDMA_CFIG1[30] = 1
22451d36aa9eSspeer 	nxge_grp_dc_remove(nxge, VP_BOUND_RX, channel);
22461d36aa9eSspeer 
2247678453a8Sspeer 	/*
2248678453a8Sspeer 	 * The guest domain will reconfigure the RDC later.
2249678453a8Sspeer 	 *
2250678453a8Sspeer 	 * But in the meantime, we must re-enable the Rx MAC so
2251678453a8Sspeer 	 * that we can start receiving packets again on the
2252678453a8Sspeer 	 * remaining RDCs:
2253678453a8Sspeer 	 *
2254678453a8Sspeer 	 *	Enable RxMAC = A.9.2.10
2255678453a8Sspeer 	 */
2256678453a8Sspeer 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2257678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2258e11f0814SMichael Speer 		    "nxge_hio_rdc_share: Rx MAC still disabled"));
2259678453a8Sspeer 	}
2260678453a8Sspeer 
2261678453a8Sspeer 	/*
2262678453a8Sspeer 	 * Initialize the DC-specific FZC control registers.
2263678453a8Sspeer 	 * -----------------------------------------------------
2264678453a8Sspeer 	 */
2265678453a8Sspeer 	if (nxge_init_fzc_rdc(nxge, channel) != NXGE_OK) {
2266678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2267e11f0814SMichael Speer 		    "nxge_hio_rdc_share: FZC RDC failed: %d", channel));
2268678453a8Sspeer 		return (-EIO);
2269678453a8Sspeer 	}
2270678453a8Sspeer 
2271678453a8Sspeer 	/*
22724ba491f5SMichael Speer 	 * Update the RDC group.
2273678453a8Sspeer 	 */
2274678453a8Sspeer 	rdc_grp = &nxge->pt_config.rdc_grps[vr->rdc_tbl];
2275678453a8Sspeer 	NXGE_DC_SET(rdc_grp->map, channel);
2276678453a8Sspeer 
2277678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_share"));
2278678453a8Sspeer 
2279678453a8Sspeer 	return (0);
2280678453a8Sspeer }
2281678453a8Sspeer 
2282678453a8Sspeer /*
2283678453a8Sspeer  * nxge_hio_dc_share
2284678453a8Sspeer  *
2285678453a8Sspeer  * Share a DMA channel with a guest domain.
2286678453a8Sspeer  *
2287678453a8Sspeer  * Arguments:
2288678453a8Sspeer  * 	nxge
2289678453a8Sspeer  * 	vr	The VR that <channel> will belong to.
2290678453a8Sspeer  * 	type	Tx or Rx.
2291da14cebeSEric Cheng  * 	channel	Channel to share
2292678453a8Sspeer  *
2293678453a8Sspeer  * Notes:
2294678453a8Sspeer  *
2295678453a8Sspeer  * Context:
2296678453a8Sspeer  *	Service domain
2297678453a8Sspeer  */
2298678453a8Sspeer int
2299678453a8Sspeer nxge_hio_dc_share(
2300678453a8Sspeer 	nxge_t *nxge,
2301678453a8Sspeer 	nxge_hio_vr_t *vr,
2302da14cebeSEric Cheng 	mac_ring_type_t type,
2303da14cebeSEric Cheng 	int channel)
2304678453a8Sspeer {
2305678453a8Sspeer 	nxge_hio_data_t *nhd = (nxge_hio_data_t *)nxge->nxge_hw_p->hio;
2306678453a8Sspeer 	nxge_hio_dc_t *dc;
2307678453a8Sspeer 	nxge_grp_t *group;
2308678453a8Sspeer 	int slot;
2309678453a8Sspeer 
2310678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_share(%cdc %d",
2311678453a8Sspeer 	    type == MAC_RING_TYPE_TX ? 't' : 'r', channel));
2312678453a8Sspeer 
2313678453a8Sspeer 
2314678453a8Sspeer 	/* -------------------------------------------------- */
2315678453a8Sspeer 	slot = (type == MAC_RING_TYPE_TX) ?
2316678453a8Sspeer nxge_hio_tdc_share(nxge, channel) : 2317678453a8Sspeer nxge_hio_rdc_share(nxge, vr, channel); 2318678453a8Sspeer 2319678453a8Sspeer if (slot < 0) { 2320678453a8Sspeer if (type == MAC_RING_TYPE_RX) { 2321da14cebeSEric Cheng nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel); 2322678453a8Sspeer } else { 2323da14cebeSEric Cheng nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel); 2324678453a8Sspeer } 2325678453a8Sspeer return (slot); 2326678453a8Sspeer } 2327678453a8Sspeer 2328678453a8Sspeer MUTEX_ENTER(&nhd->lock); 2329678453a8Sspeer 2330678453a8Sspeer /* 2331678453a8Sspeer * Tag this channel. 2332678453a8Sspeer * -------------------------------------------------- 2333678453a8Sspeer */ 2334678453a8Sspeer dc = type == MAC_RING_TYPE_TX ? &nhd->tdc[channel] : &nhd->rdc[channel]; 2335678453a8Sspeer 2336678453a8Sspeer dc->vr = vr; 2337678453a8Sspeer dc->channel = (nxge_channel_t)channel; 2338678453a8Sspeer 2339678453a8Sspeer MUTEX_EXIT(&nhd->lock); 2340678453a8Sspeer 2341678453a8Sspeer /* 2342678453a8Sspeer * vr->[t|r]x_group is used by the service domain to 2343678453a8Sspeer * keep track of its shared DMA channels. 2344678453a8Sspeer */ 2345678453a8Sspeer MUTEX_ENTER(&nxge->group_lock); 2346678453a8Sspeer group = (type == MAC_RING_TYPE_TX ? &vr->tx_group : &vr->rx_group); 2347678453a8Sspeer 23486920a987SMisaki Miyashita dc->group = group; 2349678453a8Sspeer /* Initialize <group>, if necessary */ 2350678453a8Sspeer if (group->count == 0) { 2351678453a8Sspeer group->nxge = nxge; 2352678453a8Sspeer group->type = (type == MAC_RING_TYPE_TX) ? 2353678453a8Sspeer VP_BOUND_TX : VP_BOUND_RX; 2354678453a8Sspeer group->sequence = nhd->sequence++; 2355678453a8Sspeer group->active = B_TRUE; 2356678453a8Sspeer } 2357678453a8Sspeer 2358678453a8Sspeer MUTEX_EXIT(&nxge->group_lock); 2359678453a8Sspeer 2360678453a8Sspeer NXGE_ERROR_MSG((nxge, HIO_CTL, 2361678453a8Sspeer "DC share: %cDC %d was assigned to slot %d", 2362678453a8Sspeer type == MAC_RING_TYPE_TX ? 'T' : 'R', channel, slot)); 2363678453a8Sspeer 2364678453a8Sspeer nxge_grp_dc_append(nxge, group, dc); 2365678453a8Sspeer 2366678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_share")); 2367678453a8Sspeer 2368678453a8Sspeer return (0); 2369678453a8Sspeer } 2370678453a8Sspeer 2371678453a8Sspeer /* 2372678453a8Sspeer * nxge_hio_tdc_unshare 2373678453a8Sspeer * 2374678453a8Sspeer * Unshare a TDC. 2375678453a8Sspeer * 2376678453a8Sspeer * Arguments: 2377678453a8Sspeer * nxge 2378678453a8Sspeer * channel The channel to unshare (add again). 
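 * 	dev_grpid	The device TDC group id (e.g. vr->tdc_tbl) to
 * 			which the channel is returned.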
2379678453a8Sspeer * 2380678453a8Sspeer * Notes: 2381678453a8Sspeer * 2382678453a8Sspeer * Context: 2383678453a8Sspeer * Service domain 2384678453a8Sspeer */ 2385678453a8Sspeer void 2386678453a8Sspeer nxge_hio_tdc_unshare( 2387678453a8Sspeer nxge_t *nxge, 2388da14cebeSEric Cheng int dev_grpid, 2389678453a8Sspeer int channel) 2390678453a8Sspeer { 2391678453a8Sspeer nxge_grp_set_t *set = &nxge->tx_set; 2392da14cebeSEric Cheng nxge_grp_t *group; 2393da14cebeSEric Cheng int grpid; 2394678453a8Sspeer 2395678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_tdc_unshare")); 2396678453a8Sspeer 2397678453a8Sspeer NXGE_DC_RESET(set->shared.map, channel); 2398678453a8Sspeer set->shared.count--; 2399678453a8Sspeer 2400da14cebeSEric Cheng grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_txdma_grpid; 2401da14cebeSEric Cheng group = set->group[grpid]; 2402da14cebeSEric Cheng 24036920a987SMisaki Miyashita if ((nxge_grp_dc_add(nxge, group, VP_BOUND_TX, channel))) { 2404678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 2405678453a8Sspeer "Failed to initialize TxDMA channel %d", channel)); 2406678453a8Sspeer return; 2407678453a8Sspeer } 2408678453a8Sspeer 2409678453a8Sspeer /* Re-add this interrupt. */ 2410678453a8Sspeer if (nxge_intr_add(nxge, VP_BOUND_TX, channel) != NXGE_OK) { 2411678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_tdc_unshare: " 2412678453a8Sspeer "Failed to add interrupt for TxDMA channel %d", channel)); 2413678453a8Sspeer } 2414678453a8Sspeer 2415678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_tdc_unshare")); 2416678453a8Sspeer } 2417678453a8Sspeer 2418678453a8Sspeer /* 2419678453a8Sspeer * nxge_hio_rdc_unshare 2420678453a8Sspeer * 2421678453a8Sspeer * Unshare an RDC: add it to the SD's RDC groups (tables). 2422678453a8Sspeer * 2423678453a8Sspeer * Arguments: 2424678453a8Sspeer * nxge 2425678453a8Sspeer * channel The channel to unshare (add again). 
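 * 	dev_grpid	The device RDC group id (e.g. vr->rdc_tbl) to
 * 			which the channel is returned.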
2426678453a8Sspeer  *
2427678453a8Sspeer  * Notes:
2428678453a8Sspeer  *
2429678453a8Sspeer  * Context:
2430678453a8Sspeer  *	Service domain
2431678453a8Sspeer  */
2432678453a8Sspeer void
2433678453a8Sspeer nxge_hio_rdc_unshare(
2434678453a8Sspeer 	nxge_t *nxge,
2435da14cebeSEric Cheng 	int dev_grpid,
2436678453a8Sspeer 	int channel)
2437678453a8Sspeer {
2438678453a8Sspeer 	nxge_grp_set_t *set = &nxge->rx_set;
2439da14cebeSEric Cheng 	nxge_grp_t *group;
2440da14cebeSEric Cheng 	int grpid;
24410dc2366fSVenugopal Iyer 	int i;
2442678453a8Sspeer 
2443678453a8Sspeer 	NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_rdc_unshare"));
2444678453a8Sspeer 
2445678453a8Sspeer 	/* Stop RxMAC = A.9.2.6 */
2446678453a8Sspeer 	if (nxge_rx_mac_disable(nxge) != NXGE_OK) {
2447678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2448678453a8Sspeer 		    "Failed to disable RxMAC"));
2449678453a8Sspeer 	}
2450678453a8Sspeer 
2451678453a8Sspeer 	/* Drain IPP Port = A.9.3.6 */
2452678453a8Sspeer 	(void) nxge_ipp_drain(nxge);
2453678453a8Sspeer 
2454678453a8Sspeer 	/* Stop and reset RxDMA = A.9.5.3 */
2455678453a8Sspeer 	// De-assert EN: RXDMA_CFIG1[31] = 0 (DMC+00000 )
2456678453a8Sspeer 	if (nxge_disable_rxdma_channel(nxge, channel) != NXGE_OK) {
2457678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2458678453a8Sspeer 		    "Failed to disable RxDMA channel %d", channel));
2459678453a8Sspeer 	}
2460678453a8Sspeer 
2461678453a8Sspeer 	NXGE_DC_RESET(set->shared.map, channel);
2462678453a8Sspeer 	set->shared.count--;
2463678453a8Sspeer 
2464da14cebeSEric Cheng 	grpid = dev_grpid - nxge->pt_config.hw_config.def_mac_rxdma_grpid;
2465da14cebeSEric Cheng 	group = set->group[grpid];
2466da14cebeSEric Cheng 
2467678453a8Sspeer 	/*
2468678453a8Sspeer 	 * Assert RST: RXDMA_CFIG1[30] = 1
2469678453a8Sspeer 	 *
2470678453a8Sspeer 	 * Initialize RxDMA	A.9.5.4
2471678453a8Sspeer 	 * Reconfigure RxDMA
2472678453a8Sspeer 	 * Enable RxDMA		A.9.5.5
2473678453a8Sspeer 	 */
24746920a987SMisaki Miyashita 	if ((nxge_grp_dc_add(nxge, group, VP_BOUND_RX, channel))) {
2475678453a8Sspeer 		/* Be sure to re-enable the RX MAC. */
2476678453a8Sspeer 		if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2477678453a8Sspeer 			NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2478da14cebeSEric Cheng 			    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2479678453a8Sspeer 		}
2480678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, "nxge_hio_rdc_unshare: "
2481678453a8Sspeer 		    "Failed to initialize RxDMA channel %d", channel));
2482678453a8Sspeer 		return;
2483678453a8Sspeer 	}
2484678453a8Sspeer 
2485678453a8Sspeer 	/*
2486678453a8Sspeer 	 * Enable RxMAC = A.9.2.10
2487678453a8Sspeer 	 */
2488678453a8Sspeer 	if (nxge_rx_mac_enable(nxge) != NXGE_OK) {
2489678453a8Sspeer 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2490da14cebeSEric Cheng 		    "nxge_hio_rdc_unshare: Rx MAC still disabled"));
2491678453a8Sspeer 		return;
2492678453a8Sspeer 	}
2493678453a8Sspeer 
2494678453a8Sspeer 	/* Re-add this interrupt.
*/ 2495678453a8Sspeer if (nxge_intr_add(nxge, VP_BOUND_RX, channel) != NXGE_OK) { 2496678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2497e11f0814SMichael Speer "nxge_hio_rdc_unshare: Failed to add interrupt for " 2498678453a8Sspeer "RxDMA CHANNEL %d", channel)); 2499678453a8Sspeer } 2500678453a8Sspeer 2501678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_rdc_unshare")); 25020dc2366fSVenugopal Iyer 25030dc2366fSVenugopal Iyer for (i = 0; i < NXGE_MAX_RDCS; i++) { 25040dc2366fSVenugopal Iyer if (nxge->rx_ring_handles[i].channel == channel) { 2505*9f26b864SVenugopal Iyer (void) nxge_rx_ring_start( 25060dc2366fSVenugopal Iyer (mac_ring_driver_t)&nxge->rx_ring_handles[i], 25070dc2366fSVenugopal Iyer nxge->rx_ring_handles[i].ring_gen_num); 25080dc2366fSVenugopal Iyer } 25090dc2366fSVenugopal Iyer } 2510678453a8Sspeer } 2511678453a8Sspeer 2512678453a8Sspeer /* 2513678453a8Sspeer * nxge_hio_dc_unshare 2514678453a8Sspeer * 2515678453a8Sspeer * Unshare (reuse) a DMA channel. 2516678453a8Sspeer * 2517678453a8Sspeer * Arguments: 2518678453a8Sspeer * nxge 2519678453a8Sspeer * vr The VR that <channel> belongs to. 2520678453a8Sspeer * type Tx or Rx. 2521678453a8Sspeer * channel The DMA channel to reuse. 2522678453a8Sspeer * 2523678453a8Sspeer * Notes: 2524678453a8Sspeer * 2525678453a8Sspeer * Context: 2526678453a8Sspeer * Service domain 2527678453a8Sspeer */ 2528678453a8Sspeer void 2529678453a8Sspeer nxge_hio_dc_unshare( 2530678453a8Sspeer nxge_t *nxge, 2531678453a8Sspeer nxge_hio_vr_t *vr, 2532678453a8Sspeer mac_ring_type_t type, 2533678453a8Sspeer int channel) 2534678453a8Sspeer { 2535678453a8Sspeer nxge_grp_t *group; 2536678453a8Sspeer nxge_hio_dc_t *dc; 2537678453a8Sspeer 2538678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "==> nxge_hio_dc_unshare(%cdc %d)", 2539678453a8Sspeer type == MAC_RING_TYPE_TX ? 't' : 'r', channel)); 2540678453a8Sspeer 2541678453a8Sspeer /* Unlink the channel from its group. */ 2542678453a8Sspeer /* -------------------------------------------------- */ 2543678453a8Sspeer group = (type == MAC_RING_TYPE_TX) ? &vr->tx_group : &vr->rx_group; 25441d36aa9eSspeer NXGE_DC_RESET(group->map, channel); 2545678453a8Sspeer if ((dc = nxge_grp_dc_unlink(nxge, group, channel)) == 0) { 2546678453a8Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2547e11f0814SMichael Speer "nxge_hio_dc_unshare(%d) failed", channel)); 2548678453a8Sspeer return; 2549678453a8Sspeer } 2550678453a8Sspeer 2551678453a8Sspeer dc->vr = 0; 2552678453a8Sspeer dc->cookie = 0; 2553678453a8Sspeer 2554678453a8Sspeer if (type == MAC_RING_TYPE_RX) { 2555da14cebeSEric Cheng nxge_hio_rdc_unshare(nxge, vr->rdc_tbl, channel); 2556678453a8Sspeer } else { 2557da14cebeSEric Cheng nxge_hio_tdc_unshare(nxge, vr->tdc_tbl, channel); 2558678453a8Sspeer } 2559678453a8Sspeer 2560678453a8Sspeer NXGE_DEBUG_MSG((nxge, HIO_CTL, "<== nxge_hio_dc_unshare")); 2561678453a8Sspeer } 2562678453a8Sspeer 256308ac1c49SNicolas Droux 256408ac1c49SNicolas Droux /* 256508ac1c49SNicolas Droux * nxge_hio_rxdma_bind_intr(): 256608ac1c49SNicolas Droux * 256708ac1c49SNicolas Droux * For the guest domain driver, need to bind the interrupt group 256808ac1c49SNicolas Droux * and state to the rx_rcr_ring_t. 
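 * The nxge_hio_dc_t found for the channel records which LDG vector
 * and logical device (ldsv) the channel was bound to; these index
 * the ldgp/ldvp arrays of the ldgvp control structure, and the pair
 * is stored in the ring under its lock.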
256908ac1c49SNicolas Droux */ 257008ac1c49SNicolas Droux 257108ac1c49SNicolas Droux int 257208ac1c49SNicolas Droux nxge_hio_rxdma_bind_intr(nxge_t *nxge, rx_rcr_ring_t *ring, int channel) 257308ac1c49SNicolas Droux { 257408ac1c49SNicolas Droux nxge_hio_dc_t *dc; 257508ac1c49SNicolas Droux nxge_ldgv_t *control; 257608ac1c49SNicolas Droux nxge_ldg_t *group; 257708ac1c49SNicolas Droux nxge_ldv_t *device; 257808ac1c49SNicolas Droux 257908ac1c49SNicolas Droux /* 258008ac1c49SNicolas Droux * Find the DMA channel. 258108ac1c49SNicolas Droux */ 258208ac1c49SNicolas Droux if (!(dc = nxge_grp_dc_find(nxge, VP_BOUND_RX, channel))) { 258308ac1c49SNicolas Droux return (NXGE_ERROR); 258408ac1c49SNicolas Droux } 258508ac1c49SNicolas Droux 258608ac1c49SNicolas Droux /* 258708ac1c49SNicolas Droux * Get the control structure. 258808ac1c49SNicolas Droux */ 258908ac1c49SNicolas Droux control = nxge->ldgvp; 259008ac1c49SNicolas Droux if (control == NULL) { 259108ac1c49SNicolas Droux return (NXGE_ERROR); 259208ac1c49SNicolas Droux } 259308ac1c49SNicolas Droux 259408ac1c49SNicolas Droux group = &control->ldgp[dc->ldg.vector]; 259508ac1c49SNicolas Droux device = &control->ldvp[dc->ldg.ldsv]; 259608ac1c49SNicolas Droux 259708ac1c49SNicolas Droux MUTEX_ENTER(&ring->lock); 259808ac1c49SNicolas Droux ring->ldgp = group; 259908ac1c49SNicolas Droux ring->ldvp = device; 260008ac1c49SNicolas Droux MUTEX_EXIT(&ring->lock); 260108ac1c49SNicolas Droux 260208ac1c49SNicolas Droux return (NXGE_OK); 260308ac1c49SNicolas Droux } 2604678453a8Sspeer #endif /* if defined(sun4v) */ 2605