/*
 * Copyright (C) 2007 VMware, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of the Common
 * Development and Distribution License (the "License") version 1.0
 * and no later version.  You may not use this file except in
 * compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.opensource.org/licenses/cddl1.php
 *
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <vmxnet3.h>

#define BUILD_NUMBER_NUMERIC 20160330
#define BUILD_NUMBER_NUMERIC_STRING "20160330"

/*
 * TODO:
 *    - Tx data ring
 *    - MAC_CAPAB_POLL support
 *    - JF support
 *    - Dynamic RX pool
 */

/*
 * Forward declarations
 */
static int vmxnet3_getstat(void *, uint_t, uint64_t *);
static int vmxnet3_start(void *);
static void vmxnet3_stop(void *);
static int vmxnet3_setpromisc(void *, boolean_t);
static void vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp);
static int vmxnet3_multicst(void *, boolean_t, const uint8_t *);
static int vmxnet3_unicst(void *, const uint8_t *);
static boolean_t vmxnet3_getcapab(void *, mac_capab_t, void *);
static int vmxnet3_setmacprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static void vmxnet3_macpropinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);

int vmxnet3s_debug = 0;

/* MAC callbacks */
static mac_callbacks_t vmxnet3_mac_callbacks = {
    .mc_callbacks = MC_GETCAPAB | MC_IOCTL | MC_SETPROP | MC_PROPINFO,
    .mc_getstat = vmxnet3_getstat,
    .mc_start = vmxnet3_start,
    .mc_stop = vmxnet3_stop,
    .mc_setpromisc = vmxnet3_setpromisc,
    .mc_multicst = vmxnet3_multicst,
    .mc_unicst = vmxnet3_unicst,
    .mc_tx = vmxnet3_tx,
    .mc_ioctl = vmxnet3_ioctl,
    .mc_getcapab = vmxnet3_getcapab,
    .mc_setprop = vmxnet3_setmacprop,
    .mc_propinfo = vmxnet3_macpropinfo
};

/* Tx DMA engine description */
static ddi_dma_attr_t vmxnet3_dma_attrs_tx = {
    .dma_attr_version = DMA_ATTR_V0,
    .dma_attr_addr_lo = 0x0000000000000000ull,
    .dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
    .dma_attr_count_max = 0xFFFFFFFFFFFFFFFFull,
    .dma_attr_align = 0x0000000000000001ull,
    .dma_attr_burstsizes = 0x0000000000000001ull,
    .dma_attr_minxfer = 0x00000001,
    .dma_attr_maxxfer = 0x000000000000FFFFull,
    .dma_attr_seg = 0xFFFFFFFFFFFFFFFFull,
    .dma_attr_sgllen = -1,
    .dma_attr_granular = 0x00000001,
    .dma_attr_flags = 0
};

/* --- */
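/*
 * Descriptive note on vmxnet3_dma_attrs_tx: the attributes allow the full
 * 64-bit address range with byte alignment, dma_attr_sgllen of -1 requests
 * an unlimited scatter/gather list, and dma_attr_maxxfer bounds any single
 * transfer at 0xFFFF bytes.  See ddi_dma_attr(9S) for the individual field
 * semantics.
 */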
/*
 * vmxnet3_getstat --
 *
 *    Fetch the statistics of a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_getstat(void *data, uint_t stat, uint64_t *val)
{
    vmxnet3_softc_t *dp = data;
    UPT1_TxStats *txStats;
    UPT1_RxStats *rxStats;

    VMXNET3_DEBUG(dp, 3, "getstat(%u)\n", stat);

    if (!dp->devEnabled) {
        return (DDI_FAILURE);
    }

    txStats = &VMXNET3_TQDESC(dp)->stats;
    rxStats = &VMXNET3_RQDESC(dp)->stats;

    /*
     * First touch the related register
     */
    switch (stat) {
    case MAC_STAT_MULTIRCV:
    case MAC_STAT_BRDCSTRCV:
    case MAC_STAT_MULTIXMT:
    case MAC_STAT_BRDCSTXMT:
    case MAC_STAT_NORCVBUF:
    case MAC_STAT_IERRORS:
    case MAC_STAT_NOXMTBUF:
    case MAC_STAT_OERRORS:
    case MAC_STAT_RBYTES:
    case MAC_STAT_IPACKETS:
    case MAC_STAT_OBYTES:
    case MAC_STAT_OPACKETS:
        VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
        break;
    case MAC_STAT_IFSPEED:
    case MAC_STAT_COLLISIONS:
    case ETHER_STAT_LINK_DUPLEX:
        /* nothing */
        break;
    default:
        return (DDI_FAILURE);
    }

    /*
     * Then fetch the corresponding stat
     */
    switch (stat) {
    case MAC_STAT_IFSPEED:
        *val = dp->linkSpeed;
        break;
    case MAC_STAT_MULTIRCV:
        *val = rxStats->mcastPktsRxOK;
        break;
    case MAC_STAT_BRDCSTRCV:
        *val = rxStats->bcastPktsRxOK;
        break;
    case MAC_STAT_MULTIXMT:
        *val = txStats->mcastPktsTxOK;
        break;
    case MAC_STAT_BRDCSTXMT:
        *val = txStats->bcastPktsTxOK;
        break;
    case MAC_STAT_NORCVBUF:
        *val = rxStats->pktsRxOutOfBuf + dp->rx_alloc_failed;
        break;
    case MAC_STAT_IERRORS:
        *val = rxStats->pktsRxError;
        break;
    case MAC_STAT_NOXMTBUF:
        *val = txStats->pktsTxDiscard + dp->tx_pullup_failed;
        break;
    case MAC_STAT_OERRORS:
        *val = txStats->pktsTxError + dp->tx_error;
        break;
    case MAC_STAT_COLLISIONS:
        *val = 0;
        break;
    case MAC_STAT_RBYTES:
        *val = rxStats->ucastBytesRxOK + rxStats->mcastBytesRxOK +
            rxStats->bcastBytesRxOK;
        break;
    case MAC_STAT_IPACKETS:
        *val = rxStats->ucastPktsRxOK + rxStats->mcastPktsRxOK +
            rxStats->bcastPktsRxOK;
        break;
    case MAC_STAT_OBYTES:
        *val = txStats->ucastBytesTxOK + txStats->mcastBytesTxOK +
            txStats->bcastBytesTxOK;
        break;
    case MAC_STAT_OPACKETS:
        *val = txStats->ucastPktsTxOK + txStats->mcastPktsTxOK +
            txStats->bcastPktsTxOK;
        break;
    case ETHER_STAT_LINK_DUPLEX:
        *val = LINK_DUPLEX_FULL;
        break;
    default:
        ASSERT(B_FALSE);
    }

    return (DDI_SUCCESS);
}
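/*
 * Once the link is plumbed, these counters surface through the usual MAC
 * statistics interfaces; for example, something like
 *    dladm show-link -s vmxnet3s0
 * should display them (the link name is illustrative and depends on the
 * instance number).
 */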
/*
 * vmxnet3_prepare_drivershared --
 *
 *    Allocate and initialize the shared data structures
 *    of a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_prepare_drivershared(vmxnet3_softc_t *dp)
{
    Vmxnet3_DriverShared *ds;
    size_t allocSize = sizeof (Vmxnet3_DriverShared);

    if (vmxnet3_alloc_dma_mem_1(dp, &dp->sharedData, allocSize,
        B_TRUE) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    ds = VMXNET3_DS(dp);
    (void) memset(ds, 0, allocSize);

    allocSize = sizeof (Vmxnet3_TxQueueDesc) + sizeof (Vmxnet3_RxQueueDesc);
    if (vmxnet3_alloc_dma_mem_128(dp, &dp->queueDescs, allocSize,
        B_TRUE) != DDI_SUCCESS) {
        vmxnet3_free_dma_mem(&dp->sharedData);
        return (DDI_FAILURE);
    }
    (void) memset(dp->queueDescs.buf, 0, allocSize);

    ds->magic = VMXNET3_REV1_MAGIC;

    /* Take care of most of devRead */
    ds->devRead.misc.driverInfo.version = BUILD_NUMBER_NUMERIC;
#ifdef _LP64
    ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_64;
#else
    ds->devRead.misc.driverInfo.gos.gosBits = VMXNET3_GOS_BITS_32;
#endif
    ds->devRead.misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_SOLARIS;
    ds->devRead.misc.driverInfo.gos.gosVer = 10;
    ds->devRead.misc.driverInfo.vmxnet3RevSpt = 1;
    ds->devRead.misc.driverInfo.uptVerSpt = 1;

    ds->devRead.misc.uptFeatures = UPT1_F_RXCSUM;
    ds->devRead.misc.mtu = dp->cur_mtu;

    /* XXX: ds->devRead.misc.maxNumRxSG */
    ds->devRead.misc.numTxQueues = 1;
    ds->devRead.misc.numRxQueues = 1;
    ds->devRead.misc.queueDescPA = dp->queueDescs.bufPA;
    ds->devRead.misc.queueDescLen = allocSize;

    /* TxQueue and RxQueue information is filled in other functions */
    ds->devRead.intrConf.autoMask = (dp->intrMaskMode == VMXNET3_IMM_AUTO);
    ds->devRead.intrConf.numIntrs = 1;
    /* XXX: ds->intr.modLevels */
    ds->devRead.intrConf.eventIntrIdx = 0;

    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL,
        VMXNET3_ADDR_LO(dp->sharedData.bufPA));
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH,
        VMXNET3_ADDR_HI(dp->sharedData.bufPA));

    return (DDI_SUCCESS);
}

/*
 * vmxnet3_destroy_drivershared --
 *
 *    Destroy the shared data structures of a vmxnet3 device.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
static void
vmxnet3_destroy_drivershared(vmxnet3_softc_t *dp)
{
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAL, 0);
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_DSAH, 0);

    vmxnet3_free_dma_mem(&dp->queueDescs);
    vmxnet3_free_dma_mem(&dp->sharedData);
}

/*
 * vmxnet3_alloc_cmdring --
 *
 *    Allocate and initialize the command ring of a queue.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_alloc_cmdring(vmxnet3_softc_t *dp, vmxnet3_cmdring_t *cmdRing)
{
    size_t ringSize = cmdRing->size * sizeof (Vmxnet3_TxDesc);

    if (vmxnet3_alloc_dma_mem_512(dp, &cmdRing->dma, ringSize,
        B_TRUE) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    (void) memset(cmdRing->dma.buf, 0, ringSize);
    cmdRing->avail = cmdRing->size;
    cmdRing->next2fill = 0;
    cmdRing->gen = VMXNET3_INIT_GEN;

    return (DDI_SUCCESS);
}
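/*
 * vmxnet3_alloc_cmdring() above and vmxnet3_alloc_compring() below both go
 * through vmxnet3_alloc_dma_mem_512(), presumably because the device expects
 * ring base addresses to be 512-byte aligned, and both start with the
 * generation bit set to VMXNET3_INIT_GEN so that producer and consumer agree
 * on ownership of the freshly zeroed descriptors.
 */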
/*
 * vmxnet3_alloc_compring --
 *
 *    Allocate and initialize the completion ring of a queue.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_alloc_compring(vmxnet3_softc_t *dp, vmxnet3_compring_t *compRing)
{
    size_t ringSize = compRing->size * sizeof (Vmxnet3_TxCompDesc);

    if (vmxnet3_alloc_dma_mem_512(dp, &compRing->dma, ringSize,
        B_TRUE) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }
    (void) memset(compRing->dma.buf, 0, ringSize);
    compRing->next2comp = 0;
    compRing->gen = VMXNET3_INIT_GEN;

    return (DDI_SUCCESS);
}

/*
 * vmxnet3_prepare_txqueue --
 *
 *    Initialize the tx queue of a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_prepare_txqueue(vmxnet3_softc_t *dp)
{
    Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
    vmxnet3_txqueue_t *txq = &dp->txQueue;

    ASSERT(!(txq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
    ASSERT(!(txq->compRing.size & VMXNET3_RING_SIZE_MASK));
    ASSERT(!txq->cmdRing.dma.buf && !txq->compRing.dma.buf);

    if (vmxnet3_alloc_cmdring(dp, &txq->cmdRing) != DDI_SUCCESS) {
        goto error;
    }
    tqdesc->conf.txRingBasePA = txq->cmdRing.dma.bufPA;
    tqdesc->conf.txRingSize = txq->cmdRing.size;
    tqdesc->conf.dataRingBasePA = 0;
    tqdesc->conf.dataRingSize = 0;

    if (vmxnet3_alloc_compring(dp, &txq->compRing) != DDI_SUCCESS) {
        goto error_cmdring;
    }
    tqdesc->conf.compRingBasePA = txq->compRing.dma.bufPA;
    tqdesc->conf.compRingSize = txq->compRing.size;

    txq->metaRing = kmem_zalloc(txq->cmdRing.size *
        sizeof (vmxnet3_metatx_t), KM_SLEEP);
    ASSERT(txq->metaRing);

    if (vmxnet3_txqueue_init(dp, txq) != DDI_SUCCESS) {
        goto error_mpring;
    }

    return (DDI_SUCCESS);

error_mpring:
    kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));
    vmxnet3_free_dma_mem(&txq->compRing.dma);
error_cmdring:
    vmxnet3_free_dma_mem(&txq->cmdRing.dma);
error:
    return (DDI_FAILURE);
}

/*
 * vmxnet3_prepare_rxqueue --
 *
 *    Initialize the rx queue of a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_prepare_rxqueue(vmxnet3_softc_t *dp)
{
    Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);
    vmxnet3_rxqueue_t *rxq = &dp->rxQueue;

    ASSERT(!(rxq->cmdRing.size & VMXNET3_RING_SIZE_MASK));
    ASSERT(!(rxq->compRing.size & VMXNET3_RING_SIZE_MASK));
    ASSERT(!rxq->cmdRing.dma.buf && !rxq->compRing.dma.buf);

    if (vmxnet3_alloc_cmdring(dp, &rxq->cmdRing) != DDI_SUCCESS) {
        goto error;
    }
    rqdesc->conf.rxRingBasePA[0] = rxq->cmdRing.dma.bufPA;
    rqdesc->conf.rxRingSize[0] = rxq->cmdRing.size;
    rqdesc->conf.rxRingBasePA[1] = 0;
    rqdesc->conf.rxRingSize[1] = 0;

    if (vmxnet3_alloc_compring(dp, &rxq->compRing) != DDI_SUCCESS) {
        goto error_cmdring;
    }
    rqdesc->conf.compRingBasePA = rxq->compRing.dma.bufPA;
    rqdesc->conf.compRingSize = rxq->compRing.size;

    rxq->bufRing = kmem_zalloc(rxq->cmdRing.size *
        sizeof (vmxnet3_bufdesc_t), KM_SLEEP);
    ASSERT(rxq->bufRing);

    if (vmxnet3_rxqueue_init(dp, rxq) != DDI_SUCCESS) {
        goto error_bufring;
    }

    return (DDI_SUCCESS);

error_bufring:
    kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));
    vmxnet3_free_dma_mem(&rxq->compRing.dma);
error_cmdring:
    vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
error:
    return (DDI_FAILURE);
}

/*
 * vmxnet3_destroy_txqueue --
 *
 *    Destroy the tx queue of a vmxnet3 device.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
static void
vmxnet3_destroy_txqueue(vmxnet3_softc_t *dp)
{
    vmxnet3_txqueue_t *txq = &dp->txQueue;

    ASSERT(txq->metaRing);
    ASSERT(txq->cmdRing.dma.buf && txq->compRing.dma.buf);

    vmxnet3_txqueue_fini(dp, txq);

    kmem_free(txq->metaRing, txq->cmdRing.size * sizeof (vmxnet3_metatx_t));

    vmxnet3_free_dma_mem(&txq->cmdRing.dma);
    vmxnet3_free_dma_mem(&txq->compRing.dma);
}

/*
 * vmxnet3_destroy_rxqueue --
 *
 *    Destroy the rx queue of a vmxnet3 device.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
static void
vmxnet3_destroy_rxqueue(vmxnet3_softc_t *dp)
{
    vmxnet3_rxqueue_t *rxq = &dp->rxQueue;

    ASSERT(rxq->bufRing);
    ASSERT(rxq->cmdRing.dma.buf && rxq->compRing.dma.buf);

    vmxnet3_rxqueue_fini(dp, rxq);

    kmem_free(rxq->bufRing, rxq->cmdRing.size * sizeof (vmxnet3_bufdesc_t));

    vmxnet3_free_dma_mem(&rxq->cmdRing.dma);
    vmxnet3_free_dma_mem(&rxq->compRing.dma);
}

/*
 * vmxnet3_refresh_rxfilter --
 *
 *    Apply new RX filters settings to a vmxnet3 device.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
static void
vmxnet3_refresh_rxfilter(vmxnet3_softc_t *dp)
{
    Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);

    ds->devRead.rxFilterConf.rxMode = dp->rxMode;
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}
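/*
 * dp->rxMode is a bitmask of VMXNET3_RXM_* flags.  vmxnet3_start() seeds it
 * with UCAST|BCAST, vmxnet3_setpromisc() toggles PROMISC, and
 * vmxnet3_multicst() toggles MCAST; each of them then calls
 * vmxnet3_refresh_rxfilter() above to push the new mode to the device.
 */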
/*
 * vmxnet3_refresh_linkstate --
 *
 *    Fetch the link state of a vmxnet3 device.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
static void
vmxnet3_refresh_linkstate(vmxnet3_softc_t *dp)
{
    uint32_t ret32;

    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
    ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
    if (ret32 & 1) {
        dp->linkState = LINK_STATE_UP;
        dp->linkSpeed = (ret32 >> 16) * 1000000ULL;
    } else {
        dp->linkState = LINK_STATE_DOWN;
        dp->linkSpeed = 0;
    }
}

/*
 * vmxnet3_start --
 *
 *    Start a vmxnet3 device: allocate and initialize the shared data
 *    structures and send a start command to the device.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_start(void *data)
{
    vmxnet3_softc_t *dp = data;
    Vmxnet3_TxQueueDesc *tqdesc;
    Vmxnet3_RxQueueDesc *rqdesc;
    int txQueueSize, rxQueueSize;
    uint32_t ret32;

    VMXNET3_DEBUG(dp, 1, "start()\n");

    /*
     * Allocate vmxnet3's shared data and advertise its PA
     */
    if (vmxnet3_prepare_drivershared(dp) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "vmxnet3_prepare_drivershared() failed\n");
        goto error;
    }
    tqdesc = VMXNET3_TQDESC(dp);
    rqdesc = VMXNET3_RQDESC(dp);

    /*
     * Create and initialize the tx queue
     */
    txQueueSize = vmxnet3_getprop(dp, "TxRingSize", 32, 4096,
        VMXNET3_DEF_TX_RING_SIZE);
    if (!(txQueueSize & VMXNET3_RING_SIZE_MASK)) {
        dp->txQueue.cmdRing.size = txQueueSize;
        dp->txQueue.compRing.size = txQueueSize;
        dp->txQueue.sharedCtrl = &tqdesc->ctrl;
        if (vmxnet3_prepare_txqueue(dp) != DDI_SUCCESS) {
            VMXNET3_WARN(dp, "vmxnet3_prepare_txqueue() failed\n");
            goto error_shared_data;
        }
    } else {
        VMXNET3_WARN(dp, "invalid tx ring size (%d)\n", txQueueSize);
        goto error_shared_data;
    }

    /*
     * Create and initialize the rx queue
     */
    rxQueueSize = vmxnet3_getprop(dp, "RxRingSize", 32, 4096,
        VMXNET3_DEF_RX_RING_SIZE);
    if (!(rxQueueSize & VMXNET3_RING_SIZE_MASK)) {
        dp->rxQueue.cmdRing.size = rxQueueSize;
        dp->rxQueue.compRing.size = rxQueueSize;
        dp->rxQueue.sharedCtrl = &rqdesc->ctrl;
        if (vmxnet3_prepare_rxqueue(dp) != DDI_SUCCESS) {
            VMXNET3_WARN(dp, "vmxnet3_prepare_rxqueue() failed\n");
            goto error_tx_queue;
        }
    } else {
        VMXNET3_WARN(dp, "invalid rx ring size (%d)\n", rxQueueSize);
        goto error_tx_queue;
    }

    /*
     * Allocate the Tx DMA handle
     */
    if (ddi_dma_alloc_handle(dp->dip, &vmxnet3_dma_attrs_tx, DDI_DMA_SLEEP,
        NULL, &dp->txDmaHandle) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "ddi_dma_alloc_handle() failed\n");
        goto error_rx_queue;
    }

    /*
     * Activate the device
     */
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
    ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
    if (ret32) {
        VMXNET3_WARN(dp, "ACTIVATE_DEV failed: 0x%x\n", ret32);
        goto error_txhandle;
    }
    dp->devEnabled = B_TRUE;

    VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD,
        dp->txQueue.cmdRing.size - 1);

    /*
     * Update the RX filters, must be done after ACTIVATE_DEV
     */
    dp->rxMode = VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST;
    vmxnet3_refresh_rxfilter(dp);

    /*
     * Get the link state now because no events will be generated
     */
    vmxnet3_refresh_linkstate(dp);
    mac_link_update(dp->mac, dp->linkState);

    /*
     * Finally, unmask the interrupt
     */
    VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0);

    return (DDI_SUCCESS);

error_txhandle:
    ddi_dma_free_handle(&dp->txDmaHandle);
error_rx_queue:
    vmxnet3_destroy_rxqueue(dp);
error_tx_queue:
    vmxnet3_destroy_txqueue(dp);
error_shared_data:
    vmxnet3_destroy_drivershared(dp);
error:
    return (DDI_FAILURE);
}

/*
 * vmxnet3_stop --
 *
 *    Stop a vmxnet3 device: send a stop command to the device and
 *    de-allocate the shared data structures.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
static void
vmxnet3_stop(void *data)
{
    vmxnet3_softc_t *dp = data;

    VMXNET3_DEBUG(dp, 1, "stop()\n");

    /*
     * Take the 2 locks related to asynchronous events.
     * These events should always check dp->devEnabled before poking dp.
     */
    mutex_enter(&dp->intrLock);
    mutex_enter(&dp->rxPoolLock);
    VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
    dp->devEnabled = B_FALSE;
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
    mutex_exit(&dp->rxPoolLock);
    mutex_exit(&dp->intrLock);

    ddi_dma_free_handle(&dp->txDmaHandle);

    vmxnet3_destroy_rxqueue(dp);
    vmxnet3_destroy_txqueue(dp);

    vmxnet3_destroy_drivershared(dp);
}

/*
 * vmxnet3_setpromisc --
 *
 *    Set or unset promiscuous mode on a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_setpromisc(void *data, boolean_t promisc)
{
    vmxnet3_softc_t *dp = data;

    VMXNET3_DEBUG(dp, 2, "setpromisc(%s)\n", promisc ? "TRUE" : "FALSE");

    if (promisc) {
        dp->rxMode |= VMXNET3_RXM_PROMISC;
    } else {
        dp->rxMode &= ~VMXNET3_RXM_PROMISC;
    }

    vmxnet3_refresh_rxfilter(dp);

    return (DDI_SUCCESS);
}

/*
 * vmxnet3_multicst --
 *
 *    Add or remove a multicast address from/to a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_multicst(void *data, boolean_t add, const uint8_t *macaddr)
{
    vmxnet3_softc_t *dp = data;
    vmxnet3_dmabuf_t newMfTable;
    int ret = DDI_SUCCESS;
    uint16_t macIdx;
    size_t allocSize;

    VMXNET3_DEBUG(dp, 2, "multicst(%s, "MACADDR_FMT")\n",
        add ? "add" : "remove", MACADDR_FMT_ARGS(macaddr));

    /*
     * First lookup the position of the given MAC to check if it is
     * present in the existing MF table.
     */
    for (macIdx = 0; macIdx < dp->mfTable.bufLen; macIdx += 6) {
        if (memcmp(&dp->mfTable.buf[macIdx], macaddr, 6) == 0) {
            break;
        }
    }

    /*
     * Check for 2 situations we can handle gracefully by bailing out:
     * Adding an already existing filter or removing a non-existing one.
     */
    if (add && macIdx < dp->mfTable.bufLen) {
        VMXNET3_WARN(dp, MACADDR_FMT " already in MC filter list "
            "@ %u\n", MACADDR_FMT_ARGS(macaddr), macIdx / 6);
        ASSERT(B_FALSE);
        goto done;
    }
    if (!add && macIdx == dp->mfTable.bufLen) {
        VMXNET3_WARN(dp, MACADDR_FMT " not in MC filter list @ %u\n",
            MACADDR_FMT_ARGS(macaddr), macIdx / 6);
        ASSERT(B_FALSE);
        goto done;
    }

    /*
     * Create the new MF table
     */
    allocSize = dp->mfTable.bufLen + (add ? 6 : -6);
    if (allocSize) {
        ret = vmxnet3_alloc_dma_mem_1(dp, &newMfTable, allocSize,
            B_TRUE);
        ASSERT(ret == DDI_SUCCESS);
        if (add) {
            (void) memcpy(newMfTable.buf, dp->mfTable.buf,
                dp->mfTable.bufLen);
            (void) memcpy(newMfTable.buf + dp->mfTable.bufLen,
                macaddr, 6);
        } else {
            (void) memcpy(newMfTable.buf, dp->mfTable.buf,
                macIdx);
            (void) memcpy(newMfTable.buf + macIdx,
                dp->mfTable.buf + macIdx + 6,
                dp->mfTable.bufLen - macIdx - 6);
        }
    } else {
        newMfTable.buf = NULL;
        newMfTable.bufPA = 0;
        newMfTable.bufLen = 0;
    }

    /*
     * Now handle 2 corner cases: if we're creating the first filter or
     * removing the last one, we have to update rxMode accordingly.
     */
    if (add && newMfTable.bufLen == 6) {
        ASSERT(!(dp->rxMode & VMXNET3_RXM_MCAST));
        dp->rxMode |= VMXNET3_RXM_MCAST;
        vmxnet3_refresh_rxfilter(dp);
    }
    if (!add && dp->mfTable.bufLen == 6) {
        ASSERT(newMfTable.buf == NULL);
        ASSERT(dp->rxMode & VMXNET3_RXM_MCAST);
        dp->rxMode &= ~VMXNET3_RXM_MCAST;
        vmxnet3_refresh_rxfilter(dp);
    }

    /*
     * Now replace the old MF table with the new one
     */
    if (dp->mfTable.buf) {
        vmxnet3_free_dma_mem(&dp->mfTable);
    }
    dp->mfTable = newMfTable;
    VMXNET3_DS(dp)->devRead.rxFilterConf.mfTablePA = newMfTable.bufPA;
    VMXNET3_DS(dp)->devRead.rxFilterConf.mfTableLen = newMfTable.bufLen;

done:
    /* Always update the filters */
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_MAC_FILTERS);

    return (ret);
}

/*
 * vmxnet3_unicst --
 *
 *    Set the mac address of a vmxnet3 device.
 *
 * Results:
 *    DDI_SUCCESS.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_unicst(void *data, const uint8_t *macaddr)
{
    vmxnet3_softc_t *dp = data;
    uint32_t val32;

    VMXNET3_DEBUG(dp, 2, "unicst("MACADDR_FMT")\n",
        MACADDR_FMT_ARGS(macaddr));

    val32 = *((uint32_t *)(macaddr + 0));
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_MACL, val32);
    val32 = *((uint16_t *)(macaddr + 4));
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_MACH, val32);

    (void) memcpy(dp->macaddr, macaddr, 6);

    return (DDI_SUCCESS);
}


/*
 * vmxnet3_change_mtu --
 *
 *    Change the MTU as seen by the driver. This is only supported when
 *    the mac is stopped.
 *
 * Results:
 *    EBUSY if the device is enabled.
 *    EINVAL for invalid MTU values.
 *    0 on success.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_change_mtu(vmxnet3_softc_t *dp, uint32_t new_mtu)
{
    int ret;

    if (dp->devEnabled)
        return (EBUSY);

    if (new_mtu == dp->cur_mtu) {
        VMXNET3_WARN(dp, "New MTU is same as old mtu : %d.\n", new_mtu);
        return (0);
    }

    if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) {
        VMXNET3_WARN(dp, "New MTU not in valid range [%d, %d].\n",
            VMXNET3_MIN_MTU, VMXNET3_MAX_MTU);
        return (EINVAL);
    }

    dp->cur_mtu = new_mtu;

    if ((ret = mac_maxsdu_update(dp->mac, new_mtu)) != 0)
        VMXNET3_WARN(dp, "Unable to update mac with %d mtu: %d",
            new_mtu, ret);

    return (ret);
}
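/*
 * The MTU can only be changed while the interface is stopped, either through
 * the MAC_PROP_MTU property (e.g. "dladm set-linkprop -p mtu 9000 vmxnet3s0")
 * or through the legacy "accept-jumbo" ndd parameter handled by
 * vmxnet3_ioctl() below (e.g. "ndd -set /dev/vmxnet3s0 accept-jumbo 1").
 * The link and device names above are illustrative only.
 */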
/*
 * vmxnet3_ioctl --
 *
 *    DDI/DDK callback to handle IOCTL in driver. Currently it only handles
 *    the ND_SET ioctl; all others are ignored. ND_SET is used to set or
 *    reset the accept-jumbo ndd parameter for the interface.
 *
 * Results:
 *    Nothing is returned directly: an ACK or NACK is sent back on the mblk
 *    that was used to issue the ioctl.
 *
 * Side effects:
 *    MTU can be changed and device can be reset.
 */
static void
vmxnet3_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
    vmxnet3_softc_t *dp = arg;
    int ret = EINVAL;
    IOCP iocp;
    mblk_t *mp1;
    char *valp, *param;
    int data;

    iocp = (void *)mp->b_rptr;
    iocp->ioc_error = 0;

    switch (iocp->ioc_cmd) {
    case ND_SET:
        /*
         * The mblk in continuation would contain the ndd parameter name
         * and data value to be set
         */
        mp1 = mp->b_cont;
        if (!mp1) {
            VMXNET3_WARN(dp, "Error locating parameter name.\n");
            ret = EINVAL;
            break;
        }

        /* Force null termination */
        mp1->b_datap->db_lim[-1] = '\0';

        /*
         * From /usr/src/uts/common/inet/nd.c : nd_getset()
         * "logic throughout nd_xxx assumes single data block for ioctl.
         * However, existing code sends in some big buffers."
         */
        if (mp1->b_cont) {
            freemsg(mp1->b_cont);
            mp1->b_cont = NULL;
        }

        valp = (char *)mp1->b_rptr;    /* Points to param name */
        ASSERT(valp);
        param = valp;
        VMXNET3_DEBUG(dp, 3, "ND Set ioctl for %s\n", param);

        /*
         * Go past the end of this null terminated string to get the
         * data value.
         */
        while (*valp && valp <= (char *)mp1->b_wptr)
            valp++;

        if (valp > (char *)mp1->b_wptr) {
            /*
             * We are already beyond the readable area of mblk and
             * still haven't found the end of param string.
             */
            VMXNET3_WARN(dp,
                "No data value found to be set to param\n");
            data = -1;
        } else {
            /* Now this points to data string */
            valp++;
            /* Get numeric value of first letter */
            data = (int)*valp - (int)'0';
        }

        if (strcmp("accept-jumbo", param) == 0) {
            if (data == 1) {
                VMXNET3_DEBUG(dp, 2,
                    "Accepting jumbo frames\n");
                ret = vmxnet3_change_mtu(dp, VMXNET3_MAX_MTU);
            } else if (data == 0) {
                VMXNET3_DEBUG(dp, 2,
                    "Rejecting jumbo frames\n");
                ret = vmxnet3_change_mtu(dp, ETHERMTU);
            } else {
                VMXNET3_WARN(dp, "Invalid data value to be set,"
                    " use 0 or 1\n");
                ret = -1;
            }
        }
        freemsg(mp1);
        mp->b_cont = NULL;
        break;

    default:
        if (mp->b_cont) {
            freemsg(mp->b_cont);
            mp->b_cont = NULL;
        }
        ret = -1;
        break;
    }

    if (ret == 0)
        miocack(wq, mp, 0, 0);
    else
        miocnak(wq, mp, 0, EINVAL);
}


/*
 * vmxnet3_getcapab --
 *
 *    Get the capabilities of a vmxnet3 device.
 *
 * Results:
 *    B_TRUE or B_FALSE.
 *
 * Side effects:
 *    None.
 */
static boolean_t
vmxnet3_getcapab(void *data, mac_capab_t capab, void *arg)
{
    vmxnet3_softc_t *dp = data;
    boolean_t ret;

    switch (capab) {
    case MAC_CAPAB_HCKSUM: {
        uint32_t *txflags = arg;
        *txflags = HCKSUM_INET_PARTIAL;
        ret = B_TRUE;
        break;
    }
    case MAC_CAPAB_LSO: {
        mac_capab_lso_t *lso = arg;
        lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
        lso->lso_basic_tcp_ipv4.lso_max = IP_MAXPACKET;
        ret = vmxnet3_getprop(dp, "EnableLSO", 0, 1, 1);
        break;
    }
    default:
        ret = B_FALSE;
    }

    VMXNET3_DEBUG(dp, 2, "getcapab(0x%x) -> %s\n", capab,
        ret ? "yes" : "no");

    return (ret);
}
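/*
 * vmxnet3_getcapab() above advertises partial checksum offload (the stack
 * supplies the start/stuff offsets and the device completes the checksum)
 * and basic TCP/IPv4 LSO, the latter only when the "EnableLSO" property is
 * left enabled.
 */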
/*
 * vmxnet3_setmacprop --
 *
 *    Set a MAC property.
 *
 * Results:
 *    0 on success, errno otherwise.
 *
 * Side effects:
 *    None.
 */
/* ARGSUSED */
static int
vmxnet3_setmacprop(void *data, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
    vmxnet3_softc_t *dp = data;
    int ret = 0;
    uint32_t newmtu;

    switch (pr_num) {
    case MAC_PROP_MTU:
        (void) memcpy(&newmtu, pr_val, sizeof (newmtu));
        ret = vmxnet3_change_mtu(dp, newmtu);
        break;
    default:
        ret = ENOTSUP;
    }

    return (ret);
}

/*
 * vmxnet3_macpropinfo --
 *
 *    Get MAC property information.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 */
/* ARGSUSED */
static void
vmxnet3_macpropinfo(void *data, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
    switch (pr_num) {
    case MAC_PROP_MTU:
        mac_prop_info_set_range_uint32(prh, VMXNET3_MIN_MTU,
            VMXNET3_MAX_MTU);
        break;
    default:
        break;
    }
}

/*
 * vmxnet3_reset --
 *
 *    Reset a vmxnet3 device. Only to be used when the device is wedged.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    The device is reset.
 */
static void
vmxnet3_reset(void *data)
{
    int ret;

    vmxnet3_softc_t *dp = data;

    VMXNET3_DEBUG(dp, 1, "vmxnet3_reset()\n");

    atomic_inc_32(&dp->reset_count);
    vmxnet3_stop(dp);
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
    if ((ret = vmxnet3_start(dp)) != DDI_SUCCESS)
        VMXNET3_WARN(dp, "failed to reset the device: %d", ret);
}

/*
 * vmxnet3_intr_events --
 *
 *    Process pending events on a vmxnet3 device.
 *
 * Results:
 *    B_TRUE if the link state changed, B_FALSE otherwise.
 *
 * Side effects:
 *    None.
 */
static boolean_t
vmxnet3_intr_events(vmxnet3_softc_t *dp)
{
    Vmxnet3_DriverShared *ds = VMXNET3_DS(dp);
    boolean_t linkStateChanged = B_FALSE;
    uint32_t events = ds->ecr;

    if (events) {
        VMXNET3_DEBUG(dp, 2, "events(0x%x)\n", events);
        if (events & (VMXNET3_ECR_RQERR | VMXNET3_ECR_TQERR)) {
            Vmxnet3_TxQueueDesc *tqdesc = VMXNET3_TQDESC(dp);
            Vmxnet3_RxQueueDesc *rqdesc = VMXNET3_RQDESC(dp);

            VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD,
                VMXNET3_CMD_GET_QUEUE_STATUS);
            if (tqdesc->status.stopped) {
                VMXNET3_WARN(dp, "tq error 0x%x\n",
                    tqdesc->status.error);
            }
            if (rqdesc->status.stopped) {
                VMXNET3_WARN(dp, "rq error 0x%x\n",
                    rqdesc->status.error);
            }

            if (ddi_taskq_dispatch(dp->resetTask, vmxnet3_reset,
                dp, DDI_NOSLEEP) == DDI_SUCCESS) {
                VMXNET3_WARN(dp, "reset scheduled\n");
            } else {
                VMXNET3_WARN(dp,
                    "ddi_taskq_dispatch() failed\n");
            }
        }
        if (events & VMXNET3_ECR_LINK) {
            vmxnet3_refresh_linkstate(dp);
            linkStateChanged = B_TRUE;
        }
        if (events & VMXNET3_ECR_DIC) {
            VMXNET3_DEBUG(dp, 1, "device implementation change\n");
        }
        VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_ECR, events);
    }

    return (linkStateChanged);
}
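/*
 * Interrupt masking: in VMXNET3_IMM_ACTIVE mode the handler below masks the
 * interrupt itself (IMR = 1) before processing and unmasks it (IMR = 0) once
 * done; in VMXNET3_IMM_AUTO mode the device masks automatically, so only the
 * final unmask is needed.  VMXNET3_IMM_LAZY is rejected in vmxnet3_attach().
 */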
/*
 * vmxnet3_intr --
 *
 *    Interrupt handler of a vmxnet3 device.
 *
 * Results:
 *    DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 *
 * Side effects:
 *    None.
 */
/* ARGSUSED1 */
static uint_t
vmxnet3_intr(caddr_t data1, caddr_t data2)
{
    vmxnet3_softc_t *dp = (void *) data1;

    VMXNET3_DEBUG(dp, 3, "intr()\n");

    mutex_enter(&dp->intrLock);

    if (dp->devEnabled) {
        boolean_t linkStateChanged;
        boolean_t mustUpdateTx;
        mblk_t *mps;

        if (dp->intrType == DDI_INTR_TYPE_FIXED &&
            !VMXNET3_BAR1_GET32(dp, VMXNET3_REG_ICR)) {
            goto intr_unclaimed;
        }

        if (dp->intrMaskMode == VMXNET3_IMM_ACTIVE) {
            VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 1);
        }

        linkStateChanged = vmxnet3_intr_events(dp);
        mustUpdateTx = vmxnet3_tx_complete(dp, &dp->txQueue);
        mps = vmxnet3_rx_intr(dp, &dp->rxQueue);

        mutex_exit(&dp->intrLock);
        VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_IMR, 0);

        if (linkStateChanged) {
            mac_link_update(dp->mac, dp->linkState);
        }
        if (mustUpdateTx) {
            mac_tx_update(dp->mac);
        }
        if (mps) {
            mac_rx(dp->mac, NULL, mps);
        }

        return (DDI_INTR_CLAIMED);
    }

intr_unclaimed:
    mutex_exit(&dp->intrLock);
    return (DDI_INTR_UNCLAIMED);
}

static int
vmxnet3_kstat_update(kstat_t *ksp, int rw)
{
    vmxnet3_softc_t *dp = ksp->ks_private;
    vmxnet3_kstats_t *statp = ksp->ks_data;

    if (rw == KSTAT_WRITE)
        return (EACCES);

    statp->reset_count.value.ul = dp->reset_count;
    statp->tx_pullup_needed.value.ul = dp->tx_pullup_needed;
    statp->tx_ring_full.value.ul = dp->tx_ring_full;
    statp->rx_alloc_buf.value.ul = dp->rx_alloc_buf;

    return (0);
}

static int
vmxnet3_kstat_init(vmxnet3_softc_t *dp)
{
    vmxnet3_kstats_t *statp;

    dp->devKstats = kstat_create(VMXNET3_MODNAME, dp->instance,
        "statistics", "dev", KSTAT_TYPE_NAMED,
        sizeof (vmxnet3_kstats_t) / sizeof (kstat_named_t), 0);
    if (dp->devKstats == NULL)
        return (DDI_FAILURE);

    dp->devKstats->ks_update = vmxnet3_kstat_update;
    dp->devKstats->ks_private = dp;

    statp = dp->devKstats->ks_data;

    kstat_named_init(&statp->reset_count, "reset_count", KSTAT_DATA_ULONG);
    kstat_named_init(&statp->tx_pullup_needed, "tx_pullup_needed",
        KSTAT_DATA_ULONG);
    kstat_named_init(&statp->tx_ring_full, "tx_ring_full",
        KSTAT_DATA_ULONG);
    kstat_named_init(&statp->rx_alloc_buf, "rx_alloc_buf",
        KSTAT_DATA_ULONG);

    kstat_install(dp->devKstats);

    return (DDI_SUCCESS);
}
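/*
 * The per-instance kstats created above can be inspected from userland with
 * something like "kstat -m vmxnet3s -n statistics" (assuming VMXNET3_MODNAME
 * resolves to "vmxnet3s"; adjust the module name and instance as needed).
 */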
/*
 * vmxnet3_attach --
 *
 *    Probe and attach a vmxnet3 instance to the stack.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    vmxnet3_softc_t *dp;
    mac_register_t *macr;
    uint16_t vendorId, devId, ret16;
    uint32_t ret32;
    int ret, err;
    uint_t uret;

    if (cmd != DDI_ATTACH) {
        goto error;
    }

    /*
     * Allocate the soft state
     */
    dp = kmem_zalloc(sizeof (vmxnet3_softc_t), KM_SLEEP);
    ASSERT(dp);

    dp->dip = dip;
    dp->instance = ddi_get_instance(dip);
    dp->cur_mtu = ETHERMTU;

    VMXNET3_DEBUG(dp, 1, "attach()\n");

    ddi_set_driver_private(dip, dp);

    /*
     * Get access to the PCI bus configuration space
     */
    if (pci_config_setup(dip, &dp->pciHandle) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "pci_config_setup() failed\n");
        goto error_soft_state;
    }

    /*
     * Make sure the chip is a vmxnet3 device
     */
    vendorId = pci_config_get16(dp->pciHandle, PCI_CONF_VENID);
    devId = pci_config_get16(dp->pciHandle, PCI_CONF_DEVID);
    if (vendorId != PCI_VENDOR_ID_VMWARE ||
        devId != PCI_DEVICE_ID_VMWARE_VMXNET3) {
        VMXNET3_WARN(dp, "wrong PCI venid/devid (0x%x, 0x%x)\n",
            vendorId, devId);
        goto error_pci_config;
    }

    /*
     * Make sure we can access the registers through the I/O space
     */
    ret16 = pci_config_get16(dp->pciHandle, PCI_CONF_COMM);
    ret16 |= PCI_COMM_IO | PCI_COMM_ME;
    pci_config_put16(dp->pciHandle, PCI_CONF_COMM, ret16);

    /*
     * Map the I/O space in memory
     */
    if (ddi_regs_map_setup(dip, 1, &dp->bar0, 0, 0, &vmxnet3_dev_attr,
        &dp->bar0Handle) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "ddi_regs_map_setup() for BAR0 failed\n");
        goto error_pci_config;
    }

    if (ddi_regs_map_setup(dip, 2, &dp->bar1, 0, 0, &vmxnet3_dev_attr,
        &dp->bar1Handle) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "ddi_regs_map_setup() for BAR1 failed\n");
        goto error_regs_map_0;
    }

    /*
     * Check the version number of the virtual device
     */
    if (VMXNET3_BAR1_GET32(dp, VMXNET3_REG_VRRS) & 1) {
        VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_VRRS, 1);
    } else {
        VMXNET3_WARN(dp, "incompatible h/w version\n");
        goto error_regs_map_1;
    }

    if (VMXNET3_BAR1_GET32(dp, VMXNET3_REG_UVRS) & 1) {
        VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_UVRS, 1);
    } else {
        VMXNET3_WARN(dp, "incompatible upt version\n");
        goto error_regs_map_1;
    }

    if (vmxnet3_kstat_init(dp) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "unable to initialize kstats");
        goto error_regs_map_1;
    }

    /*
     * Read the MAC address from the device
     */
    ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_MACL);
    *((uint32_t *)(dp->macaddr + 0)) = ret32;
    ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_MACH);
    *((uint16_t *)(dp->macaddr + 4)) = ret32;

    /*
     * Register with the MAC framework
     */
    if (!(macr = mac_alloc(MAC_VERSION))) {
        VMXNET3_WARN(dp, "mac_alloc() failed\n");
        goto error_kstat;
    }

    macr->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    macr->m_driver = dp;
    macr->m_dip = dip;
    macr->m_instance = 0;
    macr->m_src_addr = dp->macaddr;
    macr->m_dst_addr = NULL;
    macr->m_callbacks = &vmxnet3_mac_callbacks;
    macr->m_min_sdu = VMXNET3_MIN_MTU;
    macr->m_max_sdu = ETHERMTU;
    macr->m_margin = VLAN_TAGSZ;
    macr->m_pdata = NULL;
    macr->m_pdata_size = 0;

    ret = mac_register(macr, &dp->mac);
    mac_free(macr);
    if (ret != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "mac_register() failed\n");
        goto error_kstat;
    }

    /*
     * Register the interrupt(s) in this order of preference:
     * MSI-X, MSI, INTx
     */
    VMXNET3_BAR1_PUT32(dp, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR);
    ret32 = VMXNET3_BAR1_GET32(dp, VMXNET3_REG_CMD);
    switch (ret32 & 0x3) {
    case VMXNET3_IT_AUTO:
    case VMXNET3_IT_MSIX:
        dp->intrType = DDI_INTR_TYPE_MSIX;
        err = ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1,
            &ret, DDI_INTR_ALLOC_STRICT);
        if (err == DDI_SUCCESS)
            break;
        VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_MSIX failed, err:%d\n",
            err);
        /* FALLTHROUGH */
    case VMXNET3_IT_MSI:
        dp->intrType = DDI_INTR_TYPE_MSI;
        if (ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1,
            &ret, DDI_INTR_ALLOC_STRICT) == DDI_SUCCESS)
            break;
        VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_MSI failed\n");
        /* FALLTHROUGH */
    case VMXNET3_IT_INTX:
        dp->intrType = DDI_INTR_TYPE_FIXED;
        if (ddi_intr_alloc(dip, &dp->intrHandle, dp->intrType, 0, 1,
            &ret, DDI_INTR_ALLOC_STRICT) == DDI_SUCCESS) {
            break;
        }
        VMXNET3_DEBUG(dp, 2, "DDI_INTR_TYPE_INTX failed\n");
        /* FALLTHROUGH */
    default:
        VMXNET3_WARN(dp, "ddi_intr_alloc() failed\n");
        goto error_mac;
    }
    dp->intrMaskMode = (ret32 >> 2) & 0x3;
    if (dp->intrMaskMode == VMXNET3_IMM_LAZY) {
        VMXNET3_WARN(dp, "Lazy masking is not supported\n");
        goto error_intr;
    }

    if (ddi_intr_get_pri(dp->intrHandle, &uret) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "ddi_intr_get_pri() failed\n");
        goto error_intr;
    }

    VMXNET3_DEBUG(dp, 2, "intrType=0x%x, intrMaskMode=0x%x, intrPrio=%u\n",
        dp->intrType, dp->intrMaskMode, uret);

    /*
     * Create a task queue to reset the device if it wedges.
     */
    dp->resetTask = ddi_taskq_create(dip, "vmxnet3_reset_task", 1,
        TASKQ_DEFAULTPRI, 0);
    if (!dp->resetTask) {
        VMXNET3_WARN(dp, "ddi_taskq_create() failed()\n");
        goto error_intr;
    }

    /*
     * Initialize our mutexes now that we know the interrupt priority
     * This _must_ be done before ddi_intr_enable()
     */
    mutex_init(&dp->intrLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret));
    mutex_init(&dp->txLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret));
    mutex_init(&dp->rxPoolLock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(uret));

    if (ddi_intr_add_handler(dp->intrHandle, vmxnet3_intr,
        dp, NULL) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "ddi_intr_add_handler() failed\n");
        goto error_mutexes;
    }

    err = ddi_intr_get_cap(dp->intrHandle, &dp->intrCap);
    if (err != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "ddi_intr_get_cap() failed %d", err);
        goto error_intr_handler;
    }

    if (dp->intrCap & DDI_INTR_FLAG_BLOCK) {
        err = ddi_intr_block_enable(&dp->intrHandle, 1);
        if (err != DDI_SUCCESS) {
            VMXNET3_WARN(dp, "ddi_intr_block_enable() failed, "
                "err:%d\n", err);
            goto error_intr_handler;
        }
    } else {
        err = ddi_intr_enable(dp->intrHandle);
        if ((err != DDI_SUCCESS)) {
            VMXNET3_WARN(dp, "ddi_intr_enable() failed, err:%d\n",
                err);
            goto error_intr_handler;
        }
    }

    return (DDI_SUCCESS);

error_intr_handler:
    (void) ddi_intr_remove_handler(dp->intrHandle);
error_mutexes:
    mutex_destroy(&dp->rxPoolLock);
    mutex_destroy(&dp->txLock);
    mutex_destroy(&dp->intrLock);
    ddi_taskq_destroy(dp->resetTask);
error_intr:
    (void) ddi_intr_free(dp->intrHandle);
error_mac:
    (void) mac_unregister(dp->mac);
error_kstat:
    kstat_delete(dp->devKstats);
error_regs_map_1:
    ddi_regs_map_free(&dp->bar1Handle);
error_regs_map_0:
    ddi_regs_map_free(&dp->bar0Handle);
error_pci_config:
    pci_config_teardown(&dp->pciHandle);
error_soft_state:
    kmem_free(dp, sizeof (vmxnet3_softc_t));
error:
    return (DDI_FAILURE);
}

/*
 * vmxnet3_detach --
 *
 *    Detach a vmxnet3 instance from the stack.
 *
 * Results:
 *    DDI_SUCCESS or DDI_FAILURE.
 *
 * Side effects:
 *    None.
 */
static int
vmxnet3_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    vmxnet3_softc_t *dp = ddi_get_driver_private(dip);
    unsigned int retries = 0;
    int ret;

    VMXNET3_DEBUG(dp, 1, "detach()\n");

    if (cmd != DDI_DETACH) {
        return (DDI_FAILURE);
    }

    while (dp->rxNumBufs) {
        if (retries++ < 10) {
            VMXNET3_WARN(dp, "rx pending (%u), waiting 1 second\n",
                dp->rxNumBufs);
            delay(drv_usectohz(1000000));
        } else {
            VMXNET3_WARN(dp, "giving up\n");
            return (DDI_FAILURE);
        }
    }

    if (dp->intrCap & DDI_INTR_FLAG_BLOCK) {
        ret = ddi_intr_block_disable(&dp->intrHandle, 1);
    } else {
        ret = ddi_intr_disable(dp->intrHandle);
    }
    if (ret != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "unable to disable interrupts");
        return (DDI_FAILURE);
    }
    if (ddi_intr_remove_handler(dp->intrHandle) != DDI_SUCCESS) {
        VMXNET3_WARN(dp, "unable to remove interrupt handler");
        return (DDI_FAILURE);
    }
    (void) ddi_intr_free(dp->intrHandle);

    VERIFY(mac_unregister(dp->mac) == 0);

    kstat_delete(dp->devKstats);

    if (dp->mfTable.buf) {
        vmxnet3_free_dma_mem(&dp->mfTable);
    }

    mutex_destroy(&dp->rxPoolLock);
    mutex_destroy(&dp->txLock);
    mutex_destroy(&dp->intrLock);
    ddi_taskq_destroy(dp->resetTask);

    ddi_regs_map_free(&dp->bar1Handle);
    ddi_regs_map_free(&dp->bar0Handle);
    pci_config_teardown(&dp->pciHandle);

    kmem_free(dp, sizeof (vmxnet3_softc_t));

    return (DDI_SUCCESS);
}

/*
 * Structures used by the module loader
 */

#define VMXNET3_IDENT "VMware Ethernet v3 (" BUILD_NUMBER_NUMERIC_STRING ")"

DDI_DEFINE_STREAM_OPS(
    vmxnet3_dev_ops,
    nulldev,
    nulldev,
    vmxnet3_attach,
    vmxnet3_detach,
    nodev,
    NULL,
    D_NEW | D_MP,
    NULL,
    ddi_quiesce_not_supported);

static struct modldrv vmxnet3_modldrv = {
    &mod_driverops,        /* drv_modops */
    VMXNET3_IDENT,         /* drv_linkinfo */
    &vmxnet3_dev_ops       /* drv_dev_ops */
};

static struct modlinkage vmxnet3_modlinkage = {
    MODREV_1,                    /* ml_rev */
    { &vmxnet3_modldrv, NULL }   /* ml_linkage */
};

/* Module load entry point */
int
_init(void)
{
    int ret;

    mac_init_ops(&vmxnet3_dev_ops, VMXNET3_MODNAME);
    ret = mod_install(&vmxnet3_modlinkage);
    if (ret != DDI_SUCCESS) {
        mac_fini_ops(&vmxnet3_dev_ops);
    }

    return (ret);
}

/* Module unload entry point */
int
_fini(void)
{
    int ret;

    ret = mod_remove(&vmxnet3_modlinkage);
    if (ret == DDI_SUCCESS) {
        mac_fini_ops(&vmxnet3_dev_ops);
    }

    return (ret);
}

/* Module info entry point */
int
_info(struct modinfo *modinfop)
{
    return (mod_info(&vmxnet3_modlinkage, modinfop));
}

void
vmxnet3_log(int level, vmxnet3_softc_t *dp, char *fmt, ...)
{
    va_list args;

    /* Forward the variadic arguments instead of dropping them */
    va_start(args, fmt);
    vdev_err(dp->dip, level, fmt, args);
    va_end(args);
}