/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2021 OmniOS Community Edition (OmniOSce) Association.
 */

/*
 * Intel Pro/100B Ethernet Driver
 */

#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/note.h>
#include <sys/pci.h>
#include <sys/pci_cap.h>
#include <sys/ethernet.h>
#include <sys/mii.h>
#include <sys/miiregs.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/vlan.h>
#include <sys/list.h>
#include <sys/sysmacros.h>
#include <sys/varargs.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include "iprb.h"
#include "rcvbundl.h"

/*
 * Intel has openly documented the programming interface for these
 * parts in the "Intel 8255x 10/100 Mbps Ethernet Controller Family
 * Open Source Software Developer Manual".
 *
 * While some open source systems have utilized many of the features
 * of some models in this family (especially scatter-gather and IP
 * checksum support), we have elected to offer only the basic
 * functionality.  These are only 10/100 parts, and the additional
 * complexity is not justified by the minimal performance benefit.
 * KISS.  So, we are only supporting the simple 82557 features.
 */
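
/*
 * A quick orientation (descriptive summary of the code below):
 *
 * The device exposes two largely independent engines behind the SCB
 * command register: the Command Unit (CU), which consumes a ring of
 * NUM_TX command blocks (configure, individual/multicast address setup,
 * microcode upload, and transmit commands carrying their packet data
 * inline in the block), and the Receive Unit (RU), which walks a ring
 * of NUM_RX receive frame descriptors.  The two units are guarded by
 * separate locks, culock and rulock.
 */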
static uint16_t iprb_mii_read(void *, uint8_t, uint8_t);
static void iprb_mii_write(void *, uint8_t, uint8_t, uint16_t);
static void iprb_mii_notify(void *, link_state_t);
static int iprb_attach(dev_info_t *);
static int iprb_detach(dev_info_t *);
static int iprb_quiesce(dev_info_t *);
static int iprb_suspend(dev_info_t *);
static int iprb_resume(dev_info_t *);
static int iprb_m_stat(void *, uint_t, uint64_t *);
static int iprb_m_start(void *);
static void iprb_m_stop(void *);
static int iprb_m_promisc(void *, boolean_t);
static int iprb_m_multicst(void *, boolean_t, const uint8_t *);
static int iprb_m_unicst(void *, const uint8_t *);
static mblk_t *iprb_m_tx(void *, mblk_t *);
static void iprb_m_ioctl(void *, queue_t *, mblk_t *);
static int iprb_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static int iprb_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
    void *);
static void iprb_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static void iprb_destroy(iprb_t *);
static int iprb_configure(iprb_t *);
static void iprb_eeprom_sendbits(iprb_t *, uint32_t, uint8_t);
static uint16_t iprb_eeprom_read(iprb_t *, uint16_t);
static void iprb_identify(iprb_t *);
static int iprb_cmd_submit(iprb_t *, uint16_t);
static void iprb_cmd_reclaim(iprb_t *);
static int iprb_cmd_ready(iprb_t *);
static int iprb_cmd_drain(iprb_t *);
static void iprb_rx_add(iprb_t *);
static void iprb_rx_init(iprb_t *);
static mblk_t *iprb_rx(iprb_t *);
static mblk_t *iprb_send(iprb_t *, mblk_t *);
static uint_t iprb_intr(caddr_t, caddr_t);
static void iprb_periodic(void *);
static int iprb_add_intr(iprb_t *);
static int iprb_dma_alloc(iprb_t *, iprb_dma_t *, size_t);
static void iprb_dma_free(iprb_dma_t *);
static iprb_dma_t *iprb_cmd_next(iprb_t *);
static int iprb_set_config(iprb_t *);
static int iprb_set_unicast(iprb_t *);
static int iprb_set_multicast(iprb_t *);
static int iprb_set_ucode(iprb_t *);
static void iprb_update_stats(iprb_t *);
static int iprb_start(iprb_t *);
static void iprb_stop(iprb_t *);
static int iprb_ddi_attach(dev_info_t *, ddi_attach_cmd_t);
static int iprb_ddi_detach(dev_info_t *, ddi_detach_cmd_t);
static void iprb_error(iprb_t *, const char *, ...);

static mii_ops_t iprb_mii_ops = {
	MII_OPS_VERSION,
	iprb_mii_read,
	iprb_mii_write,
	iprb_mii_notify,
	NULL,		/* reset */
};

static mac_callbacks_t iprb_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
	iprb_m_stat,
	iprb_m_start,
	iprb_m_stop,
	iprb_m_promisc,
	iprb_m_multicst,
	iprb_m_unicst,
	iprb_m_tx,
	NULL,
	iprb_m_ioctl,		/* mc_ioctl */
	NULL,			/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	iprb_m_setprop,
	iprb_m_getprop,
	iprb_m_propinfo
};


/*
 * Stream information
 */
DDI_DEFINE_STREAM_OPS(iprb_devops, nulldev, nulldev,
    iprb_ddi_attach, iprb_ddi_detach, nodev, NULL, D_MP, NULL, iprb_quiesce);

static struct modldrv iprb_modldrv = {
	&mod_driverops,			/* drv_modops */
	"Intel 8255x Ethernet",		/* drv_linkinfo */
	&iprb_devops			/* drv_dev_ops */
};

static struct modlinkage iprb_modlinkage = {
	MODREV_1,			/* ml_rev */
	{ &iprb_modldrv, NULL }		/* ml_linkage */
};

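
/*
 * Two sets of access attributes: device CSRs are little-endian and are
 * accessed with strict ordering, while DMA message buffers carry raw
 * packet bytes, so no byte swapping is wanted there and store caching
 * is acceptable.
 */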
static ddi_device_acc_attr_t acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static ddi_device_acc_attr_t buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC
};

/*
 * The 8255x is a 32-bit addressing engine, but it can only address up
 * to 31 bits on a single transaction.  (Far less in practice, it turns
 * out.)  Statistics buffers have to be 16-byte aligned, and as we
 * allocate individual data pieces for other things, there is no
 * compelling reason to use another attribute with support for less
 * strict alignment.
 */
static ddi_dma_attr_t dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xFFFFFFFFU,		/* dma_attr_addr_hi */
	0x7FFFFFFFU,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x100,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xFFFFFFFFU,		/* dma_attr_maxxfer */
	0xFFFFFFFFU,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

#define	DECL_UCODE(x)						\
	static const uint32_t x ## _WORDS[] = x ## _RCVBUNDLE_UCODE
DECL_UCODE(D101_A);
DECL_UCODE(D101_B0);
DECL_UCODE(D101M_B);
DECL_UCODE(D101S);
DECL_UCODE(D102_B);
DECL_UCODE(D102_C);
DECL_UCODE(D102_E);

static uint8_t iprb_bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * We don't bother allowing for tuning of the CPU saver algorithm.
 * The ucode has reasonable defaults built-in.  However, some variants
 * apparently have bug fixes delivered via this ucode, so we still
 * need to support the ucode upload.
 */
typedef struct {
	uint8_t		rev;
	uint8_t		length;
	const uint32_t	*ucode;
} iprb_ucode_t;

#define	UCODE(x)						\
	sizeof (x ## _WORDS) / sizeof (uint32_t), x ## _WORDS

static const iprb_ucode_t iprb_ucode[] = {
	{ REV_82558_A4, UCODE(D101_A) },
	{ REV_82558_B0, UCODE(D101_B0) },
	{ REV_82559_A0, UCODE(D101M_B) },
	{ REV_82559S_A, UCODE(D101S) },
	{ REV_82550, UCODE(D102_B) },
	{ REV_82550_C, UCODE(D102_C) },
	{ REV_82551_F, UCODE(D102_E) },
	{ 0 },
};

int
_init(void)
{
	int	rv;
	mac_init_ops(&iprb_devops, "iprb");
	if ((rv = mod_install(&iprb_modlinkage)) != DDI_SUCCESS) {
		mac_fini_ops(&iprb_devops);
	}
	return (rv);
}

int
_fini(void)
{
	int	rv;
	if ((rv = mod_remove(&iprb_modlinkage)) == DDI_SUCCESS) {
		mac_fini_ops(&iprb_devops);
	}
	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&iprb_modlinkage, modinfop));
}

int
iprb_attach(dev_info_t *dip)
{
	iprb_t		*ip;
	uint16_t	w;
	int		i;
	mac_register_t	*macp;

	ip = kmem_zalloc(sizeof (*ip), KM_SLEEP);
	ddi_set_driver_private(dip, ip);
	ip->dip = dip;

	list_create(&ip->mcast, sizeof (struct iprb_mcast),
	    offsetof(struct iprb_mcast, node));

	/* we don't support high level interrupts, so we don't need cookies */
	mutex_init(&ip->culock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ip->rulock, NULL, MUTEX_DRIVER, NULL);

	if (pci_config_setup(dip, &ip->pcih) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map configuration space");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (ddi_regs_map_setup(dip, 1, &ip->regs, 0, 0, &acc_attr,
	    &ip->regsh) != DDI_SUCCESS) {
		iprb_error(ip, "unable to map device registers");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(10);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
	(void) GET8(ip, CSR_INTCTL);

	/*
	 * Precalculate watchdog times.
	 */
	ip->tx_timeout = TX_WATCHDOG;
	ip->rx_timeout = RX_WATCHDOG;

	iprb_identify(ip);

	/* Obtain our factory MAC address */
	w = iprb_eeprom_read(ip, 0);
	ip->factaddr[0] = w & 0xff;
	ip->factaddr[1] = w >> 8;
	w = iprb_eeprom_read(ip, 1);
	ip->factaddr[2] = w & 0xff;
	ip->factaddr[3] = w >> 8;
	w = iprb_eeprom_read(ip, 2);
	ip->factaddr[4] = w & 0xff;
	ip->factaddr[5] = w >> 8;
	bcopy(ip->factaddr, ip->curraddr, 6);

	if (ip->resumebug) {
		/*
		 * Generally, most devices we will ever see will
		 * already have fixed firmware.  Since I can't verify
		 * the validity of the fix (no suitably downrev
		 * hardware), we'll just do our best to avoid it for
		 * devices that exhibit this behavior.
		 */
		if ((iprb_eeprom_read(ip, 10) & 0x02) == 0) {
			/* EEPROM fix was already applied, assume safe. */
			ip->resumebug = B_FALSE;
		}
	}

	if ((iprb_eeprom_read(ip, 3) & 0x3) != 0x3) {
		cmn_err(CE_CONT, "?Enabling RX errata workaround.\n");
		ip->rxhangbug = B_TRUE;
	}

	/* Determine whether we have an MII or a legacy 80c24 */
	w = iprb_eeprom_read(ip, 6);
	if ((w & 0x3f00) != 0x0600) {
		if ((ip->miih = mii_alloc(ip, dip, &iprb_mii_ops)) == NULL) {
			iprb_error(ip, "unable to allocate MII ops vector");
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
		if (ip->canpause) {
			mii_set_pauseable(ip->miih, B_TRUE, B_FALSE);
		}
	}

	/* Allocate cmds and tx region */
	for (i = 0; i < NUM_TX; i++) {
		/* Command blocks */
		if (iprb_dma_alloc(ip, &ip->cmds[i], CB_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}

	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_t *cb = &ip->cmds[i];
		/* Link the command blocks into a ring */
		PUTCB32(cb, CB_LNK_OFFSET, (ip->cmds[(i + 1) % NUM_TX].paddr));
	}

	for (i = 0; i < NUM_RX; i++) {
		/* Rx packet buffers */
		if (iprb_dma_alloc(ip, &ip->rxb[i], RFD_SIZE) != DDI_SUCCESS) {
			iprb_destroy(ip);
			return (DDI_FAILURE);
		}
	}
	if (iprb_dma_alloc(ip, &ip->stats, STATS_SIZE) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if (iprb_add_intr(ip) != DDI_SUCCESS) {
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		iprb_error(ip, "unable to allocate mac structure");
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = ip;
	macp->m_dip = dip;
	macp->m_src_addr = ip->curraddr;
	macp->m_callbacks = &iprb_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;
	if (mac_register(macp, &ip->mach) != 0) {
		iprb_error(ip, "unable to register mac with framework");
		mac_free(macp);
		iprb_destroy(ip);
		return (DDI_FAILURE);
	}

	mac_free(macp);
	return (DDI_SUCCESS);
}

int
iprb_detach(dev_info_t *dip)
{
	iprb_t	*ip;

	ip = ddi_get_driver_private(dip);
	ASSERT(ip != NULL);

	if (mac_disable(ip->mach) != 0)
		return (DDI_FAILURE);

	(void) mac_unregister(ip->mach);
	iprb_destroy(ip);
	return (DDI_SUCCESS);
}

int
iprb_add_intr(iprb_t *ip)
{
	int	actual;

	if (ddi_intr_alloc(ip->dip, &ip->intrh, DDI_INTR_TYPE_FIXED, 0, 1,
	    &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS) {
		iprb_error(ip, "failed allocating interrupt handle");
		return (DDI_FAILURE);
	}

	if (ddi_intr_add_handler(ip->intrh, iprb_intr, ip, NULL) !=
	    DDI_SUCCESS) {
		(void) ddi_intr_free(ip->intrh);
		ip->intrh = NULL;
		iprb_error(ip, "failed adding interrupt handler");
		return (DDI_FAILURE);
	}
	if (ddi_intr_enable(ip->intrh) != DDI_SUCCESS) {
		(void) ddi_intr_remove_handler(ip->intrh);
		(void) ddi_intr_free(ip->intrh);
		ip->intrh = NULL;
		iprb_error(ip, "failed enabling interrupt");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

int
iprb_dma_alloc(iprb_t *ip, iprb_dma_t *h, size_t size)
{
	size_t			rlen;
	ddi_dma_cookie_t	dmac;
	uint_t			ndmac;

	if (ddi_dma_alloc_handle(ip->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
	    &h->dmah) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma handle");
		return (DDI_FAILURE);
	}
	if (ddi_dma_mem_alloc(h->dmah, size, &buf_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &h->vaddr, &rlen, &h->acch) != DDI_SUCCESS) {
		iprb_error(ip, "unable to allocate dma memory");
		return (DDI_FAILURE);
	}
	bzero(h->vaddr, size);
	if (ddi_dma_addr_bind_handle(h->dmah, NULL, h->vaddr, size,
	    DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac) != DDI_DMA_MAPPED) {
		iprb_error(ip, "unable to map command memory");
		return (DDI_FAILURE);
	}
	h->paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}

void
iprb_dma_free(iprb_dma_t *h)
{
	if (h->paddr != 0)
		(void) ddi_dma_unbind_handle(h->dmah);
	h->paddr = 0;
	if (h->acch != NULL)
		ddi_dma_mem_free(&h->acch);
	h->acch = NULL;
	if (h->dmah != NULL)
		ddi_dma_free_handle(&h->dmah);
	h->dmah = NULL;
}

void
iprb_destroy(iprb_t *ip)
{
	int		i;
	iprb_mcast_t	*mc;

	/* shut down interrupts */
	if (ip->intrh != NULL) {
		(void) ddi_intr_disable(ip->intrh);
		(void) ddi_intr_remove_handler(ip->intrh);
		(void) ddi_intr_free(ip->intrh);
	}
	/* release DMA resources */
	for (i = 0; i < NUM_TX; i++) {
		iprb_dma_free(&ip->cmds[i]);
	}
	for (i = 0; i < NUM_RX; i++) {
		iprb_dma_free(&ip->rxb[i]);
	}
	iprb_dma_free(&ip->stats);

	if (ip->miih)
		mii_free(ip->miih);

	/* clean up the multicast list */
	while ((mc = list_head(&ip->mcast)) != NULL) {
		list_remove(&ip->mcast, mc);
		kmem_free(mc, sizeof (*mc));
	}

	/* tear down register mappings */
	if (ip->pcih)
		pci_config_teardown(&ip->pcih);
	if (ip->regsh)
		ddi_regs_map_free(&ip->regsh);

	/* clean the dip */
	ddi_set_driver_private(ip->dip, NULL);

	list_destroy(&ip->mcast);
	mutex_destroy(&ip->culock);
	mutex_destroy(&ip->rulock);

	/* and finally toss the structure itself */
	kmem_free(ip, sizeof (*ip));
}
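
/*
 * iprb_identify() keys feature flags off of the PCI device and revision
 * IDs: canpause (hardware flow control), canmwi (PCI memory write and
 * invalidate), is557 (original 82557 lacking the newer config bits), and
 * resumebug (parts whose CU resume needs the NOP workaround applied in
 * iprb_cmd_submit()).
 */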
void
iprb_identify(iprb_t *ip)
{
	ip->devid = pci_config_get16(ip->pcih, PCI_CONF_DEVID);
	ip->revid = pci_config_get8(ip->pcih, PCI_CONF_REVID);

	switch (ip->devid) {
	case 0x1229:	/* 8255x family */
	case 0x1030:	/* Intel InBusiness */

		if (ip->revid >= REV_82558_A4) {
			ip->canpause = B_TRUE;
			ip->canmwi = B_TRUE;
		} else {
			ip->is557 = B_TRUE;
		}
		if (ip->revid >= REV_82559_A0)
			ip->resumebug = B_TRUE;
		break;

	case 0x1209:	/* Embedded 82559ER */
		ip->canpause = B_TRUE;
		ip->resumebug = B_TRUE;
		ip->canmwi = B_TRUE;
		break;

	case 0x2449:	/* ICH2 */
	case 0x1031:	/* Pro/100 VE (ICH3) */
	case 0x1032:	/* Pro/100 VE (ICH3) */
	case 0x1033:	/* Pro/100 VM (ICH3) */
	case 0x1034:	/* Pro/100 VM (ICH3) */
	case 0x1038:	/* Pro/100 VM (ICH3) */
		ip->resumebug = B_TRUE;
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;

	default:
		if (ip->revid >= REV_82558_A4)
			ip->canpause = B_TRUE;
		break;
	}

	/* Allow property override MWI support - not normally needed. */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, ip->dip, 0, "MWIEnable", 1) == 0) {
		ip->canmwi = B_FALSE;
	}
}

void
iprb_eeprom_sendbits(iprb_t *ip, uint32_t val, uint8_t nbits)
{
	uint32_t	mask;
	uint16_t	x;

	mask = 1U << (nbits - 1);
	while (mask) {
		x = (mask & val) ? EEPROM_EEDI : 0;
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		mask >>= 1;
	}
}
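
/*
 * The EEPROM is read by bit-banging a simple three-wire serial protocol
 * through CSR_EECTL: each bit is presented on EEDI and clocked by raising
 * and lowering EESK while EECS is asserted.  A read transaction is the
 * opcode 110b followed by the word address, MSB first.  The part answers
 * with a "dummy zero" on EEDO as soon as it has seen the full address,
 * which is how iprb_eeprom_read() below discovers the address width (and
 * hence the EEPROM size) on its first call.
 */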
uint16_t
iprb_eeprom_read(iprb_t *ip, uint16_t address)
{
	uint16_t	val;
	int		mask;
	uint16_t	n;
	uint16_t	bits;

	/* if we don't know the address size yet call again to determine it */
	if ((address != 0) && (ip->eeprom_bits == 0))
		(void) iprb_eeprom_read(ip, 0);

	if ((bits = ip->eeprom_bits) == 0) {
		bits = 8;
		ASSERT(address == 0);
	}
	/* enable the EEPROM chip select */
	PUT16(ip, CSR_EECTL, EEPROM_EECS);
	drv_usecwait(100);

	/* send a read command */
	iprb_eeprom_sendbits(ip, 6, 3);
	n = 0;
	for (mask = (1U << (bits - 1)); mask != 0; mask >>= 1) {
		uint16_t x = (mask & address) ? EEPROM_EEDI : 0;
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EESK | EEPROM_EECS);
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, x | EEPROM_EECS);
		drv_usecwait(100);

		n++;
		/* check the dummy 0 bit */
		if ((GET16(ip, CSR_EECTL) & EEPROM_EEDO) == 0) {
			if (ip->eeprom_bits == 0) {
				ip->eeprom_bits = n;
				cmn_err(CE_CONT, "?EEPROM size %d words.\n",
				    1U << ip->eeprom_bits);
			}
			break;
		}
	}
	if (n != ip->eeprom_bits) {
		iprb_error(ip, "cannot determine EEPROM size (%d, %d)",
		    ip->eeprom_bits, n);
	}

	/* shift out a 16-bit word */
	val = 0;
	for (mask = 0x8000; mask; mask >>= 1) {
		PUT16(ip, CSR_EECTL, EEPROM_EECS | EEPROM_EESK);
		drv_usecwait(100);
		if (GET16(ip, CSR_EECTL) & EEPROM_EEDO)
			val |= mask;
		drv_usecwait(100);
		PUT16(ip, CSR_EECTL, EEPROM_EECS);
		drv_usecwait(100);
	}

	/* and disable the eeprom */
	PUT16(ip, CSR_EECTL, 0);
	drv_usecwait(100);

	return (val);
}

int
iprb_cmd_ready(iprb_t *ip)
{
	/* wait for pending SCB commands to be accepted */
	for (int cnt = 1000000; cnt != 0; cnt -= 10) {
		if (GET8(ip, CSR_CMD) == 0) {
			return (DDI_SUCCESS);
		}
		drv_usecwait(10);
	}
	iprb_error(ip, "timeout waiting for chip to become ready");
	return (DDI_FAILURE);
}

void
iprb_cmd_reclaim(iprb_t *ip)
{
	while (ip->cmd_count) {
		iprb_dma_t *cb = &ip->cmds[ip->cmd_tail];

		SYNCCB(cb, CB_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETCB16(cb, CB_STS_OFFSET) & CB_STS_C) == 0) {
			break;
		}

		ip->cmd_tail++;
		ip->cmd_tail %= NUM_TX;
		ip->cmd_count--;
		if (ip->cmd_count == 0) {
			ip->tx_wdog = 0;
		} else {
			ip->tx_wdog = gethrtime();
		}
	}
}

int
iprb_cmd_drain(iprb_t *ip)
{
	for (int i = 1000000; i; i -= 10) {
		iprb_cmd_reclaim(ip);
		if (ip->cmd_count == 0)
			return (DDI_SUCCESS);
		drv_usecwait(10);
	}
	iprb_error(ip, "time out waiting for commands to drain");
	return (DDI_FAILURE);
}
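
/*
 * Command submission runs over a fixed ring of NUM_TX command blocks.
 * Every new command is written with the S (suspend) bit set so the CU
 * parks after executing it; the S bit is then cleared on the previously
 * submitted block and a CU resume is issued.  This keeps the CU chasing
 * the tail of the ring without ever running off the end of it.
 * iprb_cmd_reclaim() retires blocks once the device marks them complete
 * (CB_STS_C), and iprb_cmd_next() refuses to hand out a block while all
 * NUM_TX of them are still outstanding.
 */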
int
iprb_cmd_submit(iprb_t *ip, uint16_t cmd)
{
	iprb_dma_t	*ncb = &ip->cmds[ip->cmd_head];
	iprb_dma_t	*lcb = &ip->cmds[ip->cmd_last];

	/* If this command will consume the last CB, interrupt when done */
	ASSERT((ip->cmd_count) < NUM_TX);
	if (ip->cmd_count == (NUM_TX - 1)) {
		cmd |= CB_CMD_I;
	}

	/* clear the status entry */
	PUTCB16(ncb, CB_STS_OFFSET, 0);

	/* suspend upon completion of this new command */
	cmd |= CB_CMD_S;
	PUTCB16(ncb, CB_CMD_OFFSET, cmd);
	SYNCCB(ncb, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the suspend flag from the last submitted command */
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
	PUTCB16(lcb, CB_CMD_OFFSET, GETCB16(lcb, CB_CMD_OFFSET) & ~CB_CMD_S);
	SYNCCB(lcb, CB_CMD_OFFSET, 2, DDI_DMA_SYNC_FORDEV);


	/*
	 * If the chip has a resume bug, then we need to try this as a
	 * workaround.  Some anecdotal evidence is that this will help
	 * solve the resume bug.  It's a performance hit, but only if the
	 * EEPROM is not updated.  (In theory we could do this only for
	 * 10 Mbps HDX, but since it should just about never get used, we
	 * keep it simple.)
	 */
	if (ip->resumebug) {
		if (iprb_cmd_ready(ip) != DDI_SUCCESS)
			return (DDI_FAILURE);
		PUT8(ip, CSR_CMD, CUC_NOP);
		(void) GET8(ip, CSR_CMD);
		drv_usecwait(1);
	}

	/* wait for the SCB to be ready to accept a new command */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Finally we can resume the CU.  Note that if this is the first
	 * command in the sequence (i.e. if the CU is IDLE), or if the
	 * CU is already busy working, then this CU resume command
	 * will not have any effect.
	 */
	PUT8(ip, CSR_CMD, CUC_RESUME);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	ip->tx_wdog = gethrtime();
	ip->cmd_last = ip->cmd_head;
	ip->cmd_head++;
	ip->cmd_head %= NUM_TX;
	ip->cmd_count++;

	return (DDI_SUCCESS);
}

iprb_dma_t *
iprb_cmd_next(iprb_t *ip)
{
	if (ip->cmd_count >= NUM_TX) {
		return (NULL);
	}
	return (&ip->cmds[ip->cmd_head]);
}

int
iprb_set_unicast(iprb_t *ip)
{
	iprb_dma_t	*cb;

	ASSERT(mutex_owned(&ip->culock));

	if ((cb = iprb_cmd_next(ip)) == NULL)
		return (DDI_FAILURE);

	PUTCBEA(cb, CB_IAS_ADR_OFFSET, ip->curraddr);
	return (iprb_cmd_submit(ip, CB_CMD_IAS));
}

int
iprb_set_multicast(iprb_t *ip)
{
	iprb_dma_t	*cb;
	iprb_mcast_t	*mc;
	int		i;
	list_t		*l;

	ASSERT(mutex_owned(&ip->culock));

	if ((ip->nmcast <= 0) || (ip->nmcast > CB_MCS_CNT_MAX)) {
		/*
		 * Only send the list if the total number of multicast
		 * addresses is nonzero and small enough to fit.  We
		 * don't error out if it is too big, because in that
		 * case we will use the "allmulticast" support
		 * via iprb_set_config instead.
		 */
		return (DDI_SUCCESS);
	}

	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}

	l = &ip->mcast;
	for (mc = list_head(l), i = 0; mc; mc = list_next(l, mc), i++) {
		PUTCBEA(cb, CB_MCS_ADR_OFFSET + (i * 6), mc->addr);
	}
	ASSERT(i == ip->nmcast);
	PUTCB16(cb, CB_MCS_CNT_OFFSET, i);
	return (iprb_cmd_submit(ip, CB_CMD_MCS));
}
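
/*
 * The CONFIGURE command takes a 22-byte parameter block.  The constants
 * below appear to follow the defaults suggested by the 8255x Open Source
 * Software Developer Manual, with a handful of bits keyed off of our
 * feature flags: promiscuous reception (bytes 6, 7 and 15), MWI enable
 * (byte 3), a bit set when an MII PHY is present (bytes 8 and 15), flow
 * control on parts that support it (bytes 17 and 19), and "receive all
 * multicast" when the multicast list is too large for a single MCS
 * command (byte 21).
 */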
int
iprb_set_config(iprb_t *ip)
{
	iprb_dma_t	*cb;

	ASSERT(mutex_owned(&ip->culock));
	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}
	PUTCB8(cb, CB_CONFIG_OFFSET + 0, 0x16);
	PUTCB8(cb, CB_CONFIG_OFFSET + 1, 0x8);
	PUTCB8(cb, CB_CONFIG_OFFSET + 2, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 3, (ip->canmwi ? 1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 4, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 5, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 6, (ip->promisc ? 0x80 : 0) | 0x3a);
	PUTCB8(cb, CB_CONFIG_OFFSET + 7, (ip->promisc ? 0 : 0x1) | 2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 8, (ip->miih ? 0x1 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 9, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 10, 0x2e);
	PUTCB8(cb, CB_CONFIG_OFFSET + 11, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 12, (ip->is557 ? 0 : 1) | 0x60);
	PUTCB8(cb, CB_CONFIG_OFFSET + 13, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 14, 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 15,
	    (ip->miih ? 0x80 : 0) | (ip->promisc ? 0x1 : 0) | 0x48);
	PUTCB8(cb, CB_CONFIG_OFFSET + 16, 0);
	PUTCB8(cb, CB_CONFIG_OFFSET + 17, (ip->canpause ? 0x40 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 18, (ip->is557 ? 0 : 0x8) | 0xf2);
	PUTCB8(cb, CB_CONFIG_OFFSET + 19,
	    ((ip->revid < REV_82558_B0) ? 0 : 0x80) |
	    (ip->canpause ? 0x18 : 0));
	PUTCB8(cb, CB_CONFIG_OFFSET + 20, 0x3f);
	PUTCB8(cb, CB_CONFIG_OFFSET + 21,
	    ((ip->nmcast >= CB_MCS_CNT_MAX) ? 0x8 : 0) | 0x5);

	return (iprb_cmd_submit(ip, CB_CMD_CONFIG));
}

int
iprb_set_ucode(iprb_t *ip)
{
	iprb_dma_t	*cb;
	const iprb_ucode_t *uc = NULL;
	int		i;

	for (i = 0; iprb_ucode[i].length; i++) {
		if (iprb_ucode[i].rev == ip->revid) {
			uc = &iprb_ucode[i];
			break;
		}
	}
	if (uc == NULL) {
		/* no matching firmware found, assume success */
		return (DDI_SUCCESS);
	}

	ASSERT(mutex_owned(&ip->culock));
	if ((cb = iprb_cmd_next(ip)) == NULL) {
		return (DDI_FAILURE);
	}
	for (i = 0; i < uc->length; i++) {
		PUTCB32(cb, (CB_UCODE_OFFSET + i * 4), uc->ucode[i]);
	}
	return (iprb_cmd_submit(ip, CB_CMD_UCODE));
}

int
iprb_configure(iprb_t *ip)
{
	ASSERT(mutex_owned(&ip->culock));

	if (iprb_cmd_drain(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (iprb_set_config(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (iprb_set_unicast(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if (iprb_set_multicast(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

void
iprb_stop(iprb_t *ip)
{
	/* go idle */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(50);

	/* shut off device interrupts */
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);
}
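
/*
 * iprb_start() performs the full bring-up: a selective reset followed by
 * a software reset with interrupts masked, programming the CU and RU base
 * registers to zero (so that command block and RFD link addresses are
 * treated as absolute 32-bit physical addresses), kicking the CU with an
 * initial NOP command block, uploading any applicable microcode,
 * initializing the RFD ring and starting the RU, and finally unmasking
 * device interrupts.
 */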
int
iprb_start(iprb_t *ip)
{
	iprb_dma_t	*cb;

	ASSERT(mutex_owned(&ip->rulock));
	ASSERT(mutex_owned(&ip->culock));

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(50);

	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	(void) GET32(ip, CSR_PORT);
	drv_usecwait(10);
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	/* Reset pointers */
	ip->cmd_head = ip->cmd_tail = 0;
	ip->cmd_last = NUM_TX - 1;
	ip->cmd_count = 0;

	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, CUC_CUBASE);
	(void) GET8(ip, CSR_CMD);

	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, 0);
	PUT8(ip, CSR_CMD, RUC_RUBASE);
	(void) GET8(ip, CSR_CMD);

	/* Send a NOP.  This will be the first command seen by the device. */
	cb = iprb_cmd_next(ip);
	VERIFY3P(cb, !=, NULL);
	if (iprb_cmd_submit(ip, CB_CMD_NOP) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* as that was the first command, go ahead and submit a CU start */
	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);
	PUT32(ip, CSR_GEN_PTR, cb->paddr);
	PUT8(ip, CSR_CMD, CUC_START);
	(void) GET8(ip, CSR_CMD);

	/* Upload firmware. */
	if (iprb_set_ucode(ip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Set up RFDs */
	iprb_rx_init(ip);

	PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
	/* wait for the SCB */
	(void) iprb_cmd_ready(ip);
	PUT8(ip, CSR_CMD, RUC_START);
	(void) GET8(ip, CSR_CMD);	/* flush CSR */

	/* Enable device interrupts */
	PUT8(ip, CSR_INTCTL, 0);
	(void) GET8(ip, CSR_INTCTL);

	return (DDI_SUCCESS);
}

void
iprb_update_stats(iprb_t *ip)
{
	iprb_dma_t	*sp = &ip->stats;
	hrtime_t	tstamp;
	int		i;

	ASSERT(mutex_owned(&ip->culock));

	/* Collect the hardware stats, but don't keep redoing it */
	tstamp = gethrtime();
	if (tstamp / NANOSEC == ip->stats_time / NANOSEC)
		return;

	PUTSTAT(sp, STATS_DONE_OFFSET, 0);
	SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORDEV);

	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return;
	PUT32(ip, CSR_GEN_PTR, sp->paddr);
	PUT8(ip, CSR_CMD, CUC_STATSBASE);
	(void) GET8(ip, CSR_CMD);

	if (iprb_cmd_ready(ip) != DDI_SUCCESS)
		return;
	PUT8(ip, CSR_CMD, CUC_STATS_RST);
	(void) GET8(ip, CSR_CMD);	/* flush wb */

	for (i = 10000; i; i -= 10) {
		SYNCSTATS(sp, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		if (GETSTAT(sp, STATS_DONE_OFFSET) == STATS_RST_DONE) {
			/* yay stats are updated */
			break;
		}
		drv_usecwait(10);
	}
	if (i == 0) {
		iprb_error(ip, "time out acquiring hardware statistics");
		return;
	}

	ip->ex_coll += GETSTAT(sp, STATS_TX_MAXCOL_OFFSET);
	ip->late_coll += GETSTAT(sp, STATS_TX_LATECOL_OFFSET);
	ip->uflo += GETSTAT(sp, STATS_TX_UFLO_OFFSET);
	ip->defer_xmt += GETSTAT(sp, STATS_TX_DEFER_OFFSET);
	ip->one_coll += GETSTAT(sp, STATS_TX_ONECOL_OFFSET);
	ip->multi_coll += GETSTAT(sp, STATS_TX_MULTCOL_OFFSET);
	ip->collisions += GETSTAT(sp, STATS_TX_TOTCOL_OFFSET);
	ip->fcs_errs += GETSTAT(sp, STATS_RX_FCS_OFFSET);
	ip->align_errs += GETSTAT(sp, STATS_RX_ALIGN_OFFSET);
	ip->norcvbuf += GETSTAT(sp, STATS_RX_NOBUF_OFFSET);
	ip->oflo += GETSTAT(sp, STATS_RX_OFLO_OFFSET);
	ip->runt += GETSTAT(sp, STATS_RX_SHORT_OFFSET);

	ip->stats_time = tstamp;
}

mblk_t *
iprb_send(iprb_t *ip, mblk_t *mp)
{
	iprb_dma_t	*cb;
	size_t		sz;

	ASSERT(mutex_owned(&ip->culock));

	/* possibly reclaim some CBs */
	iprb_cmd_reclaim(ip);

	cb = iprb_cmd_next(ip);

	if (cb == NULL) {
		/* flow control */
		ip->wantw = B_TRUE;
		return (mp);
	}

	if ((sz = msgsize(mp)) > (ETHERMAX + VLAN_TAGSZ)) {
		/* Generally this should never occur */
		ip->macxmt_errs++;
		freemsg(mp);
		return (NULL);
	}

	ip->opackets++;
	ip->obytes += sz;

	PUTCB32(cb, CB_TX_TBD_OFFSET, 0xffffffffU);
	PUTCB16(cb, CB_TX_COUNT_OFFSET, (sz & 0x3fff) | CB_TX_EOF);
	PUTCB8(cb, CB_TX_THRESH_OFFSET, (sz / 8) & 0xff);
	PUTCB8(cb, CB_TX_NUMBER_OFFSET, 0);
	mcopymsg(mp, cb->vaddr + CB_TX_DATA_OFFSET);
	if (cb->vaddr[CB_TX_DATA_OFFSET] & 0x1) {
		if (bcmp(cb->vaddr + CB_TX_DATA_OFFSET, &iprb_bcast, 6) != 0) {
			ip->multixmt++;
		} else {
			ip->brdcstxmt++;
		}
	}
	SYNCCB(cb, 0, CB_TX_DATA_OFFSET + sz, DDI_DMA_SYNC_FORDEV);

	if (iprb_cmd_submit(ip, CB_CMD_TX) != DDI_SUCCESS) {
		ip->macxmt_errs++;
	}

	return (NULL);
}
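
/*
 * Receive uses a ring of RFDs whose packet data areas follow the
 * descriptor header in the same DMA buffer.  The most recently appended
 * RFD always carries the EL (end of list) flag; iprb_rx_add() recycles
 * the current descriptor to the tail of the ring by marking it EL and
 * then clearing that flag on the previous tail, so the RU never runs
 * into an unprepared descriptor.
 */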
void
iprb_rx_add(iprb_t *ip)
{
	uint16_t	last, curr, next;
	iprb_dma_t	*rfd, *nfd, *lfd;

	ASSERT(mutex_owned(&ip->rulock));

	curr = ip->rx_index;
	last = ip->rx_last;
	next = (curr + 1) % NUM_RX;

	ip->rx_last = curr;
	ip->rx_index = next;

	lfd = &ip->rxb[last];
	rfd = &ip->rxb[curr];
	nfd = &ip->rxb[next];

	PUTRFD32(rfd, RFD_LNK_OFFSET, nfd->paddr);
	PUTRFD16(rfd, RFD_CTL_OFFSET, RFD_CTL_EL);
	PUTRFD16(rfd, RFD_SIZ_OFFSET, RFD_SIZE - RFD_PKT_OFFSET);
	PUTRFD16(rfd, RFD_CNT_OFFSET, 0);
	SYNCRFD(rfd, 0, RFD_PKT_OFFSET, DDI_DMA_SYNC_FORDEV);
	/* clear the suspend & EL bits from the previous RFD */
	PUTRFD16(lfd, RFD_CTL_OFFSET, 0);
	SYNCRFD(lfd, RFD_CTL_OFFSET, 2, DDI_DMA_SYNC_FORDEV);
}

void
iprb_rx_init(iprb_t *ip)
{
	ip->rx_index = 0;
	ip->rx_last = NUM_RX - 1;
	for (int i = 0; i < NUM_RX; i++)
		iprb_rx_add(ip);
	ip->rx_index = 0;
	ip->rx_last = NUM_RX - 1;
}

mblk_t *
iprb_rx(iprb_t *ip)
{
	iprb_dma_t	*rfd;
	uint16_t	cnt;
	uint16_t	sts;
	int		i;
	mblk_t		*mplist;
	mblk_t		**mpp;
	mblk_t		*mp;

	mplist = NULL;
	mpp = &mplist;

	for (i = 0; i < NUM_RX; i++) {
		rfd = &ip->rxb[ip->rx_index];
		SYNCRFD(rfd, RFD_STS_OFFSET, 2, DDI_DMA_SYNC_FORKERNEL);
		if ((GETRFD16(rfd, RFD_STS_OFFSET) & RFD_STS_C) == 0) {
			break;
		}

		ip->rx_wdog = gethrtime();

		SYNCRFD(rfd, 0, 0, DDI_DMA_SYNC_FORKERNEL);
		cnt = GETRFD16(rfd, RFD_CNT_OFFSET);
		cnt &= ~(RFD_CNT_EOF | RFD_CNT_F);
		sts = GETRFD16(rfd, RFD_STS_OFFSET);

		if (cnt > (ETHERMAX + VLAN_TAGSZ)) {
			ip->toolong++;
			iprb_rx_add(ip);
			continue;
		}
		if (((sts & RFD_STS_OK) == 0) && (sts & RFD_STS_ERRS)) {
			iprb_rx_add(ip);
			continue;
		}
		if ((mp = allocb(cnt, BPRI_MED)) == NULL) {
			ip->norcvbuf++;
			iprb_rx_add(ip);
			continue;
		}
		bcopy(rfd->vaddr + RFD_PKT_OFFSET, mp->b_wptr, cnt);

		/* return it to the RFD list */
		iprb_rx_add(ip);

		mp->b_wptr += cnt;
		ip->ipackets++;
		ip->rbytes += cnt;
		if (mp->b_rptr[0] & 0x1) {
			if (bcmp(mp->b_rptr, &iprb_bcast, 6) != 0) {
				ip->multircv++;
			} else {
				ip->brdcstrcv++;
			}
		}
		*mpp = mp;
		mpp = &mp->b_next;
	}
	return (mplist);
}

int
iprb_m_promisc(void *arg, boolean_t on)
{
	iprb_t	*ip = arg;

	mutex_enter(&ip->culock);
	ip->promisc = on;
	if (ip->running && !ip->suspended)
		(void) iprb_configure(ip);
	mutex_exit(&ip->culock);
	return (0);
}

int
iprb_m_unicst(void *arg, const uint8_t *macaddr)
{
	iprb_t	*ip = arg;

	mutex_enter(&ip->culock);
	bcopy(macaddr, ip->curraddr, 6);
	if (ip->running && !ip->suspended)
		(void) iprb_configure(ip);
	mutex_exit(&ip->culock);
	return (0);
}
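
/*
 * Multicast addresses are tracked on a simple driver-private list.  As
 * long as the list fits in a single multicast setup command it is
 * programmed exactly; once it grows past CB_MCS_CNT_MAX the device is
 * instead reconfigured to receive all multicast traffic (see
 * iprb_set_multicast() and config byte 21 in iprb_set_config()).
 */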
int
iprb_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
{
	iprb_t		*ip = arg;
	list_t		*l = &ip->mcast;
	iprb_mcast_t	*mc;

	if (add) {
		mc = kmem_alloc(sizeof (*mc), KM_NOSLEEP);
		if (mc == NULL) {
			return (ENOMEM);
		}
		bcopy(macaddr, mc->addr, 6);
		mutex_enter(&ip->culock);
		list_insert_head(l, mc);
		ip->nmcast++;
		if (ip->running && !ip->suspended)
			(void) iprb_configure(ip);
		mutex_exit(&ip->culock);
	} else {
		mutex_enter(&ip->culock);
		for (mc = list_head(l); mc != NULL; mc = list_next(l, mc)) {
			if (bcmp(macaddr, mc->addr, 6) == 0) {
				list_remove(&ip->mcast, mc);
				ip->nmcast--;
				if (ip->running && !ip->suspended)
					(void) iprb_configure(ip);
				break;
			}
		}
		mutex_exit(&ip->culock);
		if (mc)
			kmem_free(mc, sizeof (*mc));
	}
	return (0);
}

int
iprb_m_start(void *arg)
{
	int	rv;
	iprb_t	*ip = arg;

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);
	rv = ip->suspended ? 0 : iprb_start(ip);
	if (rv == 0)
		ip->running = B_TRUE;
	ip->perh = ddi_periodic_add(iprb_periodic, ip, 5000000000, 0);
	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
	if (rv == 0) {
		if (ip->miih)
			mii_start(ip->miih);
		else
			/* might be a lie. */
			mac_link_update(ip->mach, LINK_STATE_UP);
	}
	return (rv ? EIO : 0);
}

void
iprb_m_stop(void *arg)
{
	iprb_t	*ip = arg;

	if (ip->miih) {
		mii_stop(ip->miih);
	} else {
		mac_link_update(ip->mach, LINK_STATE_DOWN);
	}

	ddi_periodic_delete(ip->perh);
	ip->perh = 0;

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);

	if (!ip->suspended) {
		iprb_update_stats(ip);
		iprb_stop(ip);
	}
	ip->running = B_FALSE;
	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
}
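
/*
 * Statistics are a blend of three sources: link statistics maintained by
 * the common MII layer (when an MII PHY is present), hardware counters
 * harvested from the device's statistics dump area by iprb_update_stats(),
 * and software counters kept by the driver itself (packet and byte counts,
 * allocation failures and the like).
 */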
int
iprb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	iprb_t	*ip = arg;

	if (ip->miih && (mii_m_getstat(ip->miih, stat, val) == 0)) {
		return (0);
	}

	mutex_enter(&ip->culock);
	if ((!ip->suspended) && (ip->running)) {
		iprb_update_stats(ip);
	}
	mutex_exit(&ip->culock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		if (ip->miih == NULL) {
			*val = 10000000;	/* 10 Mbps */
		}
		break;
	case ETHER_STAT_LINK_DUPLEX:
		if (ip->miih == NULL) {
			*val = LINK_DUPLEX_UNKNOWN;
		}
		break;
	case MAC_STAT_MULTIRCV:
		*val = ip->multircv;
		break;
	case MAC_STAT_BRDCSTRCV:
		*val = ip->brdcstrcv;
		break;
	case MAC_STAT_MULTIXMT:
		*val = ip->multixmt;
		break;
	case MAC_STAT_BRDCSTXMT:
		*val = ip->brdcstxmt;
		break;
	case MAC_STAT_IPACKETS:
		*val = ip->ipackets;
		break;
	case MAC_STAT_RBYTES:
		*val = ip->rbytes;
		break;
	case MAC_STAT_OPACKETS:
		*val = ip->opackets;
		break;
	case MAC_STAT_OBYTES:
		*val = ip->obytes;
		break;
	case MAC_STAT_NORCVBUF:
		*val = ip->norcvbuf;
		break;
	case MAC_STAT_COLLISIONS:
		*val = ip->collisions;
		break;
	case MAC_STAT_IERRORS:
		*val = ip->align_errs +
		    ip->fcs_errs +
		    ip->norcvbuf +
		    ip->runt +
		    ip->toolong +
		    ip->macrcv_errs;
		break;
	case MAC_STAT_OERRORS:
		*val = ip->ex_coll +
		    ip->late_coll +
		    ip->uflo +
		    ip->macxmt_errs +
		    ip->nocarrier;
		break;
	case ETHER_STAT_ALIGN_ERRORS:
		*val = ip->align_errs;
		break;
	case ETHER_STAT_FCS_ERRORS:
		*val = ip->fcs_errs;
		break;
	case ETHER_STAT_DEFER_XMTS:
		*val = ip->defer_xmt;
		break;
	case ETHER_STAT_FIRST_COLLISIONS:
		*val = ip->one_coll + ip->multi_coll + ip->ex_coll;
		break;
	case ETHER_STAT_MULTI_COLLISIONS:
		*val = ip->multi_coll;
		break;
	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = ip->late_coll;
		break;
	case ETHER_STAT_EX_COLLISIONS:
		*val = ip->ex_coll;
		break;
	case MAC_STAT_OVERFLOWS:
		*val = ip->oflo;
		break;
	case MAC_STAT_UNDERFLOWS:
		*val = ip->uflo;
		break;
	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = ip->runt;
		break;
	case ETHER_STAT_TOOLONG_ERRORS:
		*val = ip->toolong;
		break;
	case ETHER_STAT_CARRIER_ERRORS:
		*val = ip->nocarrier;	/* reported only for "suspend" */
		break;
	case ETHER_STAT_MACXMT_ERRORS:
		*val = ip->macxmt_errs;
		break;
	case ETHER_STAT_MACRCV_ERRORS:
		*val = ip->macrcv_errs;
		break;
	default:
		return (ENOTSUP);
	}
	return (0);
}

void
iprb_m_propinfo(void *arg, const char *name, mac_prop_id_t id,
    mac_prop_info_handle_t pih)
{
	iprb_t	*ip = arg;

	if (ip->miih != NULL) {
		mii_m_propinfo(ip->miih, name, id, pih);
		return;
	}
	switch (id) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
		mac_prop_info_set_perm(pih, MAC_PROP_PERM_READ);
		break;
	}
}

int
iprb_m_getprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
    void *val)
{
	iprb_t		*ip = arg;
	uint64_t	x;

	if (ip->miih != NULL) {
		return (mii_m_getprop(ip->miih, name, id, sz, val));
	}
	switch (id) {
	case MAC_PROP_SPEED:
		x = 10000000;
		bcopy(&x, val, sizeof (x));
		return (0);

	case MAC_PROP_DUPLEX:
		x = LINK_DUPLEX_UNKNOWN;
		bcopy(&x, val, sizeof (x));
		return (0);
	}

	return (ENOTSUP);
}

int
iprb_m_setprop(void *arg, const char *name, mac_prop_id_t id, uint_t sz,
    const void *val)
{
	iprb_t	*ip = arg;

	if (ip->miih != NULL) {
		return (mii_m_setprop(ip->miih, name, id, sz, val));
	}
	return (ENOTSUP);
}

mblk_t *
iprb_m_tx(void *arg, mblk_t *mp)
{
	iprb_t	*ip = arg;
	mblk_t	*nmp;

	mutex_enter(&ip->culock);

	while (mp != NULL) {
		nmp = mp->b_next;
		mp->b_next = NULL;
		if (ip->suspended) {
			freemsg(mp);
			ip->nocarrier++;
			mp = nmp;
			continue;
		}
		if ((mp = iprb_send(ip, mp)) != NULL) {
			mp->b_next = nmp;
			break;
		}
		mp = nmp;
	}
	mutex_exit(&ip->culock);
	return (mp);
}

void
iprb_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
	iprb_t	*ip = arg;

	if ((ip->miih != NULL) && (mii_m_loop_ioctl(ip->miih, wq, mp)))
		return;

	miocnak(wq, mp, 0, EINVAL);
}
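
/*
 * PHY registers are reached through the MDI control register: an
 * operation word encoding the opcode, PHY address and register address is
 * written to CSR_MDICTL, and the R (ready) bit is then polled until the
 * device reports the transaction complete.  A read that never sets the
 * ready bit returns 0xffff, the same value an absent PHY would yield.
 */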
uint16_t
iprb_mii_read(void *arg, uint8_t phy, uint8_t reg)
{
	iprb_t		*ip = arg;
	uint32_t	mdi;

	/*
	 * NB: we are guaranteed by the MII layer not to be suspended.
	 * Furthermore, we have an independent MII register.
	 */

	mdi = MDI_OP_RD |
	    ((uint32_t)phy << MDI_PHYAD_SHIFT) |
	    ((uint32_t)reg << MDI_REGAD_SHIFT);

	PUT32(ip, CSR_MDICTL, mdi);
	for (int i = 0; i < 100; i++) {
		mdi = GET32(ip, CSR_MDICTL);
		if (mdi & MDI_R) {
			return (mdi & 0xffff);
		}
		drv_usecwait(1);
	}
	return (0xffff);
}

void
iprb_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t data)
{
	iprb_t		*ip = arg;
	uint32_t	mdi;

	mdi = MDI_OP_WR |
	    ((uint32_t)phy << MDI_PHYAD_SHIFT) |
	    ((uint32_t)reg << MDI_REGAD_SHIFT) |
	    (data);

	PUT32(ip, CSR_MDICTL, mdi);
	for (int i = 0; i < 100; i++) {
		if (GET32(ip, CSR_MDICTL) & MDI_R)
			break;
	}
}

void
iprb_mii_notify(void *arg, link_state_t link)
{
	iprb_t	*ip = arg;

	mac_link_update(ip->mach, link);
}

uint_t
iprb_intr(caddr_t arg1, caddr_t arg2)
{
	iprb_t	*ip = (void *)arg1;
	uint8_t	sts;
	mblk_t	*mp = NULL;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ip->rulock);
	if (ip->suspended) {
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	sts = GET8(ip, CSR_STS);
	if (sts == 0) {
		/* No interrupt status! */
		mutex_exit(&ip->rulock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* acknowledge the interrupts */
	PUT8(ip, CSR_STS, sts);

	if (sts & (STS_RNR | STS_FR)) {
		mp = iprb_rx(ip);

		if ((sts & STS_RNR) &&
		    ((GET8(ip, CSR_STATE) & STATE_RUS) == STATE_RUS_NORES)) {
			iprb_rx_init(ip);

			mutex_enter(&ip->culock);
			PUT32(ip, CSR_GEN_PTR, ip->rxb[0].paddr);
			/* wait for the SCB */
			(void) iprb_cmd_ready(ip);
			PUT8(ip, CSR_CMD, RUC_START);
			(void) GET8(ip, CSR_CMD);	/* flush CSR */
			mutex_exit(&ip->culock);
		}
	}
	mutex_exit(&ip->rulock);

	if (mp) {
		mac_rx(ip->mach, NULL, mp);
	}
	if ((sts & (STS_CNA | STS_CX)) && ip->wantw) {
		ip->wantw = B_FALSE;
		mac_tx_update(ip->mach);
	}
	return (DDI_INTR_CLAIMED);
}
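
/*
 * iprb_periodic() runs every five seconds (see iprb_m_start) and acts as
 * a watchdog: it resets and reprograms the chip if the RU appears to have
 * hung on receive (the RX errata case, reportedly seen only at 10 Mbps)
 * or if submitted commands have sat unfinished past the transmit timeout,
 * and it also refreshes the hardware statistics while it holds the CU
 * lock.
 */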
void
iprb_periodic(void *arg)
{
	iprb_t		*ip = arg;
	boolean_t	reset = B_FALSE;

	mutex_enter(&ip->rulock);
	if (ip->suspended || !ip->running) {
		mutex_exit(&ip->rulock);
		return;
	}

	/*
	 * If we haven't received a packet in a while, and if the link
	 * is up, then it might be a hung chip.  This problem
	 * reportedly only occurs at 10 Mbps.
	 */
	if (ip->rxhangbug &&
	    ((ip->miih == NULL) || (mii_get_speed(ip->miih) == 10000000)) &&
	    ((gethrtime() - ip->rx_wdog) > ip->rx_timeout)) {
		cmn_err(CE_CONT, "?Possible RU hang, resetting.\n");
		reset = B_TRUE;
	}

	/* update the statistics */
	mutex_enter(&ip->culock);

	if (ip->tx_wdog && ((gethrtime() - ip->tx_wdog) > ip->tx_timeout)) {
		/* transmit/CU hang? */
		cmn_err(CE_CONT, "?CU stalled, resetting.\n");
		reset = B_TRUE;
	}

	if (reset) {
		/* We want to reconfigure */
		iprb_stop(ip);
		if (iprb_start(ip) != DDI_SUCCESS) {
			iprb_error(ip, "unable to restart chip");
		}
	}

	iprb_update_stats(ip);

	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
}

int
iprb_quiesce(dev_info_t *dip)
{
	iprb_t	*ip = ddi_get_driver_private(dip);

	/* Reset, but first go into idle state */
	PUT32(ip, CSR_PORT, PORT_SEL_RESET);
	drv_usecwait(50);
	PUT32(ip, CSR_PORT, PORT_SW_RESET);
	drv_usecwait(10);
	PUT8(ip, CSR_INTCTL, INTCTL_MASK);

	return (DDI_SUCCESS);
}

int
iprb_suspend(dev_info_t *dip)
{
	iprb_t	*ip = ddi_get_driver_private(dip);

	if (ip->miih)
		mii_suspend(ip->miih);

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);
	if (!ip->suspended) {
		ip->suspended = B_TRUE;
		if (ip->running) {
			iprb_update_stats(ip);
			iprb_stop(ip);
		}
	}
	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
	return (DDI_SUCCESS);
}

int
iprb_resume(dev_info_t *dip)
{
	iprb_t	*ip = ddi_get_driver_private(dip);

	mutex_enter(&ip->rulock);
	mutex_enter(&ip->culock);

	ip->suspended = B_FALSE;
	if (ip->running) {
		if (iprb_start(ip) != DDI_SUCCESS) {
			iprb_error(ip, "unable to restart chip!");
			ip->suspended = B_TRUE;
			mutex_exit(&ip->culock);
			mutex_exit(&ip->rulock);
			return (DDI_FAILURE);
		}
	}

	mutex_exit(&ip->culock);
	mutex_exit(&ip->rulock);
	if (ip->miih)
		mii_resume(ip->miih);
	return (DDI_SUCCESS);
}

int
iprb_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_ATTACH:
		return (iprb_attach(dip));

	case DDI_RESUME:
		return (iprb_resume(dip));

	default:
		return (DDI_FAILURE);
	}
}

int
iprb_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		return (iprb_detach(dip));

	case DDI_SUSPEND:
		return (iprb_suspend(dip));

	default:
		return (DDI_FAILURE);
	}
}

void
iprb_error(iprb_t *ip, const char *fmt, ...)
{
	va_list	ap;
	char	buf[256];

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	cmn_err(CE_WARN, "%s%d: %s",
	    ddi_driver_name(ip->dip), ddi_get_instance(ip->dip), buf);
}