1 /* 2 * Solaris driver for ethernet cards based on the ADMtek Centaur 3 * 4 * Copyright (c) 2007 by Garrett D'Amore <garrett@damore.org>. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the author nor the names of any co-contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /* 32 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 33 * Use is subject to license terms. 34 */ 35 36 37 #include <sys/varargs.h> 38 #include <sys/types.h> 39 #include <sys/modctl.h> 40 #include <sys/conf.h> 41 #include <sys/devops.h> 42 #include <sys/stream.h> 43 #include <sys/strsun.h> 44 #include <sys/cmn_err.h> 45 #include <sys/ethernet.h> 46 #include <sys/kmem.h> 47 #include <sys/time.h> 48 #include <sys/crc32.h> 49 #include <sys/miiregs.h> 50 #include <sys/mac.h> 51 #include <sys/mac_ether.h> 52 #include <sys/ddi.h> 53 #include <sys/sunddi.h> 54 #include <sys/vlan.h> 55 56 #include "afe.h" 57 #include "afeimpl.h" 58 59 /* 60 * Driver globals. 61 */ 62 63 /* patchable debug flag ... must not be static! */ 64 #ifdef DEBUG 65 unsigned afe_debug = DWARN; 66 #endif 67 68 /* table of supported devices */ 69 static afe_card_t afe_cards[] = { 70 71 /* 72 * ADMtek Centaur and Comet 73 */ 74 { 0x1317, 0x0981, "ADMtek AL981", MODEL_COMET }, 75 { 0x1317, 0x0985, "ADMtek AN983", MODEL_CENTAUR }, 76 { 0x1317, 0x1985, "ADMtek AN985", MODEL_CENTAUR }, 77 { 0x1317, 0x9511, "ADMtek ADM9511", MODEL_CENTAUR }, 78 { 0x1317, 0x9513, "ADMtek ADM9513", MODEL_CENTAUR }, 79 /* 80 * Accton just relabels other companies' controllers 81 */ 82 { 0x1113, 0x1216, "Accton EN5251", MODEL_CENTAUR }, 83 /* 84 * Models listed here. 
85 */ 86 { 0x10b7, 0x9300, "3Com 3CSOHO100B-TX", MODEL_CENTAUR }, 87 { 0x1113, 0xec02, "SMC SMC1244TX", MODEL_CENTAUR }, 88 { 0x10b8, 0x1255, "SMC SMC1255TX", MODEL_CENTAUR }, 89 { 0x111a, 0x1020, "Siemens SpeedStream PCI 10/100", MODEL_CENTAUR }, 90 { 0x1113, 0x1207, "Accton EN1207F", MODEL_CENTAUR }, 91 { 0x1113, 0x2242, "Accton EN2242", MODEL_CENTAUR }, 92 { 0x1113, 0x2220, "Accton EN2220", MODEL_CENTAUR }, 93 { 0x1113, 0x9216, "3M VOL-N100VF+TX", MODEL_CENTAUR }, 94 { 0x1317, 0x0574, "Linksys LNE100TX", MODEL_CENTAUR }, 95 { 0x1317, 0x0570, "Linksys NC100", MODEL_CENTAUR }, 96 { 0x1385, 0x511a, "Netgear FA511", MODEL_CENTAUR }, 97 { 0x13d1, 0xab02, "AboCom FE2500", MODEL_CENTAUR }, 98 { 0x13d1, 0xab03, "AboCom PCM200", MODEL_CENTAUR }, 99 { 0x13d1, 0xab08, "AboCom FE2500MX", MODEL_CENTAUR }, 100 { 0x1414, 0x0001, "Microsoft MN-120", MODEL_CENTAUR }, 101 { 0x16ec, 0x00ed, "U.S. Robotics USR997900", MODEL_CENTAUR }, 102 { 0x1734, 0x100c, "Fujitsu-Siemens D1961", MODEL_CENTAUR }, 103 { 0x1737, 0xab08, "Linksys PCMPC200", MODEL_CENTAUR }, 104 { 0x1737, 0xab09, "Linksys PCM200", MODEL_CENTAUR }, 105 { 0x17b3, 0xab08, "Hawking PN672TX", MODEL_CENTAUR }, 106 }; 107 108 #define ETHERVLANMTU (ETHERMAX + 4) 109 110 /* 111 * Function prototypes 112 */ 113 static int afe_attach(dev_info_t *, ddi_attach_cmd_t); 114 static int afe_detach(dev_info_t *, ddi_detach_cmd_t); 115 static int afe_resume(dev_info_t *); 116 static int afe_m_unicst(void *, const uint8_t *); 117 static int afe_m_multicst(void *, boolean_t, const uint8_t *); 118 static int afe_m_promisc(void *, boolean_t); 119 static mblk_t *afe_m_tx(void *, mblk_t *); 120 static int afe_m_stat(void *, uint_t, uint64_t *); 121 static int afe_m_start(void *); 122 static void afe_m_stop(void *); 123 static int afe_m_getprop(void *, const char *, mac_prop_id_t, uint_t, 124 uint_t, void *); 125 static int afe_m_setprop(void *, const char *, mac_prop_id_t, uint_t, 126 const void *); 127 static unsigned afe_intr(caddr_t); 128 static void afe_startmac(afe_t *); 129 static void afe_stopmac(afe_t *); 130 static void afe_resetrings(afe_t *); 131 static boolean_t afe_initialize(afe_t *); 132 static void afe_startall(afe_t *); 133 static void afe_stopall(afe_t *); 134 static void afe_resetall(afe_t *); 135 static afe_txbuf_t *afe_alloctxbuf(afe_t *); 136 static void afe_destroytxbuf(afe_txbuf_t *); 137 static afe_rxbuf_t *afe_allocrxbuf(afe_t *); 138 static void afe_destroyrxbuf(afe_rxbuf_t *); 139 static boolean_t afe_send(afe_t *, mblk_t *); 140 static int afe_allocrxring(afe_t *); 141 static void afe_freerxring(afe_t *); 142 static int afe_alloctxring(afe_t *); 143 static void afe_freetxring(afe_t *); 144 static void afe_error(dev_info_t *, char *, ...); 145 static void afe_setrxfilt(afe_t *); 146 static uint8_t afe_sromwidth(afe_t *); 147 static uint16_t afe_readsromword(afe_t *, unsigned); 148 static void afe_readsrom(afe_t *, unsigned, unsigned, char *); 149 static void afe_getfactaddr(afe_t *, uchar_t *); 150 static uint8_t afe_miireadbit(afe_t *); 151 static void afe_miiwritebit(afe_t *, uint8_t); 152 static void afe_miitristate(afe_t *); 153 static uint16_t afe_miiread(afe_t *, int, int); 154 static void afe_miiwrite(afe_t *, int, int, uint16_t); 155 static uint16_t afe_miireadgeneral(afe_t *, int, int); 156 static void afe_miiwritegeneral(afe_t *, int, int, uint16_t); 157 static uint16_t afe_miireadcomet(afe_t *, int, int); 158 static void afe_miiwritecomet(afe_t *, int, int, uint16_t); 159 static int afe_getmiibit(afe_t *, uint16_t, uint16_t); 
160 static void afe_startphy(afe_t *); 161 static void afe_stopphy(afe_t *); 162 static void afe_reportlink(afe_t *); 163 static void afe_checklink(afe_t *); 164 static void afe_checklinkcomet(afe_t *); 165 static void afe_checklinkcentaur(afe_t *); 166 static void afe_checklinkmii(afe_t *); 167 static void afe_disableinterrupts(afe_t *); 168 static void afe_enableinterrupts(afe_t *); 169 static void afe_reclaim(afe_t *); 170 static mblk_t *afe_receive(afe_t *); 171 172 #ifdef DEBUG 173 static void afe_dprintf(afe_t *, const char *, int, char *, ...); 174 #endif 175 176 #define KIOIP KSTAT_INTR_PTR(afep->afe_intrstat) 177 178 static mac_callbacks_t afe_m_callbacks = { 179 MC_SETPROP | MC_GETPROP, 180 afe_m_stat, 181 afe_m_start, 182 afe_m_stop, 183 afe_m_promisc, 184 afe_m_multicst, 185 afe_m_unicst, 186 afe_m_tx, 187 NULL, /* mc_resources */ 188 NULL, /* mc_ioctl */ 189 NULL, /* mc_getcapab */ 190 NULL, /* mc_open */ 191 NULL, /* mc_close */ 192 afe_m_setprop, 193 afe_m_getprop, 194 }; 195 196 197 /* 198 * Stream information 199 */ 200 DDI_DEFINE_STREAM_OPS(afe_devops, nulldev, nulldev, afe_attach, afe_detach, 201 nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported); 202 203 /* 204 * Module linkage information. 205 */ 206 207 static struct modldrv afe_modldrv = { 208 &mod_driverops, /* drv_modops */ 209 "ADMtek Fast Ethernet", /* drv_linkinfo */ 210 &afe_devops /* drv_dev_ops */ 211 }; 212 213 static struct modlinkage afe_modlinkage = { 214 MODREV_1, /* ml_rev */ 215 { &afe_modldrv, NULL } /* ml_linkage */ 216 }; 217 218 /* 219 * Device attributes. 220 */ 221 static ddi_device_acc_attr_t afe_devattr = { 222 DDI_DEVICE_ATTR_V0, 223 DDI_STRUCTURE_LE_ACC, 224 DDI_STRICTORDER_ACC 225 }; 226 227 static ddi_device_acc_attr_t afe_bufattr = { 228 DDI_DEVICE_ATTR_V0, 229 DDI_NEVERSWAP_ACC, 230 DDI_STRICTORDER_ACC 231 }; 232 233 static ddi_dma_attr_t afe_dma_attr = { 234 DMA_ATTR_V0, /* dma_attr_version */ 235 0, /* dma_attr_addr_lo */ 236 0xFFFFFFFFU, /* dma_attr_addr_hi */ 237 0x7FFFFFFFU, /* dma_attr_count_max */ 238 4, /* dma_attr_align */ 239 0x3F, /* dma_attr_burstsizes */ 240 1, /* dma_attr_minxfer */ 241 0xFFFFFFFFU, /* dma_attr_maxxfer */ 242 0xFFFFFFFFU, /* dma_attr_seg */ 243 1, /* dma_attr_sgllen */ 244 1, /* dma_attr_granular */ 245 0 /* dma_attr_flags */ 246 }; 247 248 /* 249 * Tx buffers can be arbitrarily aligned. Additionally, they can 250 * cross a page boundary, so we use the two buffer addresses of the 251 * chip to provide a two-entry scatter-gather list. 252 */ 253 static ddi_dma_attr_t afe_dma_txattr = { 254 DMA_ATTR_V0, /* dma_attr_version */ 255 0, /* dma_attr_addr_lo */ 256 0xFFFFFFFFU, /* dma_attr_addr_hi */ 257 0x7FFFFFFFU, /* dma_attr_count_max */ 258 1, /* dma_attr_align */ 259 0x3F, /* dma_attr_burstsizes */ 260 1, /* dma_attr_minxfer */ 261 0xFFFFFFFFU, /* dma_attr_maxxfer */ 262 0xFFFFFFFFU, /* dma_attr_seg */ 263 2, /* dma_attr_sgllen */ 264 1, /* dma_attr_granular */ 265 0 /* dma_attr_flags */ 266 }; 267 268 /* 269 * Ethernet addresses. 270 */ 271 static uchar_t afe_broadcast[ETHERADDRL] = { 272 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 273 }; 274 275 /* 276 * DDI entry points. 
 */
int
_init(void)
{
        int rv;
        mac_init_ops(&afe_devops, "afe");
        if ((rv = mod_install(&afe_modlinkage)) != DDI_SUCCESS) {
                mac_fini_ops(&afe_devops);
        }
        return (rv);
}

int
_fini(void)
{
        int rv;
        if ((rv = mod_remove(&afe_modlinkage)) == DDI_SUCCESS) {
                mac_fini_ops(&afe_devops);
        }
        return (rv);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&afe_modlinkage, modinfop));
}

int
afe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        afe_t *afep;
        mac_register_t *macp;
        int inst = ddi_get_instance(dip);
        ddi_acc_handle_t pci;
        uint16_t venid;
        uint16_t devid;
        uint16_t svid;
        uint16_t ssid;
        uint16_t cachesize;
        afe_card_t *cardp;
        int i;

        switch (cmd) {
        case DDI_RESUME:
                return (afe_resume(dip));

        case DDI_ATTACH:
                break;

        default:
                return (DDI_FAILURE);
        }

        /* this card is a bus master, reject any slave-only slot */
        if (ddi_slaveonly(dip) == DDI_SUCCESS) {
                afe_error(dip, "slot does not support PCI bus-master");
                return (DDI_FAILURE);
        }
        /* PCI devices shouldn't generate hilevel interrupts */
        if (ddi_intr_hilevel(dip, 0) != 0) {
                afe_error(dip, "hilevel interrupts not supported");
                return (DDI_FAILURE);
        }
        if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
                afe_error(dip, "unable to setup PCI config handle");
                return (DDI_FAILURE);
        }

        venid = pci_config_get16(pci, PCI_VID);
        devid = pci_config_get16(pci, PCI_DID);
        svid = pci_config_get16(pci, PCI_SVID);
        ssid = pci_config_get16(pci, PCI_SSID);

        /*
         * Note: ADMtek boards seem to misprogram themselves with bogus
         * timings, which do not seem to work properly on SPARC.  We
         * reprogram them to zero (but only if they appear to be broken),
         * which seems to at least work.  It's unclear to me that this is
         * a legal or wise practice, but it certainly works better than
         * the original values.  (I would love to hear suggestions for
         * better values, or a better strategy.)
         */
        if ((pci_config_get8(pci, PCI_MINGNT) == 0xff) &&
            (pci_config_get8(pci, PCI_MAXLAT) == 0xff)) {
                pci_config_put8(pci, PCI_MINGNT, 0);
                pci_config_put8(pci, PCI_MAXLAT, 0);
        }

        /*
         * Scan the card table for a match on either the primary
         * vendor/device id or the subsystem vendor/device id.  A
         * subsystem id match is preferred, so it ends the search
         * immediately; otherwise we remember the last primary id match.
         */
        cardp = NULL;
        for (i = 0; i < (sizeof (afe_cards) / sizeof (afe_card_t)); i++) {
                if ((venid == afe_cards[i].card_venid) &&
                    (devid == afe_cards[i].card_devid)) {
                        cardp = &afe_cards[i];
                }
                if ((svid == afe_cards[i].card_venid) &&
                    (ssid == afe_cards[i].card_devid)) {
                        cardp = &afe_cards[i];
                        break;
                }
        }

        if (cardp == NULL) {
                pci_config_teardown(&pci);
                afe_error(dip, "Unable to identify PCI card");
                return (DDI_FAILURE);
        }

        if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
            cardp->card_cardname) != DDI_PROP_SUCCESS) {
                pci_config_teardown(&pci);
                afe_error(dip, "Unable to create model property");
                return (DDI_FAILURE);
        }

        /*
         * Grab the PCI cachesize -- we use this to program the
         * cache-optimization bus access bits.
399 */ 400 cachesize = pci_config_get8(pci, PCI_CLS); 401 402 /* this cannot fail */ 403 afep = kmem_zalloc(sizeof (afe_t), KM_SLEEP); 404 ddi_set_driver_private(dip, afep); 405 406 /* get the interrupt block cookie */ 407 if (ddi_get_iblock_cookie(dip, 0, &afep->afe_icookie) != DDI_SUCCESS) { 408 afe_error(dip, "ddi_get_iblock_cookie failed"); 409 pci_config_teardown(&pci); 410 kmem_free(afep, sizeof (afe_t)); 411 return (DDI_FAILURE); 412 } 413 414 afep->afe_dip = dip; 415 afep->afe_cardp = cardp; 416 afep->afe_phyaddr = -1; 417 afep->afe_cachesize = cachesize; 418 419 /* default properties */ 420 afep->afe_adv_aneg = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 421 "adv_autoneg_cap", 1); 422 afep->afe_adv_100T4 = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 423 "adv_100T4_cap", 1); 424 afep->afe_adv_100fdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 425 "adv_100fdx_cap", 1); 426 afep->afe_adv_100hdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 427 "adv_100hdx_cap", 1); 428 afep->afe_adv_10fdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 429 "adv_10fdx_cap", 1); 430 afep->afe_adv_10hdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 431 "adv_10hdx_cap", 1); 432 433 afep->afe_forcefiber = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, 434 "fiber", 0); 435 436 DBG(DPCI, "PCI vendor id = %x", venid); 437 DBG(DPCI, "PCI device id = %x", devid); 438 DBG(DPCI, "PCI cachesize = %d", cachesize); 439 DBG(DPCI, "PCI COMM = %x", pci_config_get8(pci, PCI_CMD)); 440 DBG(DPCI, "PCI STAT = %x", pci_config_get8(pci, PCI_STAT)); 441 442 mutex_init(&afep->afe_xmtlock, NULL, MUTEX_DRIVER, afep->afe_icookie); 443 mutex_init(&afep->afe_intrlock, NULL, MUTEX_DRIVER, afep->afe_icookie); 444 445 /* 446 * Enable bus master, IO space, and memory space accesses. 447 */ 448 pci_config_put16(pci, PCI_CMD, 449 pci_config_get16(pci, PCI_CMD) | PCI_CMD_BME | PCI_CMD_MAE); 450 451 /* we're done with this now, drop it */ 452 pci_config_teardown(&pci); 453 454 /* 455 * Initialize interrupt kstat. This should not normally fail, since 456 * we don't use a persistent stat. We do it this way to avoid having 457 * to test for it at run time on the hot path. 458 */ 459 afep->afe_intrstat = kstat_create("afe", inst, "intr", "controller", 460 KSTAT_TYPE_INTR, 1, 0); 461 if (afep->afe_intrstat == NULL) { 462 afe_error(dip, "kstat_create failed"); 463 goto failed; 464 } 465 kstat_install(afep->afe_intrstat); 466 467 /* 468 * Map in the device registers. 469 */ 470 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&afep->afe_regs, 471 0, 0, &afe_devattr, &afep->afe_regshandle)) { 472 afe_error(dip, "ddi_regs_map_setup failed"); 473 goto failed; 474 } 475 476 /* 477 * Allocate DMA resources (descriptor rings and buffers). 478 */ 479 if ((afe_allocrxring(afep) != DDI_SUCCESS) || 480 (afe_alloctxring(afep) != DDI_SUCCESS)) { 481 afe_error(dip, "unable to allocate DMA resources"); 482 goto failed; 483 } 484 485 /* Initialize the chip. */ 486 mutex_enter(&afep->afe_intrlock); 487 mutex_enter(&afep->afe_xmtlock); 488 if (!afe_initialize(afep)) { 489 mutex_exit(&afep->afe_xmtlock); 490 mutex_exit(&afep->afe_intrlock); 491 goto failed; 492 } 493 mutex_exit(&afep->afe_xmtlock); 494 mutex_exit(&afep->afe_intrlock); 495 496 /* Determine the number of address bits to our EEPROM. */ 497 afep->afe_sromwidth = afe_sromwidth(afep); 498 499 /* 500 * Get the factory ethernet address. This becomes the current 501 * ethernet address (it can be overridden later via ifconfig). 
502 */ 503 afe_getfactaddr(afep, afep->afe_curraddr); 504 afep->afe_promisc = B_FALSE; 505 506 /* make sure we add configure the initial filter */ 507 (void) afe_m_unicst(afep, afep->afe_curraddr); 508 (void) afe_m_multicst(afep, B_TRUE, afe_broadcast); 509 510 /* 511 * Establish interrupt handler. 512 */ 513 if (ddi_add_intr(dip, 0, NULL, NULL, afe_intr, (caddr_t)afep) != 514 DDI_SUCCESS) { 515 afe_error(dip, "unable to add interrupt"); 516 goto failed; 517 } 518 519 /* TODO: do the power management stuff */ 520 521 if ((macp = mac_alloc(MAC_VERSION)) == NULL) { 522 afe_error(dip, "mac_alloc failed"); 523 goto failed; 524 } 525 526 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 527 macp->m_driver = afep; 528 macp->m_dip = dip; 529 macp->m_src_addr = afep->afe_curraddr; 530 macp->m_callbacks = &afe_m_callbacks; 531 macp->m_min_sdu = 0; 532 macp->m_max_sdu = ETHERMTU; 533 macp->m_margin = VLAN_TAGSZ; 534 535 if (mac_register(macp, &afep->afe_mh) == DDI_SUCCESS) { 536 mac_free(macp); 537 return (DDI_SUCCESS); 538 } 539 540 /* failed to register with MAC */ 541 mac_free(macp); 542 failed: 543 if (afep->afe_icookie != NULL) { 544 ddi_remove_intr(dip, 0, afep->afe_icookie); 545 } 546 if (afep->afe_intrstat) { 547 kstat_delete(afep->afe_intrstat); 548 } 549 mutex_destroy(&afep->afe_intrlock); 550 mutex_destroy(&afep->afe_xmtlock); 551 552 afe_freerxring(afep); 553 afe_freetxring(afep); 554 555 if (afep->afe_regshandle != NULL) { 556 ddi_regs_map_free(&afep->afe_regshandle); 557 } 558 kmem_free(afep, sizeof (afe_t)); 559 return (DDI_FAILURE); 560 } 561 562 int 563 afe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 564 { 565 afe_t *afep; 566 567 afep = ddi_get_driver_private(dip); 568 if (afep == NULL) { 569 afe_error(dip, "no soft state in detach!"); 570 return (DDI_FAILURE); 571 } 572 573 switch (cmd) { 574 case DDI_DETACH: 575 576 if (mac_unregister(afep->afe_mh) != 0) { 577 return (DDI_FAILURE); 578 } 579 580 /* make sure hardware is quiesced */ 581 mutex_enter(&afep->afe_intrlock); 582 mutex_enter(&afep->afe_xmtlock); 583 afep->afe_flags &= ~AFE_RUNNING; 584 afe_stopall(afep); 585 mutex_exit(&afep->afe_xmtlock); 586 mutex_exit(&afep->afe_intrlock); 587 588 /* clean up and shut down device */ 589 ddi_remove_intr(dip, 0, afep->afe_icookie); 590 591 /* clean up kstats */ 592 kstat_delete(afep->afe_intrstat); 593 594 ddi_prop_remove_all(dip); 595 596 /* free up any left over buffers or DMA resources */ 597 afe_freerxring(afep); 598 afe_freetxring(afep); 599 600 ddi_regs_map_free(&afep->afe_regshandle); 601 mutex_destroy(&afep->afe_intrlock); 602 mutex_destroy(&afep->afe_xmtlock); 603 604 kmem_free(afep, sizeof (afe_t)); 605 return (DDI_SUCCESS); 606 607 case DDI_SUSPEND: 608 /* quiesce the hardware */ 609 mutex_enter(&afep->afe_intrlock); 610 mutex_enter(&afep->afe_xmtlock); 611 afep->afe_flags |= AFE_SUSPENDED; 612 afe_stopall(afep); 613 mutex_exit(&afep->afe_xmtlock); 614 mutex_exit(&afep->afe_intrlock); 615 return (DDI_SUCCESS); 616 default: 617 return (DDI_FAILURE); 618 } 619 } 620 621 int 622 afe_resume(dev_info_t *dip) 623 { 624 afe_t *afep; 625 626 if ((afep = ddi_get_driver_private(dip)) == NULL) { 627 return (DDI_FAILURE); 628 } 629 630 mutex_enter(&afep->afe_intrlock); 631 mutex_enter(&afep->afe_xmtlock); 632 633 afep->afe_flags &= ~AFE_SUSPENDED; 634 635 /* re-initialize chip */ 636 if (!afe_initialize(afep)) { 637 afe_error(afep->afe_dip, "unable to resume chip!"); 638 afep->afe_flags |= AFE_SUSPENDED; 639 mutex_exit(&afep->afe_intrlock); 640 mutex_exit(&afep->afe_xmtlock); 641 return 
(DDI_SUCCESS); 642 } 643 644 /* start the chip */ 645 if (afep->afe_flags & AFE_RUNNING) { 646 afe_startall(afep); 647 } 648 649 /* drop locks */ 650 mutex_exit(&afep->afe_xmtlock); 651 mutex_exit(&afep->afe_intrlock); 652 653 return (DDI_SUCCESS); 654 } 655 656 void 657 afe_setrxfilt(afe_t *afep) 658 { 659 unsigned rxen, pa0, pa1; 660 661 if (afep->afe_flags & AFE_SUSPENDED) { 662 /* don't touch a suspended interface */ 663 return; 664 } 665 666 rxen = GETCSR(afep, CSR_NAR) & NAR_RX_ENABLE; 667 668 /* stop receiver */ 669 if (rxen) { 670 afe_stopmac(afep); 671 } 672 673 /* program promiscuous mode */ 674 if (afep->afe_promisc) 675 SETBIT(afep, CSR_NAR, NAR_RX_PROMISC); 676 else 677 CLRBIT(afep, CSR_NAR, NAR_RX_PROMISC); 678 679 /* program mac address */ 680 pa0 = (afep->afe_curraddr[3] << 24) | (afep->afe_curraddr[2] << 16) | 681 (afep->afe_curraddr[1] << 8) | afep->afe_curraddr[0]; 682 pa1 = (afep->afe_curraddr[5] << 8) | afep->afe_curraddr[4]; 683 684 DBG(DMACID, "programming PAR0 with %x", pa0); 685 DBG(DMACID, "programming PAR1 with %x", pa1); 686 PUTCSR(afep, CSR_PAR0, pa0); 687 PUTCSR(afep, CSR_PAR1, pa1); 688 if (rxen) { 689 SETBIT(afep, CSR_NAR, rxen); 690 } 691 692 DBG(DMACID, "programming MAR0 = %x", afep->afe_mctab[0]); 693 DBG(DMACID, "programming MAR1 = %x", afep->afe_mctab[1]); 694 695 /* program multicast filter */ 696 if (AFE_MODEL(afep) == MODEL_COMET) { 697 if (afep->afe_mctab[0] || afep->afe_mctab[1]) { 698 SETBIT(afep, CSR_NAR, NAR_RX_MULTI); 699 } else { 700 CLRBIT(afep, CSR_NAR, NAR_RX_MULTI); 701 } 702 } else { 703 CLRBIT(afep, CSR_NAR, NAR_RX_MULTI); 704 PUTCSR(afep, CSR_MAR0, afep->afe_mctab[0]); 705 PUTCSR(afep, CSR_MAR1, afep->afe_mctab[1]); 706 } 707 708 /* restart receiver */ 709 if (rxen) { 710 afe_startmac(afep); 711 } 712 } 713 714 int 715 afe_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr) 716 { 717 afe_t *afep = arg; 718 int index; 719 uint32_t crc; 720 uint32_t bit; 721 uint32_t newval, oldval; 722 723 CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table); 724 crc %= AFE_MCHASH; 725 726 /* bit within a 32-bit word */ 727 index = crc / 32; 728 bit = (1 << (crc % 32)); 729 730 mutex_enter(&afep->afe_intrlock); 731 mutex_enter(&afep->afe_xmtlock); 732 newval = oldval = afep->afe_mctab[index]; 733 734 if (add) { 735 afep->afe_mccount[crc]++; 736 if (afep->afe_mccount[crc] == 1) 737 newval |= bit; 738 } else { 739 afep->afe_mccount[crc]--; 740 if (afep->afe_mccount[crc] == 0) 741 newval &= ~bit; 742 } 743 if (newval != oldval) { 744 afep->afe_mctab[index] = newval; 745 afe_setrxfilt(afep); 746 } 747 748 mutex_exit(&afep->afe_xmtlock); 749 mutex_exit(&afep->afe_intrlock); 750 751 return (0); 752 } 753 754 int 755 afe_m_promisc(void *arg, boolean_t on) 756 { 757 afe_t *afep = arg; 758 759 /* exclusive access to the card while we reprogram it */ 760 mutex_enter(&afep->afe_intrlock); 761 mutex_enter(&afep->afe_xmtlock); 762 /* save current promiscuous mode state for replay in resume */ 763 afep->afe_promisc = on; 764 765 afe_setrxfilt(afep); 766 mutex_exit(&afep->afe_xmtlock); 767 mutex_exit(&afep->afe_intrlock); 768 769 return (0); 770 } 771 772 int 773 afe_m_unicst(void *arg, const uint8_t *macaddr) 774 { 775 afe_t *afep = arg; 776 777 /* exclusive access to the card while we reprogram it */ 778 mutex_enter(&afep->afe_intrlock); 779 mutex_enter(&afep->afe_xmtlock); 780 781 bcopy(macaddr, afep->afe_curraddr, ETHERADDRL); 782 afe_setrxfilt(afep); 783 784 mutex_exit(&afep->afe_xmtlock); 785 mutex_exit(&afep->afe_intrlock); 786 787 return (0); 788 } 789 
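/*
 * Transmit entry point.  The MAC framework hands us a chain of mblks; we
 * queue as many as the descriptor ring allows and return the remainder,
 * which tells the framework to hold further traffic until descriptors have
 * been reclaimed and it is notified via mac_tx_update().
 */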
790 mblk_t * 791 afe_m_tx(void *arg, mblk_t *mp) 792 { 793 afe_t *afep = arg; 794 mblk_t *nmp; 795 796 mutex_enter(&afep->afe_xmtlock); 797 798 if (afep->afe_flags & AFE_SUSPENDED) { 799 while ((nmp = mp) != NULL) { 800 afep->afe_carrier_errors++; 801 mp = mp->b_next; 802 freemsg(nmp); 803 } 804 mutex_exit(&afep->afe_xmtlock); 805 return (NULL); 806 } 807 808 while (mp != NULL) { 809 nmp = mp->b_next; 810 mp->b_next = NULL; 811 812 if (!afe_send(afep, mp)) { 813 mp->b_next = nmp; 814 break; 815 } 816 mp = nmp; 817 } 818 mutex_exit(&afep->afe_xmtlock); 819 820 return (mp); 821 } 822 823 /* 824 * Hardware management. 825 */ 826 static boolean_t 827 afe_initialize(afe_t *afep) 828 { 829 int i; 830 unsigned val; 831 uint32_t par, nar; 832 833 ASSERT(mutex_owned(&afep->afe_intrlock)); 834 ASSERT(mutex_owned(&afep->afe_xmtlock)); 835 836 DBG(DCHATTY, "resetting!"); 837 SETBIT(afep, CSR_PAR, PAR_RESET); 838 for (i = 1; i < 10; i++) { 839 drv_usecwait(5); 840 val = GETCSR(afep, CSR_PAR); 841 if (!(val & PAR_RESET)) { 842 break; 843 } 844 } 845 if (i == 10) { 846 afe_error(afep->afe_dip, "timed out waiting for reset!"); 847 return (B_FALSE); 848 } 849 850 /* 851 * Updated Centaur data sheets show that the Comet and Centaur are 852 * alike here (contrary to earlier versions of the data sheet). 853 */ 854 /* XXX:? chip problems */ 855 /* par = PAR_MRLE | PAR_MRME | PAR_MWIE; */ 856 par = 0; 857 switch (afep->afe_cachesize) { 858 case 8: 859 par |= PAR_CALIGN_8 | PAR_BURST_8; 860 break; 861 case 16: 862 par |= PAR_CALIGN_16 | PAR_BURST_16; 863 break; 864 case 32: 865 par |= PAR_CALIGN_32 | PAR_BURST_32; 866 break; 867 default: 868 par |= PAR_BURST_32; 869 par &= ~(PAR_MWIE | PAR_MRLE | PAR_MRME); 870 break; 871 872 } 873 874 PUTCSR(afep, CSR_PAR, par); 875 876 /* enable transmit underrun auto-recovery */ 877 SETBIT(afep, CSR_CR, CR_TXURAUTOR); 878 879 afe_resetrings(afep); 880 881 /* clear the lost packet counter (cleared on read) */ 882 (void) GETCSR(afep, CSR_LPC); 883 884 nar = GETCSR(afep, CSR_NAR); 885 nar &= ~NAR_TR; /* clear tx threshold */ 886 nar |= NAR_SF; /* store-and-forward */ 887 nar |= NAR_HBD; /* disable SQE test */ 888 PUTCSR(afep, CSR_NAR, nar); 889 890 afe_setrxfilt(afep); 891 892 return (B_TRUE); 893 } 894 895 /* 896 * Serial EEPROM access - inspired by the FreeBSD implementation. 897 */ 898 899 uint8_t 900 afe_sromwidth(afe_t *afep) 901 { 902 int i; 903 uint32_t eeread; 904 uint8_t addrlen = 8; 905 906 eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP; 907 908 PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP); 909 drv_usecwait(1); 910 PUTCSR(afep, CSR_SPR, eeread); 911 912 /* command bits first */ 913 for (i = 4; i != 0; i >>= 1) { 914 unsigned val = (SROM_READCMD & i) ? SPR_SROM_DIN : 0; 915 916 PUTCSR(afep, CSR_SPR, eeread | val); 917 drv_usecwait(1); 918 PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK); 919 drv_usecwait(1); 920 } 921 922 PUTCSR(afep, CSR_SPR, eeread); 923 924 for (addrlen = 1; addrlen <= 12; addrlen++) { 925 PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK); 926 drv_usecwait(1); 927 if (!(GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT)) { 928 PUTCSR(afep, CSR_SPR, eeread); 929 drv_usecwait(1); 930 break; 931 } 932 PUTCSR(afep, CSR_SPR, eeread); 933 drv_usecwait(1); 934 } 935 936 /* turn off accesses to the EEPROM */ 937 PUTCSR(afep, CSR_SPR, eeread &~ SPR_SROM_CHIP); 938 939 DBG(DSROM, "detected srom width = %d bits", addrlen); 940 941 return ((addrlen < 4 || addrlen > 12) ? 6 : addrlen); 942 } 943 944 /* 945 * The words in EEPROM are stored in little endian order. 
We 946 * shift bits out in big endian order, though. This requires 947 * a byte swap on some platforms. 948 */ 949 uint16_t 950 afe_readsromword(afe_t *afep, unsigned romaddr) 951 { 952 int i; 953 uint16_t word = 0; 954 uint16_t retval; 955 int eeread; 956 uint8_t addrlen; 957 int readcmd; 958 uchar_t *ptr; 959 960 eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP; 961 addrlen = afep->afe_sromwidth; 962 readcmd = (SROM_READCMD << addrlen) | romaddr; 963 964 if (romaddr >= (1 << addrlen)) { 965 /* too big to fit! */ 966 return (0); 967 } 968 969 PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP); 970 PUTCSR(afep, CSR_SPR, eeread); 971 972 /* command and address bits */ 973 for (i = 4 + addrlen; i >= 0; i--) { 974 short val = (readcmd & (1 << i)) ? SPR_SROM_DIN : 0; 975 976 PUTCSR(afep, CSR_SPR, eeread | val); 977 drv_usecwait(1); 978 PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK); 979 drv_usecwait(1); 980 } 981 982 PUTCSR(afep, CSR_SPR, eeread); 983 984 for (i = 0; i < 16; i++) { 985 PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK); 986 drv_usecwait(1); 987 word <<= 1; 988 if (GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT) { 989 word |= 1; 990 } 991 PUTCSR(afep, CSR_SPR, eeread); 992 drv_usecwait(1); 993 } 994 995 /* turn off accesses to the EEPROM */ 996 PUTCSR(afep, CSR_SPR, eeread &~ SPR_SROM_CHIP); 997 998 /* 999 * Fix up the endianness thing. Note that the values 1000 * are stored in little endian format on the SROM. 1001 */ 1002 ptr = (uchar_t *)&word; 1003 retval = (ptr[1] << 8) | ptr[0]; 1004 return (retval); 1005 } 1006 1007 void 1008 afe_readsrom(afe_t *afep, unsigned romaddr, unsigned len, char *dest) 1009 { 1010 int i; 1011 uint16_t word; 1012 uint16_t *ptr = (uint16_t *)((void *)dest); 1013 for (i = 0; i < len; i++) { 1014 word = afe_readsromword(afep, romaddr + i); 1015 *ptr = word; 1016 ptr++; 1017 } 1018 } 1019 1020 void 1021 afe_getfactaddr(afe_t *afep, uchar_t *eaddr) 1022 { 1023 afe_readsrom(afep, SROM_ENADDR, ETHERADDRL / 2, (char *)eaddr); 1024 1025 DBG(DMACID, 1026 "factory ethernet address = %02x:%02x:%02x:%02x:%02x:%02x", 1027 eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]); 1028 } 1029 1030 /* 1031 * MII management. 
 */
void
afe_startphy(afe_t *afep)
{
        unsigned phyaddr;
        unsigned bmcr;
        unsigned bmsr;
        unsigned anar;
        unsigned phyidr1;
        unsigned phyidr2;
        unsigned nosqe = 0;
        int retries;
        int fiber;
        int cnt;

        /* ADMtek devices just use the PHY at address 1 */
        afep->afe_phyaddr = phyaddr = 1;

        phyidr1 = afe_miiread(afep, phyaddr, MII_PHYIDH);
        phyidr2 = afe_miiread(afep, phyaddr, MII_PHYIDL);
        if ((phyidr1 == 0x0022) &&
            ((phyidr2 & 0xfff0) == 0x5410)) {
                nosqe = 1;
                /* only 983B has fiber support */
                afep->afe_flags |= AFE_HASFIBER;
        }
        afep->afe_phyid = (phyidr1 << 16) | phyidr2;

        DBG(DPHY, "phy at %d: %x,%x", phyaddr, phyidr1, phyidr2);
        DBG(DPHY, "bmsr = %x", afe_miiread(afep,
            afep->afe_phyaddr, MII_STATUS));
        DBG(DPHY, "anar = %x", afe_miiread(afep,
            afep->afe_phyaddr, MII_AN_ADVERT));
        DBG(DPHY, "anlpar = %x", afe_miiread(afep,
            afep->afe_phyaddr, MII_AN_LPABLE));
        DBG(DPHY, "aner = %x", afe_miiread(afep,
            afep->afe_phyaddr, MII_AN_EXPANSION));

        DBG(DPHY, "resetting phy");

        /* we reset the phy block */
        afe_miiwrite(afep, phyaddr, MII_CONTROL, MII_CONTROL_RESET);
        /*
         * wait for it to complete -- 500usec is still too short to
         * bother getting the system clock involved.
         */
        drv_usecwait(500);
        for (retries = 0; retries < 10; retries++) {
                if (afe_miiread(afep, phyaddr, MII_CONTROL) &
                    MII_CONTROL_RESET) {
                        drv_usecwait(500);
                        continue;
                }
                break;
        }
        if (retries == 10) {
                afe_error(afep->afe_dip, "timeout waiting on phy to reset");
                return;
        }

        DBG(DPHY, "phy reset complete");

        bmsr = afe_miiread(afep, phyaddr, MII_STATUS);
        anar = afe_miiread(afep, phyaddr, MII_AN_ADVERT);

        anar &= ~(MII_ABILITY_100BASE_T4 |
            MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
            MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);

        fiber = 0;

        /* if fiber is being forced, and device supports fiber... */
        if (afep->afe_flags & AFE_HASFIBER) {

                uint16_t mcr;

                DBG(DPHY, "device supports 100BaseFX");
                mcr = afe_miiread(afep, phyaddr, PHY_MCR);
                switch (afep->afe_forcefiber) {
                case 0:
                        /* UTP Port */
                        DBG(DPHY, "forcing twpair");
                        mcr &= ~MCR_FIBER;
                        fiber = 0;
                        break;
                case 1:
                        /* Fiber Port */
                        DBG(DPHY, "forcing 100BaseFX");
                        mcr |= MCR_FIBER;
                        bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
                        fiber = 1;
                        break;
                default:
                        DBG(DPHY, "checking for 100BaseFX link");
                        /* fiber is 100 Mb FDX */
                        afe_miiwrite(afep, phyaddr, MII_CONTROL,
                            MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
                        drv_usecwait(50);

                        mcr = afe_miiread(afep, phyaddr, PHY_MCR);
                        mcr |= MCR_FIBER;
                        afe_miiwrite(afep, phyaddr, PHY_MCR, mcr);
                        drv_usecwait(500);

                        /* if fiber is active, use it */
                        if ((afe_miiread(afep, phyaddr, MII_STATUS) &
                            MII_STATUS_LINKUP)) {
                                bmcr = MII_CONTROL_100MB | MII_CONTROL_FDUPLEX;
                                fiber = 1;
                        } else {
                                mcr &= ~MCR_FIBER;
                                fiber = 0;
                        }
                        break;
                }
                afe_miiwrite(afep, phyaddr, PHY_MCR, mcr);
                drv_usecwait(500);
        }

        if (fiber) {
                /* fiber only supports 100FDX(?)
*/ 1153 bmsr &= ~(MII_STATUS_100_BASE_T4 | 1154 MII_STATUS_100_BASEX | MII_STATUS_10_FD | MII_STATUS_10); 1155 bmsr |= MII_STATUS_100_BASEX_FD; 1156 } 1157 1158 /* assume full support for everything to start */ 1159 afep->afe_cap_aneg = afep->afe_cap_100T4 = 1160 afep->afe_cap_100fdx = afep->afe_cap_100hdx = 1161 afep->afe_cap_10fdx = afep->afe_cap_10hdx = 1; 1162 1163 /* disable modes not supported in hardware */ 1164 if (!(bmsr & MII_STATUS_100_BASEX_FD)) { 1165 afep->afe_adv_100fdx = 0; 1166 afep->afe_cap_100fdx = 0; 1167 } 1168 if (!(bmsr & MII_STATUS_100_BASE_T4)) { 1169 afep->afe_adv_100T4 = 0; 1170 afep->afe_cap_100T4 = 0; 1171 } 1172 if (!(bmsr & MII_STATUS_100_BASEX)) { 1173 afep->afe_adv_100hdx = 0; 1174 afep->afe_cap_100hdx = 0; 1175 } 1176 if (!(bmsr & MII_STATUS_10_FD)) { 1177 afep->afe_adv_10fdx = 0; 1178 afep->afe_cap_10fdx = 0; 1179 } 1180 if (!(bmsr & MII_STATUS_10)) { 1181 afep->afe_adv_10hdx = 0; 1182 afep->afe_cap_10hdx = 0; 1183 } 1184 if (!(bmsr & MII_STATUS_CANAUTONEG)) { 1185 afep->afe_adv_aneg = 0; 1186 afep->afe_cap_aneg = 0; 1187 } 1188 1189 cnt = 0; 1190 if (afep->afe_adv_100fdx) { 1191 anar |= MII_ABILITY_100BASE_TX_FD; 1192 cnt++; 1193 } 1194 if (afep->afe_adv_100T4) { 1195 anar |= MII_ABILITY_100BASE_T4; 1196 cnt++; 1197 } 1198 if (afep->afe_adv_100hdx) { 1199 anar |= MII_ABILITY_100BASE_TX; 1200 cnt++; 1201 } 1202 if (afep->afe_adv_10fdx) { 1203 anar |= MII_ABILITY_10BASE_T_FD; 1204 cnt++; 1205 } 1206 if (afep->afe_adv_10hdx) { 1207 anar |= MII_ABILITY_10BASE_T; 1208 cnt++; 1209 } 1210 1211 /* 1212 * Make certain at least one valid link mode is selected. 1213 */ 1214 if (!cnt) { 1215 afe_error(afep->afe_dip, "No valid link mode selected."); 1216 afe_error(afep->afe_dip, "Powering down PHY."); 1217 afe_stopphy(afep); 1218 afep->afe_linkup = LINK_STATE_DOWN; 1219 if (afep->afe_flags & AFE_RUNNING) 1220 afe_reportlink(afep); 1221 return; 1222 } 1223 1224 if (fiber) { 1225 bmcr = MII_CONTROL_100MB | MII_CONTROL_FDUPLEX; 1226 } else if ((afep->afe_adv_aneg) && (bmsr & MII_STATUS_CANAUTONEG)) { 1227 DBG(DPHY, "using autoneg mode"); 1228 bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN); 1229 } else { 1230 DBG(DPHY, "using forced mode"); 1231 if (afep->afe_adv_100fdx) { 1232 bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX); 1233 } else if (afep->afe_adv_100hdx) { 1234 bmcr = MII_CONTROL_100MB; 1235 } else if (afep->afe_adv_10fdx) { 1236 bmcr = MII_CONTROL_FDUPLEX; 1237 } else { 1238 /* 10HDX */ 1239 bmcr = 0; 1240 } 1241 } 1242 1243 DBG(DPHY, "programming anar to 0x%x", anar); 1244 afe_miiwrite(afep, phyaddr, MII_AN_ADVERT, anar); 1245 DBG(DPHY, "programming bmcr to 0x%x", bmcr); 1246 afe_miiwrite(afep, phyaddr, MII_CONTROL, bmcr); 1247 1248 if (nosqe) { 1249 uint16_t pilr; 1250 /* 1251 * work around for errata 983B_0416 -- duplex light flashes 1252 * in 10 HDX. we just disable SQE testing on the device. 1253 */ 1254 pilr = afe_miiread(afep, phyaddr, PHY_PILR); 1255 pilr |= PILR_NOSQE; 1256 afe_miiwrite(afep, phyaddr, PHY_PILR, pilr); 1257 } 1258 1259 /* 1260 * schedule a query of the link status 1261 */ 1262 PUTCSR(afep, CSR_TIMER, TIMER_LOOP | 1263 (AFE_LINKTIMER * 1000 / TIMER_USEC)); 1264 } 1265 1266 void 1267 afe_stopphy(afe_t *afep) 1268 { 1269 /* stop the phy timer */ 1270 PUTCSR(afep, CSR_TIMER, 0); 1271 1272 /* 1273 * phy in isolate & powerdown mode... 
1274 */ 1275 afe_miiwrite(afep, afep->afe_phyaddr, MII_CONTROL, 1276 MII_CONTROL_PWRDN | MII_CONTROL_ISOLATE); 1277 1278 /* 1279 * mark the link state unknown 1280 */ 1281 if (!afep->afe_resetting) { 1282 afep->afe_linkup = LINK_STATE_UNKNOWN; 1283 afep->afe_ifspeed = 0; 1284 afep->afe_duplex = LINK_DUPLEX_UNKNOWN; 1285 if (afep->afe_flags & AFE_RUNNING) 1286 afe_reportlink(afep); 1287 } 1288 } 1289 1290 void 1291 afe_reportlink(afe_t *afep) 1292 { 1293 int changed = 0; 1294 1295 if (afep->afe_ifspeed != afep->afe_lastifspeed) { 1296 afep->afe_lastifspeed = afep->afe_ifspeed; 1297 changed++; 1298 } 1299 if (afep->afe_duplex != afep->afe_lastduplex) { 1300 afep->afe_lastduplex = afep->afe_duplex; 1301 changed++; 1302 } 1303 if (changed) 1304 mac_link_update(afep->afe_mh, afep->afe_linkup); 1305 } 1306 1307 void 1308 afe_checklink(afe_t *afep) 1309 { 1310 if ((afep->afe_flags & AFE_RUNNING) == 0) 1311 return; 1312 1313 if ((afep->afe_txstall_time != 0) && 1314 (gethrtime() > afep->afe_txstall_time) && 1315 (afep->afe_txavail != AFE_TXRING)) { 1316 afep->afe_txstall_time = 0; 1317 afe_error(afep->afe_dip, "TX stall detected!"); 1318 afe_resetall(afep); 1319 return; 1320 } 1321 1322 switch (AFE_MODEL(afep)) { 1323 case MODEL_COMET: 1324 afe_checklinkcomet(afep); 1325 break; 1326 case MODEL_CENTAUR: 1327 afe_checklinkcentaur(afep); 1328 break; 1329 } 1330 } 1331 1332 void 1333 afe_checklinkcomet(afe_t *afep) 1334 { 1335 uint16_t xciis; 1336 int reinit = 0; 1337 1338 xciis = GETCSR16(afep, CSR_XCIIS); 1339 if (xciis & XCIIS_PDF) { 1340 afe_error(afep->afe_dip, "Parallel detection fault detected!"); 1341 } 1342 if (xciis & XCIIS_RF) { 1343 afe_error(afep->afe_dip, "Remote fault detected."); 1344 } 1345 if (xciis & XCIIS_LFAIL) { 1346 if (afep->afe_linkup == LINK_STATE_UP) { 1347 reinit++; 1348 } 1349 afep->afe_ifspeed = 0; 1350 afep->afe_linkup = LINK_STATE_DOWN; 1351 afep->afe_duplex = LINK_DUPLEX_UNKNOWN; 1352 afe_reportlink(afep); 1353 if (reinit) { 1354 afe_startphy(afep); 1355 } 1356 return; 1357 } 1358 1359 afep->afe_linkup = LINK_STATE_UP; 1360 afep->afe_ifspeed = (xciis & XCIIS_SPEED) ? 100000000 : 10000000; 1361 if (xciis & XCIIS_DUPLEX) { 1362 afep->afe_duplex = LINK_DUPLEX_FULL; 1363 } else { 1364 afep->afe_duplex = LINK_DUPLEX_HALF; 1365 } 1366 1367 afe_reportlink(afep); 1368 } 1369 1370 void 1371 afe_checklinkcentaur(afe_t *afep) 1372 { 1373 unsigned opmode; 1374 int reinit = 0; 1375 1376 opmode = GETCSR(afep, CSR_OPM); 1377 if ((opmode & OPM_MODE) == OPM_MACONLY) { 1378 DBG(DPHY, "Centaur running in MAC-only mode"); 1379 afe_checklinkmii(afep); 1380 return; 1381 } 1382 DBG(DPHY, "Centaur running in single chip mode"); 1383 if ((opmode & OPM_LINK) == 0) { 1384 if (afep->afe_linkup == LINK_STATE_UP) { 1385 reinit++; 1386 } 1387 afep->afe_ifspeed = 0; 1388 afep->afe_duplex = LINK_DUPLEX_UNKNOWN; 1389 afep->afe_linkup = LINK_STATE_DOWN; 1390 afe_reportlink(afep); 1391 if (reinit) { 1392 afe_startphy(afep); 1393 } 1394 return; 1395 } 1396 1397 afep->afe_linkup = LINK_STATE_UP; 1398 afep->afe_ifspeed = (opmode & OPM_SPEED) ? 
100000000 : 10000000; 1399 if (opmode & OPM_DUPLEX) { 1400 afep->afe_duplex = LINK_DUPLEX_FULL; 1401 } else { 1402 afep->afe_duplex = LINK_DUPLEX_HALF; 1403 } 1404 afe_reportlink(afep); 1405 } 1406 1407 void 1408 afe_checklinkmii(afe_t *afep) 1409 { 1410 /* read MII state registers */ 1411 uint16_t bmsr; 1412 uint16_t bmcr; 1413 uint16_t anar; 1414 uint16_t anlpar; 1415 int reinit = 0; 1416 1417 /* read this twice, to clear latched link state */ 1418 bmsr = afe_miiread(afep, afep->afe_phyaddr, MII_STATUS); 1419 bmsr = afe_miiread(afep, afep->afe_phyaddr, MII_STATUS); 1420 bmcr = afe_miiread(afep, afep->afe_phyaddr, MII_CONTROL); 1421 anar = afe_miiread(afep, afep->afe_phyaddr, MII_AN_ADVERT); 1422 anlpar = afe_miiread(afep, afep->afe_phyaddr, MII_AN_LPABLE); 1423 1424 if (bmsr & MII_STATUS_REMFAULT) { 1425 afe_error(afep->afe_dip, "Remote fault detected."); 1426 } 1427 if (bmsr & MII_STATUS_JABBERING) { 1428 afe_error(afep->afe_dip, "Jabber condition detected."); 1429 } 1430 if ((bmsr & MII_STATUS_LINKUP) == 0) { 1431 /* no link */ 1432 if (afep->afe_linkup == LINK_STATE_UP) { 1433 reinit = 1; 1434 } 1435 afep->afe_ifspeed = 0; 1436 afep->afe_duplex = LINK_DUPLEX_UNKNOWN; 1437 afep->afe_linkup = LINK_STATE_DOWN; 1438 afe_reportlink(afep); 1439 if (reinit) { 1440 afe_startphy(afep); 1441 } 1442 return; 1443 } 1444 1445 DBG(DCHATTY, "link up!"); 1446 afep->afe_linkup = LINK_STATE_UP; 1447 1448 if (!(bmcr & MII_CONTROL_ANE)) { 1449 /* forced mode */ 1450 if (bmcr & MII_CONTROL_100MB) { 1451 afep->afe_ifspeed = 100000000; 1452 } else { 1453 afep->afe_ifspeed = 10000000; 1454 } 1455 if (bmcr & MII_CONTROL_FDUPLEX) { 1456 afep->afe_duplex = LINK_DUPLEX_FULL; 1457 } else { 1458 afep->afe_duplex = LINK_DUPLEX_HALF; 1459 } 1460 } else if ((!(bmsr & MII_STATUS_CANAUTONEG)) || 1461 (!(bmsr & MII_STATUS_ANDONE))) { 1462 afep->afe_ifspeed = 0; 1463 afep->afe_duplex = LINK_DUPLEX_UNKNOWN; 1464 } else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) { 1465 afep->afe_ifspeed = 100000000; 1466 afep->afe_duplex = LINK_DUPLEX_FULL; 1467 } else if (anar & anlpar & MII_ABILITY_100BASE_T4) { 1468 afep->afe_ifspeed = 100000000; 1469 afep->afe_duplex = LINK_DUPLEX_HALF; 1470 } else if (anar & anlpar & MII_ABILITY_100BASE_TX) { 1471 afep->afe_ifspeed = 100000000; 1472 afep->afe_duplex = LINK_DUPLEX_HALF; 1473 } else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) { 1474 afep->afe_ifspeed = 10000000; 1475 afep->afe_duplex = LINK_DUPLEX_FULL; 1476 } else if (anar & anlpar & MII_ABILITY_10BASE_T) { 1477 afep->afe_ifspeed = 10000000; 1478 afep->afe_duplex = LINK_DUPLEX_HALF; 1479 } else { 1480 afep->afe_ifspeed = 0; 1481 afep->afe_duplex = LINK_DUPLEX_UNKNOWN; 1482 } 1483 1484 afe_reportlink(afep); 1485 } 1486 1487 void 1488 afe_miitristate(afe_t *afep) 1489 { 1490 uint32_t val = SPR_SROM_WRITE | SPR_MII_CTRL; 1491 1492 PUTCSR(afep, CSR_SPR, val); 1493 drv_usecwait(1); 1494 PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK); 1495 drv_usecwait(1); 1496 } 1497 1498 void 1499 afe_miiwritebit(afe_t *afep, uint8_t bit) 1500 { 1501 uint32_t val = bit ? SPR_MII_DOUT : 0; 1502 1503 PUTCSR(afep, CSR_SPR, val); 1504 drv_usecwait(1); 1505 PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK); 1506 drv_usecwait(1); 1507 } 1508 1509 uint8_t 1510 afe_miireadbit(afe_t *afep) 1511 { 1512 uint32_t val = SPR_MII_CTRL | SPR_SROM_READ; 1513 uint8_t bit; 1514 1515 PUTCSR(afep, CSR_SPR, val); 1516 drv_usecwait(1); 1517 bit = (GETCSR(afep, CSR_SPR) & SPR_MII_DIN) ? 
1 : 0; 1518 PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK); 1519 drv_usecwait(1); 1520 return (bit); 1521 } 1522 1523 uint16_t 1524 afe_miiread(afe_t *afep, int phy, int reg) 1525 { 1526 /* 1527 * ADMtek bugs ignore address decode bits -- they only 1528 * support PHY at 1. 1529 */ 1530 if (phy != 1) { 1531 return (0xffff); 1532 } 1533 switch (AFE_MODEL(afep)) { 1534 case MODEL_COMET: 1535 return (afe_miireadcomet(afep, phy, reg)); 1536 case MODEL_CENTAUR: 1537 return (afe_miireadgeneral(afep, phy, reg)); 1538 } 1539 return (0xffff); 1540 } 1541 1542 uint16_t 1543 afe_miireadgeneral(afe_t *afep, int phy, int reg) 1544 { 1545 uint16_t value = 0; 1546 int i; 1547 1548 /* send the 32 bit preamble */ 1549 for (i = 0; i < 32; i++) { 1550 afe_miiwritebit(afep, 1); 1551 } 1552 1553 /* send the start code - 01b */ 1554 afe_miiwritebit(afep, 0); 1555 afe_miiwritebit(afep, 1); 1556 1557 /* send the opcode for read, - 10b */ 1558 afe_miiwritebit(afep, 1); 1559 afe_miiwritebit(afep, 0); 1560 1561 /* next we send the 5 bit phy address */ 1562 for (i = 0x10; i > 0; i >>= 1) { 1563 afe_miiwritebit(afep, (phy & i) ? 1 : 0); 1564 } 1565 1566 /* the 5 bit register address goes next */ 1567 for (i = 0x10; i > 0; i >>= 1) { 1568 afe_miiwritebit(afep, (reg & i) ? 1 : 0); 1569 } 1570 1571 /* turnaround - tristate followed by logic 0 */ 1572 afe_miitristate(afep); 1573 afe_miiwritebit(afep, 0); 1574 1575 /* read the 16 bit register value */ 1576 for (i = 0x8000; i > 0; i >>= 1) { 1577 value <<= 1; 1578 value |= afe_miireadbit(afep); 1579 } 1580 afe_miitristate(afep); 1581 return (value); 1582 } 1583 1584 uint16_t 1585 afe_miireadcomet(afe_t *afep, int phy, int reg) 1586 { 1587 if (phy != 1) { 1588 return (0xffff); 1589 } 1590 switch (reg) { 1591 case MII_CONTROL: 1592 reg = CSR_BMCR; 1593 break; 1594 case MII_STATUS: 1595 reg = CSR_BMSR; 1596 break; 1597 case MII_PHYIDH: 1598 reg = CSR_PHYIDR1; 1599 break; 1600 case MII_PHYIDL: 1601 reg = CSR_PHYIDR2; 1602 break; 1603 case MII_AN_ADVERT: 1604 reg = CSR_ANAR; 1605 break; 1606 case MII_AN_LPABLE: 1607 reg = CSR_ANLPAR; 1608 break; 1609 case MII_AN_EXPANSION: 1610 reg = CSR_ANER; 1611 break; 1612 default: 1613 return (0); 1614 } 1615 return (GETCSR16(afep, reg) & 0xFFFF); 1616 } 1617 1618 void 1619 afe_miiwrite(afe_t *afep, int phy, int reg, uint16_t val) 1620 { 1621 /* 1622 * ADMtek bugs ignore address decode bits -- they only 1623 * support PHY at 1. 1624 */ 1625 if (phy != 1) { 1626 return; 1627 } 1628 switch (AFE_MODEL(afep)) { 1629 case MODEL_COMET: 1630 afe_miiwritecomet(afep, phy, reg, val); 1631 break; 1632 case MODEL_CENTAUR: 1633 afe_miiwritegeneral(afep, phy, reg, val); 1634 break; 1635 } 1636 } 1637 1638 void 1639 afe_miiwritegeneral(afe_t *afep, int phy, int reg, uint16_t val) 1640 { 1641 int i; 1642 1643 /* send the 32 bit preamble */ 1644 for (i = 0; i < 32; i++) { 1645 afe_miiwritebit(afep, 1); 1646 } 1647 1648 /* send the start code - 01b */ 1649 afe_miiwritebit(afep, 0); 1650 afe_miiwritebit(afep, 1); 1651 1652 /* send the opcode for write, - 01b */ 1653 afe_miiwritebit(afep, 0); 1654 afe_miiwritebit(afep, 1); 1655 1656 /* next we send the 5 bit phy address */ 1657 for (i = 0x10; i > 0; i >>= 1) { 1658 afe_miiwritebit(afep, (phy & i) ? 1 : 0); 1659 } 1660 1661 /* the 5 bit register address goes next */ 1662 for (i = 0x10; i > 0; i >>= 1) { 1663 afe_miiwritebit(afep, (reg & i) ? 
1 : 0); 1664 } 1665 1666 /* turnaround - tristate followed by logic 0 */ 1667 afe_miitristate(afep); 1668 afe_miiwritebit(afep, 0); 1669 1670 /* now write out our data (16 bits) */ 1671 for (i = 0x8000; i > 0; i >>= 1) { 1672 afe_miiwritebit(afep, (val & i) ? 1 : 0); 1673 } 1674 1675 /* idle mode */ 1676 afe_miitristate(afep); 1677 } 1678 1679 void 1680 afe_miiwritecomet(afe_t *afep, int phy, int reg, uint16_t val) 1681 { 1682 if (phy != 1) { 1683 return; 1684 } 1685 switch (reg) { 1686 case MII_CONTROL: 1687 reg = CSR_BMCR; 1688 break; 1689 case MII_STATUS: 1690 reg = CSR_BMSR; 1691 break; 1692 case MII_PHYIDH: 1693 reg = CSR_PHYIDR1; 1694 break; 1695 case MII_PHYIDL: 1696 reg = CSR_PHYIDR2; 1697 break; 1698 case MII_AN_ADVERT: 1699 reg = CSR_ANAR; 1700 break; 1701 case MII_AN_LPABLE: 1702 reg = CSR_ANLPAR; 1703 break; 1704 case MII_AN_EXPANSION: 1705 reg = CSR_ANER; 1706 break; 1707 default: 1708 return; 1709 } 1710 PUTCSR16(afep, reg, val); 1711 } 1712 1713 int 1714 afe_m_start(void *arg) 1715 { 1716 afe_t *afep = arg; 1717 1718 /* grab exclusive access to the card */ 1719 mutex_enter(&afep->afe_intrlock); 1720 mutex_enter(&afep->afe_xmtlock); 1721 1722 afe_startall(afep); 1723 afep->afe_flags |= AFE_RUNNING; 1724 1725 mutex_exit(&afep->afe_xmtlock); 1726 mutex_exit(&afep->afe_intrlock); 1727 return (0); 1728 } 1729 1730 void 1731 afe_m_stop(void *arg) 1732 { 1733 afe_t *afep = arg; 1734 1735 /* exclusive access to the hardware! */ 1736 mutex_enter(&afep->afe_intrlock); 1737 mutex_enter(&afep->afe_xmtlock); 1738 1739 afe_stopall(afep); 1740 afep->afe_flags &= ~AFE_RUNNING; 1741 1742 mutex_exit(&afep->afe_xmtlock); 1743 mutex_exit(&afep->afe_intrlock); 1744 } 1745 1746 void 1747 afe_startmac(afe_t *afep) 1748 { 1749 /* verify exclusive access to the card */ 1750 ASSERT(mutex_owned(&afep->afe_intrlock)); 1751 ASSERT(mutex_owned(&afep->afe_xmtlock)); 1752 1753 /* start the card */ 1754 SETBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE); 1755 1756 if (afep->afe_txavail != AFE_TXRING) 1757 PUTCSR(afep, CSR_TDR, 0); 1758 1759 /* tell the mac that we are ready to go! */ 1760 if (afep->afe_flags & AFE_RUNNING) 1761 mac_tx_update(afep->afe_mh); 1762 } 1763 1764 void 1765 afe_stopmac(afe_t *afep) 1766 { 1767 int i; 1768 1769 /* exclusive access to the hardware! */ 1770 ASSERT(mutex_owned(&afep->afe_intrlock)); 1771 ASSERT(mutex_owned(&afep->afe_xmtlock)); 1772 1773 CLRBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE); 1774 1775 /* 1776 * A 1518 byte frame at 10Mbps takes about 1.2 msec to drain. 1777 * We just add up to the nearest msec (2), which should be 1778 * plenty to complete. 1779 * 1780 * Note that some chips never seem to indicate the transition to 1781 * the stopped state properly. Experience shows that we can safely 1782 * proceed anyway, after waiting the requisite timeout. 1783 */ 1784 for (i = 2000; i != 0; i -= 10) { 1785 if ((GETCSR(afep, CSR_SR) & (SR_TX_STATE | SR_RX_STATE)) == 0) 1786 break; 1787 drv_usecwait(10); 1788 } 1789 1790 /* prevent an interrupt */ 1791 PUTCSR(afep, CSR_SR2, INT_RXSTOPPED | INT_TXSTOPPED); 1792 } 1793 1794 void 1795 afe_resetrings(afe_t *afep) 1796 { 1797 int i; 1798 1799 /* now we need to reset the pointers... 
*/ 1800 PUTCSR(afep, CSR_RDB, 0); 1801 PUTCSR(afep, CSR_TDB, 0); 1802 1803 /* reset the descriptor ring pointers */ 1804 afep->afe_rxhead = 0; 1805 afep->afe_txreclaim = 0; 1806 afep->afe_txsend = 0; 1807 afep->afe_txavail = AFE_TXRING; 1808 1809 /* set up transmit descriptor ring */ 1810 for (i = 0; i < AFE_TXRING; i++) { 1811 afe_desc_t *tmdp = &afep->afe_txdescp[i]; 1812 unsigned control = 0; 1813 if (i == (AFE_TXRING - 1)) { 1814 control |= TXCTL_ENDRING; 1815 } 1816 PUTTXDESC(afep, tmdp->desc_status, 0); 1817 PUTTXDESC(afep, tmdp->desc_control, control); 1818 PUTTXDESC(afep, tmdp->desc_buffer1, 0); 1819 PUTTXDESC(afep, tmdp->desc_buffer2, 0); 1820 SYNCTXDESC(afep, i, DDI_DMA_SYNC_FORDEV); 1821 } 1822 PUTCSR(afep, CSR_TDB, afep->afe_txdesc_paddr); 1823 1824 /* make the receive buffers available */ 1825 for (i = 0; i < AFE_RXRING; i++) { 1826 afe_rxbuf_t *rxb = afep->afe_rxbufs[i]; 1827 afe_desc_t *rmdp = &afep->afe_rxdescp[i]; 1828 unsigned control; 1829 1830 control = AFE_BUFSZ & RXCTL_BUFLEN1; 1831 if (i == (AFE_RXRING - 1)) { 1832 control |= RXCTL_ENDRING; 1833 } 1834 PUTRXDESC(afep, rmdp->desc_buffer1, rxb->rxb_paddr); 1835 PUTRXDESC(afep, rmdp->desc_buffer2, 0); 1836 PUTRXDESC(afep, rmdp->desc_control, control); 1837 PUTRXDESC(afep, rmdp->desc_status, RXSTAT_OWN); 1838 SYNCRXDESC(afep, i, DDI_DMA_SYNC_FORDEV); 1839 } 1840 PUTCSR(afep, CSR_RDB, afep->afe_rxdesc_paddr); 1841 } 1842 1843 void 1844 afe_stopall(afe_t *afep) 1845 { 1846 afe_disableinterrupts(afep); 1847 1848 afe_stopmac(afep); 1849 1850 /* stop the phy */ 1851 afe_stopphy(afep); 1852 } 1853 1854 void 1855 afe_startall(afe_t *afep) 1856 { 1857 ASSERT(mutex_owned(&afep->afe_intrlock)); 1858 ASSERT(mutex_owned(&afep->afe_xmtlock)); 1859 1860 /* make sure interrupts are disabled to begin */ 1861 afe_disableinterrupts(afep); 1862 1863 /* initialize the chip */ 1864 (void) afe_initialize(afep); 1865 1866 /* now we can enable interrupts */ 1867 afe_enableinterrupts(afep); 1868 1869 /* start up the phy */ 1870 afe_startphy(afep); 1871 1872 /* start up the mac */ 1873 afe_startmac(afep); 1874 } 1875 1876 void 1877 afe_resetall(afe_t *afep) 1878 { 1879 afep->afe_resetting = B_TRUE; 1880 afe_stopall(afep); 1881 afep->afe_resetting = B_FALSE; 1882 afe_startall(afep); 1883 } 1884 1885 afe_txbuf_t * 1886 afe_alloctxbuf(afe_t *afep) 1887 { 1888 ddi_dma_cookie_t dmac; 1889 unsigned ncookies; 1890 afe_txbuf_t *txb; 1891 size_t len; 1892 1893 txb = kmem_zalloc(sizeof (*txb), KM_SLEEP); 1894 1895 if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_txattr, 1896 DDI_DMA_SLEEP, NULL, &txb->txb_dmah) != DDI_SUCCESS) { 1897 return (NULL); 1898 } 1899 1900 if (ddi_dma_mem_alloc(txb->txb_dmah, AFE_BUFSZ, &afe_bufattr, 1901 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &txb->txb_buf, &len, 1902 &txb->txb_acch) != DDI_SUCCESS) { 1903 return (NULL); 1904 } 1905 if (ddi_dma_addr_bind_handle(txb->txb_dmah, NULL, txb->txb_buf, 1906 len, DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, 1907 &dmac, &ncookies) != DDI_DMA_MAPPED) { 1908 return (NULL); 1909 } 1910 txb->txb_paddr = dmac.dmac_address; 1911 1912 return (txb); 1913 } 1914 1915 void 1916 afe_destroytxbuf(afe_txbuf_t *txb) 1917 { 1918 if (txb != NULL) { 1919 if (txb->txb_paddr) 1920 (void) ddi_dma_unbind_handle(txb->txb_dmah); 1921 if (txb->txb_acch) 1922 ddi_dma_mem_free(&txb->txb_acch); 1923 if (txb->txb_dmah) 1924 ddi_dma_free_handle(&txb->txb_dmah); 1925 kmem_free(txb, sizeof (*txb)); 1926 } 1927 } 1928 1929 afe_rxbuf_t * 1930 afe_allocrxbuf(afe_t *afep) 1931 { 1932 afe_rxbuf_t *rxb; 1933 
size_t len; 1934 unsigned ccnt; 1935 ddi_dma_cookie_t dmac; 1936 1937 rxb = kmem_zalloc(sizeof (*rxb), KM_SLEEP); 1938 1939 if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr, 1940 DDI_DMA_SLEEP, NULL, &rxb->rxb_dmah) != DDI_SUCCESS) { 1941 kmem_free(rxb, sizeof (*rxb)); 1942 return (NULL); 1943 } 1944 if (ddi_dma_mem_alloc(rxb->rxb_dmah, AFE_BUFSZ, &afe_bufattr, 1945 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &rxb->rxb_buf, &len, 1946 &rxb->rxb_acch) != DDI_SUCCESS) { 1947 ddi_dma_free_handle(&rxb->rxb_dmah); 1948 kmem_free(rxb, sizeof (*rxb)); 1949 return (NULL); 1950 } 1951 if (ddi_dma_addr_bind_handle(rxb->rxb_dmah, NULL, rxb->rxb_buf, len, 1952 DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac, 1953 &ccnt) != DDI_DMA_MAPPED) { 1954 ddi_dma_mem_free(&rxb->rxb_acch); 1955 ddi_dma_free_handle(&rxb->rxb_dmah); 1956 kmem_free(rxb, sizeof (*rxb)); 1957 return (NULL); 1958 } 1959 rxb->rxb_paddr = dmac.dmac_address; 1960 1961 return (rxb); 1962 } 1963 1964 void 1965 afe_destroyrxbuf(afe_rxbuf_t *rxb) 1966 { 1967 if (rxb) { 1968 (void) ddi_dma_unbind_handle(rxb->rxb_dmah); 1969 ddi_dma_mem_free(&rxb->rxb_acch); 1970 ddi_dma_free_handle(&rxb->rxb_dmah); 1971 kmem_free(rxb, sizeof (*rxb)); 1972 } 1973 } 1974 1975 /* 1976 * Allocate receive resources. 1977 */ 1978 int 1979 afe_allocrxring(afe_t *afep) 1980 { 1981 int rval; 1982 int i; 1983 size_t size; 1984 size_t len; 1985 ddi_dma_cookie_t dmac; 1986 unsigned ncookies; 1987 caddr_t kaddr; 1988 1989 size = AFE_RXRING * sizeof (afe_desc_t); 1990 1991 rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr, 1992 DDI_DMA_SLEEP, NULL, &afep->afe_rxdesc_dmah); 1993 if (rval != DDI_SUCCESS) { 1994 afe_error(afep->afe_dip, 1995 "unable to allocate DMA handle for rx descriptors"); 1996 return (DDI_FAILURE); 1997 } 1998 1999 rval = ddi_dma_mem_alloc(afep->afe_rxdesc_dmah, size, &afe_devattr, 2000 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len, 2001 &afep->afe_rxdesc_acch); 2002 if (rval != DDI_SUCCESS) { 2003 afe_error(afep->afe_dip, 2004 "unable to allocate DMA memory for rx descriptors"); 2005 return (DDI_FAILURE); 2006 } 2007 2008 rval = ddi_dma_addr_bind_handle(afep->afe_rxdesc_dmah, NULL, kaddr, 2009 size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 2010 &dmac, &ncookies); 2011 if (rval != DDI_DMA_MAPPED) { 2012 afe_error(afep->afe_dip, 2013 "unable to bind DMA for rx descriptors"); 2014 return (DDI_FAILURE); 2015 } 2016 2017 /* because of afe_dma_attr */ 2018 ASSERT(ncookies == 1); 2019 2020 /* we take the 32-bit physical address out of the cookie */ 2021 afep->afe_rxdesc_paddr = dmac.dmac_address; 2022 afep->afe_rxdescp = (void *)kaddr; 2023 2024 /* allocate buffer pointers (not the buffers themselves, yet) */ 2025 afep->afe_rxbufs = kmem_zalloc(AFE_RXRING * sizeof (afe_rxbuf_t *), 2026 KM_SLEEP); 2027 2028 /* now allocate rx buffers */ 2029 for (i = 0; i < AFE_RXRING; i++) { 2030 afe_rxbuf_t *rxb = afe_allocrxbuf(afep); 2031 if (rxb == NULL) 2032 return (DDI_FAILURE); 2033 afep->afe_rxbufs[i] = rxb; 2034 } 2035 2036 return (DDI_SUCCESS); 2037 } 2038 2039 /* 2040 * Allocate transmit resources. 
2041 */ 2042 int 2043 afe_alloctxring(afe_t *afep) 2044 { 2045 int rval; 2046 int i; 2047 size_t size; 2048 size_t len; 2049 ddi_dma_cookie_t dmac; 2050 unsigned ncookies; 2051 caddr_t kaddr; 2052 2053 size = AFE_TXRING * sizeof (afe_desc_t); 2054 2055 rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr, 2056 DDI_DMA_SLEEP, NULL, &afep->afe_txdesc_dmah); 2057 if (rval != DDI_SUCCESS) { 2058 afe_error(afep->afe_dip, 2059 "unable to allocate DMA handle for tx descriptors"); 2060 return (DDI_FAILURE); 2061 } 2062 2063 rval = ddi_dma_mem_alloc(afep->afe_txdesc_dmah, size, &afe_devattr, 2064 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len, 2065 &afep->afe_txdesc_acch); 2066 if (rval != DDI_SUCCESS) { 2067 afe_error(afep->afe_dip, 2068 "unable to allocate DMA memory for tx descriptors"); 2069 return (DDI_FAILURE); 2070 } 2071 2072 rval = ddi_dma_addr_bind_handle(afep->afe_txdesc_dmah, NULL, kaddr, 2073 size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 2074 &dmac, &ncookies); 2075 if (rval != DDI_DMA_MAPPED) { 2076 afe_error(afep->afe_dip, 2077 "unable to bind DMA for tx descriptors"); 2078 return (DDI_FAILURE); 2079 } 2080 2081 /* because of afe_dma_attr */ 2082 ASSERT(ncookies == 1); 2083 2084 /* we take the 32-bit physical address out of the cookie */ 2085 afep->afe_txdesc_paddr = dmac.dmac_address; 2086 afep->afe_txdescp = (void *)kaddr; 2087 2088 /* allocate buffer pointers (not the buffers themselves, yet) */ 2089 afep->afe_txbufs = kmem_zalloc(AFE_TXRING * sizeof (afe_txbuf_t *), 2090 KM_SLEEP); 2091 2092 /* now allocate tx buffers */ 2093 for (i = 0; i < AFE_TXRING; i++) { 2094 afe_txbuf_t *txb = afe_alloctxbuf(afep); 2095 if (txb == NULL) 2096 return (DDI_FAILURE); 2097 afep->afe_txbufs[i] = txb; 2098 } 2099 2100 return (DDI_SUCCESS); 2101 } 2102 2103 void 2104 afe_freerxring(afe_t *afep) 2105 { 2106 int i; 2107 2108 for (i = 0; i < AFE_RXRING; i++) { 2109 afe_destroyrxbuf(afep->afe_rxbufs[i]); 2110 } 2111 2112 if (afep->afe_rxbufs) { 2113 kmem_free(afep->afe_rxbufs, 2114 AFE_RXRING * sizeof (afe_rxbuf_t *)); 2115 } 2116 2117 if (afep->afe_rxdesc_paddr) 2118 (void) ddi_dma_unbind_handle(afep->afe_rxdesc_dmah); 2119 if (afep->afe_rxdesc_acch) 2120 ddi_dma_mem_free(&afep->afe_rxdesc_acch); 2121 if (afep->afe_rxdesc_dmah) 2122 ddi_dma_free_handle(&afep->afe_rxdesc_dmah); 2123 } 2124 2125 void 2126 afe_freetxring(afe_t *afep) 2127 { 2128 int i; 2129 2130 for (i = 0; i < AFE_TXRING; i++) { 2131 afe_destroytxbuf(afep->afe_txbufs[i]); 2132 } 2133 2134 if (afep->afe_txbufs) { 2135 kmem_free(afep->afe_txbufs, 2136 AFE_TXRING * sizeof (afe_txbuf_t *)); 2137 } 2138 if (afep->afe_txdesc_paddr) 2139 (void) ddi_dma_unbind_handle(afep->afe_txdesc_dmah); 2140 if (afep->afe_txdesc_acch) 2141 ddi_dma_mem_free(&afep->afe_txdesc_acch); 2142 if (afep->afe_txdesc_dmah) 2143 ddi_dma_free_handle(&afep->afe_txdesc_dmah); 2144 } 2145 2146 /* 2147 * Interrupt service routine. 2148 */ 2149 unsigned 2150 afe_intr(caddr_t arg) 2151 { 2152 afe_t *afep = (void *)arg; 2153 uint32_t status; 2154 mblk_t *mp = NULL; 2155 2156 mutex_enter(&afep->afe_intrlock); 2157 2158 if (afep->afe_flags & AFE_SUSPENDED) { 2159 /* we cannot receive interrupts! */ 2160 mutex_exit(&afep->afe_intrlock); 2161 return (DDI_INTR_UNCLAIMED); 2162 } 2163 2164 /* check interrupt status bits, did we interrupt? 
*/ 2165 status = GETCSR(afep, CSR_SR2) & INT_ALL; 2166 2167 if (status == 0) { 2168 KIOIP->intrs[KSTAT_INTR_SPURIOUS]++; 2169 mutex_exit(&afep->afe_intrlock); 2170 return (DDI_INTR_UNCLAIMED); 2171 } 2172 /* ack the interrupt */ 2173 PUTCSR(afep, CSR_SR2, status); 2174 KIOIP->intrs[KSTAT_INTR_HARD]++; 2175 2176 if (!(afep->afe_flags & AFE_RUNNING)) { 2177 /* not running, don't touch anything */ 2178 mutex_exit(&afep->afe_intrlock); 2179 return (DDI_INTR_CLAIMED); 2180 } 2181 2182 if (status & (INT_RXOK|INT_RXNOBUF)) { 2183 /* receive packets */ 2184 mp = afe_receive(afep); 2185 if (status & INT_RXNOBUF) 2186 PUTCSR(afep, CSR_RDR, 0); /* wake up chip */ 2187 } 2188 2189 if (status & INT_TXOK) { 2190 /* transmit completed */ 2191 mutex_enter(&afep->afe_xmtlock); 2192 afe_reclaim(afep); 2193 mutex_exit(&afep->afe_xmtlock); 2194 } 2195 2196 if (status & (INT_LINKCHG|INT_TIMER)) { 2197 mutex_enter(&afep->afe_xmtlock); 2198 afe_checklink(afep); 2199 mutex_exit(&afep->afe_xmtlock); 2200 } 2201 2202 if (status & (INT_RXSTOPPED|INT_TXSTOPPED| 2203 INT_RXJABBER|INT_TXJABBER|INT_TXUNDERFLOW)) { 2204 2205 if (status & (INT_RXJABBER | INT_TXJABBER)) { 2206 afep->afe_jabber++; 2207 } 2208 DBG(DWARN, "resetting mac, status %x", status); 2209 mutex_enter(&afep->afe_xmtlock); 2210 afe_resetall(afep); 2211 mutex_exit(&afep->afe_xmtlock); 2212 } 2213 2214 if (status & INT_BUSERR) { 2215 switch (GETCSR(afep, CSR_SR) & SR_BERR_TYPE) { 2216 case SR_BERR_PARITY: 2217 afe_error(afep->afe_dip, "PCI parity error"); 2218 break; 2219 case SR_BERR_TARGET_ABORT: 2220 afe_error(afep->afe_dip, "PCI target abort"); 2221 break; 2222 case SR_BERR_MASTER_ABORT: 2223 afe_error(afep->afe_dip, "PCI master abort"); 2224 break; 2225 default: 2226 afe_error(afep->afe_dip, "Unknown PCI error"); 2227 break; 2228 } 2229 2230 /* reset the chip in an attempt to fix things */ 2231 mutex_enter(&afep->afe_xmtlock); 2232 afe_resetall(afep); 2233 mutex_exit(&afep->afe_xmtlock); 2234 } 2235 2236 mutex_exit(&afep->afe_intrlock); 2237 2238 /* 2239 * Send up packets. We do this outside of the intrlock. 2240 */ 2241 if (mp) { 2242 mac_rx(afep->afe_mh, NULL, mp); 2243 } 2244 2245 return (DDI_INTR_CLAIMED); 2246 } 2247 2248 void 2249 afe_enableinterrupts(afe_t *afep) 2250 { 2251 unsigned mask = INT_WANTED; 2252 2253 if (afep->afe_wantw) 2254 mask |= INT_TXOK; 2255 2256 PUTCSR(afep, CSR_IER2, mask); 2257 2258 if (AFE_MODEL(afep) == MODEL_COMET) { 2259 /* 2260 * On the Comet, this is the internal transceiver 2261 * interrupt. We program the Comet's built-in PHY to 2262 * enable certain interrupts. 
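 *
 * Note that the mask is rebuilt from INT_WANTED on every call, and
 * INT_TXOK is added only while afe_send() is waiting for free
 * descriptors (afe_wantw), so transmit-complete interrupts are taken
 * only when the ring is congested.  The Comet-specific write below
 * enables selected interrupts in the chip's integrated PHY; on the
 * Centaur parts, link changes are instead picked up through the
 * INT_LINKCHG and INT_TIMER sources handled in afe_intr().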
2263 */ 2264 PUTCSR16(afep, CSR_XIE, XIE_LDE | XIE_ANCE); 2265 } 2266 } 2267 2268 void 2269 afe_disableinterrupts(afe_t *afep) 2270 { 2271 /* disable further interrupts */ 2272 PUTCSR(afep, CSR_IER2, INT_NONE); 2273 2274 /* clear any pending interrupts */ 2275 PUTCSR(afep, CSR_SR2, INT_ALL); 2276 } 2277 2278 boolean_t 2279 afe_send(afe_t *afep, mblk_t *mp) 2280 { 2281 size_t len; 2282 afe_txbuf_t *txb; 2283 afe_desc_t *tmd; 2284 uint32_t control; 2285 int txsend; 2286 2287 ASSERT(mutex_owned(&afep->afe_xmtlock)); 2288 ASSERT(mp != NULL); 2289 2290 len = msgsize(mp); 2291 if (len > ETHERVLANMTU) { 2292 DBG(DXMIT, "frame too long: %d", len); 2293 afep->afe_macxmt_errors++; 2294 freemsg(mp); 2295 return (B_TRUE); 2296 } 2297 2298 if (afep->afe_txavail < AFE_TXRECLAIM) 2299 afe_reclaim(afep); 2300 2301 if (afep->afe_txavail == 0) { 2302 /* no more tmds */ 2303 afep->afe_wantw = B_TRUE; 2304 /* enable TX interrupt */ 2305 afe_enableinterrupts(afep); 2306 return (B_FALSE); 2307 } 2308 2309 txsend = afep->afe_txsend; 2310 2311 /* 2312 * For simplicity, we just do a copy into a preallocated 2313 * DMA buffer. 2314 */ 2315 2316 txb = afep->afe_txbufs[txsend]; 2317 mcopymsg(mp, txb->txb_buf); /* frees mp! */ 2318 2319 /* 2320 * Statistics. 2321 */ 2322 afep->afe_opackets++; 2323 afep->afe_obytes += len; 2324 if (txb->txb_buf[0] & 0x1) { 2325 if (bcmp(txb->txb_buf, afe_broadcast, ETHERADDRL) != 0) 2326 afep->afe_multixmt++; 2327 else 2328 afep->afe_brdcstxmt++; 2329 } 2330 2331 /* note len is already known to be a small unsigned */ 2332 control = len | TXCTL_FIRST | TXCTL_LAST | TXCTL_INTCMPLTE; 2333 2334 if (txsend == (AFE_TXRING - 1)) 2335 control |= TXCTL_ENDRING; 2336 2337 tmd = &afep->afe_txdescp[txsend]; 2338 2339 SYNCTXBUF(txb, len, DDI_DMA_SYNC_FORDEV); 2340 PUTTXDESC(afep, tmd->desc_control, control); 2341 PUTTXDESC(afep, tmd->desc_buffer1, txb->txb_paddr); 2342 PUTTXDESC(afep, tmd->desc_buffer2, 0); 2343 PUTTXDESC(afep, tmd->desc_status, TXSTAT_OWN); 2344 /* sync the descriptor out to the device */ 2345 SYNCTXDESC(afep, txsend, DDI_DMA_SYNC_FORDEV); 2346 2347 /* 2348 * Note the new values of txavail and txsend. 2349 */ 2350 afep->afe_txavail--; 2351 afep->afe_txsend = (txsend + 1) % AFE_TXRING; 2352 2353 /* 2354 * It should never, ever take more than 5 seconds to drain 2355 * the ring. If it happens, then we are stuck! 2356 */ 2357 afep->afe_txstall_time = gethrtime() + (5 * 1000000000ULL); 2358 2359 /* 2360 * wake up the chip ... inside the lock to protect against DR suspend, 2361 * etc. 2362 */ 2363 PUTCSR(afep, CSR_TDR, 0); 2364 2365 return (B_TRUE); 2366 } 2367 2368 /* 2369 * Reclaim buffers that have completed transmission. 
2370 */ 2371 void 2372 afe_reclaim(afe_t *afep) 2373 { 2374 afe_desc_t *tmdp; 2375 2376 while (afep->afe_txavail != AFE_TXRING) { 2377 uint32_t status; 2378 uint32_t control; 2379 int index = afep->afe_txreclaim; 2380 2381 tmdp = &afep->afe_txdescp[index]; 2382 2383 /* sync it before we read it */ 2384 SYNCTXDESC(afep, index, DDI_DMA_SYNC_FORKERNEL); 2385 2386 control = GETTXDESC(afep, tmdp->desc_control); 2387 status = GETTXDESC(afep, tmdp->desc_status); 2388 2389 if (status & TXSTAT_OWN) { 2390 /* chip is still working on it, we're done */ 2391 break; 2392 } 2393 2394 afep->afe_txavail++; 2395 afep->afe_txreclaim = (index + 1) % AFE_TXRING; 2396 2397 /* in the most common successful case, all bits are clear */ 2398 if (status == 0) 2399 continue; 2400 2401 if ((control & TXCTL_LAST) == 0) 2402 continue; 2403 2404 if (status & TXSTAT_TXERR) { 2405 afep->afe_errxmt++; 2406 2407 if (status & TXSTAT_JABBER) { 2408 /* transmit jabber timeout */ 2409 afep->afe_macxmt_errors++; 2410 } 2411 if (status & 2412 (TXSTAT_CARRLOST | TXSTAT_NOCARR)) { 2413 afep->afe_carrier_errors++; 2414 } 2415 if (status & TXSTAT_UFLOW) { 2416 afep->afe_underflow++; 2417 } 2418 if (status & TXSTAT_LATECOL) { 2419 afep->afe_tx_late_collisions++; 2420 } 2421 if (status & TXSTAT_EXCOLL) { 2422 afep->afe_ex_collisions++; 2423 afep->afe_collisions += 16; 2424 } 2425 } 2426 2427 if (status & TXSTAT_DEFER) { 2428 afep->afe_defer_xmts++; 2429 } 2430 2431 /* collision counting */ 2432 if (TXCOLLCNT(status) == 1) { 2433 afep->afe_collisions++; 2434 afep->afe_first_collisions++; 2435 } else if (TXCOLLCNT(status)) { 2436 afep->afe_collisions += TXCOLLCNT(status); 2437 afep->afe_multi_collisions += TXCOLLCNT(status); 2438 } 2439 } 2440 2441 if (afep->afe_txavail >= AFE_TXRESCHED) { 2442 if (afep->afe_wantw) { 2443 /* 2444 * we were able to reclaim some packets, so 2445 * disable tx interrupts 2446 */ 2447 afep->afe_wantw = B_FALSE; 2448 afe_enableinterrupts(afep); 2449 mac_tx_update(afep->afe_mh); 2450 } 2451 } 2452 } 2453 2454 mblk_t * 2455 afe_receive(afe_t *afep) 2456 { 2457 unsigned len; 2458 afe_rxbuf_t *rxb; 2459 afe_desc_t *rmd; 2460 uint32_t status; 2461 mblk_t *mpchain, **mpp, *mp; 2462 int head, cnt; 2463 2464 mpchain = NULL; 2465 mpp = &mpchain; 2466 head = afep->afe_rxhead; 2467 2468 /* limit the number of packets we process to a half ring size */ 2469 for (cnt = 0; cnt < AFE_RXRING / 2; cnt++) { 2470 2471 DBG(DRECV, "receive at index %d", head); 2472 2473 rmd = &afep->afe_rxdescp[head]; 2474 rxb = afep->afe_rxbufs[head]; 2475 2476 SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORKERNEL); 2477 status = GETRXDESC(afep, rmd->desc_status); 2478 if (status & RXSTAT_OWN) { 2479 /* chip is still chewing on it */ 2480 break; 2481 } 2482 2483 /* discard the ethernet frame checksum */ 2484 len = RXLENGTH(status) - ETHERFCSL; 2485 2486 DBG(DRECV, "recv length %d, status %x", len, status); 2487 2488 if ((status & (RXSTAT_ERRS | RXSTAT_FIRST | RXSTAT_LAST)) != 2489 (RXSTAT_FIRST | RXSTAT_LAST)) { 2490 2491 afep->afe_errrcv++; 2492 2493 /* 2494 * Abnormal status bits detected, analyze further. 
2495 */ 2496 if ((status & (RXSTAT_LAST|RXSTAT_FIRST)) != 2497 (RXSTAT_LAST|RXSTAT_FIRST)) { 2498 DBG(DRECV, "rx packet overspill"); 2499 if (status & RXSTAT_FIRST) { 2500 afep->afe_toolong_errors++; 2501 } 2502 } else if (status & RXSTAT_DESCERR) { 2503 afep->afe_macrcv_errors++; 2504 2505 } else if (status & RXSTAT_RUNT) { 2506 afep->afe_runt++; 2507 2508 } else if (status & RXSTAT_COLLSEEN) { 2509 /* this should really be rx_late_collisions */ 2510 afep->afe_macrcv_errors++; 2511 2512 } else if (status & RXSTAT_DRIBBLE) { 2513 afep->afe_align_errors++; 2514 2515 } else if (status & RXSTAT_CRCERR) { 2516 afep->afe_fcs_errors++; 2517 2518 } else if (status & RXSTAT_OFLOW) { 2519 afep->afe_overflow++; 2520 } 2521 } 2522 2523 else if (len > ETHERVLANMTU) { 2524 afep->afe_errrcv++; 2525 afep->afe_toolong_errors++; 2526 } 2527 2528 /* 2529 * At this point, the chip thinks the packet is OK. 2530 */ 2531 else { 2532 mp = allocb(len + AFE_HEADROOM, 0); 2533 if (mp == NULL) { 2534 afep->afe_errrcv++; 2535 afep->afe_norcvbuf++; 2536 goto skip; 2537 } 2538 2539 /* sync the buffer before we look at it */ 2540 SYNCRXBUF(rxb, len, DDI_DMA_SYNC_FORKERNEL); 2541 mp->b_rptr += AFE_HEADROOM; 2542 mp->b_wptr = mp->b_rptr + len; 2543 bcopy((char *)rxb->rxb_buf, mp->b_rptr, len); 2544 2545 afep->afe_ipackets++; 2546 afep->afe_rbytes += len; 2547 if (status & RXSTAT_GROUP) { 2548 if (bcmp(mp->b_rptr, afe_broadcast, 2549 ETHERADDRL) == 0) 2550 afep->afe_brdcstrcv++; 2551 else 2552 afep->afe_multircv++; 2553 } 2554 *mpp = mp; 2555 mpp = &mp->b_next; 2556 } 2557 2558 skip: 2559 /* return ring entry to the hardware */ 2560 PUTRXDESC(afep, rmd->desc_status, RXSTAT_OWN); 2561 SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORDEV); 2562 2563 /* advance to next RMD */ 2564 head = (head + 1) % AFE_RXRING; 2565 } 2566 2567 afep->afe_rxhead = head; 2568 2569 return (mpchain); 2570 } 2571 2572 int 2573 afe_getmiibit(afe_t *afep, uint16_t reg, uint16_t bit) 2574 { 2575 unsigned val; 2576 2577 mutex_enter(&afep->afe_xmtlock); 2578 if (afep->afe_flags & AFE_SUSPENDED) { 2579 mutex_exit(&afep->afe_xmtlock); 2580 /* device is suspended */ 2581 return (0); 2582 } 2583 val = afe_miiread(afep, afep->afe_phyaddr, reg); 2584 mutex_exit(&afep->afe_xmtlock); 2585 2586 return (val & bit ? 
1 : 0);
}

#define	GETMIIBIT(reg, bit)	afe_getmiibit(afep, reg, bit)

int
afe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	afe_t	*afep = arg;

	mutex_enter(&afep->afe_xmtlock);
	if ((afep->afe_flags & (AFE_RUNNING|AFE_SUSPENDED)) == AFE_RUNNING)
		afe_reclaim(afep);
	mutex_exit(&afep->afe_xmtlock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = afep->afe_ifspeed;
		break;

	case MAC_STAT_MULTIRCV:
		*val = afep->afe_multircv;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = afep->afe_brdcstrcv;
		break;

	case MAC_STAT_MULTIXMT:
		*val = afep->afe_multixmt;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = afep->afe_brdcstxmt;
		break;

	case MAC_STAT_IPACKETS:
		*val = afep->afe_ipackets;
		break;

	case MAC_STAT_RBYTES:
		*val = afep->afe_rbytes;
		break;

	case MAC_STAT_OPACKETS:
		*val = afep->afe_opackets;
		break;

	case MAC_STAT_OBYTES:
		*val = afep->afe_obytes;
		break;

	case MAC_STAT_NORCVBUF:
		*val = afep->afe_norcvbuf;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_COLLISIONS:
		*val = afep->afe_collisions;
		break;

	case MAC_STAT_IERRORS:
		*val = afep->afe_errrcv;
		break;

	case MAC_STAT_OERRORS:
		*val = afep->afe_errxmt;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = afep->afe_duplex;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = afep->afe_align_errors;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = afep->afe_fcs_errors;
		break;

	case ETHER_STAT_SQE_ERRORS:
		*val = afep->afe_sqe_errors;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = afep->afe_defer_xmts;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = afep->afe_first_collisions;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = afep->afe_multi_collisions;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = afep->afe_tx_late_collisions;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = afep->afe_ex_collisions;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*val = afep->afe_macxmt_errors;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = afep->afe_carrier_errors;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = afep->afe_toolong_errors;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		*val = afep->afe_macrcv_errors;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = afep->afe_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = afep->afe_underflow;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = afep->afe_runt;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = afep->afe_jabber;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = afep->afe_cap_100T4;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = afep->afe_cap_100fdx;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = afep->afe_cap_100hdx;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = afep->afe_cap_10fdx;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = afep->afe_cap_10hdx;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = afep->afe_cap_aneg;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = ((afep->afe_adv_aneg != 0) &&
		    (GETMIIBIT(MII_AN_EXPANSION,
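		    /*
		     * Link autonegotiation is reported as active only
		     * when this driver advertises it and the partner's
		     * "can autonegotiate" bit is set in the MII
		     * expansion register.
		     */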
MII_AN_EXP_LPCANAN) != 0)); 2752 break; 2753 2754 case ETHER_STAT_ADV_CAP_100T4: 2755 *val = afep->afe_adv_100T4; 2756 break; 2757 2758 case ETHER_STAT_ADV_CAP_100FDX: 2759 *val = afep->afe_adv_100fdx; 2760 break; 2761 2762 case ETHER_STAT_ADV_CAP_100HDX: 2763 *val = afep->afe_adv_100hdx; 2764 break; 2765 2766 case ETHER_STAT_ADV_CAP_10FDX: 2767 *val = afep->afe_adv_10fdx; 2768 break; 2769 2770 case ETHER_STAT_ADV_CAP_10HDX: 2771 *val = afep->afe_adv_10hdx; 2772 break; 2773 2774 case ETHER_STAT_ADV_CAP_AUTONEG: 2775 *val = afep->afe_adv_aneg; 2776 break; 2777 2778 case ETHER_STAT_LP_CAP_100T4: 2779 *val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_100BASE_T4); 2780 break; 2781 2782 case ETHER_STAT_LP_CAP_100FDX: 2783 *val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_100BASE_TX_FD); 2784 break; 2785 2786 case ETHER_STAT_LP_CAP_100HDX: 2787 *val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_100BASE_TX); 2788 break; 2789 2790 case ETHER_STAT_LP_CAP_10FDX: 2791 *val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_10BASE_T_FD); 2792 break; 2793 2794 case ETHER_STAT_LP_CAP_10HDX: 2795 *val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_10BASE_T); 2796 break; 2797 2798 case ETHER_STAT_LP_CAP_AUTONEG: 2799 *val = GETMIIBIT(MII_AN_EXPANSION, MII_AN_EXP_LPCANAN); 2800 break; 2801 2802 case ETHER_STAT_XCVR_ADDR: 2803 *val = afep->afe_phyaddr; 2804 break; 2805 2806 case ETHER_STAT_XCVR_ID: 2807 *val = afep->afe_phyid; 2808 break; 2809 2810 default: 2811 return (ENOTSUP); 2812 } 2813 return (0); 2814 } 2815 2816 /*ARGSUSED*/ 2817 int 2818 afe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags, 2819 uint_t sz, void *val) 2820 { 2821 afe_t *afep = arg; 2822 int err = 0; 2823 boolean_t dfl = flags & MAC_PROP_DEFAULT; 2824 2825 if (sz == 0) 2826 return (EINVAL); 2827 2828 switch (num) { 2829 case MAC_PROP_DUPLEX: 2830 if (sz >= sizeof (link_duplex_t)) { 2831 bcopy(&afep->afe_duplex, val, sizeof (link_duplex_t)); 2832 } else { 2833 err = EINVAL; 2834 } 2835 break; 2836 2837 case MAC_PROP_SPEED: 2838 if (sz >= sizeof (uint64_t)) { 2839 bcopy(&afep->afe_ifspeed, val, sizeof (uint64_t)); 2840 } else { 2841 err = EINVAL; 2842 } 2843 break; 2844 2845 case MAC_PROP_AUTONEG: 2846 *(uint8_t *)val = 2847 dfl ? afep->afe_cap_aneg : afep->afe_adv_aneg; 2848 break; 2849 2850 #if 0 2851 case MAC_PROP_ADV_1000FDX_CAP: 2852 case MAC_PROP_EN_1000FDX_CAP: 2853 case MAC_PROP_ADV_1000HDX_CAP: 2854 case MAC_PROP_EN_1000HDX_CAP: 2855 /* We don't support gigabit! */ 2856 *(uint8_t *)val = 0; 2857 break; 2858 #endif 2859 2860 case MAC_PROP_ADV_100FDX_CAP: 2861 case MAC_PROP_EN_100FDX_CAP: 2862 *(uint8_t *)val = 2863 dfl ? afep->afe_cap_100fdx : afep->afe_adv_100fdx; 2864 break; 2865 2866 case MAC_PROP_ADV_100HDX_CAP: 2867 case MAC_PROP_EN_100HDX_CAP: 2868 *(uint8_t *)val = 2869 dfl ? afep->afe_cap_100hdx : afep->afe_adv_100hdx; 2870 break; 2871 2872 case MAC_PROP_ADV_10FDX_CAP: 2873 case MAC_PROP_EN_10FDX_CAP: 2874 *(uint8_t *)val = 2875 dfl ? afep->afe_cap_10fdx : afep->afe_adv_10fdx; 2876 break; 2877 2878 case MAC_PROP_ADV_10HDX_CAP: 2879 case MAC_PROP_EN_10HDX_CAP: 2880 *(uint8_t *)val = 2881 dfl ? afep->afe_cap_10hdx : afep->afe_adv_10hdx; 2882 break; 2883 2884 case MAC_PROP_ADV_100T4_CAP: 2885 case MAC_PROP_EN_100T4_CAP: 2886 *(uint8_t *)val = 2887 dfl ? 
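		    /*
		     * With MAC_PROP_DEFAULT the hardware capability is
		     * reported; otherwise the value currently being
		     * advertised by the driver is returned.
		     */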
afep->afe_cap_100T4 : afep->afe_adv_100T4; 2888 break; 2889 2890 default: 2891 err = ENOTSUP; 2892 } 2893 2894 return (err); 2895 } 2896 2897 /*ARGSUSED*/ 2898 int 2899 afe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz, 2900 const void *val) 2901 { 2902 afe_t *afep = arg; 2903 uint8_t *advp; 2904 uint8_t *capp; 2905 2906 switch (num) { 2907 case MAC_PROP_EN_100FDX_CAP: 2908 advp = &afep->afe_adv_100fdx; 2909 capp = &afep->afe_cap_100fdx; 2910 break; 2911 2912 case MAC_PROP_EN_100HDX_CAP: 2913 advp = &afep->afe_adv_100hdx; 2914 capp = &afep->afe_cap_100hdx; 2915 break; 2916 2917 case MAC_PROP_EN_10FDX_CAP: 2918 advp = &afep->afe_adv_10fdx; 2919 capp = &afep->afe_cap_10fdx; 2920 break; 2921 2922 case MAC_PROP_EN_10HDX_CAP: 2923 advp = &afep->afe_adv_10hdx; 2924 capp = &afep->afe_cap_10hdx; 2925 break; 2926 2927 case MAC_PROP_EN_100T4_CAP: 2928 advp = &afep->afe_adv_100T4; 2929 capp = &afep->afe_cap_100T4; 2930 break; 2931 2932 case MAC_PROP_AUTONEG: 2933 advp = &afep->afe_adv_aneg; 2934 capp = &afep->afe_cap_aneg; 2935 break; 2936 2937 default: 2938 return (ENOTSUP); 2939 } 2940 2941 if (*capp == 0) /* ensure phy can support value */ 2942 return (ENOTSUP); 2943 2944 mutex_enter(&afep->afe_intrlock); 2945 mutex_enter(&afep->afe_xmtlock); 2946 2947 if (*advp != *(const uint8_t *)val) { 2948 *advp = *(const uint8_t *)val; 2949 2950 if ((afep->afe_flags & (AFE_RUNNING|AFE_SUSPENDED)) == 2951 AFE_RUNNING) { 2952 /* 2953 * This re-initializes the phy, but it also 2954 * restarts transmit and receive rings. 2955 * Needless to say, changing the link 2956 * parameters is destructive to traffic in 2957 * progress. 2958 */ 2959 afe_resetall(afep); 2960 } 2961 } 2962 mutex_exit(&afep->afe_xmtlock); 2963 mutex_exit(&afep->afe_intrlock); 2964 2965 return (0); 2966 } 2967 2968 /* 2969 * Debugging and error reporting. 2970 */ 2971 void 2972 afe_error(dev_info_t *dip, char *fmt, ...) 2973 { 2974 va_list ap; 2975 char buf[256]; 2976 2977 va_start(ap, fmt); 2978 (void) vsnprintf(buf, sizeof (buf), fmt, ap); 2979 va_end(ap); 2980 2981 if (dip) { 2982 cmn_err(CE_WARN, "%s%d: %s", 2983 ddi_driver_name(dip), ddi_get_instance(dip), buf); 2984 } else { 2985 cmn_err(CE_WARN, "afe: %s", buf); 2986 } 2987 } 2988 2989 #ifdef DEBUG 2990 2991 void 2992 afe_dprintf(afe_t *afep, const char *func, int level, char *fmt, ...) 2993 { 2994 va_list ap; 2995 2996 va_start(ap, fmt); 2997 if (afe_debug & level) { 2998 char tag[64]; 2999 char buf[256]; 3000 3001 if (afep && afep->afe_dip) { 3002 (void) snprintf(tag, sizeof (tag), "%s%d", 3003 ddi_driver_name(afep->afe_dip), 3004 ddi_get_instance(afep->afe_dip)); 3005 } else { 3006 (void) snprintf(tag, sizeof (tag), "afe"); 3007 } 3008 3009 (void) snprintf(buf, sizeof (buf), "%s: %s: %s\n", 3010 tag, func, fmt); 3011 3012 vcmn_err(CE_CONT, buf, ap); 3013 } 3014 va_end(ap); 3015 } 3016 3017 #endif 3018
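/*
 * Illustrative appendix (not part of the driver): a minimal user-space
 * sketch, kept under #if 0, of the transmit ring accounting used by
 * afe_send() and afe_reclaim() above.  Slot indices advance modulo the
 * ring size; the producer decrements the free count and the consumer
 * increments it, so the ring is full when the count reaches zero and
 * idle when it equals the ring size.  All names here are hypothetical.
 */
#if 0
#include <stdio.h>

#define	EXAMPLE_TXRING	128		/* stands in for AFE_TXRING */

struct ring {
	int	txsend;			/* next slot the producer fills */
	int	txreclaim;		/* next slot the consumer checks */
	int	txavail;		/* free slots remaining */
};

static int
ring_send(struct ring *r)
{
	if (r->txavail == 0)
		return (-1);		/* ring full; wait for reclaim */
	r->txsend = (r->txsend + 1) % EXAMPLE_TXRING;
	r->txavail--;
	return (0);
}

static void
ring_reclaim(struct ring *r, int completed)
{
	while (completed-- > 0 && r->txavail != EXAMPLE_TXRING) {
		r->txreclaim = (r->txreclaim + 1) % EXAMPLE_TXRING;
		r->txavail++;
	}
}

int
main(void)
{
	struct ring r = { 0, 0, EXAMPLE_TXRING };

	(void) ring_send(&r);
	(void) ring_send(&r);
	ring_reclaim(&r, 1);
	(void) printf("avail %d, send %d, reclaim %d\n",
	    r.txavail, r.txsend, r.txreclaim);
	return (0);
}
#endif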