/*
 * Solaris driver for ethernet cards based on the ADMtek Centaur
 *
 * Copyright (c) 2007 by Garrett D'Amore <garrett@damore.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/varargs.h>
#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/cmn_err.h>
#include <sys/ethernet.h>
#include <sys/kmem.h>
#include <sys/time.h>
#include <sys/crc32.h>
#include <sys/miiregs.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/vlan.h>

#include "afe.h"
#include "afeimpl.h"

/*
 * Driver globals.
 */

/* patchable debug flag ... must not be static! */
#ifdef	DEBUG
unsigned	afe_debug = DWARN;
#endif

/* table of supported devices */
static afe_card_t afe_cards[] = {

	/*
	 * ADMtek Centaur and Comet
	 */
	{ 0x1317, 0x0981, "ADMtek AL981", MODEL_COMET },
	{ 0x1317, 0x0985, "ADMtek AN983", MODEL_CENTAUR },
	{ 0x1317, 0x1985, "ADMtek AN985", MODEL_CENTAUR },
	{ 0x1317, 0x9511, "ADMtek ADM9511", MODEL_CENTAUR },
	{ 0x1317, 0x9513, "ADMtek ADM9513", MODEL_CENTAUR },
	/*
	 * Accton just relabels other companies' controllers
	 */
	{ 0x1113, 0x1216, "Accton EN5251", MODEL_CENTAUR },
	/*
	 * Boards from other vendors, all built around the Centaur.
	 */
	{ 0x10b7, 0x9300, "3Com 3CSOHO100B-TX", MODEL_CENTAUR },
	{ 0x1113, 0xec02, "SMC SMC1244TX", MODEL_CENTAUR },
	{ 0x10b8, 0x1255, "SMC SMC1255TX", MODEL_CENTAUR },
	{ 0x111a, 0x1020, "Siemens SpeedStream PCI 10/100", MODEL_CENTAUR },
	{ 0x1113, 0x1207, "Accton EN1207F", MODEL_CENTAUR },
	{ 0x1113, 0x2242, "Accton EN2242", MODEL_CENTAUR },
	{ 0x1113, 0x2220, "Accton EN2220", MODEL_CENTAUR },
	{ 0x1113, 0x9216, "3M VOL-N100VF+TX", MODEL_CENTAUR },
	{ 0x1317, 0x0574, "Linksys LNE100TX", MODEL_CENTAUR },
	{ 0x1317, 0x0570, "Linksys NC100", MODEL_CENTAUR },
	{ 0x1385, 0x511a, "Netgear FA511", MODEL_CENTAUR },
	{ 0x13d1, 0xab02, "AboCom FE2500", MODEL_CENTAUR },
	{ 0x13d1, 0xab03, "AboCom PCM200", MODEL_CENTAUR },
	{ 0x13d1, 0xab08, "AboCom FE2500MX", MODEL_CENTAUR },
	{ 0x1414, 0x0001, "Microsoft MN-120", MODEL_CENTAUR },
	{ 0x16ec, 0x00ed, "U.S. Robotics USR997900", MODEL_CENTAUR },
	{ 0x1734, 0x100c, "Fujitsu-Siemens D1961", MODEL_CENTAUR },
	{ 0x1737, 0xab08, "Linksys PCMPC200", MODEL_CENTAUR },
	{ 0x1737, 0xab09, "Linksys PCM200", MODEL_CENTAUR },
	{ 0x17b3, 0xab08, "Hawking PN672TX", MODEL_CENTAUR },
};

#define	ETHERVLANMTU	(ETHERMAX + 4)

/*
 * Function prototypes
 */
static int	afe_attach(dev_info_t *, ddi_attach_cmd_t);
static int	afe_detach(dev_info_t *, ddi_detach_cmd_t);
static int	afe_resume(dev_info_t *);
static int	afe_m_unicst(void *, const uint8_t *);
static int	afe_m_multicst(void *, boolean_t, const uint8_t *);
static int	afe_m_promisc(void *, boolean_t);
static mblk_t	*afe_m_tx(void *, mblk_t *);
static int	afe_m_stat(void *, uint_t, uint64_t *);
static int	afe_m_start(void *);
static void	afe_m_stop(void *);
static int	afe_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
    uint_t, void *);
static int	afe_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static unsigned	afe_intr(caddr_t);
static void	afe_startmac(afe_t *);
static void	afe_stopmac(afe_t *);
static void	afe_resetrings(afe_t *);
static boolean_t	afe_initialize(afe_t *);
static void	afe_startall(afe_t *);
static void	afe_stopall(afe_t *);
static void	afe_resetall(afe_t *);
static afe_txbuf_t	*afe_alloctxbuf(afe_t *);
static void	afe_destroytxbuf(afe_txbuf_t *);
static afe_rxbuf_t	*afe_allocrxbuf(afe_t *);
static void	afe_destroyrxbuf(afe_rxbuf_t *);
static boolean_t	afe_send(afe_t *, mblk_t *);
static int	afe_allocrxring(afe_t *);
static void	afe_freerxring(afe_t *);
static int	afe_alloctxring(afe_t *);
static void	afe_freetxring(afe_t *);
static void	afe_error(dev_info_t *, char *, ...);
static void	afe_setrxfilt(afe_t *);
static uint8_t	afe_sromwidth(afe_t *);
static uint16_t	afe_readsromword(afe_t *, unsigned);
static void	afe_readsrom(afe_t *, unsigned, unsigned, char *);
static void	afe_getfactaddr(afe_t *, uchar_t *);
static uint8_t	afe_miireadbit(afe_t *);
static void	afe_miiwritebit(afe_t *, uint8_t);
static void	afe_miitristate(afe_t *);
static uint16_t	afe_miiread(afe_t *, int, int);
static void	afe_miiwrite(afe_t *, int, int, uint16_t);
static uint16_t	afe_miireadgeneral(afe_t *, int, int);
static void	afe_miiwritegeneral(afe_t *, int, int, uint16_t);
static uint16_t	afe_miireadcomet(afe_t *, int, int);
static void	afe_miiwritecomet(afe_t *, int, int, uint16_t);
static int	afe_getmiibit(afe_t *, uint16_t, uint16_t);
static void	afe_startphy(afe_t *);
static void	afe_stopphy(afe_t *);
static void	afe_reportlink(afe_t *);
static void	afe_checklink(afe_t *);
static void	afe_checklinkcomet(afe_t *);
static void	afe_checklinkcentaur(afe_t *);
static void	afe_checklinkmii(afe_t *);
static void	afe_disableinterrupts(afe_t *);
static void	afe_enableinterrupts(afe_t *);
static void	afe_reclaim(afe_t *);
static mblk_t	*afe_receive(afe_t *);

#ifdef	DEBUG
static void	afe_dprintf(afe_t *, const char *, int, char *, ...);
#endif

#define	KIOIP	KSTAT_INTR_PTR(afep->afe_intrstat)

static mac_callbacks_t afe_m_callbacks = {
	MC_SETPROP | MC_GETPROP,
	afe_m_stat,
	afe_m_start,
	afe_m_stop,
	afe_m_promisc,
	afe_m_multicst,
	afe_m_unicst,
	afe_m_tx,
	NULL,		/* mc_resources */
	NULL,		/* mc_ioctl */
	NULL,		/* mc_getcapab */
	NULL,		/* mc_open */
	NULL,		/* mc_close */
	afe_m_setprop,
	afe_m_getprop,
};


/*
 * Stream information
 */
DDI_DEFINE_STREAM_OPS(afe_devops, nulldev, nulldev, afe_attach, afe_detach,
    nodev, NULL, D_MP, NULL);

/*
 * Module linkage information.
 */

static struct modldrv afe_modldrv = {
	&mod_driverops,			/* drv_modops */
	"ADMtek Fast Ethernet",		/* drv_linkinfo */
	&afe_devops			/* drv_dev_ops */
};

static struct modlinkage afe_modlinkage = {
	MODREV_1,		/* ml_rev */
	{ &afe_modldrv, NULL }	/* ml_linkage */
};

/*
 * Device attributes.
 */
static ddi_device_acc_attr_t afe_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static ddi_device_acc_attr_t afe_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static ddi_dma_attr_t afe_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xFFFFFFFFU,		/* dma_attr_addr_hi */
	0x7FFFFFFFU,		/* dma_attr_count_max */
	4,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xFFFFFFFFU,		/* dma_attr_maxxfer */
	0xFFFFFFFFU,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * Tx buffers can be arbitrarily aligned.  Additionally, they can
 * cross a page boundary, so we use the two buffer addresses of the
 * chip to provide a two-entry scatter-gather list.
 */
static ddi_dma_attr_t afe_dma_txattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xFFFFFFFFU,		/* dma_attr_addr_hi */
	0x7FFFFFFFU,		/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0x3F,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xFFFFFFFFU,		/* dma_attr_maxxfer */
	0xFFFFFFFFU,		/* dma_attr_seg */
	2,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * Ethernet addresses.
 */
static uchar_t afe_broadcast[ETHERADDRL] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * DDI entry points.
 */
int
_init(void)
{
	int	rv;
	mac_init_ops(&afe_devops, "afe");
	if ((rv = mod_install(&afe_modlinkage)) != DDI_SUCCESS) {
		mac_fini_ops(&afe_devops);
	}
	return (rv);
}

int
_fini(void)
{
	int	rv;
	if ((rv = mod_remove(&afe_modlinkage)) == DDI_SUCCESS) {
		mac_fini_ops(&afe_devops);
	}
	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&afe_modlinkage, modinfop));
}

int
afe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	afe_t			*afep;
	mac_register_t		*macp;
	int			inst = ddi_get_instance(dip);
	ddi_acc_handle_t	pci;
	uint16_t		venid;
	uint16_t		devid;
	uint16_t		svid;
	uint16_t		ssid;
	uint16_t		cachesize;
	afe_card_t		*cardp;
	int			i;

	switch (cmd) {
	case DDI_RESUME:
		return (afe_resume(dip));

	case DDI_ATTACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	/* this card is a bus master, reject any slave-only slot */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		afe_error(dip, "slot does not support PCI bus-master");
		return (DDI_FAILURE);
	}
	/* PCI devices shouldn't generate hilevel interrupts */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		afe_error(dip, "hilevel interrupts not supported");
		return (DDI_FAILURE);
	}
	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		afe_error(dip, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	venid = pci_config_get16(pci, PCI_VID);
	devid = pci_config_get16(pci, PCI_DID);
	svid = pci_config_get16(pci, PCI_SVID);
	ssid = pci_config_get16(pci, PCI_SSID);

	/*
	 * Note: ADMtek boards seem to misprogram themselves with bogus
	 * timings, which do not seem to work properly on SPARC.  We
	 * reprogram them to zero (but only if they appear to be broken),
	 * which seems to at least work.  It's unclear whether this is a
	 * legal or wise practice, but it certainly works better than
	 * the original values.  (I would love to hear suggestions for
	 * better values, or a better strategy.)
	 */
	if ((pci_config_get8(pci, PCI_MINGNT) == 0xff) &&
	    (pci_config_get8(pci, PCI_MAXLAT) == 0xff)) {
		pci_config_put8(pci, PCI_MINGNT, 0);
		pci_config_put8(pci, PCI_MAXLAT, 0);
	}

	/*
	 * Identify the card.  A subsystem id match is preferred (and
	 * terminates the search), since several vendors relabel the
	 * same ADMtek silicon under their own ids; otherwise we fall
	 * back to a match on the primary vendor/device id pair.
	 */
	cardp = NULL;
	for (i = 0; i < (sizeof (afe_cards) / sizeof (afe_card_t)); i++) {
		if ((venid == afe_cards[i].card_venid) &&
		    (devid == afe_cards[i].card_devid)) {
			cardp = &afe_cards[i];
		}
		if ((svid == afe_cards[i].card_venid) &&
		    (ssid == afe_cards[i].card_devid)) {
			cardp = &afe_cards[i];
			break;
		}
	}

	if (cardp == NULL) {
		pci_config_teardown(&pci);
		afe_error(dip, "Unable to identify PCI card");
		return (DDI_FAILURE);
	}

	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
	    cardp->card_cardname) != DDI_PROP_SUCCESS) {
		pci_config_teardown(&pci);
		afe_error(dip, "Unable to create model property");
		return (DDI_FAILURE);
	}

	/*
	 * Grab the PCI cachesize -- we use this to program the
	 * cache-optimization bus access bits.
	 */
	cachesize = pci_config_get8(pci, PCI_CLS);

	/* this cannot fail */
	afep = kmem_zalloc(sizeof (afe_t), KM_SLEEP);
	ddi_set_driver_private(dip, afep);

	/* get the interrupt block cookie */
	if (ddi_get_iblock_cookie(dip, 0, &afep->afe_icookie) != DDI_SUCCESS) {
		afe_error(dip, "ddi_get_iblock_cookie failed");
		pci_config_teardown(&pci);
		kmem_free(afep, sizeof (afe_t));
		return (DDI_FAILURE);
	}

	afep->afe_dip = dip;
	afep->afe_cardp = cardp;
	afep->afe_phyaddr = -1;
	afep->afe_cachesize = cachesize;

	/* default properties */
	afep->afe_adv_aneg = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "adv_autoneg_cap", 1);
	afep->afe_adv_100T4 = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "adv_100T4_cap", 1);
	afep->afe_adv_100fdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "adv_100fdx_cap", 1);
	afep->afe_adv_100hdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "adv_100hdx_cap", 1);
	afep->afe_adv_10fdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "adv_10fdx_cap", 1);
	afep->afe_adv_10hdx = !!ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "adv_10hdx_cap", 1);

	afep->afe_forcefiber = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    "fiber", 0);

	DBG(DPCI, "PCI vendor id = %x", venid);
	DBG(DPCI, "PCI device id = %x", devid);
	DBG(DPCI, "PCI cachesize = %d", cachesize);
	DBG(DPCI, "PCI COMM = %x", pci_config_get8(pci, PCI_CMD));
	DBG(DPCI, "PCI STAT = %x", pci_config_get8(pci, PCI_STAT));

	mutex_init(&afep->afe_xmtlock, NULL, MUTEX_DRIVER, afep->afe_icookie);
	mutex_init(&afep->afe_intrlock, NULL, MUTEX_DRIVER, afep->afe_icookie);

	/*
	 * Enable bus master and memory space accesses.
	 */
	pci_config_put16(pci, PCI_CMD,
	    pci_config_get16(pci, PCI_CMD) | PCI_CMD_BME | PCI_CMD_MAE);

	/* we're done with this now, drop it */
	pci_config_teardown(&pci);

	/*
	 * Initialize interrupt kstat.  This should not normally fail, since
	 * we don't use a persistent stat.  We do it this way to avoid having
	 * to test for it at run time on the hot path.
	 */
	afep->afe_intrstat = kstat_create("afe", inst, "intr", "controller",
	    KSTAT_TYPE_INTR, 1, 0);
	if (afep->afe_intrstat == NULL) {
		afe_error(dip, "kstat_create failed");
		goto failed;
	}
	kstat_install(afep->afe_intrstat);

	/*
	 * Map in the device registers.
	 */
	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&afep->afe_regs,
	    0, 0, &afe_devattr, &afep->afe_regshandle)) {
		afe_error(dip, "ddi_regs_map_setup failed");
		goto failed;
	}

	/*
	 * Allocate DMA resources (descriptor rings and buffers).
	 */
	if ((afe_allocrxring(afep) != DDI_SUCCESS) ||
	    (afe_alloctxring(afep) != DDI_SUCCESS)) {
		afe_error(dip, "unable to allocate DMA resources");
		goto failed;
	}

	/* Initialize the chip. */
	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);
	if (!afe_initialize(afep)) {
		mutex_exit(&afep->afe_xmtlock);
		mutex_exit(&afep->afe_intrlock);
		goto failed;
	}
	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);

	/* Determine the number of address bits to our EEPROM. */
	afep->afe_sromwidth = afe_sromwidth(afep);

	/*
	 * Get the factory ethernet address.  This becomes the current
	 * ethernet address (it can be overridden later via ifconfig).
	 */
	afe_getfactaddr(afep, afep->afe_curraddr);
	afep->afe_promisc = B_FALSE;

	/* make sure we configure the initial filter */
	(void) afe_m_unicst(afep, afep->afe_curraddr);
	(void) afe_m_multicst(afep, B_TRUE, afe_broadcast);

	/*
	 * Establish interrupt handler.
	 */
	if (ddi_add_intr(dip, 0, NULL, NULL, afe_intr, (caddr_t)afep) !=
	    DDI_SUCCESS) {
		afe_error(dip, "unable to add interrupt");
		goto failed;
	}

	/* TODO: do the power management stuff */

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		afe_error(dip, "mac_alloc failed");
		goto failed;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = afep;
	macp->m_dip = dip;
	macp->m_src_addr = afep->afe_curraddr;
	macp->m_callbacks = &afe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &afep->afe_mh) == DDI_SUCCESS) {
		mac_free(macp);
		return (DDI_SUCCESS);
	}

	/* failed to register with MAC */
	mac_free(macp);
failed:
	if (afep->afe_icookie != NULL) {
		ddi_remove_intr(dip, 0, afep->afe_icookie);
	}
	if (afep->afe_intrstat) {
		kstat_delete(afep->afe_intrstat);
	}
	mutex_destroy(&afep->afe_intrlock);
	mutex_destroy(&afep->afe_xmtlock);

	afe_freerxring(afep);
	afe_freetxring(afep);

	if (afep->afe_regshandle != NULL) {
		ddi_regs_map_free(&afep->afe_regshandle);
	}
	kmem_free(afep, sizeof (afe_t));
	return (DDI_FAILURE);
}

int
afe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	afe_t	*afep;

	afep = ddi_get_driver_private(dip);
	if (afep == NULL) {
		afe_error(dip, "no soft state in detach!");
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:

		if (mac_unregister(afep->afe_mh) != 0) {
			return (DDI_FAILURE);
		}

		/* make sure hardware is quiesced */
		mutex_enter(&afep->afe_intrlock);
		mutex_enter(&afep->afe_xmtlock);
		afep->afe_flags &= ~AFE_RUNNING;
		afe_stopall(afep);
		mutex_exit(&afep->afe_xmtlock);
		mutex_exit(&afep->afe_intrlock);

		/* clean up and shut down device */
		ddi_remove_intr(dip, 0, afep->afe_icookie);

		/* clean up kstats */
		kstat_delete(afep->afe_intrstat);

		ddi_prop_remove_all(dip);

		/* free up any left over buffers or DMA resources */
		afe_freerxring(afep);
		afe_freetxring(afep);

		ddi_regs_map_free(&afep->afe_regshandle);
		mutex_destroy(&afep->afe_intrlock);
		mutex_destroy(&afep->afe_xmtlock);

		kmem_free(afep, sizeof (afe_t));
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/* quiesce the hardware */
		mutex_enter(&afep->afe_intrlock);
		mutex_enter(&afep->afe_xmtlock);
		afep->afe_flags |= AFE_SUSPENDED;
		afe_stopall(afep);
		mutex_exit(&afep->afe_xmtlock);
		mutex_exit(&afep->afe_intrlock);
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}

int
afe_resume(dev_info_t *dip)
{
	afe_t	*afep;

	if ((afep = ddi_get_driver_private(dip)) == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);

	afep->afe_flags &= ~AFE_SUSPENDED;

	/* re-initialize chip */
	if (!afe_initialize(afep)) {
		afe_error(afep->afe_dip, "unable to resume chip!");
		afep->afe_flags |= AFE_SUSPENDED;
		mutex_exit(&afep->afe_intrlock);
		mutex_exit(&afep->afe_xmtlock);
		return (DDI_SUCCESS);
	}

	/* start the chip */
	if (afep->afe_flags & AFE_RUNNING) {
		afe_startall(afep);
	}

	/* drop locks */
	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);

	return (DDI_SUCCESS);
}

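/*
 * Receive filter programming.  The station address goes into the
 * PAR0/PAR1 "perfect filter" registers, and multicast addresses are
 * tracked in a 64-bit hash (afe_mctab) that is written to MAR0/MAR1
 * on the Centaur.  The Comet is handled differently: if any multicast
 * address is enabled we simply accept all multicast frames via
 * NAR_RX_MULTI rather than programming hash registers.  The receiver
 * is stopped while the filter is rewritten, and restarted afterwards
 * only if it was running to begin with.
 */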
void
afe_setrxfilt(afe_t *afep)
{
	unsigned rxen, pa0, pa1;

	if (afep->afe_flags & AFE_SUSPENDED) {
		/* don't touch a suspended interface */
		return;
	}

	rxen = GETCSR(afep, CSR_NAR) & NAR_RX_ENABLE;

	/* stop receiver */
	if (rxen) {
		afe_stopmac(afep);
	}

	/* program promiscuous mode */
	if (afep->afe_promisc)
		SETBIT(afep, CSR_NAR, NAR_RX_PROMISC);
	else
		CLRBIT(afep, CSR_NAR, NAR_RX_PROMISC);

	/* program mac address */
	pa0 = (afep->afe_curraddr[3] << 24) | (afep->afe_curraddr[2] << 16) |
	    (afep->afe_curraddr[1] << 8) | afep->afe_curraddr[0];
	pa1 = (afep->afe_curraddr[5] << 8) | afep->afe_curraddr[4];

	DBG(DMACID, "programming PAR0 with %x", pa0);
	DBG(DMACID, "programming PAR1 with %x", pa1);
	PUTCSR(afep, CSR_PAR0, pa0);
	PUTCSR(afep, CSR_PAR1, pa1);
	if (rxen) {
		SETBIT(afep, CSR_NAR, rxen);
	}

	DBG(DMACID, "programming MAR0 = %x", afep->afe_mctab[0]);
	DBG(DMACID, "programming MAR1 = %x", afep->afe_mctab[1]);

	/* program multicast filter */
	if (AFE_MODEL(afep) == MODEL_COMET) {
		if (afep->afe_mctab[0] || afep->afe_mctab[1]) {
			SETBIT(afep, CSR_NAR, NAR_RX_MULTI);
		} else {
			CLRBIT(afep, CSR_NAR, NAR_RX_MULTI);
		}
	} else {
		CLRBIT(afep, CSR_NAR, NAR_RX_MULTI);
		PUTCSR(afep, CSR_MAR0, afep->afe_mctab[0]);
		PUTCSR(afep, CSR_MAR1, afep->afe_mctab[1]);
	}

	/* restart receiver */
	if (rxen) {
		afe_startmac(afep);
	}
}

int
afe_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
{
	afe_t		*afep = arg;
	int		index;
	uint32_t	crc;
	uint32_t	bit;
	uint32_t	newval, oldval;

	CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table);
	crc %= AFE_MCHASH;

	/* bit within a 32-bit word */
	index = crc / 32;
	bit = (1 << (crc % 32));

	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);
	newval = oldval = afep->afe_mctab[index];

	if (add) {
		afep->afe_mccount[crc]++;
		if (afep->afe_mccount[crc] == 1)
			newval |= bit;
	} else {
		afep->afe_mccount[crc]--;
		if (afep->afe_mccount[crc] == 0)
			newval &= ~bit;
	}
	if (newval != oldval) {
		afep->afe_mctab[index] = newval;
		afe_setrxfilt(afep);
	}

	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);

	return (0);
}

int
afe_m_promisc(void *arg, boolean_t on)
{
	afe_t	*afep = arg;

	/* exclusive access to the card while we reprogram it */
	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);
	/* save current promiscuous mode state for replay in resume */
	afep->afe_promisc = on;

	afe_setrxfilt(afep);
	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);

	return (0);
}

int
afe_m_unicst(void *arg, const uint8_t *macaddr)
{
	afe_t	*afep = arg;

	/* exclusive access to the card while we reprogram it */
	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);

	bcopy(macaddr, afep->afe_curraddr, ETHERADDRL);
	afe_setrxfilt(afep);

	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);

	return (0);
}

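/*
 * GLDv3 transmit entry point.  We queue as much of the chain as will
 * fit in the descriptor ring; whatever is left over is returned to the
 * framework, which holds it and retries after the driver indicates,
 * via mac_tx_update(), that transmit resources are available again.
 */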
mblk_t *
afe_m_tx(void *arg, mblk_t *mp)
{
	afe_t	*afep = arg;
	mblk_t	*nmp;

	mutex_enter(&afep->afe_xmtlock);

	if (afep->afe_flags & AFE_SUSPENDED) {
		while ((nmp = mp) != NULL) {
			afep->afe_carrier_errors++;
			mp = mp->b_next;
			freemsg(nmp);
		}
		mutex_exit(&afep->afe_xmtlock);
		return (NULL);
	}

	while (mp != NULL) {
		nmp = mp->b_next;
		mp->b_next = NULL;

		if (!afe_send(afep, mp)) {
			mp->b_next = nmp;
			break;
		}
		mp = nmp;
	}
	mutex_exit(&afep->afe_xmtlock);

	return (mp);
}

/*
 * Hardware management.
 */
static boolean_t
afe_initialize(afe_t *afep)
{
	int		i;
	unsigned	val;
	uint32_t	par, nar;

	ASSERT(mutex_owned(&afep->afe_intrlock));
	ASSERT(mutex_owned(&afep->afe_xmtlock));

	DBG(DCHATTY, "resetting!");
	SETBIT(afep, CSR_PAR, PAR_RESET);
	for (i = 1; i < 10; i++) {
		drv_usecwait(5);
		val = GETCSR(afep, CSR_PAR);
		if (!(val & PAR_RESET)) {
			break;
		}
	}
	if (i == 10) {
		afe_error(afep->afe_dip, "timed out waiting for reset!");
		return (B_FALSE);
	}

	/*
	 * Updated Centaur data sheets show that the Comet and Centaur are
	 * alike here (contrary to earlier versions of the data sheet).
	 */
	/* XXX:? chip problems */
	/* par = PAR_MRLE | PAR_MRME | PAR_MWIE; */
	par = 0;
	switch (afep->afe_cachesize) {
	case 8:
		par |= PAR_CALIGN_8 | PAR_BURST_8;
		break;
	case 16:
		par |= PAR_CALIGN_16 | PAR_BURST_16;
		break;
	case 32:
		par |= PAR_CALIGN_32 | PAR_BURST_32;
		break;
	default:
		par |= PAR_BURST_32;
		par &= ~(PAR_MWIE | PAR_MRLE | PAR_MRME);
		break;

	}

	PUTCSR(afep, CSR_PAR, par);

	/* enable transmit underrun auto-recovery */
	SETBIT(afep, CSR_CR, CR_TXURAUTOR);

	afe_resetrings(afep);

	/* clear the lost packet counter (cleared on read) */
	(void) GETCSR(afep, CSR_LPC);

	nar = GETCSR(afep, CSR_NAR);
	nar &= ~NAR_TR;		/* clear tx threshold */
	nar |= NAR_SF;		/* store-and-forward */
	nar |= NAR_HBD;		/* disable SQE test */
	PUTCSR(afep, CSR_NAR, nar);

	afe_setrxfilt(afep);

	return (B_TRUE);
}

/*
 * Serial EEPROM access - inspired by the FreeBSD implementation.
 */

uint8_t
afe_sromwidth(afe_t *afep)
{
	int		i;
	uint32_t	eeread;
	uint8_t		addrlen = 8;

	eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP;

	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
	drv_usecwait(1);
	PUTCSR(afep, CSR_SPR, eeread);

	/* command bits first */
	for (i = 4; i != 0; i >>= 1) {
		unsigned val = (SROM_READCMD & i) ? SPR_SROM_DIN : 0;

		PUTCSR(afep, CSR_SPR, eeread | val);
		drv_usecwait(1);
		PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK);
		drv_usecwait(1);
	}

	PUTCSR(afep, CSR_SPR, eeread);

	for (addrlen = 1; addrlen <= 12; addrlen++) {
		PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK);
		drv_usecwait(1);
		if (!(GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT)) {
			PUTCSR(afep, CSR_SPR, eeread);
			drv_usecwait(1);
			break;
		}
		PUTCSR(afep, CSR_SPR, eeread);
		drv_usecwait(1);
	}

	/* turn off accesses to the EEPROM */
	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);

	DBG(DSROM, "detected srom width = %d bits", addrlen);

	return ((addrlen < 4 || addrlen > 12) ? 6 : addrlen);
}

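/*
 * Roughly, a word is read from the SROM by bit-banging the SPR
 * register: select the chip, clock out the 3-bit read opcode followed
 * by the address bits (MSB first), then clock in 16 data bits.
 * Because different boards use EEPROMs with different address widths,
 * afe_sromwidth() above probes the width by issuing a read and
 * counting how many address bits it takes before the part drives
 * DOUT low; afe_readsromword() below uses the detected width.
 */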
/*
 * The words in EEPROM are stored in little endian order.  We
 * shift bits out in big endian order, though.  This requires
 * a byte swap on some platforms.
 */
uint16_t
afe_readsromword(afe_t *afep, unsigned romaddr)
{
	int		i;
	uint16_t	word = 0;
	uint16_t	retval;
	int		eeread;
	uint8_t		addrlen;
	int		readcmd;
	uchar_t		*ptr;

	eeread = SPR_SROM_READ | SPR_SROM_SEL | SPR_SROM_CHIP;
	addrlen = afep->afe_sromwidth;
	readcmd = (SROM_READCMD << addrlen) | romaddr;

	if (romaddr >= (1 << addrlen)) {
		/* too big to fit! */
		return (0);
	}

	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);
	PUTCSR(afep, CSR_SPR, eeread);

	/* command and address bits */
	for (i = 4 + addrlen; i >= 0; i--) {
		short val = (readcmd & (1 << i)) ? SPR_SROM_DIN : 0;

		PUTCSR(afep, CSR_SPR, eeread | val);
		drv_usecwait(1);
		PUTCSR(afep, CSR_SPR, eeread | val | SPR_SROM_CLOCK);
		drv_usecwait(1);
	}

	PUTCSR(afep, CSR_SPR, eeread);

	for (i = 0; i < 16; i++) {
		PUTCSR(afep, CSR_SPR, eeread | SPR_SROM_CLOCK);
		drv_usecwait(1);
		word <<= 1;
		if (GETCSR(afep, CSR_SPR) & SPR_SROM_DOUT) {
			word |= 1;
		}
		PUTCSR(afep, CSR_SPR, eeread);
		drv_usecwait(1);
	}

	/* turn off accesses to the EEPROM */
	PUTCSR(afep, CSR_SPR, eeread & ~SPR_SROM_CHIP);

	/*
	 * Fix up the endianness thing.  Note that the values
	 * are stored in little endian format on the SROM.
	 */
	ptr = (uchar_t *)&word;
	retval = (ptr[1] << 8) | ptr[0];
	return (retval);
}

void
afe_readsrom(afe_t *afep, unsigned romaddr, unsigned len, char *dest)
{
	int		i;
	uint16_t	word;
	uint16_t	*ptr = (uint16_t *)((void *)dest);
	for (i = 0; i < len; i++) {
		word = afe_readsromword(afep, romaddr + i);
		*ptr = word;
		ptr++;
	}
}

void
afe_getfactaddr(afe_t *afep, uchar_t *eaddr)
{
	afe_readsrom(afep, SROM_ENADDR, ETHERADDRL / 2, (char *)eaddr);

	DBG(DMACID,
	    "factory ethernet address = %02x:%02x:%02x:%02x:%02x:%02x",
	    eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);
}

/*
 * MII management.
 */
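/*
 * afe_startphy() resets and programs the PHY (or, on the Comet, the
 * built-in transceiver registers) according to the advertised
 * capabilities, and then arms the chip's periodic timer (CSR_TIMER).
 * The resulting INT_TIMER (and INT_LINKCHG) interrupts drive
 * afe_checklink(), which resolves and reports the link state.
 */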
void
afe_startphy(afe_t *afep)
{
	unsigned	phyaddr;
	unsigned	bmcr;
	unsigned	bmsr;
	unsigned	anar;
	unsigned	phyidr1;
	unsigned	phyidr2;
	unsigned	nosqe = 0;
	int		retries;
	int		fiber;
	int		cnt;

	/* ADMtek devices just use the PHY at address 1 */
	afep->afe_phyaddr = phyaddr = 1;

	phyidr1 = afe_miiread(afep, phyaddr, MII_PHYIDH);
	phyidr2 = afe_miiread(afep, phyaddr, MII_PHYIDL);
	if ((phyidr1 == 0x0022) &&
	    ((phyidr2 & 0xfff0) == 0x5410)) {
		nosqe = 1;
		/* only 983B has fiber support */
		afep->afe_flags |= AFE_HASFIBER;
	}
	afep->afe_phyid = (phyidr1 << 16) | phyidr2;

	DBG(DPHY, "phy at %d: %x,%x", phyaddr, phyidr1, phyidr2);
	DBG(DPHY, "bmsr = %x", afe_miiread(afep,
	    afep->afe_phyaddr, MII_STATUS));
	DBG(DPHY, "anar = %x", afe_miiread(afep,
	    afep->afe_phyaddr, MII_AN_ADVERT));
	DBG(DPHY, "anlpar = %x", afe_miiread(afep,
	    afep->afe_phyaddr, MII_AN_LPABLE));
	DBG(DPHY, "aner = %x", afe_miiread(afep,
	    afep->afe_phyaddr, MII_AN_EXPANSION));

	DBG(DPHY, "resetting phy");

	/* we reset the phy block */
	afe_miiwrite(afep, phyaddr, MII_CONTROL, MII_CONTROL_RESET);
	/*
	 * wait for it to complete -- 500usec is still too short to
	 * bother getting the system clock involved.
	 */
	drv_usecwait(500);
	for (retries = 0; retries < 10; retries++) {
		if (afe_miiread(afep, phyaddr, MII_CONTROL) &
		    MII_CONTROL_RESET) {
			drv_usecwait(500);
			continue;
		}
		break;
	}
	if (retries == 10) {
		afe_error(afep->afe_dip, "timeout waiting on phy to reset");
		return;
	}

	DBG(DPHY, "phy reset complete");

	bmsr = afe_miiread(afep, phyaddr, MII_STATUS);
	anar = afe_miiread(afep, phyaddr, MII_AN_ADVERT);

	anar &= ~(MII_ABILITY_100BASE_T4 |
	    MII_ABILITY_100BASE_TX_FD | MII_ABILITY_100BASE_TX |
	    MII_ABILITY_10BASE_T_FD | MII_ABILITY_10BASE_T);

	fiber = 0;

	/* if fiber is being forced, and device supports fiber... */
	if (afep->afe_flags & AFE_HASFIBER) {

		uint16_t	mcr;

		DBG(DPHY, "device supports 100BaseFX");
		mcr = afe_miiread(afep, phyaddr, PHY_MCR);
		switch (afep->afe_forcefiber) {
		case 0:
			/* UTP Port */
			DBG(DPHY, "forcing twpair");
			mcr &= ~MCR_FIBER;
			fiber = 0;
			break;
		case 1:
			/* Fiber Port */
			DBG(DPHY, "forcing 100BaseFX");
			mcr |= MCR_FIBER;
			bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
			fiber = 1;
			break;
		default:
			DBG(DPHY, "checking for 100BaseFX link");
			/* fiber is 100 Mb FDX */
			afe_miiwrite(afep, phyaddr, MII_CONTROL,
			    MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
			drv_usecwait(50);

			mcr = afe_miiread(afep, phyaddr, PHY_MCR);
			mcr |= MCR_FIBER;
			afe_miiwrite(afep, phyaddr, PHY_MCR, mcr);
			drv_usecwait(500);

			/* if fiber is active, use it */
			if ((afe_miiread(afep, phyaddr, MII_STATUS) &
			    MII_STATUS_LINKUP)) {
				bmcr = MII_CONTROL_100MB | MII_CONTROL_FDUPLEX;
				fiber = 1;
			} else {
				mcr &= ~MCR_FIBER;
				fiber = 0;
			}
			break;
		}
		afe_miiwrite(afep, phyaddr, PHY_MCR, mcr);
		drv_usecwait(500);
	}

	if (fiber) {
		/* fiber only supports 100FDX(?) */
		bmsr &= ~(MII_STATUS_100_BASE_T4 |
		    MII_STATUS_100_BASEX | MII_STATUS_10_FD | MII_STATUS_10);
		bmsr |= MII_STATUS_100_BASEX_FD;
	}

	/* assume full support for everything to start */
	afep->afe_cap_aneg = afep->afe_cap_100T4 =
	    afep->afe_cap_100fdx = afep->afe_cap_100hdx =
	    afep->afe_cap_10fdx = afep->afe_cap_10hdx = 1;

	/* disable modes not supported in hardware */
	if (!(bmsr & MII_STATUS_100_BASEX_FD)) {
		afep->afe_adv_100fdx = 0;
		afep->afe_cap_100fdx = 0;
	}
	if (!(bmsr & MII_STATUS_100_BASE_T4)) {
		afep->afe_adv_100T4 = 0;
		afep->afe_cap_100T4 = 0;
	}
	if (!(bmsr & MII_STATUS_100_BASEX)) {
		afep->afe_adv_100hdx = 0;
		afep->afe_cap_100hdx = 0;
	}
	if (!(bmsr & MII_STATUS_10_FD)) {
		afep->afe_adv_10fdx = 0;
		afep->afe_cap_10fdx = 0;
	}
	if (!(bmsr & MII_STATUS_10)) {
		afep->afe_adv_10hdx = 0;
		afep->afe_cap_10hdx = 0;
	}
	if (!(bmsr & MII_STATUS_CANAUTONEG)) {
		afep->afe_adv_aneg = 0;
		afep->afe_cap_aneg = 0;
	}

	cnt = 0;
	if (afep->afe_adv_100fdx) {
		anar |= MII_ABILITY_100BASE_TX_FD;
		cnt++;
	}
	if (afep->afe_adv_100T4) {
		anar |= MII_ABILITY_100BASE_T4;
		cnt++;
	}
	if (afep->afe_adv_100hdx) {
		anar |= MII_ABILITY_100BASE_TX;
		cnt++;
	}
	if (afep->afe_adv_10fdx) {
		anar |= MII_ABILITY_10BASE_T_FD;
		cnt++;
	}
	if (afep->afe_adv_10hdx) {
		anar |= MII_ABILITY_10BASE_T;
		cnt++;
	}

	/*
	 * Make certain at least one valid link mode is selected.
	 */
	if (!cnt) {
		afe_error(afep->afe_dip, "No valid link mode selected.");
		afe_error(afep->afe_dip, "Powering down PHY.");
		afe_stopphy(afep);
		afep->afe_linkup = LINK_STATE_DOWN;
		if (afep->afe_flags & AFE_RUNNING)
			afe_reportlink(afep);
		return;
	}

	if (fiber) {
		bmcr = MII_CONTROL_100MB | MII_CONTROL_FDUPLEX;
	} else if ((afep->afe_adv_aneg) && (bmsr & MII_STATUS_CANAUTONEG)) {
		DBG(DPHY, "using autoneg mode");
		bmcr = (MII_CONTROL_ANE | MII_CONTROL_RSAN);
	} else {
		DBG(DPHY, "using forced mode");
		if (afep->afe_adv_100fdx) {
			bmcr = (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
		} else if (afep->afe_adv_100hdx) {
			bmcr = MII_CONTROL_100MB;
		} else if (afep->afe_adv_10fdx) {
			bmcr = MII_CONTROL_FDUPLEX;
		} else {
			/* 10HDX */
			bmcr = 0;
		}
	}

	DBG(DPHY, "programming anar to 0x%x", anar);
	afe_miiwrite(afep, phyaddr, MII_AN_ADVERT, anar);
	DBG(DPHY, "programming bmcr to 0x%x", bmcr);
	afe_miiwrite(afep, phyaddr, MII_CONTROL, bmcr);

	if (nosqe) {
		uint16_t	pilr;
		/*
		 * Work around for erratum 983B_0416 -- the duplex LED
		 * flashes in 10 HDX mode.  We just disable SQE testing
		 * on the device.
		 */
		pilr = afe_miiread(afep, phyaddr, PHY_PILR);
		pilr |= PILR_NOSQE;
		afe_miiwrite(afep, phyaddr, PHY_PILR, pilr);
	}

	/*
	 * schedule a query of the link status
	 */
	PUTCSR(afep, CSR_TIMER, TIMER_LOOP |
	    (AFE_LINKTIMER * 1000 / TIMER_USEC));
}

void
afe_stopphy(afe_t *afep)
{
	/* stop the phy timer */
	PUTCSR(afep, CSR_TIMER, 0);

	/*
	 * phy in isolate & powerdown mode...
	 */
	afe_miiwrite(afep, afep->afe_phyaddr, MII_CONTROL,
	    MII_CONTROL_PWRDN | MII_CONTROL_ISOLATE);

	/*
	 * mark the link state unknown
	 */
	if (!afep->afe_resetting) {
		afep->afe_linkup = LINK_STATE_UNKNOWN;
		afep->afe_ifspeed = 0;
		afep->afe_duplex = LINK_DUPLEX_UNKNOWN;
		if (afep->afe_flags & AFE_RUNNING)
			afe_reportlink(afep);
	}
}

void
afe_reportlink(afe_t *afep)
{
	int	changed = 0;

	if (afep->afe_ifspeed != afep->afe_lastifspeed) {
		afep->afe_lastifspeed = afep->afe_ifspeed;
		changed++;
	}
	if (afep->afe_duplex != afep->afe_lastduplex) {
		afep->afe_lastduplex = afep->afe_duplex;
		changed++;
	}
	if (changed)
		mac_link_update(afep->afe_mh, afep->afe_linkup);
}

void
afe_checklink(afe_t *afep)
{
	if ((afep->afe_flags & AFE_RUNNING) == 0)
		return;

	if ((afep->afe_txstall_time != 0) &&
	    (gethrtime() > afep->afe_txstall_time) &&
	    (afep->afe_txavail != AFE_TXRING)) {
		afep->afe_txstall_time = 0;
		afe_error(afep->afe_dip, "TX stall detected!");
		afe_resetall(afep);
		return;
	}

	switch (AFE_MODEL(afep)) {
	case MODEL_COMET:
		afe_checklinkcomet(afep);
		break;
	case MODEL_CENTAUR:
		afe_checklinkcentaur(afep);
		break;
	}
}

void
afe_checklinkcomet(afe_t *afep)
{
	uint16_t	xciis;
	int		reinit = 0;

	xciis = GETCSR16(afep, CSR_XCIIS);
	if (xciis & XCIIS_PDF) {
		afe_error(afep->afe_dip, "Parallel detection fault detected!");
	}
	if (xciis & XCIIS_RF) {
		afe_error(afep->afe_dip, "Remote fault detected.");
	}
	if (xciis & XCIIS_LFAIL) {
		if (afep->afe_linkup == LINK_STATE_UP) {
			reinit++;
		}
		afep->afe_ifspeed = 0;
		afep->afe_linkup = LINK_STATE_DOWN;
		afep->afe_duplex = LINK_DUPLEX_UNKNOWN;
		afe_reportlink(afep);
		if (reinit) {
			afe_startphy(afep);
		}
		return;
	}

	afep->afe_linkup = LINK_STATE_UP;
	afep->afe_ifspeed = (xciis & XCIIS_SPEED) ? 100000000 : 10000000;
	if (xciis & XCIIS_DUPLEX) {
		afep->afe_duplex = LINK_DUPLEX_FULL;
	} else {
		afep->afe_duplex = LINK_DUPLEX_HALF;
	}

	afe_reportlink(afep);
}

void
afe_checklinkcentaur(afe_t *afep)
{
	unsigned	opmode;
	int		reinit = 0;

	opmode = GETCSR(afep, CSR_OPM);
	if ((opmode & OPM_MODE) == OPM_MACONLY) {
		DBG(DPHY, "Centaur running in MAC-only mode");
		afe_checklinkmii(afep);
		return;
	}
	DBG(DPHY, "Centaur running in single chip mode");
	if ((opmode & OPM_LINK) == 0) {
		if (afep->afe_linkup == LINK_STATE_UP) {
			reinit++;
		}
		afep->afe_ifspeed = 0;
		afep->afe_duplex = LINK_DUPLEX_UNKNOWN;
		afep->afe_linkup = LINK_STATE_DOWN;
		afe_reportlink(afep);
		if (reinit) {
			afe_startphy(afep);
		}
		return;
	}

	afep->afe_linkup = LINK_STATE_UP;
	afep->afe_ifspeed = (opmode & OPM_SPEED) ? 100000000 : 10000000;
	if (opmode & OPM_DUPLEX) {
		afep->afe_duplex = LINK_DUPLEX_FULL;
	} else {
		afep->afe_duplex = LINK_DUPLEX_HALF;
	}
	afe_reportlink(afep);
}

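/*
 * When the Centaur runs with an external MII PHY, the link parameters
 * are taken directly from the PHY registers.  For autonegotiated links
 * the result is resolved from the intersection of our advertisement
 * (ANAR) and the partner's (ANLPAR), picking the best common mode in
 * the order 100FDX, 100T4, 100HDX, 10FDX, 10HDX.
 */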
void
afe_checklinkmii(afe_t *afep)
{
	/* read MII state registers */
	uint16_t	bmsr;
	uint16_t	bmcr;
	uint16_t	anar;
	uint16_t	anlpar;
	int		reinit = 0;

	/* read this twice, to clear latched link state */
	bmsr = afe_miiread(afep, afep->afe_phyaddr, MII_STATUS);
	bmsr = afe_miiread(afep, afep->afe_phyaddr, MII_STATUS);
	bmcr = afe_miiread(afep, afep->afe_phyaddr, MII_CONTROL);
	anar = afe_miiread(afep, afep->afe_phyaddr, MII_AN_ADVERT);
	anlpar = afe_miiread(afep, afep->afe_phyaddr, MII_AN_LPABLE);

	if (bmsr & MII_STATUS_REMFAULT) {
		afe_error(afep->afe_dip, "Remote fault detected.");
	}
	if (bmsr & MII_STATUS_JABBERING) {
		afe_error(afep->afe_dip, "Jabber condition detected.");
	}
	if ((bmsr & MII_STATUS_LINKUP) == 0) {
		/* no link */
		if (afep->afe_linkup == LINK_STATE_UP) {
			reinit = 1;
		}
		afep->afe_ifspeed = 0;
		afep->afe_duplex = LINK_DUPLEX_UNKNOWN;
		afep->afe_linkup = LINK_STATE_DOWN;
		afe_reportlink(afep);
		if (reinit) {
			afe_startphy(afep);
		}
		return;
	}

	DBG(DCHATTY, "link up!");
	afep->afe_linkup = LINK_STATE_UP;

	if (!(bmcr & MII_CONTROL_ANE)) {
		/* forced mode */
		if (bmcr & MII_CONTROL_100MB) {
			afep->afe_ifspeed = 100000000;
		} else {
			afep->afe_ifspeed = 10000000;
		}
		if (bmcr & MII_CONTROL_FDUPLEX) {
			afep->afe_duplex = LINK_DUPLEX_FULL;
		} else {
			afep->afe_duplex = LINK_DUPLEX_HALF;
		}
	} else if ((!(bmsr & MII_STATUS_CANAUTONEG)) ||
	    (!(bmsr & MII_STATUS_ANDONE))) {
		afep->afe_ifspeed = 0;
		afep->afe_duplex = LINK_DUPLEX_UNKNOWN;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX_FD) {
		afep->afe_ifspeed = 100000000;
		afep->afe_duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_100BASE_T4) {
		afep->afe_ifspeed = 100000000;
		afep->afe_duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_100BASE_TX) {
		afep->afe_ifspeed = 100000000;
		afep->afe_duplex = LINK_DUPLEX_HALF;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T_FD) {
		afep->afe_ifspeed = 10000000;
		afep->afe_duplex = LINK_DUPLEX_FULL;
	} else if (anar & anlpar & MII_ABILITY_10BASE_T) {
		afep->afe_ifspeed = 10000000;
		afep->afe_duplex = LINK_DUPLEX_HALF;
	} else {
		afep->afe_ifspeed = 0;
		afep->afe_duplex = LINK_DUPLEX_UNKNOWN;
	}

	afe_reportlink(afep);
}

void
afe_miitristate(afe_t *afep)
{
	uint32_t val = SPR_SROM_WRITE | SPR_MII_CTRL;

	PUTCSR(afep, CSR_SPR, val);
	drv_usecwait(1);
	PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
	drv_usecwait(1);
}

void
afe_miiwritebit(afe_t *afep, uint8_t bit)
{
	uint32_t val = bit ? SPR_MII_DOUT : 0;

	PUTCSR(afep, CSR_SPR, val);
	drv_usecwait(1);
	PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
	drv_usecwait(1);
}

uint8_t
afe_miireadbit(afe_t *afep)
{
	uint32_t	val = SPR_MII_CTRL | SPR_SROM_READ;
	uint8_t		bit;

	PUTCSR(afep, CSR_SPR, val);
	drv_usecwait(1);
	bit = (GETCSR(afep, CSR_SPR) & SPR_MII_DIN) ? 1 : 0;
	PUTCSR(afep, CSR_SPR, val | SPR_MII_CLOCK);
	drv_usecwait(1);
	return (bit);
}

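/*
 * MII (MDIO) access on the Centaur is bit-banged through the SPR
 * register using the helpers above.  Each IEEE 802.3 clause 22 frame
 * is: a preamble of 32 ones, the start pattern (01), an opcode (10 for
 * read, 01 for write), the 5-bit PHY address, the 5-bit register
 * address, a turnaround, and finally 16 data bits -- see
 * afe_miireadgeneral() and afe_miiwritegeneral().  The Comet instead
 * maps its PHY registers directly into CSR space, so it takes a
 * separate path.
 */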
uint16_t
afe_miiread(afe_t *afep, int phy, int reg)
{
	/*
	 * The ADMtek parts do not decode the PHY address bits -- they
	 * only support a PHY at address 1.
	 */
	if (phy != 1) {
		return (0xffff);
	}
	switch (AFE_MODEL(afep)) {
	case MODEL_COMET:
		return (afe_miireadcomet(afep, phy, reg));
	case MODEL_CENTAUR:
		return (afe_miireadgeneral(afep, phy, reg));
	}
	return (0xffff);
}

uint16_t
afe_miireadgeneral(afe_t *afep, int phy, int reg)
{
	uint16_t	value = 0;
	int		i;

	/* send the 32 bit preamble */
	for (i = 0; i < 32; i++) {
		afe_miiwritebit(afep, 1);
	}

	/* send the start code - 01b */
	afe_miiwritebit(afep, 0);
	afe_miiwritebit(afep, 1);

	/* send the opcode for read, - 10b */
	afe_miiwritebit(afep, 1);
	afe_miiwritebit(afep, 0);

	/* next we send the 5 bit phy address */
	for (i = 0x10; i > 0; i >>= 1) {
		afe_miiwritebit(afep, (phy & i) ? 1 : 0);
	}

	/* the 5 bit register address goes next */
	for (i = 0x10; i > 0; i >>= 1) {
		afe_miiwritebit(afep, (reg & i) ? 1 : 0);
	}

	/* turnaround - tristate followed by logic 0 */
	afe_miitristate(afep);
	afe_miiwritebit(afep, 0);

	/* read the 16 bit register value */
	for (i = 0x8000; i > 0; i >>= 1) {
		value <<= 1;
		value |= afe_miireadbit(afep);
	}
	afe_miitristate(afep);
	return (value);
}

uint16_t
afe_miireadcomet(afe_t *afep, int phy, int reg)
{
	if (phy != 1) {
		return (0xffff);
	}
	switch (reg) {
	case MII_CONTROL:
		reg = CSR_BMCR;
		break;
	case MII_STATUS:
		reg = CSR_BMSR;
		break;
	case MII_PHYIDH:
		reg = CSR_PHYIDR1;
		break;
	case MII_PHYIDL:
		reg = CSR_PHYIDR2;
		break;
	case MII_AN_ADVERT:
		reg = CSR_ANAR;
		break;
	case MII_AN_LPABLE:
		reg = CSR_ANLPAR;
		break;
	case MII_AN_EXPANSION:
		reg = CSR_ANER;
		break;
	default:
		return (0);
	}
	return (GETCSR16(afep, reg) & 0xFFFF);
}

void
afe_miiwrite(afe_t *afep, int phy, int reg, uint16_t val)
{
	/*
	 * The ADMtek parts do not decode the PHY address bits -- they
	 * only support a PHY at address 1.
	 */
	if (phy != 1) {
		return;
	}
	switch (AFE_MODEL(afep)) {
	case MODEL_COMET:
		afe_miiwritecomet(afep, phy, reg, val);
		break;
	case MODEL_CENTAUR:
		afe_miiwritegeneral(afep, phy, reg, val);
		break;
	}
}

void
afe_miiwritegeneral(afe_t *afep, int phy, int reg, uint16_t val)
{
	int	i;

	/* send the 32 bit preamble */
	for (i = 0; i < 32; i++) {
		afe_miiwritebit(afep, 1);
	}

	/* send the start code - 01b */
	afe_miiwritebit(afep, 0);
	afe_miiwritebit(afep, 1);

	/* send the opcode for write, - 01b */
	afe_miiwritebit(afep, 0);
	afe_miiwritebit(afep, 1);

	/* next we send the 5 bit phy address */
	for (i = 0x10; i > 0; i >>= 1) {
		afe_miiwritebit(afep, (phy & i) ? 1 : 0);
	}

	/* the 5 bit register address goes next */
	for (i = 0x10; i > 0; i >>= 1) {
		afe_miiwritebit(afep, (reg & i) ? 1 : 0);
	}

	/* turnaround - tristate followed by logic 0 */
	afe_miitristate(afep);
	afe_miiwritebit(afep, 0);

	/* now write out our data (16 bits) */
	for (i = 0x8000; i > 0; i >>= 1) {
		afe_miiwritebit(afep, (val & i) ? 1 : 0);
	}

	/* idle mode */
	afe_miitristate(afep);
}

void
afe_miiwritecomet(afe_t *afep, int phy, int reg, uint16_t val)
{
	if (phy != 1) {
		return;
	}
	switch (reg) {
	case MII_CONTROL:
		reg = CSR_BMCR;
		break;
	case MII_STATUS:
		reg = CSR_BMSR;
		break;
	case MII_PHYIDH:
		reg = CSR_PHYIDR1;
		break;
	case MII_PHYIDL:
		reg = CSR_PHYIDR2;
		break;
	case MII_AN_ADVERT:
		reg = CSR_ANAR;
		break;
	case MII_AN_LPABLE:
		reg = CSR_ANLPAR;
		break;
	case MII_AN_EXPANSION:
		reg = CSR_ANER;
		break;
	default:
		return;
	}
	PUTCSR16(afep, reg, val);
}

int
afe_m_start(void *arg)
{
	afe_t	*afep = arg;

	/* grab exclusive access to the card */
	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);

	afe_startall(afep);
	afep->afe_flags |= AFE_RUNNING;

	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);
	return (0);
}

void
afe_m_stop(void *arg)
{
	afe_t	*afep = arg;

	/* exclusive access to the hardware! */
	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);

	afe_stopall(afep);
	afep->afe_flags &= ~AFE_RUNNING;

	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);
}

void
afe_startmac(afe_t *afep)
{
	/* verify exclusive access to the card */
	ASSERT(mutex_owned(&afep->afe_intrlock));
	ASSERT(mutex_owned(&afep->afe_xmtlock));

	/* start the card */
	SETBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE);

	if (afep->afe_txavail != AFE_TXRING)
		PUTCSR(afep, CSR_TDR, 0);

	/* tell the mac that we are ready to go! */
	if (afep->afe_flags & AFE_RUNNING)
		mac_tx_update(afep->afe_mh);
}

void
afe_stopmac(afe_t *afep)
{
	int	i;

	/* exclusive access to the hardware! */
	ASSERT(mutex_owned(&afep->afe_intrlock));
	ASSERT(mutex_owned(&afep->afe_xmtlock));

	CLRBIT(afep, CSR_NAR, NAR_TX_ENABLE | NAR_RX_ENABLE);

	/*
	 * A 1518 byte frame at 10Mbps takes about 1.2 msec to drain.
	 * We just wait up to the next whole msec (2 msec total), which
	 * should be plenty of time for it to complete.
	 *
	 * Note that some chips never seem to indicate the transition to
	 * the stopped state properly.  Experience shows that we can safely
	 * proceed anyway, after waiting the requisite timeout.
	 */
	for (i = 2000; i != 0; i -= 10) {
		if ((GETCSR(afep, CSR_SR) & (SR_TX_STATE | SR_RX_STATE)) == 0)
			break;
		drv_usecwait(10);
	}

	/* prevent an interrupt */
	PUTCSR(afep, CSR_SR2, INT_RXSTOPPED | INT_TXSTOPPED);
}

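/*
 * Descriptor ring setup.  Both rings are simple arrays of tulip-style
 * descriptors; the last entry carries an ENDRING flag so the chip
 * wraps back to the start.  A descriptor is handed to the chip by
 * setting the OWN bit in its status word (RXSTAT_OWN/TXSTAT_OWN) after
 * the rest of the descriptor has been written and synced for the
 * device; the chip clears OWN when it has finished with the entry.
 */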
void
afe_resetrings(afe_t *afep)
{
	int	i;

	/* now we need to reset the pointers... */
	PUTCSR(afep, CSR_RDB, 0);
	PUTCSR(afep, CSR_TDB, 0);

	/* reset the descriptor ring pointers */
	afep->afe_rxhead = 0;
	afep->afe_txreclaim = 0;
	afep->afe_txsend = 0;
	afep->afe_txavail = AFE_TXRING;

	/* set up transmit descriptor ring */
	for (i = 0; i < AFE_TXRING; i++) {
		afe_desc_t	*tmdp = &afep->afe_txdescp[i];
		unsigned	control = 0;
		if (i == (AFE_TXRING - 1)) {
			control |= TXCTL_ENDRING;
		}
		PUTTXDESC(afep, tmdp->desc_status, 0);
		PUTTXDESC(afep, tmdp->desc_control, control);
		PUTTXDESC(afep, tmdp->desc_buffer1, 0);
		PUTTXDESC(afep, tmdp->desc_buffer2, 0);
		SYNCTXDESC(afep, i, DDI_DMA_SYNC_FORDEV);
	}
	PUTCSR(afep, CSR_TDB, afep->afe_txdesc_paddr);

	/* make the receive buffers available */
	for (i = 0; i < AFE_RXRING; i++) {
		afe_rxbuf_t	*rxb = afep->afe_rxbufs[i];
		afe_desc_t	*rmdp = &afep->afe_rxdescp[i];
		unsigned	control;

		control = AFE_BUFSZ & RXCTL_BUFLEN1;
		if (i == (AFE_RXRING - 1)) {
			control |= RXCTL_ENDRING;
		}
		PUTRXDESC(afep, rmdp->desc_buffer1, rxb->rxb_paddr);
		PUTRXDESC(afep, rmdp->desc_buffer2, 0);
		PUTRXDESC(afep, rmdp->desc_control, control);
		PUTRXDESC(afep, rmdp->desc_status, RXSTAT_OWN);
		SYNCRXDESC(afep, i, DDI_DMA_SYNC_FORDEV);
	}
	PUTCSR(afep, CSR_RDB, afep->afe_rxdesc_paddr);
}

void
afe_stopall(afe_t *afep)
{
	afe_disableinterrupts(afep);

	afe_stopmac(afep);

	/* stop the phy */
	afe_stopphy(afep);
}

void
afe_startall(afe_t *afep)
{
	ASSERT(mutex_owned(&afep->afe_intrlock));
	ASSERT(mutex_owned(&afep->afe_xmtlock));

	/* make sure interrupts are disabled to begin */
	afe_disableinterrupts(afep);

	/* initialize the chip */
	(void) afe_initialize(afep);

	/* now we can enable interrupts */
	afe_enableinterrupts(afep);

	/* start up the phy */
	afe_startphy(afep);

	/* start up the mac */
	afe_startmac(afep);
}

void
afe_resetall(afe_t *afep)
{
	afep->afe_resetting = B_TRUE;
	afe_stopall(afep);
	afep->afe_resetting = B_FALSE;
	afe_startall(afep);
}

afe_txbuf_t *
afe_alloctxbuf(afe_t *afep)
{
	ddi_dma_cookie_t	dmac;
	unsigned		ncookies;
	afe_txbuf_t		*txb;
	size_t			len;

	txb = kmem_zalloc(sizeof (*txb), KM_SLEEP);

	if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_txattr,
	    DDI_DMA_SLEEP, NULL, &txb->txb_dmah) != DDI_SUCCESS) {
		kmem_free(txb, sizeof (*txb));
		return (NULL);
	}

	if (ddi_dma_mem_alloc(txb->txb_dmah, AFE_BUFSZ, &afe_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &txb->txb_buf, &len,
	    &txb->txb_acch) != DDI_SUCCESS) {
		afe_destroytxbuf(txb);
		return (NULL);
	}
	if (ddi_dma_addr_bind_handle(txb->txb_dmah, NULL, txb->txb_buf,
	    len, DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
	    &dmac, &ncookies) != DDI_DMA_MAPPED) {
		afe_destroytxbuf(txb);
		return (NULL);
	}
	txb->txb_paddr = dmac.dmac_address;

	return (txb);
}

void
afe_destroytxbuf(afe_txbuf_t *txb)
{
	if (txb != NULL) {
		if (txb->txb_paddr)
			(void) ddi_dma_unbind_handle(txb->txb_dmah);
		if (txb->txb_acch)
			ddi_dma_mem_free(&txb->txb_acch);
		if (txb->txb_dmah)
			ddi_dma_free_handle(&txb->txb_dmah);
		kmem_free(txb, sizeof (*txb));
	}
}

afe_rxbuf_t *
afe_allocrxbuf(afe_t *afep)
{
	afe_rxbuf_t		*rxb;
	size_t			len;
	unsigned		ccnt;
	ddi_dma_cookie_t	dmac;

	rxb = kmem_zalloc(sizeof (*rxb), KM_SLEEP);

	if (ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
	    DDI_DMA_SLEEP, NULL, &rxb->rxb_dmah) != DDI_SUCCESS) {
		kmem_free(rxb, sizeof (*rxb));
		return (NULL);
	}
	if (ddi_dma_mem_alloc(rxb->rxb_dmah, AFE_BUFSZ, &afe_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &rxb->rxb_buf, &len,
	    &rxb->rxb_acch) != DDI_SUCCESS) {
		ddi_dma_free_handle(&rxb->rxb_dmah);
		kmem_free(rxb, sizeof (*rxb));
		return (NULL);
	}
	if (ddi_dma_addr_bind_handle(rxb->rxb_dmah, NULL, rxb->rxb_buf, len,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &dmac,
	    &ccnt) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&rxb->rxb_acch);
		ddi_dma_free_handle(&rxb->rxb_dmah);
		kmem_free(rxb, sizeof (*rxb));
		return (NULL);
	}
	rxb->rxb_paddr = dmac.dmac_address;

	return (rxb);
}

void
afe_destroyrxbuf(afe_rxbuf_t *rxb)
{
	if (rxb) {
		(void) ddi_dma_unbind_handle(rxb->rxb_dmah);
		ddi_dma_mem_free(&rxb->rxb_acch);
		ddi_dma_free_handle(&rxb->rxb_dmah);
		kmem_free(rxb, sizeof (*rxb));
	}
}

/*
 * Allocate receive resources.
 */
int
afe_allocrxring(afe_t *afep)
{
	int			rval;
	int			i;
	size_t			size;
	size_t			len;
	ddi_dma_cookie_t	dmac;
	unsigned		ncookies;
	caddr_t			kaddr;

	size = AFE_RXRING * sizeof (afe_desc_t);

	rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
	    DDI_DMA_SLEEP, NULL, &afep->afe_rxdesc_dmah);
	if (rval != DDI_SUCCESS) {
		afe_error(afep->afe_dip,
		    "unable to allocate DMA handle for rx descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_mem_alloc(afep->afe_rxdesc_dmah, size, &afe_devattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
	    &afep->afe_rxdesc_acch);
	if (rval != DDI_SUCCESS) {
		afe_error(afep->afe_dip,
		    "unable to allocate DMA memory for rx descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_addr_bind_handle(afep->afe_rxdesc_dmah, NULL, kaddr,
	    size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dmac, &ncookies);
	if (rval != DDI_DMA_MAPPED) {
		afe_error(afep->afe_dip,
		    "unable to bind DMA for rx descriptors");
		return (DDI_FAILURE);
	}

	/* because of afe_dma_attr */
	ASSERT(ncookies == 1);

	/* we take the 32-bit physical address out of the cookie */
	afep->afe_rxdesc_paddr = dmac.dmac_address;
	afep->afe_rxdescp = (void *)kaddr;

	/* allocate buffer pointers (not the buffers themselves, yet) */
	afep->afe_rxbufs = kmem_zalloc(AFE_RXRING * sizeof (afe_rxbuf_t *),
	    KM_SLEEP);

	/* now allocate rx buffers */
	for (i = 0; i < AFE_RXRING; i++) {
		afe_rxbuf_t *rxb = afe_allocrxbuf(afep);
		if (rxb == NULL)
			return (DDI_FAILURE);
		afep->afe_rxbufs[i] = rxb;
	}

	return (DDI_SUCCESS);
}

/*
 * Allocate transmit resources.
 */
int
afe_alloctxring(afe_t *afep)
{
	int			rval;
	int			i;
	size_t			size;
	size_t			len;
	ddi_dma_cookie_t	dmac;
	unsigned		ncookies;
	caddr_t			kaddr;

	size = AFE_TXRING * sizeof (afe_desc_t);

	rval = ddi_dma_alloc_handle(afep->afe_dip, &afe_dma_attr,
	    DDI_DMA_SLEEP, NULL, &afep->afe_txdesc_dmah);
	if (rval != DDI_SUCCESS) {
		afe_error(afep->afe_dip,
		    "unable to allocate DMA handle for tx descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_mem_alloc(afep->afe_txdesc_dmah, size, &afe_devattr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &kaddr, &len,
	    &afep->afe_txdesc_acch);
	if (rval != DDI_SUCCESS) {
		afe_error(afep->afe_dip,
		    "unable to allocate DMA memory for tx descriptors");
		return (DDI_FAILURE);
	}

	rval = ddi_dma_addr_bind_handle(afep->afe_txdesc_dmah, NULL, kaddr,
	    size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dmac, &ncookies);
	if (rval != DDI_DMA_MAPPED) {
		afe_error(afep->afe_dip,
		    "unable to bind DMA for tx descriptors");
		return (DDI_FAILURE);
	}

	/* because of afe_dma_attr */
	ASSERT(ncookies == 1);

	/* we take the 32-bit physical address out of the cookie */
	afep->afe_txdesc_paddr = dmac.dmac_address;
	afep->afe_txdescp = (void *)kaddr;

	/* allocate buffer pointers (not the buffers themselves, yet) */
	afep->afe_txbufs = kmem_zalloc(AFE_TXRING * sizeof (afe_txbuf_t *),
	    KM_SLEEP);

	/* now allocate tx buffers */
	for (i = 0; i < AFE_TXRING; i++) {
		afe_txbuf_t *txb = afe_alloctxbuf(afep);
		if (txb == NULL)
			return (DDI_FAILURE);
		afep->afe_txbufs[i] = txb;
	}

	return (DDI_SUCCESS);
}

void
afe_freerxring(afe_t *afep)
{
	int	i;

	/* the buffer array may not exist if attach failed early */
	if (afep->afe_rxbufs) {
		for (i = 0; i < AFE_RXRING; i++) {
			afe_destroyrxbuf(afep->afe_rxbufs[i]);
		}
		kmem_free(afep->afe_rxbufs,
		    AFE_RXRING * sizeof (afe_rxbuf_t *));
	}

	if (afep->afe_rxdesc_paddr)
		(void) ddi_dma_unbind_handle(afep->afe_rxdesc_dmah);
	if (afep->afe_rxdesc_acch)
		ddi_dma_mem_free(&afep->afe_rxdesc_acch);
	if (afep->afe_rxdesc_dmah)
		ddi_dma_free_handle(&afep->afe_rxdesc_dmah);
}

void
afe_freetxring(afe_t *afep)
{
	int	i;

	/* the buffer array may not exist if attach failed early */
	if (afep->afe_txbufs) {
		for (i = 0; i < AFE_TXRING; i++) {
			afe_destroytxbuf(afep->afe_txbufs[i]);
		}
		kmem_free(afep->afe_txbufs,
		    AFE_TXRING * sizeof (afe_txbuf_t *));
	}
	if (afep->afe_txdesc_paddr)
		(void) ddi_dma_unbind_handle(afep->afe_txdesc_dmah);
	if (afep->afe_txdesc_acch)
		ddi_dma_mem_free(&afep->afe_txdesc_acch);
	if (afep->afe_txdesc_dmah)
		ddi_dma_free_handle(&afep->afe_txdesc_dmah);
}

/*
 * Interrupt service routine.
 */
unsigned
afe_intr(caddr_t arg)
{
	afe_t *afep = (void *)arg;
	uint32_t status;
	mblk_t *mp = NULL;

	mutex_enter(&afep->afe_intrlock);

	if (afep->afe_flags & AFE_SUSPENDED) {
		/* we cannot receive interrupts! */
		mutex_exit(&afep->afe_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	/* check interrupt status bits, did we interrupt? */
	status = GETCSR(afep, CSR_SR2) & INT_ALL;

	if (status == 0) {
		KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
		mutex_exit(&afep->afe_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* ack the interrupt */
	PUTCSR(afep, CSR_SR2, status);
	KIOIP->intrs[KSTAT_INTR_HARD]++;

	if (!(afep->afe_flags & AFE_RUNNING)) {
		/* not running, don't touch anything */
		mutex_exit(&afep->afe_intrlock);
		return (DDI_INTR_CLAIMED);
	}

	if (status & (INT_RXOK|INT_RXNOBUF)) {
		/* receive packets */
		mp = afe_receive(afep);
		if (status & INT_RXNOBUF)
			PUTCSR(afep, CSR_RDR, 0);	/* wake up chip */
	}

	if (status & INT_TXOK) {
		/* transmit completed */
		mutex_enter(&afep->afe_xmtlock);
		afe_reclaim(afep);
		mutex_exit(&afep->afe_xmtlock);
	}

	if (status & (INT_LINKCHG|INT_TIMER)) {
		mutex_enter(&afep->afe_xmtlock);
		afe_checklink(afep);
		mutex_exit(&afep->afe_xmtlock);
	}

	if (status & (INT_RXSTOPPED|INT_TXSTOPPED|
	    INT_RXJABBER|INT_TXJABBER|INT_TXUNDERFLOW)) {

		if (status & (INT_RXJABBER | INT_TXJABBER)) {
			afep->afe_jabber++;
		}
		DBG(DWARN, "resetting mac, status %x", status);
		mutex_enter(&afep->afe_xmtlock);
		afe_resetall(afep);
		mutex_exit(&afep->afe_xmtlock);
	}

	if (status & INT_BUSERR) {
		switch (GETCSR(afep, CSR_SR) & SR_BERR_TYPE) {
		case SR_BERR_PARITY:
			afe_error(afep->afe_dip, "PCI parity error");
			break;
		case SR_BERR_TARGET_ABORT:
			afe_error(afep->afe_dip, "PCI target abort");
			break;
		case SR_BERR_MASTER_ABORT:
			afe_error(afep->afe_dip, "PCI master abort");
			break;
		default:
			afe_error(afep->afe_dip, "Unknown PCI error");
			break;
		}

		/* reset the chip in an attempt to fix things */
		mutex_enter(&afep->afe_xmtlock);
		afe_resetall(afep);
		mutex_exit(&afep->afe_xmtlock);
	}

	mutex_exit(&afep->afe_intrlock);

	/*
	 * Send up packets.  We do this outside of the intrlock.
	 */
	if (mp) {
		mac_rx(afep->afe_mh, NULL, mp);
	}

	return (DDI_INTR_CLAIMED);
}
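
/*
 * Interrupt masking strategy: afe_enableinterrupts() programs the base
 * mask (INT_WANTED) and adds INT_TXOK only while afe_wantw is set, i.e.
 * only after afe_send() had to back off for lack of descriptors.  This
 * way transmit-complete interrupts fire only when afe_reclaim() is
 * actually needed to restart the flow of outbound packets.
 */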
void
afe_enableinterrupts(afe_t *afep)
{
	unsigned mask = INT_WANTED;

	if (afep->afe_wantw)
		mask |= INT_TXOK;

	PUTCSR(afep, CSR_IER2, mask);

	if (AFE_MODEL(afep) == MODEL_COMET) {
		/*
		 * On the Comet, this is the internal transceiver
		 * interrupt.  We program the Comet's built-in PHY to
		 * enable certain interrupts.
		 */
		PUTCSR16(afep, CSR_XIE, XIE_LDE | XIE_ANCE);
	}
}

void
afe_disableinterrupts(afe_t *afep)
{
	/* disable further interrupts */
	PUTCSR(afep, CSR_IER2, INT_NONE);

	/* clear any pending interrupts */
	PUTCSR(afep, CSR_SR2, INT_ALL);
}

boolean_t
afe_send(afe_t *afep, mblk_t *mp)
{
	size_t len;
	afe_txbuf_t *txb;
	afe_desc_t *tmd;
	uint32_t control;
	int txsend;

	ASSERT(mutex_owned(&afep->afe_xmtlock));
	ASSERT(mp != NULL);

	len = msgsize(mp);
	if (len > ETHERVLANMTU) {
		DBG(DXMIT, "frame too long: %d", len);
		afep->afe_macxmt_errors++;
		freemsg(mp);
		return (B_TRUE);
	}

	if (afep->afe_txavail < AFE_TXRECLAIM)
		afe_reclaim(afep);

	if (afep->afe_txavail == 0) {
		/* no more tmds */
		afep->afe_wantw = B_TRUE;
		/* enable TX interrupt */
		afe_enableinterrupts(afep);
		return (B_FALSE);
	}

	txsend = afep->afe_txsend;

	/*
	 * For simplicity, we just do a copy into a preallocated
	 * DMA buffer.
	 */

	txb = afep->afe_txbufs[txsend];
	mcopymsg(mp, txb->txb_buf);	/* frees mp! */

	/*
	 * Statistics.
	 */
	afep->afe_opackets++;
	afep->afe_obytes += len;
	if (txb->txb_buf[0] & 0x1) {
		if (bcmp(txb->txb_buf, afe_broadcast, ETHERADDRL) != 0)
			afep->afe_multixmt++;
		else
			afep->afe_brdcstxmt++;
	}

	/* note len is already known to be a small unsigned */
	control = len | TXCTL_FIRST | TXCTL_LAST | TXCTL_INTCMPLTE;

	if (txsend == (AFE_TXRING - 1))
		control |= TXCTL_ENDRING;

	tmd = &afep->afe_txdescp[txsend];

	SYNCTXBUF(txb, len, DDI_DMA_SYNC_FORDEV);
	PUTTXDESC(afep, tmd->desc_control, control);
	PUTTXDESC(afep, tmd->desc_buffer1, txb->txb_paddr);
	PUTTXDESC(afep, tmd->desc_buffer2, 0);
	PUTTXDESC(afep, tmd->desc_status, TXSTAT_OWN);
	/* sync the descriptor out to the device */
	SYNCTXDESC(afep, txsend, DDI_DMA_SYNC_FORDEV);

	/*
	 * Note the new values of txavail and txsend.
	 */
	afep->afe_txavail--;
	afep->afe_txsend = (txsend + 1) % AFE_TXRING;

	/*
	 * It should never, ever take more than 5 seconds to drain
	 * the ring.  If it happens, then we are stuck!
	 */
	afep->afe_txstall_time = gethrtime() + (5 * 1000000000ULL);

	/*
	 * wake up the chip ... inside the lock to protect against DR suspend,
	 * etc.
	 */
	PUTCSR(afep, CSR_TDR, 0);

	return (B_TRUE);
}
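
/*
 * A GLDv3 transmit entry point typically drives afe_send() along the
 * lines of the sketch below (illustration only; the authoritative
 * version is afe_m_tx(), defined elsewhere in this file).  When
 * afe_send() returns B_FALSE the ring is full and the unsent remainder
 * of the chain is handed back to the MAC layer for a later retry:
 *
 *	mutex_enter(&afep->afe_xmtlock);
 *	while (mp != NULL) {
 *		mblk_t *nmp = mp->b_next;
 *		mp->b_next = NULL;
 *		if (!afe_send(afep, mp)) {
 *			mp->b_next = nmp;
 *			break;
 *		}
 *		mp = nmp;
 *	}
 *	mutex_exit(&afep->afe_xmtlock);
 *	return (mp);
 */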
2371 */ 2372 void 2373 afe_reclaim(afe_t *afep) 2374 { 2375 afe_desc_t *tmdp; 2376 2377 while (afep->afe_txavail != AFE_TXRING) { 2378 uint32_t status; 2379 uint32_t control; 2380 int index = afep->afe_txreclaim; 2381 2382 tmdp = &afep->afe_txdescp[index]; 2383 2384 /* sync it before we read it */ 2385 SYNCTXDESC(afep, index, DDI_DMA_SYNC_FORKERNEL); 2386 2387 control = GETTXDESC(afep, tmdp->desc_control); 2388 status = GETTXDESC(afep, tmdp->desc_status); 2389 2390 if (status & TXSTAT_OWN) { 2391 /* chip is still working on it, we're done */ 2392 break; 2393 } 2394 2395 afep->afe_txavail++; 2396 afep->afe_txreclaim = (index + 1) % AFE_TXRING; 2397 2398 /* in the most common successful case, all bits are clear */ 2399 if (status == 0) 2400 continue; 2401 2402 if ((control & TXCTL_LAST) == 0) 2403 continue; 2404 2405 if (status & TXSTAT_TXERR) { 2406 afep->afe_errxmt++; 2407 2408 if (status & TXSTAT_JABBER) { 2409 /* transmit jabber timeout */ 2410 afep->afe_macxmt_errors++; 2411 } 2412 if (status & 2413 (TXSTAT_CARRLOST | TXSTAT_NOCARR)) { 2414 afep->afe_carrier_errors++; 2415 } 2416 if (status & TXSTAT_UFLOW) { 2417 afep->afe_underflow++; 2418 } 2419 if (status & TXSTAT_LATECOL) { 2420 afep->afe_tx_late_collisions++; 2421 } 2422 if (status & TXSTAT_EXCOLL) { 2423 afep->afe_ex_collisions++; 2424 afep->afe_collisions += 16; 2425 } 2426 } 2427 2428 if (status & TXSTAT_DEFER) { 2429 afep->afe_defer_xmts++; 2430 } 2431 2432 /* collision counting */ 2433 if (TXCOLLCNT(status) == 1) { 2434 afep->afe_collisions++; 2435 afep->afe_first_collisions++; 2436 } else if (TXCOLLCNT(status)) { 2437 afep->afe_collisions += TXCOLLCNT(status); 2438 afep->afe_multi_collisions += TXCOLLCNT(status); 2439 } 2440 } 2441 2442 if (afep->afe_txavail >= AFE_TXRESCHED) { 2443 if (afep->afe_wantw) { 2444 /* 2445 * we were able to reclaim some packets, so 2446 * disable tx interrupts 2447 */ 2448 afep->afe_wantw = B_FALSE; 2449 afe_enableinterrupts(afep); 2450 mac_tx_update(afep->afe_mh); 2451 } 2452 } 2453 } 2454 2455 mblk_t * 2456 afe_receive(afe_t *afep) 2457 { 2458 unsigned len; 2459 afe_rxbuf_t *rxb; 2460 afe_desc_t *rmd; 2461 uint32_t status; 2462 mblk_t *mpchain, **mpp, *mp; 2463 int head, cnt; 2464 2465 mpchain = NULL; 2466 mpp = &mpchain; 2467 head = afep->afe_rxhead; 2468 2469 /* limit the number of packets we process to a half ring size */ 2470 for (cnt = 0; cnt < AFE_RXRING / 2; cnt++) { 2471 2472 DBG(DRECV, "receive at index %d", head); 2473 2474 rmd = &afep->afe_rxdescp[head]; 2475 rxb = afep->afe_rxbufs[head]; 2476 2477 SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORKERNEL); 2478 status = GETRXDESC(afep, rmd->desc_status); 2479 if (status & RXSTAT_OWN) { 2480 /* chip is still chewing on it */ 2481 break; 2482 } 2483 2484 /* discard the ethernet frame checksum */ 2485 len = RXLENGTH(status) - ETHERFCSL; 2486 2487 DBG(DRECV, "recv length %d, status %x", len, status); 2488 2489 if ((status & (RXSTAT_ERRS | RXSTAT_FIRST | RXSTAT_LAST)) != 2490 (RXSTAT_FIRST | RXSTAT_LAST)) { 2491 2492 afep->afe_errrcv++; 2493 2494 /* 2495 * Abnormal status bits detected, analyze further. 
mblk_t *
afe_receive(afe_t *afep)
{
	unsigned len;
	afe_rxbuf_t *rxb;
	afe_desc_t *rmd;
	uint32_t status;
	mblk_t *mpchain, **mpp, *mp;
	int head, cnt;

	mpchain = NULL;
	mpp = &mpchain;
	head = afep->afe_rxhead;

	/* limit the number of packets we process to a half ring size */
	for (cnt = 0; cnt < AFE_RXRING / 2; cnt++) {

		DBG(DRECV, "receive at index %d", head);

		rmd = &afep->afe_rxdescp[head];
		rxb = afep->afe_rxbufs[head];

		SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORKERNEL);
		status = GETRXDESC(afep, rmd->desc_status);
		if (status & RXSTAT_OWN) {
			/* chip is still chewing on it */
			break;
		}

		/* discard the ethernet frame checksum */
		len = RXLENGTH(status) - ETHERFCSL;

		DBG(DRECV, "recv length %d, status %x", len, status);

		if ((status & (RXSTAT_ERRS | RXSTAT_FIRST | RXSTAT_LAST)) !=
		    (RXSTAT_FIRST | RXSTAT_LAST)) {

			afep->afe_errrcv++;

			/*
			 * Abnormal status bits detected, analyze further.
			 */
			if ((status & (RXSTAT_LAST|RXSTAT_FIRST)) !=
			    (RXSTAT_LAST|RXSTAT_FIRST)) {
				DBG(DRECV, "rx packet overspill");
				if (status & RXSTAT_FIRST) {
					afep->afe_toolong_errors++;
				}
			} else if (status & RXSTAT_DESCERR) {
				afep->afe_macrcv_errors++;

			} else if (status & RXSTAT_RUNT) {
				afep->afe_runt++;

			} else if (status & RXSTAT_COLLSEEN) {
				/* this should really be rx_late_collisions */
				afep->afe_macrcv_errors++;

			} else if (status & RXSTAT_DRIBBLE) {
				afep->afe_align_errors++;

			} else if (status & RXSTAT_CRCERR) {
				afep->afe_fcs_errors++;

			} else if (status & RXSTAT_OFLOW) {
				afep->afe_overflow++;
			}
		}

		else if (len > ETHERVLANMTU) {
			afep->afe_errrcv++;
			afep->afe_toolong_errors++;
		}

		/*
		 * At this point, the chip thinks the packet is OK.
		 */
		else {
			mp = allocb(len + AFE_HEADROOM, 0);
			if (mp == NULL) {
				afep->afe_errrcv++;
				afep->afe_norcvbuf++;
				goto skip;
			}

			/* sync the buffer before we look at it */
			SYNCRXBUF(rxb, len, DDI_DMA_SYNC_FORKERNEL);
			mp->b_rptr += AFE_HEADROOM;
			mp->b_wptr = mp->b_rptr + len;
			bcopy((char *)rxb->rxb_buf, mp->b_rptr, len);

			afep->afe_ipackets++;
			afep->afe_rbytes += len;
			if (status & RXSTAT_GROUP) {
				if (bcmp(mp->b_rptr, afe_broadcast,
				    ETHERADDRL) == 0)
					afep->afe_brdcstrcv++;
				else
					afep->afe_multircv++;
			}
			*mpp = mp;
			mpp = &mp->b_next;
		}

skip:
		/* return ring entry to the hardware */
		PUTRXDESC(afep, rmd->desc_status, RXSTAT_OWN);
		SYNCRXDESC(afep, head, DDI_DMA_SYNC_FORDEV);

		/* advance to next RMD */
		head = (head + 1) % AFE_RXRING;
	}

	afep->afe_rxhead = head;

	return (mpchain);
}
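
/*
 * afe_getmiibit() (and the GETMIIBIT() shorthand below) reads a PHY
 * register under the transmit lock and reports whether a given bit is
 * set; while the device is suspended it simply reports 0.  It is used
 * by afe_m_stat() for the link-partner and autonegotiation statistics.
 */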
int
afe_getmiibit(afe_t *afep, uint16_t reg, uint16_t bit)
{
	unsigned val;

	mutex_enter(&afep->afe_xmtlock);
	if (afep->afe_flags & AFE_SUSPENDED) {
		mutex_exit(&afep->afe_xmtlock);
		/* device is suspended */
		return (0);
	}
	val = afe_miiread(afep, afep->afe_phyaddr, reg);
	mutex_exit(&afep->afe_xmtlock);

	return (val & bit ? 1 : 0);
}

#define	GETMIIBIT(reg, bit)	afe_getmiibit(afep, reg, bit)

int
afe_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	afe_t *afep = arg;

	mutex_enter(&afep->afe_xmtlock);
	if ((afep->afe_flags & (AFE_RUNNING|AFE_SUSPENDED)) == AFE_RUNNING)
		afe_reclaim(afep);
	mutex_exit(&afep->afe_xmtlock);

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = afep->afe_ifspeed;
		break;

	case MAC_STAT_MULTIRCV:
		*val = afep->afe_multircv;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = afep->afe_brdcstrcv;
		break;

	case MAC_STAT_MULTIXMT:
		*val = afep->afe_multixmt;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = afep->afe_brdcstxmt;
		break;

	case MAC_STAT_IPACKETS:
		*val = afep->afe_ipackets;
		break;

	case MAC_STAT_RBYTES:
		*val = afep->afe_rbytes;
		break;

	case MAC_STAT_OPACKETS:
		*val = afep->afe_opackets;
		break;

	case MAC_STAT_OBYTES:
		*val = afep->afe_obytes;
		break;

	case MAC_STAT_NORCVBUF:
		*val = afep->afe_norcvbuf;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_COLLISIONS:
		*val = afep->afe_collisions;
		break;

	case MAC_STAT_IERRORS:
		*val = afep->afe_errrcv;
		break;

	case MAC_STAT_OERRORS:
		*val = afep->afe_errxmt;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = afep->afe_duplex;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = afep->afe_align_errors;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = afep->afe_fcs_errors;
		break;

	case ETHER_STAT_SQE_ERRORS:
		*val = afep->afe_sqe_errors;
		break;

	case ETHER_STAT_DEFER_XMTS:
		*val = afep->afe_defer_xmts;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = afep->afe_first_collisions;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = afep->afe_multi_collisions;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = afep->afe_tx_late_collisions;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = afep->afe_ex_collisions;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		*val = afep->afe_macxmt_errors;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		*val = afep->afe_carrier_errors;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		*val = afep->afe_toolong_errors;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		*val = afep->afe_macrcv_errors;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = afep->afe_overflow;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = afep->afe_underflow;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = afep->afe_runt;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = afep->afe_jabber;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = afep->afe_cap_100T4;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = afep->afe_cap_100fdx;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = afep->afe_cap_100hdx;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = afep->afe_cap_10fdx;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = afep->afe_cap_10hdx;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = afep->afe_cap_aneg;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = ((afep->afe_adv_aneg != 0) &&
		    (GETMIIBIT(MII_AN_LPABLE, MII_AN_EXP_LPCANAN) != 0));
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = afep->afe_adv_100T4;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = afep->afe_adv_100fdx;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = afep->afe_adv_100hdx;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = afep->afe_adv_10fdx;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = afep->afe_adv_10hdx;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = afep->afe_adv_aneg;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_100BASE_T4);
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_100BASE_TX_FD);
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_100BASE_TX);
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_10BASE_T_FD);
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = GETMIIBIT(MII_AN_LPABLE, MII_ABILITY_10BASE_T);
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = GETMIIBIT(MII_AN_EXPANSION, MII_AN_EXP_LPCANAN);
		break;

	case ETHER_STAT_XCVR_ADDR:
		*val = afep->afe_phyaddr;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = afep->afe_phyid;
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}
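
/*
 * Link property access.  The afe_cap_* fields record what the PHY can
 * do, and the afe_adv_* fields record what we currently advertise.
 * afe_m_getprop() reports the former when MAC_PROP_DEFAULT is requested
 * and the latter otherwise.
 */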
/*ARGSUSED*/
int
afe_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t flags,
    uint_t sz, void *val)
{
	afe_t *afep = arg;
	int err = 0;
	boolean_t dfl = flags & MAC_PROP_DEFAULT;

	if (sz == 0)
		return (EINVAL);

	switch (num) {
	case MAC_PROP_DUPLEX:
		if (sz >= sizeof (link_duplex_t)) {
			bcopy(&afep->afe_duplex, val, sizeof (link_duplex_t));
		} else {
			err = EINVAL;
		}
		break;

	case MAC_PROP_SPEED:
		if (sz >= sizeof (uint64_t)) {
			bcopy(&afep->afe_ifspeed, val, sizeof (uint64_t));
		} else {
			err = EINVAL;
		}
		break;

	case MAC_PROP_AUTONEG:
		*(uint8_t *)val =
		    dfl ? afep->afe_cap_aneg : afep->afe_adv_aneg;
		break;

#if 0
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
		/* We don't support gigabit! */
		*(uint8_t *)val = 0;
		break;
#endif

	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)val =
		    dfl ? afep->afe_cap_100fdx : afep->afe_adv_100fdx;
		break;

	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)val =
		    dfl ? afep->afe_cap_100hdx : afep->afe_adv_100hdx;
		break;

	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)val =
		    dfl ? afep->afe_cap_10fdx : afep->afe_adv_10fdx;
		break;

	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)val =
		    dfl ? afep->afe_cap_10hdx : afep->afe_adv_10hdx;
		break;

	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)val =
		    dfl ? afep->afe_cap_100T4 : afep->afe_adv_100T4;
		break;

	default:
		err = ENOTSUP;
	}

	return (err);
}
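
/*
 * Only the EN_* link capability properties and autonegotiation may be
 * set, and only to values the PHY actually supports (cap != 0).  If the
 * advertised parameters change while the interface is running, the chip
 * is reset, which is destructive to traffic in progress.  Note the lock
 * ordering: afe_intrlock is always taken before afe_xmtlock.
 */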
/*ARGSUSED*/
int
afe_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
    const void *val)
{
	afe_t *afep = arg;
	uint8_t *advp;
	uint8_t *capp;

	switch (num) {
	case MAC_PROP_EN_100FDX_CAP:
		advp = &afep->afe_adv_100fdx;
		capp = &afep->afe_cap_100fdx;
		break;

	case MAC_PROP_EN_100HDX_CAP:
		advp = &afep->afe_adv_100hdx;
		capp = &afep->afe_cap_100hdx;
		break;

	case MAC_PROP_EN_10FDX_CAP:
		advp = &afep->afe_adv_10fdx;
		capp = &afep->afe_cap_10fdx;
		break;

	case MAC_PROP_EN_10HDX_CAP:
		advp = &afep->afe_adv_10hdx;
		capp = &afep->afe_cap_10hdx;
		break;

	case MAC_PROP_EN_100T4_CAP:
		advp = &afep->afe_adv_100T4;
		capp = &afep->afe_cap_100T4;
		break;

	case MAC_PROP_AUTONEG:
		advp = &afep->afe_adv_aneg;
		capp = &afep->afe_cap_aneg;
		break;

	default:
		return (ENOTSUP);
	}

	if (*capp == 0)		/* ensure phy can support value */
		return (ENOTSUP);

	mutex_enter(&afep->afe_intrlock);
	mutex_enter(&afep->afe_xmtlock);

	if (*advp != *(const uint8_t *)val) {
		*advp = *(const uint8_t *)val;

		if ((afep->afe_flags & (AFE_RUNNING|AFE_SUSPENDED)) ==
		    AFE_RUNNING) {
			/*
			 * This re-initializes the phy, but it also
			 * restarts transmit and receive rings.
			 * Needless to say, changing the link
			 * parameters is destructive to traffic in
			 * progress.
			 */
			afe_resetall(afep);
		}
	}
	mutex_exit(&afep->afe_xmtlock);
	mutex_exit(&afep->afe_intrlock);

	return (0);
}

/*
 * Debugging and error reporting.
 */
void
afe_error(dev_info_t *dip, char *fmt, ...)
{
	va_list ap;
	char buf[256];

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	if (dip) {
		cmn_err(CE_WARN, "%s%d: %s",
		    ddi_driver_name(dip), ddi_get_instance(dip), buf);
	} else {
		cmn_err(CE_WARN, "afe: %s", buf);
	}
}

#ifdef DEBUG

void
afe_dprintf(afe_t *afep, const char *func, int level, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (afe_debug & level) {
		char tag[64];
		char buf[256];

		if (afep && afep->afe_dip) {
			(void) snprintf(tag, sizeof (tag), "%s%d",
			    ddi_driver_name(afep->afe_dip),
			    ddi_get_instance(afep->afe_dip));
		} else {
			(void) snprintf(tag, sizeof (tag), "afe");
		}

		(void) snprintf(buf, sizeof (buf), "%s: %s: %s\n",
		    tag, func, fmt);

		vcmn_err(CE_CONT, buf, ap);
	}
	va_end(ap);
}

#endif