/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This driver was derived from the FreeBSD if_msk.c driver, which
 * bears the following copyright attributions and licenses.
 */

/*
 *
 * LICENSE:
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * /LICENSE
 *
 */
/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
92 */ 93 94 #include <sys/varargs.h> 95 #include <sys/types.h> 96 #include <sys/modctl.h> 97 #include <sys/conf.h> 98 #include <sys/devops.h> 99 #include <sys/stream.h> 100 #include <sys/strsun.h> 101 #include <sys/cmn_err.h> 102 #include <sys/ethernet.h> 103 #include <sys/kmem.h> 104 #include <sys/time.h> 105 #include <sys/pci.h> 106 #include <sys/mii.h> 107 #include <sys/miiregs.h> 108 #include <sys/mac.h> 109 #include <sys/mac_ether.h> 110 #include <sys/mac_provider.h> 111 #include <sys/debug.h> 112 #include <sys/note.h> 113 #include <sys/ddi.h> 114 #include <sys/sunddi.h> 115 #include <sys/vlan.h> 116 117 #include "yge.h" 118 119 static struct ddi_device_acc_attr yge_regs_attr = { 120 DDI_DEVICE_ATTR_V0, 121 DDI_STRUCTURE_LE_ACC, 122 DDI_STRICTORDER_ACC 123 }; 124 125 static struct ddi_device_acc_attr yge_ring_attr = { 126 DDI_DEVICE_ATTR_V0, 127 DDI_STRUCTURE_LE_ACC, 128 DDI_STRICTORDER_ACC 129 }; 130 131 static struct ddi_device_acc_attr yge_buf_attr = { 132 DDI_DEVICE_ATTR_V0, 133 DDI_NEVERSWAP_ACC, 134 DDI_STRICTORDER_ACC 135 }; 136 137 #define DESC_ALIGN 0x1000 138 139 static ddi_dma_attr_t yge_ring_dma_attr = { 140 DMA_ATTR_V0, /* dma_attr_version */ 141 0, /* dma_attr_addr_lo */ 142 0x00000000ffffffffull, /* dma_attr_addr_hi */ 143 0x00000000ffffffffull, /* dma_attr_count_max */ 144 DESC_ALIGN, /* dma_attr_align */ 145 0x000007fc, /* dma_attr_burstsizes */ 146 1, /* dma_attr_minxfer */ 147 0x00000000ffffffffull, /* dma_attr_maxxfer */ 148 0x00000000ffffffffull, /* dma_attr_seg */ 149 1, /* dma_attr_sgllen */ 150 1, /* dma_attr_granular */ 151 0 /* dma_attr_flags */ 152 }; 153 154 static ddi_dma_attr_t yge_buf_dma_attr = { 155 DMA_ATTR_V0, /* dma_attr_version */ 156 0, /* dma_attr_addr_lo */ 157 0x00000000ffffffffull, /* dma_attr_addr_hi */ 158 0x00000000ffffffffull, /* dma_attr_count_max */ 159 1, /* dma_attr_align */ 160 0x0000fffc, /* dma_attr_burstsizes */ 161 1, /* dma_attr_minxfer */ 162 0x000000000000ffffull, /* dma_attr_maxxfer */ 163 
0x00000000ffffffffull, /* dma_attr_seg */ 164 8, /* dma_attr_sgllen */ 165 1, /* dma_attr_granular */ 166 0 /* dma_attr_flags */ 167 }; 168 169 170 static int yge_attach(yge_dev_t *); 171 static void yge_detach(yge_dev_t *); 172 static int yge_suspend(yge_dev_t *); 173 static int yge_resume(yge_dev_t *); 174 175 static void yge_reset(yge_dev_t *); 176 static void yge_setup_rambuffer(yge_dev_t *); 177 178 static int yge_init_port(yge_port_t *); 179 static void yge_uninit_port(yge_port_t *); 180 static int yge_register_port(yge_port_t *); 181 static int yge_unregister_port(yge_port_t *); 182 183 static void yge_tick(void *); 184 static uint_t yge_intr(caddr_t, caddr_t); 185 static int yge_intr_gmac(yge_port_t *); 186 static void yge_intr_enable(yge_dev_t *); 187 static void yge_intr_disable(yge_dev_t *); 188 static boolean_t yge_handle_events(yge_dev_t *, mblk_t **, mblk_t **, int *); 189 static void yge_handle_hwerr(yge_port_t *, uint32_t); 190 static void yge_intr_hwerr(yge_dev_t *); 191 static mblk_t *yge_rxeof(yge_port_t *, uint32_t, int); 192 static void yge_txeof(yge_port_t *, int); 193 static boolean_t yge_send(yge_port_t *, mblk_t *); 194 static void yge_set_prefetch(yge_dev_t *, int, yge_ring_t *); 195 static void yge_set_rambuffer(yge_port_t *); 196 static void yge_start_port(yge_port_t *); 197 static void yge_stop_port(yge_port_t *); 198 static void yge_phy_power(yge_dev_t *, boolean_t); 199 static int yge_alloc_ring(yge_port_t *, yge_dev_t *, yge_ring_t *, uint32_t); 200 static void yge_free_ring(yge_ring_t *); 201 static uint8_t yge_find_capability(yge_dev_t *, uint8_t); 202 203 static int yge_txrx_dma_alloc(yge_port_t *); 204 static void yge_txrx_dma_free(yge_port_t *); 205 static void yge_init_rx_ring(yge_port_t *); 206 static void yge_init_tx_ring(yge_port_t *); 207 208 static uint16_t yge_mii_readreg(yge_port_t *, uint8_t, uint8_t); 209 static void yge_mii_writereg(yge_port_t *, uint8_t, uint8_t, uint16_t); 210 211 static uint16_t yge_mii_read(void 
*, uint8_t, uint8_t); 212 static void yge_mii_write(void *, uint8_t, uint8_t, uint16_t); 213 static void yge_mii_notify(void *, link_state_t); 214 215 static void yge_setrxfilt(yge_port_t *); 216 static void yge_restart_task(yge_dev_t *); 217 static void yge_task(void *); 218 static void yge_dispatch(yge_dev_t *, int); 219 220 static void yge_stats_clear(yge_port_t *); 221 static void yge_stats_update(yge_port_t *); 222 static uint32_t yge_hashbit(const uint8_t *); 223 224 static int yge_m_unicst(void *, const uint8_t *); 225 static int yge_m_multicst(void *, boolean_t, const uint8_t *); 226 static int yge_m_promisc(void *, boolean_t); 227 static mblk_t *yge_m_tx(void *, mblk_t *); 228 static int yge_m_stat(void *, uint_t, uint64_t *); 229 static int yge_m_start(void *); 230 static void yge_m_stop(void *); 231 static int yge_m_getprop(void *, const char *, mac_prop_id_t, uint_t, 232 uint_t, void *, uint_t *); 233 static int yge_m_setprop(void *, const char *, mac_prop_id_t, uint_t, 234 const void *); 235 static void yge_m_ioctl(void *, queue_t *, mblk_t *); 236 237 void yge_error(yge_dev_t *, yge_port_t *, char *, ...); 238 extern void yge_phys_update(yge_port_t *); 239 extern int yge_phys_restart(yge_port_t *, boolean_t); 240 extern int yge_phys_init(yge_port_t *, phy_readreg_t, phy_writereg_t); 241 242 static mac_callbacks_t yge_m_callbacks = { 243 MC_IOCTL | MC_SETPROP | MC_GETPROP, 244 yge_m_stat, 245 yge_m_start, 246 yge_m_stop, 247 yge_m_promisc, 248 yge_m_multicst, 249 yge_m_unicst, 250 yge_m_tx, 251 yge_m_ioctl, 252 NULL, /* mc_getcapab */ 253 NULL, /* mc_open */ 254 NULL, /* mc_close */ 255 yge_m_setprop, 256 yge_m_getprop, 257 }; 258 259 static mii_ops_t yge_mii_ops = { 260 MII_OPS_VERSION, 261 yge_mii_read, 262 yge_mii_write, 263 yge_mii_notify, 264 NULL /* reset */ 265 }; 266 267 /* 268 * This is the low level interface routine to read from the PHY 269 * MII registers. There is multiple steps to these accesses. 
First 270 * the register number is written to an address register. Then after 271 * a specified delay status is checked until the data is present. 272 */ 273 static uint16_t 274 yge_mii_readreg(yge_port_t *port, uint8_t phy, uint8_t reg) 275 { 276 yge_dev_t *dev = port->p_dev; 277 int pnum = port->p_port; 278 uint16_t val; 279 280 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL, 281 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 282 283 for (int i = 0; i < YGE_TIMEOUT; i += 10) { 284 drv_usecwait(10); 285 val = GMAC_READ_2(dev, pnum, GM_SMI_CTRL); 286 if ((val & GM_SMI_CT_RD_VAL) != 0) { 287 val = GMAC_READ_2(dev, pnum, GM_SMI_DATA); 288 return (val); 289 } 290 } 291 292 return (0xffff); 293 } 294 295 /* 296 * This is the low level interface routine to write to the PHY 297 * MII registers. There is multiple steps to these accesses. The 298 * data and the target registers address are written to the PHY. 299 * Then the PHY is polled until it is done with the write. Note 300 * that the delays are specified and required! 
301 */ 302 static void 303 yge_mii_writereg(yge_port_t *port, uint8_t phy, uint8_t reg, uint16_t val) 304 { 305 yge_dev_t *dev = port->p_dev; 306 int pnum = port->p_port; 307 308 GMAC_WRITE_2(dev, pnum, GM_SMI_DATA, val); 309 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL, 310 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 311 312 for (int i = 0; i < YGE_TIMEOUT; i += 10) { 313 drv_usecwait(10); 314 if ((GMAC_READ_2(dev, pnum, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0) 315 return; 316 } 317 318 yge_error(NULL, port, "phy write timeout"); 319 } 320 321 static uint16_t 322 yge_mii_read(void *arg, uint8_t phy, uint8_t reg) 323 { 324 yge_port_t *port = arg; 325 uint16_t rv; 326 327 PHY_LOCK(port->p_dev); 328 rv = yge_mii_readreg(port, phy, reg); 329 PHY_UNLOCK(port->p_dev); 330 return (rv); 331 } 332 333 static void 334 yge_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val) 335 { 336 yge_port_t *port = arg; 337 338 PHY_LOCK(port->p_dev); 339 yge_mii_writereg(port, phy, reg, val); 340 PHY_UNLOCK(port->p_dev); 341 } 342 343 /* 344 * The MII common code calls this function to let the MAC driver 345 * know when there has been a change in status. 346 */ 347 void 348 yge_mii_notify(void *arg, link_state_t link) 349 { 350 yge_port_t *port = arg; 351 yge_dev_t *dev = port->p_dev; 352 uint32_t gmac; 353 uint32_t gpcr; 354 link_flowctrl_t fc; 355 link_duplex_t duplex; 356 int speed; 357 358 fc = mii_get_flowctrl(port->p_mii); 359 duplex = mii_get_duplex(port->p_mii); 360 speed = mii_get_speed(port->p_mii); 361 362 DEV_LOCK(dev); 363 364 if (link == LINK_STATE_UP) { 365 366 /* Enable Tx FIFO Underrun. 
*/ 367 CSR_WRITE_1(dev, MR_ADDR(port->p_port, GMAC_IRQ_MSK), 368 GM_IS_TX_FF_UR | /* TX FIFO underflow */ 369 GM_IS_RX_FF_OR); /* RX FIFO overflow */ 370 371 gpcr = GM_GPCR_AU_ALL_DIS; 372 373 switch (fc) { 374 case LINK_FLOWCTRL_BI: 375 gmac = GMC_PAUSE_ON; 376 gpcr &= ~(GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS); 377 break; 378 case LINK_FLOWCTRL_TX: 379 gmac = GMC_PAUSE_ON; 380 gpcr |= GM_GPCR_FC_RX_DIS; 381 break; 382 case LINK_FLOWCTRL_RX: 383 gmac = GMC_PAUSE_ON; 384 gpcr |= GM_GPCR_FC_TX_DIS; 385 break; 386 case LINK_FLOWCTRL_NONE: 387 default: 388 gmac = GMC_PAUSE_OFF; 389 gpcr |= GM_GPCR_FC_RX_DIS; 390 gpcr |= GM_GPCR_FC_TX_DIS; 391 break; 392 } 393 394 gpcr &= ~((GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100)); 395 switch (speed) { 396 case 1000: 397 gpcr |= GM_GPCR_SPEED_1000; 398 break; 399 case 100: 400 gpcr |= GM_GPCR_SPEED_100; 401 break; 402 case 10: 403 default: 404 break; 405 } 406 407 if (duplex == LINK_DUPLEX_FULL) { 408 gpcr |= GM_GPCR_DUP_FULL; 409 } else { 410 gpcr &= ~(GM_GPCR_DUP_FULL); 411 gmac = GMC_PAUSE_OFF; 412 gpcr |= GM_GPCR_FC_RX_DIS; 413 gpcr |= GM_GPCR_FC_TX_DIS; 414 } 415 416 gpcr |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 417 GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr); 418 419 /* Read again to ensure writing. */ 420 (void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL); 421 422 /* write out the flow control gmac setting */ 423 CSR_WRITE_4(dev, MR_ADDR(port->p_port, GMAC_CTRL), gmac); 424 425 } else { 426 /* Disable Rx/Tx MAC. */ 427 gpcr = GMAC_READ_2(dev, port->p_port, GM_GP_CTRL); 428 gpcr &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 429 GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr); 430 431 /* Read again to ensure writing. 
*/ 432 (void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL); 433 } 434 435 DEV_UNLOCK(dev); 436 437 mac_link_update(port->p_mh, link); 438 439 if (port->p_running && (link == LINK_STATE_UP)) { 440 mac_tx_update(port->p_mh); 441 } 442 } 443 444 static void 445 yge_setrxfilt(yge_port_t *port) 446 { 447 yge_dev_t *dev; 448 uint16_t mode; 449 uint8_t *ea; 450 uint32_t *mchash; 451 int pnum; 452 453 dev = port->p_dev; 454 pnum = port->p_port; 455 ea = port->p_curraddr; 456 mchash = port->p_mchash; 457 458 if (dev->d_suspended) 459 return; 460 461 /* Set station address. */ 462 for (int i = 0; i < (ETHERADDRL / 2); i++) { 463 GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_1L + i * 4, 464 ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8))); 465 } 466 for (int i = 0; i < (ETHERADDRL / 2); i++) { 467 GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_2L + i * 4, 468 ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8))); 469 } 470 471 /* Figure out receive filtering mode. */ 472 mode = GMAC_READ_2(dev, pnum, GM_RX_CTRL); 473 if (port->p_promisc) { 474 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 475 } else { 476 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 477 } 478 /* Write the multicast filter. */ 479 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H1, mchash[0] & 0xffff); 480 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff); 481 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H3, mchash[1] & 0xffff); 482 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff); 483 /* Write the receive filtering mode. */ 484 GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, mode); 485 } 486 487 static void 488 yge_init_rx_ring(yge_port_t *port) 489 { 490 yge_buf_t *rxb; 491 yge_ring_t *ring; 492 int prod; 493 494 port->p_rx_cons = 0; 495 port->p_rx_putwm = YGE_PUT_WM; 496 ring = &port->p_rx_ring; 497 498 /* ala bzero, but uses safer acch access */ 499 CLEARRING(ring); 500 501 for (prod = 0; prod < YGE_RX_RING_CNT; prod++) { 502 /* Hang out receive buffers. 
*/ 503 rxb = &port->p_rx_buf[prod]; 504 505 PUTADDR(ring, prod, rxb->b_paddr); 506 PUTCTRL(ring, prod, port->p_framesize | OP_PACKET | HW_OWNER); 507 } 508 509 SYNCRING(ring, DDI_DMA_SYNC_FORDEV); 510 511 yge_set_prefetch(port->p_dev, port->p_rxq, ring); 512 513 /* Update prefetch unit. */ 514 CSR_WRITE_2(port->p_dev, 515 Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG), 516 YGE_RX_RING_CNT - 1); 517 } 518 519 static void 520 yge_init_tx_ring(yge_port_t *port) 521 { 522 yge_ring_t *ring = &port->p_tx_ring; 523 524 port->p_tx_prod = 0; 525 port->p_tx_cons = 0; 526 port->p_tx_cnt = 0; 527 528 CLEARRING(ring); 529 SYNCRING(ring, DDI_DMA_SYNC_FORDEV); 530 531 yge_set_prefetch(port->p_dev, port->p_txq, ring); 532 } 533 534 static void 535 yge_setup_rambuffer(yge_dev_t *dev) 536 { 537 int next; 538 int i; 539 540 /* Get adapter SRAM size. */ 541 dev->d_ramsize = CSR_READ_1(dev, B2_E_0) * 4; 542 if (dev->d_ramsize == 0) 543 return; 544 545 dev->d_pflags |= PORT_FLAG_RAMBUF; 546 /* 547 * Give receiver 2/3 of memory and round down to the multiple 548 * of 1024. Tx/Rx RAM buffer size of Yukon 2 should be multiple 549 * of 1024. 550 */ 551 dev->d_rxqsize = (((dev->d_ramsize * 1024 * 2) / 3) & ~(1024 - 1)); 552 dev->d_txqsize = (dev->d_ramsize * 1024) - dev->d_rxqsize; 553 554 for (i = 0, next = 0; i < dev->d_num_port; i++) { 555 dev->d_rxqstart[i] = next; 556 dev->d_rxqend[i] = next + dev->d_rxqsize - 1; 557 next = dev->d_rxqend[i] + 1; 558 dev->d_txqstart[i] = next; 559 dev->d_txqend[i] = next + dev->d_txqsize - 1; 560 next = dev->d_txqend[i] + 1; 561 } 562 } 563 564 static void 565 yge_phy_power(yge_dev_t *dev, boolean_t powerup) 566 { 567 uint32_t val; 568 int i; 569 570 if (powerup) { 571 /* Switch power to VCC (WA for VAUX problem). */ 572 CSR_WRITE_1(dev, B0_POWER_CTRL, 573 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 574 /* Disable Core Clock Division, set Clock Select to 0. 
*/ 575 CSR_WRITE_4(dev, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 576 577 val = 0; 578 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 579 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 580 /* Enable bits are inverted. */ 581 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 582 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 583 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 584 } 585 /* 586 * Enable PCI & Core Clock, enable clock gating for both Links. 587 */ 588 CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val); 589 590 val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1); 591 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 592 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 593 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 594 /* Deassert Low Power for 1st PHY. */ 595 val |= PCI_Y2_PHY1_COMA; 596 if (dev->d_num_port > 1) 597 val |= PCI_Y2_PHY2_COMA; 598 } 599 600 /* Release PHY from PowerDown/COMA mode. */ 601 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val); 602 603 switch (dev->d_hw_id) { 604 case CHIP_ID_YUKON_EC_U: 605 case CHIP_ID_YUKON_EX: 606 case CHIP_ID_YUKON_FE_P: { 607 uint32_t our; 608 609 CSR_WRITE_2(dev, B0_CTST, Y2_HW_WOL_OFF); 610 611 /* Enable all clocks. */ 612 pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0); 613 614 our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_4); 615 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 616 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 617 /* Set all bits to 0 except bits 15..12. */ 618 pci_config_put32(dev->d_pcih, PCI_OUR_REG_4, our); 619 620 /* Set to default value. 
*/ 621 our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_5); 622 our &= P_CTL_TIM_VMAIN_AV_MSK; 623 pci_config_put32(dev->d_pcih, PCI_OUR_REG_5, our); 624 625 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, 0); 626 627 /* 628 * Enable workaround for dev 4.107 on Yukon-Ultra 629 * and Extreme 630 */ 631 our = CSR_READ_4(dev, B2_GP_IO); 632 our |= GLB_GPIO_STAT_RACE_DIS; 633 CSR_WRITE_4(dev, B2_GP_IO, our); 634 635 (void) CSR_READ_4(dev, B2_GP_IO); 636 break; 637 } 638 default: 639 break; 640 } 641 642 for (i = 0; i < dev->d_num_port; i++) { 643 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL), 644 GMLC_RST_SET); 645 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL), 646 GMLC_RST_CLR); 647 } 648 } else { 649 val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1); 650 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 651 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 652 val &= ~PCI_Y2_PHY1_COMA; 653 if (dev->d_num_port > 1) 654 val &= ~PCI_Y2_PHY2_COMA; 655 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 656 } else { 657 val |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 658 } 659 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val); 660 661 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 662 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 663 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 664 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 665 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 666 /* Enable bits are inverted. */ 667 val = 0; 668 } 669 /* 670 * Disable PCI & Core Clock, disable clock gating for 671 * both Links. 
672 */ 673 CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val); 674 CSR_WRITE_1(dev, B0_POWER_CTRL, 675 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 676 } 677 } 678 679 static void 680 yge_reset(yge_dev_t *dev) 681 { 682 uint64_t addr; 683 uint16_t status; 684 uint32_t val; 685 int i; 686 ddi_acc_handle_t pcih = dev->d_pcih; 687 688 /* Turn off ASF */ 689 if (dev->d_hw_id == CHIP_ID_YUKON_EX) { 690 status = CSR_READ_2(dev, B28_Y2_ASF_STAT_CMD); 691 /* Clear AHB bridge & microcontroller reset */ 692 status &= ~Y2_ASF_CPU_MODE; 693 status &= ~Y2_ASF_AHB_RST; 694 /* Clear ASF microcontroller state */ 695 status &= ~Y2_ASF_STAT_MSK; 696 CSR_WRITE_2(dev, B28_Y2_ASF_STAT_CMD, status); 697 } else { 698 CSR_WRITE_1(dev, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 699 } 700 CSR_WRITE_2(dev, B0_CTST, Y2_ASF_DISABLE); 701 702 /* 703 * Since we disabled ASF, S/W reset is required for Power Management. 704 */ 705 CSR_WRITE_1(dev, B0_CTST, CS_RST_SET); 706 CSR_WRITE_1(dev, B0_CTST, CS_RST_CLR); 707 708 /* Allow writes to PCI config space */ 709 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON); 710 711 /* Clear all error bits in the PCI status register. */ 712 status = pci_config_get16(pcih, PCI_CONF_STAT); 713 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON); 714 715 status |= (PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB | 716 PCI_STAT_R_TARG_AB | PCI_STAT_PERROR); 717 pci_config_put16(pcih, PCI_CONF_STAT, status); 718 719 CSR_WRITE_1(dev, B0_CTST, CS_MRST_CLR); 720 721 switch (dev->d_bustype) { 722 case PEX_BUS: 723 /* Clear all PEX errors. */ 724 CSR_PCI_WRITE_4(dev, Y2_CFG_AER + AER_UNCOR_ERR, 0xffffffff); 725 726 /* is error bit status stuck? */ 727 val = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT); 728 if ((val & PEX_RX_OV) != 0) { 729 dev->d_intrmask &= ~Y2_IS_HW_ERR; 730 dev->d_intrhwemask &= ~Y2_IS_PCI_EXP; 731 } 732 break; 733 case PCI_BUS: 734 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. 
*/ 735 if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0) 736 pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2); 737 break; 738 case PCIX_BUS: 739 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */ 740 if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0) 741 pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2); 742 743 /* Set Cache Line Size opt. */ 744 val = pci_config_get32(pcih, PCI_OUR_REG_1); 745 val |= PCI_CLS_OPT; 746 pci_config_put32(pcih, PCI_OUR_REG_1, val); 747 break; 748 } 749 750 /* Set PHY power state. */ 751 yge_phy_power(dev, B_TRUE); 752 753 /* Reset GPHY/GMAC Control */ 754 for (i = 0; i < dev->d_num_port; i++) { 755 /* GPHY Control reset. */ 756 CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 757 CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 758 /* GMAC Control reset. */ 759 CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 760 CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 761 if (dev->d_hw_id == CHIP_ID_YUKON_EX || 762 dev->d_hw_id == CHIP_ID_YUKON_SUPR) { 763 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), 764 (GMC_BYP_RETR_ON | GMC_BYP_MACSECRX_ON | 765 GMC_BYP_MACSECTX_ON)); 766 } 767 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 768 769 } 770 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 771 772 /* LED On. */ 773 CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_ON); 774 775 /* Clear TWSI IRQ. */ 776 CSR_WRITE_4(dev, B2_I2C_IRQ, I2C_CLR_IRQ); 777 778 /* Turn off hardware timer. */ 779 CSR_WRITE_1(dev, B2_TI_CTRL, TIM_STOP); 780 CSR_WRITE_1(dev, B2_TI_CTRL, TIM_CLR_IRQ); 781 782 /* Turn off descriptor polling. */ 783 CSR_WRITE_1(dev, B28_DPT_CTRL, DPT_STOP); 784 785 /* Turn off time stamps. 
 */
	CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Don't permit config space writing */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* enable TX Arbiters */
	for (i = 0; i < dev->d_num_port; i++)
		CSR_WRITE_1(dev, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB);

	/* Configure timeout values. */
	for (i = 0; i < dev->d_num_port; i++) {
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);

		/* Same RI_TO_53 timeout for every RAM interface queue. */
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), RI_TO_53);
		CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), RI_TO_53);
	}

	/* Disable all interrupts. */
	/* NOTE(review): reads after writes presumably flush posted writes. */
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is an problem where status
	 * can be received out of order due to split transactions.
	 */
	if (dev->d_bustype == PCIX_BUS && dev->d_num_port > 1) {
		int pcix;
		uint16_t pcix_cmd;

		if ((pcix = yge_find_capability(dev, PCI_CAP_ID_PCIX)) != 0) {
			pcix_cmd = pci_config_get16(pcih, pcix + 2);
			/* Clear Max Outstanding Split Transactions. */
			pcix_cmd &= ~0x70;
			/* Config space writes must be bracketed by TST_CFG. */
			CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
			pci_config_put16(pcih, pcix + 2, pcix_cmd);
			CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
		}
	}
	if (dev->d_bustype == PEX_BUS) {
		uint16_t v, width;

		v = pci_config_get16(pcih, PEX_DEV_CTRL);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_config_put16(pcih, PEX_DEV_CTRL, v);
		/* Warn if negotiated PCIe link width is below the maximum. */
		width = pci_config_get16(pcih, PEX_LNK_STAT);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_config_get16(pcih, PEX_LNK_CAP);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width)
			yge_error(dev, NULL,
			    "Negotiated width of PCIe link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	/* Clear status list. */
	CLEARRING(&dev->d_status_ring);
	SYNCRING(&dev->d_status_ring, DDI_DMA_SYNC_FORDEV);

	dev->d_stat_cons = 0;

	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_CLR);

	/* Set the status list base address. */
	addr = dev->d_status_ring.r_paddr;
	CSR_WRITE_4(dev, STAT_LIST_ADDR_LO, YGE_ADDR_LO(addr));
	CSR_WRITE_4(dev, STAT_LIST_ADDR_HI, YGE_ADDR_HI(addr));

	/* Set the status list last index. */
	CSR_WRITE_2(dev, STAT_LAST_IDX, YGE_STAT_RING_CNT - 1);
	CSR_WRITE_2(dev, STAT_PUT_IDX, 0);

	if (dev->d_hw_id == CHIP_ID_YUKON_EC &&
	    dev->d_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev #4.18 */
		CSR_WRITE_1(dev, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 7);
	} else {
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, 10);
		CSR_WRITE_1(dev, STAT_FIFO_WM, 16);

		/* ISR status FIFO watermark */
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 4);
		else
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 16);

		CSR_WRITE_4(dev, STAT_ISR_TIMER_INI, 0x0190);
	}

	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(dev, STAT_TX_TIMER_INI, YGE_USECS(dev, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(dev, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_ISR_TIMER_CTRL, TIM_START);
}

/*
 * Per-port soft state initialization: allocate the Tx/Rx buffer arrays,
 * select the queue register offsets for this port, read the factory MAC
 * address out of the chip, and prepare (but do not yet register) the
 * mac_register_t for the MAC ("Nemo") framework.
 * Returns DDI_SUCCESS or DDI_FAILURE; cleanup of partial allocations is
 * left to yge_uninit_port().
 */
static int
yge_init_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int i;
	mac_register_t *macp;

	port->p_flags = dev->d_pflags;
	/* PPA encodes both instance and port (port B of instance 0 is 100). */
	port->p_ppa = ddi_get_instance(dev->d_dip) + (port->p_port * 100);

	port->p_tx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_TX_RING_CNT,
	    KM_SLEEP);
	port->p_rx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_RX_RING_CNT,
	    KM_SLEEP);

	/* Setup Tx/Rx queue register offsets. */
	if (port->p_port == YGE_PORT_A) {
		port->p_txq = Q_XA1;
		port->p_txsq = Q_XS1;
		port->p_rxq = Q_R1;
	} else {
		port->p_txq = Q_XA2;
		port->p_txsq = Q_XS2;
		port->p_rxq = Q_R2;
	}

	/* Disable jumbo frame for Yukon FE. */
	if (dev->d_hw_id == CHIP_ID_YUKON_FE)
		port->p_flags |= PORT_FLAG_NOJUMBO;

	/*
	 * Start out assuming a regular MTU.  Users can change this
	 * with dladm.
 * The dladm daemon is supposed to issue commands
	 * to change the default MTU using m_setprop during early boot
	 * (before the interface is plumbed) if the user has so
	 * requested.
	 */
	port->p_mtu = ETHERMTU;

	port->p_mii = mii_alloc(port, dev->d_dip, &yge_mii_ops);
	if (port->p_mii == NULL) {
		yge_error(NULL, port, "MII handle allocation failed");
		return (DDI_FAILURE);
	}
	/* We assume all parts support asymmetric pause */
	mii_set_pauseable(port->p_mii, B_TRUE, B_TRUE);

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHERADDRL; i++) {
		/* Each port's address lives 8 bytes apart in B2_MAC_1. */
		port->p_curraddr[i] =
		    CSR_READ_1(dev, B2_MAC_1 + (port->p_port * 8) + i);
	}

	/* Register with Nemo. */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		yge_error(NULL, port, "MAC handle allocation failed");
		return (DDI_FAILURE);
	}
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = port;
	macp->m_dip = dev->d_dip;
	macp->m_src_addr = port->p_curraddr;
	macp->m_callbacks = &yge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = port->p_mtu;
	macp->m_instance = port->p_ppa;
	macp->m_margin = VLAN_TAGSZ;

	port->p_mreg = macp;

	return (DDI_SUCCESS);
}

/*
 * Allocate and register a single interrupt of the given type
 * (MSI-X/MSI/FIXED) and initialize the driver mutexes at the interrupt
 * priority.  Returns DDI_SUCCESS, or DDI_FAILURE after unwinding any
 * partially-acquired interrupt resources.
 */
static int
yge_add_intr(yge_dev_t *dev, int intr_type)
{
	dev_info_t *dip;
	int count;
	int actual;
	int rv;
	int i, j;

	dip = dev->d_dip;

	rv = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((rv != DDI_SUCCESS) || (count == 0)) {
		yge_error(dev, NULL,
		    "ddi_intr_get_nintrs failed, rv %d, count %d", rv, count);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate the interrupt.  Note that we only bother with a single
	 * interrupt.  One could argue that for MSI devices with dual ports,
	 * it would be nice to have a separate interrupt per port.  But right
	 * now I don't know how to configure that, so we'll just settle for
	 * a single interrupt.
	 */
	dev->d_intrcnt = 1;

	/* Handle array is sized for all supported vectors, even if unused. */
	dev->d_intrsize = count * sizeof (ddi_intr_handle_t);
	dev->d_intrh = kmem_zalloc(dev->d_intrsize, KM_SLEEP);
	/* NOTE(review): KM_SLEEP should never return NULL; check is defensive. */
	if (dev->d_intrh == NULL) {
		yge_error(dev, NULL, "Unable to allocate interrupt handle");
		return (DDI_FAILURE);
	}

	rv = ddi_intr_alloc(dip, dev->d_intrh, intr_type, 0, dev->d_intrcnt,
	    &actual, DDI_INTR_ALLOC_STRICT);
	if ((rv != DDI_SUCCESS) || (actual == 0)) {
		yge_error(dev, NULL,
		    "Unable to allocate interrupt, %d, count %d",
		    rv, actual);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* Priority is needed below to initialize the mutexes. */
	if ((rv = ddi_intr_get_pri(dev->d_intrh[0], &dev->d_intrpri)) !=
	    DDI_SUCCESS) {
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_free(dev->d_intrh[i]);
		yge_error(dev, NULL,
		    "Unable to get interrupt priority, %d", rv);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* Capabilities decide block vs. per-vector enable (yge_intr_enable). */
	if ((rv = ddi_intr_get_cap(dev->d_intrh[0], &dev->d_intrcap)) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL,
		    "Unable to get interrupt capabilities, %d", rv);
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_free(dev->d_intrh[i]);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* register interrupt handler to kernel */
	for (i = 0; i < dev->d_intrcnt; i++) {
		if ((rv = ddi_intr_add_handler(dev->d_intrh[i], yge_intr,
		    dev, NULL)) != DDI_SUCCESS) {
			yge_error(dev, NULL,
			    "Unable to add interrupt handler, %d", rv);
			/* Unwind handlers already added, then free them all. */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(dev->d_intrh[j]);
			for (i = 0; i < dev->d_intrcnt; i++)
				(void) ddi_intr_free(dev->d_intrh[i]);
			kmem_free(dev->d_intrh, dev->d_intrsize);
			return (DDI_FAILURE);
		}
	}

	mutex_init(&dev->d_rxlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_phylock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_task_mtx, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));

	return (DDI_SUCCESS);
}

/*
 * Select an interrupt type, preferring MSI-X, then MSI, then FIXED,
 * and attach it via yge_add_intr().  MSI/MSI-X is disabled by default
 * unless the "msi_enable" property says otherwise.
 */
static int
yge_attach_intr(yge_dev_t *dev)
{
	dev_info_t *dip = dev->d_dip;
	int intr_types;
	int rv;

	/* Allocate IRQ resources. */
	rv = ddi_intr_get_supported_types(dip, &intr_types);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, NULL,
		    "Unable to determine supported interrupt types, %d", rv);
		return (DDI_FAILURE);
	}

	/*
	 * We default to not supporting MSI.  We've found some device
	 * and motherboard combinations don't always work well with
	 * MSI interrupts.  Users may override this if they choose.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "msi_enable", 0) == 0) {
		/* If msi disable property present, disable both msix/msi. */
		/* Only mask MSI/MSI-X off if FIXED is available to fall back on. */
		if (intr_types & DDI_INTR_TYPE_FIXED) {
			intr_types &= ~(DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX);
		}
	}

	if (intr_types & DDI_INTR_TYPE_MSIX) {
		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSIX)) ==
		    DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	if (intr_types & DDI_INTR_TYPE_MSI) {
		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSI)) ==
		    DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_FIXED)) ==
		    DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	yge_error(dev, NULL, "Unable to configure any interrupts");
	return (DDI_FAILURE);
}

/*
 * Enable the allocated interrupt(s), using block enable when the
 * capability flags say the vectors must be enabled as a group.
 */
static void
yge_intr_enable(yge_dev_t *dev)
{
	int i;
	if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(dev->d_intrh, dev->d_intrcnt);
	} else {
		/* Call ddi_intr_enable for FIXED interrupts */
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_enable(dev->d_intrh[i]);
	}
}

/*
 * Disable the allocated interrupt(s); mirror image of yge_intr_enable().
 */
void
yge_intr_disable(yge_dev_t *dev)
{
	int i;

	if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(dev->d_intrh, dev->d_intrcnt);
	} else {
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_disable(dev->d_intrh[i]);
	}
}

/*
 * Walk the PCI capability list and return the config space offset of
 * the capability with the given ID, or 0 if the device does not have it.
 */
static uint8_t
yge_find_capability(yge_dev_t *dev, uint8_t cap)
{
	uint8_t ptr;
	uint16_t capit;
	ddi_acc_handle_t pcih = dev->d_pcih;

	/* No capability list at all? */
	if ((pci_config_get16(pcih, PCI_CONF_STAT) & PCI_STAT_CAP) == 0) {
		return (0);
	}
	/* This assumes PCI, and not CardBus. */
	ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
	while (ptr != 0) {
		capit = pci_config_get8(pcih, ptr + PCI_CAP_ID);
		if (capit == cap) {
			return (ptr);
		}
		ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR);
	}
	return (0);
}

/*
 * Main attach path: map config space and registers, power the chip to
 * D0, identify the chip, then bring up rings, interrupts and ports.
 * On any failure this falls through to yge_detach(), which tolerates
 * partially-initialized state.
 */
static int
yge_attach(yge_dev_t *dev)
{
	dev_info_t *dip = dev->d_dip;
	int rv;
	int nattached;
	uint8_t pm_cap;

	if (pci_config_setup(dip, &dev->d_pcih) != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map PCI configuration space");
		goto fail;
	}

	/*
	 * Map control/status registers.
	 */

	/* ensure the pmcsr status is D0 state */
	pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM);
	if (pm_cap != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);


	/* Allocate I/O resource */
	rv = ddi_regs_map_setup(dip, 1, &dev->d_regs, 0, 0, &yge_regs_attr,
	    &dev->d_regsh);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map device registers");
		goto fail;
	}


	/* Enable all clocks. */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	dev->d_hw_id = CSR_READ_1(dev, B2_CHIP_ID);
	dev->d_hw_rev = (CSR_READ_1(dev, B2_MAC_CFG) >> 4) & 0x0f;


	/*
	 * Bail out if chip is not recognized.  Note that we only enforce
	 * this in production builds.  The Ultra-2 (88e8057) has a problem
	 * right now where TX works fine, but RX seems not to.  So we've
	 * disabled that for now.
	 */
	if (dev->d_hw_id < CHIP_ID_YUKON_XL ||
	    dev->d_hw_id >= CHIP_ID_YUKON_UL_2) {
		yge_error(dev, NULL, "Unknown device: id=0x%02x, rev=0x%02x",
		    dev->d_hw_id, dev->d_hw_rev);
#ifndef DEBUG
		goto fail;
#endif
	}

	/* Soft reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	/* PMD type 'L'/'S'/'P' indicates fiber; anything else is copper. */
	dev->d_pmd = CSR_READ_1(dev, B2_PMD_TYP);
	if (dev->d_pmd == 'L' || dev->d_pmd == 'S' || dev->d_pmd == 'P')
		dev->d_coppertype = 0;
	else
		dev->d_coppertype = 1;
	/* Check number of MACs. */
	dev->d_num_port = 1;
	if ((CSR_READ_1(dev, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(dev, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			dev->d_num_port++;
	}

	/* Check bus type.
 */
	if (yge_find_capability(dev, PCI_CAP_ID_PCI_E) != 0) {
		dev->d_bustype = PEX_BUS;
	} else if (yge_find_capability(dev, PCI_CAP_ID_PCIX) != 0) {
		dev->d_bustype = PCIX_BUS;
	} else {
		dev->d_bustype = PCI_BUS;
	}

	/* Core clock rate per chip; feeds YGE_USECS() timer calculations. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_UL_2:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_SUPR:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EC_U:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EX:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_FE:
		dev->d_clock = 100;	/* 100 Mhz */
		break;
	case CHIP_ID_YUKON_FE_P:
		dev->d_clock = 50;	/* 50 Mhz */
		break;
	case CHIP_ID_YUKON_XL:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	default:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	}

	dev->d_process_limit = YGE_RX_RING_CNT/2;

	rv = yge_alloc_ring(NULL, dev, &dev->d_status_ring, YGE_STAT_RING_CNT);
	if (rv != DDI_SUCCESS)
		goto fail;

	/* Setup event taskq. */
	dev->d_task_q = ddi_taskq_create(dip, "tq", 1, TASKQ_DEFAULTPRI, 0);
	if (dev->d_task_q == NULL) {
		yge_error(dev, NULL, "failed to create taskq");
		goto fail;
	}

	/* Init the condition variable */
	cv_init(&dev->d_task_cv, NULL, CV_DRIVER, NULL);

	/* Allocate IRQ resources. */
	if ((rv = yge_attach_intr(dev)) != DDI_SUCCESS) {
		goto fail;
	}

	/* Set base interrupt mask. */
	dev->d_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	dev->d_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	yge_reset(dev);

	yge_setup_rambuffer(dev);

	nattached = 0;
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_init_port(port) != DDI_SUCCESS) {
			goto fail;
		}
	}

	yge_intr_enable(dev);

	/* set up the periodic to run once per second */
	dev->d_periodic = ddi_periodic_add(yge_tick, dev, 1000000000, 0);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_register_port(port) == DDI_SUCCESS) {
			nattached++;
		}
	}

	/* Attach succeeds if at least one port registered with MAC. */
	if (nattached == 0) {
		goto fail;
	}

	/* Dispatch the taskq */
	if (ddi_taskq_dispatch(dev->d_task_q, yge_task, dev, DDI_SLEEP) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL, "failed to start taskq");
		goto fail;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail:
	yge_detach(dev);
	return (DDI_FAILURE);
}

/*
 * Register one port with the MAC layer; stores the handle in p_mh.
 */
static int
yge_register_port(yge_port_t *port)
{
	if (mac_register(port->p_mreg, &port->p_mh) != DDI_SUCCESS) {
		yge_error(NULL, port, "MAC registration failed");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Unregister a port from the MAC layer.  Fails (leaving p_mh intact)
 * if the MAC layer refuses, e.g. while the link is still held open.
 */
static int
yge_unregister_port(yge_port_t *port)
{
	if ((port->p_mh) && (mac_unregister(port->p_mh) != 0)) {
		return (DDI_FAILURE);
	}
	port->p_mh = NULL;
	return (DDI_SUCCESS);
}

/*
 * Free up port specific resources.  This is called only when the
 * port is not registered (and hence not running).
 */
static void
yge_uninit_port(yge_port_t *port)
{
	ASSERT(!port->p_running);

	if (port->p_mreg)
		mac_free(port->p_mreg);

	if (port->p_mii)
		mii_free(port->p_mii);

	yge_txrx_dma_free(port);

	if (port->p_tx_buf)
		kmem_free(port->p_tx_buf,
		    sizeof (yge_buf_t) * YGE_TX_RING_CNT);
	if (port->p_rx_buf)
		kmem_free(port->p_rx_buf,
		    sizeof (yge_buf_t) * YGE_RX_RING_CNT);
}

/*
 * Tear down device state.  Each resource is checked before release so
 * this is safe to call from the yge_attach() failure path with only a
 * partially initialized device.
 */
static void
yge_detach(yge_dev_t *dev)
{
	/*
	 * Turn off the periodic.
	 */
	if (dev->d_periodic)
		ddi_periodic_delete(dev->d_periodic);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_uninit_port(dev->d_port[i]);
	}

	/*
	 * Make sure all interrupts are disabled.
	 */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset.
 */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);

	yge_free_ring(&dev->d_status_ring);

	if (dev->d_task_q != NULL) {
		/* Ask the task thread to exit before destroying its queue. */
		yge_dispatch(dev, YGE_TASK_EXIT);
		ddi_taskq_destroy(dev->d_task_q);
		dev->d_task_q = NULL;
	}

	cv_destroy(&dev->d_task_cv);

	yge_intr_disable(dev);

	/* Mutexes exist only if interrupts were set up (yge_add_intr). */
	if (dev->d_intrh != NULL) {
		for (int i = 0; i < dev->d_intrcnt; i++) {
			(void) ddi_intr_remove_handler(dev->d_intrh[i]);
			(void) ddi_intr_free(dev->d_intrh[i]);
		}
		kmem_free(dev->d_intrh, dev->d_intrsize);
		mutex_destroy(&dev->d_phylock);
		mutex_destroy(&dev->d_txlock);
		mutex_destroy(&dev->d_rxlock);
		mutex_destroy(&dev->d_task_mtx);
	}
	if (dev->d_regsh != NULL)
		ddi_regs_map_free(&dev->d_regsh);

	if (dev->d_pcih != NULL)
		pci_config_teardown(&dev->d_pcih);
}

/*
 * Allocate a DMA descriptor ring of `num` entries: handle, memory and
 * binding.  Either `port` or `dev` may identify the device.  On failure
 * the partially-acquired resources are left in `ring` for yge_free_ring()
 * (which checks each member) to reclaim.
 */
static int
yge_alloc_ring(yge_port_t *port, yge_dev_t *dev, yge_ring_t *ring, uint32_t num)
{
	dev_info_t *dip;
	caddr_t kaddr;
	size_t len;
	int rv;
	ddi_dma_cookie_t dmac;
	unsigned ndmac;

	if (port && !dev)
		dev = port->p_dev;
	dip = dev->d_dip;

	ring->r_num = num;

	rv = ddi_dma_alloc_handle(dip, &yge_ring_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &ring->r_dmah);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, port, "Unable to allocate ring DMA handle");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_mem_alloc(ring->r_dmah, num * sizeof (yge_desc_t),
	    &yge_ring_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &kaddr, &len, &ring->r_acch);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, port, "Unable to allocate ring DMA memory");
		return (DDI_FAILURE);
	}
	ring->r_size = len;
	ring->r_kaddr = (void *)kaddr;

	bzero(kaddr, len);

	rv = ddi_dma_addr_bind_handle(ring->r_dmah, NULL, kaddr,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac);
	if (rv != DDI_DMA_MAPPED) {
		yge_error(dev, port, "Unable to bind ring DMA handle");
		return (DDI_FAILURE);
	}
	/* DMA attributes require a single cookie (contiguous ring). */
	ASSERT(ndmac == 1);
	ring->r_paddr = dmac.dmac_address;

	return (DDI_SUCCESS);
}

/*
 * Release a ring allocated by yge_alloc_ring(); each step is guarded so
 * partially-allocated rings are handled.
 */
static void
yge_free_ring(yge_ring_t *ring)
{
	if (ring->r_paddr)
		(void) ddi_dma_unbind_handle(ring->r_dmah);
	ring->r_paddr = 0;
	if (ring->r_acch)
		ddi_dma_mem_free(&ring->r_acch);
	ring->r_kaddr = NULL;
	ring->r_acch = NULL;
	if (ring->r_dmah)
		ddi_dma_free_handle(&ring->r_dmah);
	ring->r_dmah = NULL;
}

/*
 * Allocate and bind one DMA packet buffer of `bufsz` bytes.  `flag`
 * carries both the sync model (STREAMING/CONSISTENT) and the direction
 * (READ/WRITE).  On failure, partial state is left for yge_free_buf().
 */
static int
yge_alloc_buf(yge_port_t *port, yge_buf_t *b, size_t bufsz, int flag)
{
	yge_dev_t *dev = port->p_dev;
	size_t l;
	int sflag;
	int rv;
	ddi_dma_cookie_t dmac;
	unsigned ndmac;

	/* Only the sync-model bits go to ddi_dma_mem_alloc(). */
	sflag = flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT);

	/* Now allocate Tx buffers. */
	rv = ddi_dma_alloc_handle(dev->d_dip, &yge_buf_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &b->b_dmah);
	if (rv != DDI_SUCCESS) {
		yge_error(NULL, port, "Unable to alloc DMA handle for buffer");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_mem_alloc(b->b_dmah, bufsz, &yge_buf_attr,
	    sflag, DDI_DMA_DONTWAIT, NULL, &b->b_buf, &l, &b->b_acch);
	if (rv != DDI_SUCCESS) {
		yge_error(NULL, port, "Unable to alloc DMA memory for buffer");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_addr_bind_handle(b->b_dmah, NULL, b->b_buf, l, flag,
	    DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
	if (rv != DDI_DMA_MAPPED) {
		yge_error(NULL, port, "Unable to bind DMA handle for buffer");
		return (DDI_FAILURE);
	}
	ASSERT(ndmac == 1);
	b->b_paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}

/*
 * Release one buffer allocated by yge_alloc_buf(); guarded per-step.
 */
static void
yge_free_buf(yge_buf_t *b)
{
	if (b->b_paddr)
		(void) ddi_dma_unbind_handle(b->b_dmah);
	b->b_paddr = 0;
	if (b->b_acch)
		ddi_dma_mem_free(&b->b_acch);
	b->b_buf = NULL;
	b->b_acch = NULL;
	if (b->b_dmah)
		ddi_dma_free_handle(&b->b_dmah);
	b->b_dmah = NULL;
}

/*
 * Allocate the per-port Tx/Rx rings and their packet buffers.  On
 * failure, the caller is expected to clean up via yge_txrx_dma_free().
 */
static int
yge_txrx_dma_alloc(yge_port_t *port)
{
	uint32_t bufsz;
	int rv;
	int i;
	yge_buf_t *b;

	/*
	 * It seems that Yukon II supports full 64 bit DMA operations.
	 * But we limit it to 32 bits only for now.  The 64 bit
	 * operation would require substantially more complex
	 * descriptor handling, since in such a case we would need two
	 * LEs to represent a single physical address.
	 *
	 * If we find that this is limiting us, then we should go back
	 * and re-examine it.
	 */

	/* Note our preferred buffer size. */
	bufsz = port->p_mtu;

	/* Allocate Tx ring. */
	rv = yge_alloc_ring(port, NULL, &port->p_tx_ring, YGE_TX_RING_CNT);
	if (rv != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Now allocate Tx buffers. */
	b = port->p_tx_buf;
	for (i = 0; i < YGE_TX_RING_CNT; i++) {
		rv = yge_alloc_buf(port, b, bufsz,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE);
		if (rv != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		b++;
	}

	/* Allocate Rx ring. */
	rv = yge_alloc_ring(port, NULL, &port->p_rx_ring, YGE_RX_RING_CNT);
	if (rv != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Now allocate Rx buffers. */
	b = port->p_rx_buf;
	for (i = 0; i < YGE_RX_RING_CNT; i++) {
		rv = yge_alloc_buf(port, b, bufsz,
		    DDI_DMA_STREAMING | DDI_DMA_READ);
		if (rv != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		b++;
	}

	return (DDI_SUCCESS);
}

/*
 * Free everything yge_txrx_dma_alloc() acquired (rings and buffers).
 */
static void
yge_txrx_dma_free(yge_port_t *port)
{
	yge_buf_t *b;

	/* Tx ring. */
	yge_free_ring(&port->p_tx_ring);

	/* Rx ring. */
	yge_free_ring(&port->p_rx_ring);

	/* Tx buffers.
 */
	b = port->p_tx_buf;
	for (int i = 0; i < YGE_TX_RING_CNT; i++, b++) {
		yge_free_buf(b);
	}
	/* Rx buffers. */
	b = port->p_rx_buf;
	for (int i = 0; i < YGE_RX_RING_CNT; i++, b++) {
		yge_free_buf(b);
	}
}

/*
 * Copy one message into the Tx ring and hand the descriptor to the
 * hardware.  Returns B_TRUE when the message was consumed (transmitted
 * or dropped as oversize) and B_FALSE when the ring is too full, in
 * which case p_wantw is set so completion can trigger a tx update.
 */
boolean_t
yge_send(yge_port_t *port, mblk_t *mp)
{
	yge_ring_t *ring = &port->p_tx_ring;
	yge_buf_t *txb;
	int16_t prod;
	size_t len;

	/*
	 * For now we're not going to support checksum offload or LSO.
	 */

	len = msgsize(mp);
	if (len > port->p_framesize) {
		/* too big!  Drop it, but tell the caller it was consumed. */
		freemsg(mp);
		return (B_TRUE);
	}

	/* Check number of available descriptors. */
	if (port->p_tx_cnt + 1 >=
	    (YGE_TX_RING_CNT - YGE_RESERVED_TX_DESC_CNT)) {
		port->p_wantw = B_TRUE;
		return (B_FALSE);
	}

	prod = port->p_tx_prod;

	txb = &port->p_tx_buf[prod];
	mcopymsg(mp, txb->b_buf);
	SYNCBUF(txb, DDI_DMA_SYNC_FORDEV);

	PUTADDR(ring, prod, txb->b_paddr);
	PUTCTRL(ring, prod, len | OP_PACKET | HW_OWNER | EOP);
	SYNCENTRY(ring, prod, DDI_DMA_SYNC_FORDEV);
	port->p_tx_cnt++;

	YGE_INC(prod, YGE_TX_RING_CNT);

	/* Update producer index. */
	port->p_tx_prod = prod;

	return (B_TRUE);
}

/*
 * DDI suspend: quiesce MII, stop running ports, mask interrupts,
 * power down the PHY and hold the chip in reset.
 */
static int
yge_suspend(yge_dev_t *dev)
{
	/* mii_suspend() is called outside the device lock. */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		mii_suspend(port->p_mii);
	}


	DEV_LOCK(dev);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			yge_stop_port(port);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	yge_phy_power(dev, B_FALSE);

	/* Put hardware reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	dev->d_suspended = B_TRUE;

	DEV_UNLOCK(dev);

	return (DDI_SUCCESS);
}

/*
 * DDI resume: restore D0 power state, re-enable PCI access and clocks,
 * reset the chip, unmask interrupts and restart ports that were
 * running at suspend time.
 */
static int
yge_resume(yge_dev_t *dev)
{
	uint8_t pm_cap;

	DEV_LOCK(dev);

	/* ensure the pmcsr status is D0 state */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	if ((pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM)) != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);

	/* Enable all clocks. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_FE_P:
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
		break;
	}

	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	yge_reset(dev);

	/* Make sure interrupts are reenabled */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	CSR_WRITE_4(dev, B0_IMSK, Y2_IS_HW_ERR | Y2_IS_STAT_BMU);
	CSR_WRITE_4(dev, B0_HWE_IMSK,
	    Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port != NULL && port->p_running) {
			yge_start_port(port);
		}
	}
	dev->d_suspended = B_FALSE;

	DEV_UNLOCK(dev);

	/* Reset MII layer */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			mii_resume(port->p_mii);
			mac_tx_update(port->p_mh);
		}
	}

	return (DDI_SUCCESS);
}

/*
 * Receive one frame at the current Rx consumer index.  `status` and
 * `len` come from the hardware status LE; the upper 16 bits of status
 * hold the MAC's view of the length.  Returns the received mblk (copied
 * out of the DMA buffer) or NULL on error/allocation failure; the Rx
 * descriptor is always recycled back to the hardware.
 * Caller must hold d_rxlock.
 */
static mblk_t *
yge_rxeof(yge_port_t *port, uint32_t status, int len)
{
	yge_dev_t *dev = port->p_dev;
	mblk_t *mp;
	int cons, rxlen;
	yge_buf_t *rxb;
	yge_ring_t *ring;

	ASSERT(mutex_owned(&dev->d_rxlock));

	if (!port->p_running)
		return (NULL);

	ring = &port->p_rx_ring;
	cons = port->p_rx_cons;
	rxlen = status >> 16;
	rxb = &port->p_rx_buf[cons];
	mp = NULL;


	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/*
		 * Apparently the status for this chip is not reliable.
		 * Only perform minimal consistency checking; the MAC
		 * and upper protocols will have to filter any garbage.
		 */
		if ((len > port->p_framesize) || (rxlen != len)) {
			goto bad;
		}
	} else {
		if ((len > port->p_framesize) || (rxlen != len) ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0)) {
			goto bad;
		}
	}

	if ((mp = allocb(len + YGE_HEADROOM, BPRI_HI)) != NULL) {

		/* good packet - yay */
		mp->b_rptr += YGE_HEADROOM;
		SYNCBUF(rxb, DDI_DMA_SYNC_FORKERNEL);
		bcopy(rxb->b_buf, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
		port->p_stats.rx_nobuf++;
	}

bad:

	/* Recycle the descriptor back to the hardware regardless. */
	PUTCTRL(ring, cons, port->p_framesize | OP_PACKET | HW_OWNER);
	SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);

	CSR_WRITE_2(dev,
	    Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
	    cons);

	YGE_INC(port->p_rx_cons, YGE_RX_RING_CNT);

	return (mp);
}

/*
 * Reclaim completed Tx descriptors up to hardware index `idx`.
 * Returns B_TRUE when progress was made while the ring had been
 * flow-controlled (p_wantw), meaning the caller should schedule a
 * mac_tx_update().  Caller must hold the Tx lock.
 */
static boolean_t
yge_txeof_locked(yge_port_t *port, int idx)
{
	int prog;
	int16_t cons;
	boolean_t resched;

	if (!port->p_running) {
		return (B_FALSE);
	}

	cons = port->p_tx_cons;
	prog = 0;
	for (; cons != idx; YGE_INC(cons, YGE_TX_RING_CNT)) {
		if (port->p_tx_cnt <= 0)
			break;
		prog++;
		port->p_tx_cnt--;
		/* No need to sync LEs as
 we didn't update LEs. */
	}

	port->p_tx_cons = cons;

	if (prog > 0) {
		/* Progress made: clear watchdog; reschedule if tx was blocked. */
		resched = port->p_wantw;
		port->p_tx_wdog = 0;
		port->p_wantw = B_FALSE;
		return (resched);
	} else {
		return (B_FALSE);
	}
}

/*
 * Lock-taking wrapper for yge_txeof_locked(); notifies the MAC layer
 * when blocked transmits can resume.
 */
static void
yge_txeof(yge_port_t *port, int idx)
{
	boolean_t resched;

	TX_LOCK(port->p_dev);

	resched = yge_txeof_locked(port, idx);

	TX_UNLOCK(port->p_dev);

	if (resched && port->p_running) {
		mac_tx_update(port->p_mh);
	}
}

/*
 * Taskq handler: full device recovery — stop running ports, reset the
 * chip, restart the ports and kick MII/MAC afterwards.
 */
static void
yge_restart_task(yge_dev_t *dev)
{
	yge_port_t *port;

	DEV_LOCK(dev);

	/* Cancel pending I/O and free all Rx/Tx buffers. */
	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];
		if (port->p_running)
			yge_stop_port(dev->d_port[i]);
	}
	yge_reset(dev);
	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		if (port->p_running)
			yge_start_port(port);
	}

	DEV_UNLOCK(dev);

	/* MII/MAC calls are made after dropping the device lock. */
	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		mii_reset(port->p_mii);
		if (port->p_running)
			mac_tx_update(port->p_mh);
	}
}

/*
 * Once-per-second periodic (installed by yge_attach): reclaims Tx
 * descriptors that may have missed completion interrupts and detects
 * Tx hangs, dispatching a restart task when one is found.
 */
static void
yge_tick(void *arg)
{
	yge_dev_t *dev = arg;
	yge_port_t *port;
	boolean_t restart = B_FALSE;
	boolean_t resched = B_FALSE;
	int idx;

	DEV_LOCK(dev);

	if (dev->d_suspended) {
		DEV_UNLOCK(dev);
		return;
	}

	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		if (!port->p_running)
			continue;

		if (port->p_tx_cnt) {
			uint32_t ridx;

			/*
			 * Reclaim first as there is a possibility of losing
			 * Tx completion interrupts.
			 */
			ridx = port->p_port == YGE_PORT_A ?
			    STAT_TXA1_RIDX : STAT_TXA2_RIDX;
			idx = CSR_READ_2(dev, ridx);
			if (port->p_tx_cons != idx) {
				resched = yge_txeof_locked(port, idx);

			} else {

				/* detect TX hang */
				port->p_tx_wdog++;
				if (port->p_tx_wdog > YGE_TX_TIMEOUT) {
					port->p_tx_wdog = 0;
					yge_error(NULL, port,
					    "TX hang detected!");
					restart = B_TRUE;
				}
			}
		}
	}

	DEV_UNLOCK(dev);
	if (restart) {
		yge_dispatch(dev, YGE_TASK_RESTART);
	} else {
		if (resched) {
			for (int i = 0; i < dev->d_num_port; i++) {
				port = dev->d_port[i];

				if (port->p_running)
					mac_tx_update(port->p_mh);
			}
		}
	}
}

/*
 * Service GMAC interrupt sources for one port.  Returns a bitmask of
 * YGE_TASK_* work to dispatch (currently only RESTART on Rx overrun).
 */
static int
yge_intr_gmac(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int pnum = port->p_port;
	uint8_t status;
	int dispatch_wrk = 0;

	status = CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0) {
		CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
		yge_error(NULL, port, "Rx FIFO overrun!");
		dispatch_wrk |= YGE_TASK_RESTART;
	}
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
		yge_error(NULL, port, "Tx FIFO underrun!");
		/*
		 * In case of Tx underrun, we may need to flush/reset
		 * Tx MAC but that would also require
		 * resynchronization with status LEs.  Reinitializing
		 * status LEs would affect the other port in dual MAC
		 * configuration so it should be avoided if we can.
		 * Due to lack of documentation it's all vague guess
		 * but it needs more investigation.
		 */
	}
	return (dispatch_wrk);
}

/*
 * Handle per-port hardware-error bits (parity/TSO errors), logging and
 * clearing each source.  `status` holds the port's bits in the low byte
 * positions (caller shifts for port B).
 */
static void
yge_handle_hwerr(yge_port_t *port, uint32_t status)
{
	yge_dev_t *dev = port->p_dev;

	if ((status & Y2_IS_PAR_RD1) != 0) {
		yge_error(NULL, port, "RAM buffer read parity error");
		/* Clear IRQ. */
		CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		yge_error(NULL, port, "RAM buffer write parity error");
		/* Clear IRQ. */
		CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		yge_error(NULL, port, "Tx MAC parity error");
		/* Clear IRQ. */
		CSR_WRITE_4(dev, MR_ADDR(port->p_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		yge_error(NULL, port, "Rx parity error");
		/* Clear IRQ. */
		CSR_WRITE_4(dev, Q_ADDR(port->p_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		yge_error(NULL, port, "TCP segmentation error");
		/* Clear IRQ. */
		CSR_WRITE_4(dev, Q_ADDR(port->p_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

/*
 * Service the device-level hardware error interrupt: timer overflow,
 * PCI/PCI-X aborts, PCI Express uncorrectable errors, and per-port
 * errors (delegated to yge_handle_hwerr()).
 */
static void
yge_intr_hwerr(yge_dev_t *dev)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(dev, B0_HWE_ISRC);
	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * PCI Express Error occurred which is not described in PEX
		 * spec.
		 * This error is also mapped either to Master Abort(
		 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		yge_error(dev, NULL, "PCI Express protocol violation error");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_IRQ_STAT) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Status error");
		if ((status & Y2_IS_MST_ERR) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Master error");
		/* Reset all bits in the PCI status register. */
		v16 = pci_config_get16(dev->d_pcih, PCI_CONF_STAT);
		/* Config writes are gated by TST_CFG_WRITE_ON/OFF. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_config_put16(dev->d_pcih, PCI_CONF_STAT, v16 |
		    PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
		    PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem. After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			yge_error(dev, NULL,
			    "Uncorrectable PCI Express error");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header form Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(dev,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				/*
				 * A genuine fatal/poisoned TLP: mask further
				 * PCIe error interrupts to avoid a storm.
				 */
				dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(dev, B0_HWE_IMSK,
				    dev->d_intrhwemask);
				/* Flush the write. */
				(void) CSR_READ_4(dev, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(dev, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if ((status & Y2_HWE_L1_MASK) != 0 && dev->d_port[YGE_PORT_A] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && dev->d_port[YGE_PORT_B] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_B], status >> 8);
}

/*
 * Drain the shared status ring (status list elements written by the
 * chip).  Received frames are appended per-port to heads[]/tails[];
 * TX completion indices are stored in txindex[0]/txindex[1] (left at
 * -1 if no OP_TXINDEXLE was seen).
 *
 * Returns B_TRUE if there is potentially more work to do.
 */
static boolean_t
yge_handle_events(yge_dev_t *dev, mblk_t **heads, mblk_t **tails, int *txindex)
{
	yge_port_t *port;
	yge_ring_t *ring;
	uint32_t control, status;
	int cons, idx, len, pnum;
	mblk_t *mp;
	uint32_t rxprogs[2];

	rxprogs[0] = rxprogs[1] = 0;

	idx = CSR_READ_2(dev, STAT_PUT_IDX);
	if (idx == dev->d_stat_cons) {
		/* Nothing new in the status ring. */
		return (B_FALSE);
	}

	ring = &dev->d_status_ring;

	for (cons = dev->d_stat_cons; cons != idx; ) {
		/* Sync status LE.
		 */
		SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORKERNEL);
		control = GETCTRL(ring, cons);
		if ((control & HW_OWNER) == 0) {
			/* Entry not yet handed to us by hardware — stop. */
			yge_error(dev, NULL, "Status descriptor error: "
			    "index %d, control %x", cons, control);
			break;
		}

		status = GETSTAT(ring, cons);

		control &= ~HW_OWNER;
		len = control & STLE_LEN_MASK;
		/* Bit 16 of the control word selects the port. */
		pnum = ((control >> 16) & 0x01);
		port = dev->d_port[pnum];
		if (port == NULL) {
			yge_error(dev, NULL, "Invalid port opcode: 0x%08x",
			    control & STLE_OP_MASK);
			goto finish;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXSTAT:
			/* Completed receive: append mblk to per-port chain. */
			mp = yge_rxeof(port, status, len);
			if (mp != NULL) {
				if (heads[pnum] == NULL)
					heads[pnum] = mp;
				else
					tails[pnum]->b_next = mp;
				tails[pnum] = mp;
			}

			rxprogs[pnum]++;
			break;

		case OP_TXINDEXLE:
			/*
			 * TX done indices for both ports are packed into a
			 * single LE: port A in the low status bits, port B
			 * split across status and the length field.
			 */
			txindex[0] = status & STLE_TXA1_MSKL;
			txindex[1] =
			    ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) |
			    ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH);
			break;
		default:
			yge_error(dev, NULL, "Unhandled opcode: 0x%08x",
			    control & STLE_OP_MASK);
			break;
		}
	finish:

		/* Give it back to HW. */
		PUTCTRL(ring, cons, control);
		SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);

		YGE_INC(cons, YGE_STAT_RING_CNT);
		/* Bound the work done per call; caller loops if needed. */
		if (rxprogs[pnum] > dev->d_process_limit) {
			break;
		}
	}

	dev->d_stat_cons = cons;
	if (dev->d_stat_cons != CSR_READ_2(dev, STAT_PUT_IDX))
		return (B_TRUE);
	else
		return (B_FALSE);
}

/*
 * Interrupt service routine.  Claims and services MAC, descriptor and
 * hardware error interrupts, drains the status ring, then delivers
 * received frames and TX completions outside of the RX lock.
 */
/*ARGSUSED1*/
static uint_t
yge_intr(caddr_t arg1, caddr_t arg2)
{
	yge_dev_t *dev;
	yge_port_t *port1;
	yge_port_t *port2;
	uint32_t status;
	mblk_t *heads[2], *tails[2];
	int txindex[2];
	int dispatch_wrk;

	dev = (void *)arg1;

	heads[0] = heads[1] = NULL;
	tails[0] = tails[1] = NULL;
	txindex[0] = txindex[1] = -1;
	dispatch_wrk = 0;

	/*
	 * NOTE(review): port2 is dereferenced unconditionally below;
	 * presumably both port slots are always populated before
	 * interrupts are enabled — confirm against attach code.
	 */
	port1 = dev->d_port[YGE_PORT_A];
	port2 = dev->d_port[YGE_PORT_B];

	RX_LOCK(dev);

	if (dev->d_suspended) {
		RX_UNLOCK(dev);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Get interrupt source. */
	status = CSR_READ_4(dev, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff ||
	    (status & dev->d_intrmask) == 0) { /* Stray interrupt ? */
		/* Reenable interrupts. */
		CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
		RX_UNLOCK(dev);
		return (DDI_INTR_UNCLAIMED);
	}

	if ((status & Y2_IS_HW_ERR) != 0) {
		yge_intr_hwerr(dev);
	}

	if (status & Y2_IS_IRQ_MAC1) {
		dispatch_wrk |= yge_intr_gmac(port1);
	}
	if (status & Y2_IS_IRQ_MAC2) {
		dispatch_wrk |= yge_intr_gmac(port2);
	}

	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		/* Unrecoverable: mask further RX descriptor errors. */
		yge_error(NULL, status & Y2_IS_CHK_RX1 ? port1 : port2,
		    "Rx descriptor error");
		dev->d_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
		(void) CSR_READ_4(dev, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		yge_error(NULL, status & Y2_IS_CHK_TXA1 ?
		    port1 : port2,
		    "Tx descriptor error");
		dev->d_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
		(void) CSR_READ_4(dev, B0_IMSK);
	}

	/* handle events until it returns false */
	while (yge_handle_events(dev, heads, tails, txindex))
		/* NOP */;

	/* Do receive/transmit events */
	if ((status & Y2_IS_STAT_BMU)) {
		CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_CLR_IRQ);
	}

	/* Reenable interrupts. */
	CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);

	RX_UNLOCK(dev);

	if (dispatch_wrk) {
		yge_dispatch(dev, dispatch_wrk);
	}

	/* Deliver upcalls without holding the RX lock. */
	if (port1->p_running) {
		if (txindex[0] >= 0) {
			yge_txeof(port1, txindex[0]);
		}
		if (heads[0])
			mac_rx(port1->p_mh, NULL, heads[0]);
	} else {
		/* Port stopped while we were collecting: drop frames. */
		if (heads[0]) {
			mblk_t *mp;
			while ((mp = heads[0]) != NULL) {
				heads[0] = mp->b_next;
				freemsg(mp);
			}
		}
	}

	if (port2->p_running) {
		if (txindex[1] >= 0) {
			yge_txeof(port2, txindex[1]);
		}
		if (heads[1])
			mac_rx(port2->p_mh, NULL, heads[1]);
	} else {
		if (heads[1]) {
			mblk_t *mp;
			while ((mp = heads[1]) != NULL) {
				heads[1] = mp->b_next;
				freemsg(mp);
			}
		}
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Configure TX store-and-forward / jumbo handling in the TX GMAC FIFO
 * according to chip revision and current MTU.
 */
static void
yge_set_tx_stfwd(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int pnum = port->p_port;

	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EX:
		/* Rev A0 needs the generic (workaround) path below. */
		if (dev->d_hw_rev == CHIP_REV_YU_EX_A0)
			goto yukon_ex_workaround;

		if (port->p_mtu > ETHERMTU)
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_ENA | TX_STFW_ENA);
		else
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_DIS | TX_STFW_ENA);
		break;
	default:
	yukon_ex_workaround:
		if (port->p_mtu > ETHERMTU) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_ENA | TX_STFW_DIS);
		} else {
			/* Enable Store & Forward mode for Tx. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_DIS | TX_STFW_ENA);
		}
		break;
	}
}

/*
 * Bring a port fully up: reset and program the GMAC, MAC FIFOs, RAM
 * buffers, BMUs and descriptor rings, then enable RX/TX and the
 * port's interrupt sources.  Called with the device lock held.
 */
static void
yge_start_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	uint16_t gmac;
	int32_t pnum;
	int32_t rxq;
	int32_t txq;
	uint32_t reg;

	pnum = port->p_port;
	txq = port->p_txq;
	rxq = port->p_rxq;

	/* Frame size is max(MTU, ETHERMTU) plus the VLAN header. */
	if (port->p_mtu < ETHERMTU)
		port->p_framesize = ETHERMTU;
	else
		port->p_framesize = port->p_mtu;
	port->p_framesize += sizeof (struct ether_vlan_header);

	/*
	 * Note for the future, if we enable offloads:
	 * In Yukon EC Ultra, TSO & checksum offload is not
	 * supported for jumbo frame.
	 */

	/* GMAC Control reset */
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_CLR);
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_F_LOOPB_OFF);
	if (dev->d_hw_id == CHIP_ID_YUKON_EX)
		CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);
	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated with the interface is brought up.
	 */
	GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, 0);

	/* Dummy read the Interrupt Source Register. */
	(void) CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));

	/* Clear MIB stats. */
	yge_stats_clear(port);

	/* Disable FCS. */
	GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register.
	 */
	GMAC_WRITE_2(dev, pnum, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (port->p_mtu > ETHERMTU)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(dev, pnum, GM_SERIAL_MODE, gmac);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(dev, pnum, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(dev, pnum, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(dev, pnum, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (dev->d_hw_id == CHIP_ID_YUKON_FE_P ||
	    dev->d_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), reg);

	/* Set receive filter. */
	yge_setrxfilt(port);

	/* Flush Rx MAC FIFO on any flow control or error. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* FE+ magic */
	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0))
		reg = 0x178;

	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Disable hardware VLAN tag insertion/stripping. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
		    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
			    MSK_ECU_LLPP);
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
			    MSK_FEP_ULPP);
		} else {
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
			    MSK_ECU_LLPP);
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
			    MSK_ECU_ULPP);
		}
		/* Configure store-and-forward for TX */
		yge_set_tx_stfwd(port);
	}

	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/* Disable dynamic watermark */
		reg = CSR_READ_4(dev, MR_ADDR(pnum, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_EA), reg);
	}

	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use Sync Tx queue.
	 */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	yge_set_rambuffer(port);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(dev, RB_ADDR(port->p_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(dev, Q_ADDR(txq, Q_WM), MSK_BMU_TX_WM);

	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC_U:
		if (dev->d_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
			CSR_WRITE_2(dev, Q_ADDR(txq, Q_AL), MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme seems to have silicon bug for
		 * automatic Tx checksum calculation capability.
		 */
		if (dev->d_hw_rev == CHIP_REV_YU_EX_B0)
			CSR_WRITE_4(dev, Q_ADDR(txq, Q_F), F_TX_CHK_AUTO_OFF);
		break;
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_FIFO_OP_ON);
	if (dev->d_bustype == PEX_BUS) {
		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), 0x80);
	} else {
		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), MSK_BMU_RX_WM);
	}
	if (dev->d_hw_id == CHIP_ID_YUKON_EC_U &&
	    dev->d_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(dev, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
	}

	yge_init_tx_ring(port);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);

	yge_init_rx_ring(port);

	/* Configure interrupt handling.
	 */
	if (port == dev->d_port[YGE_PORT_A]) {
		dev->d_intrmask |= Y2_IS_PORT_A;
		dev->d_intrhwemask |= Y2_HWE_L1_MASK;
	} else if (port == dev->d_port[YGE_PORT_B]) {
		dev->d_intrmask |= Y2_IS_PORT_B;
		dev->d_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
	(void) CSR_READ_4(dev, B0_IMSK);

	/* Enable RX/TX GMAC */
	gmac = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
	gmac |= (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(port->p_dev, port->p_port, GM_GP_CTRL, gmac);
	/* Read again to ensure writing. */
	(void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);

	/* Reset TX timer */
	port->p_tx_wdog = 0;
}

/*
 * Program the on-chip RAM buffer segments (queue start/end pointers
 * and pause thresholds) for the port's RX and TX queues.  No-op if
 * the chip has no RAM buffer (PORT_FLAG_RAMBUF clear).
 */
static void
yge_set_rambuffer(yge_port_t *port)
{
	yge_dev_t *dev;
	int ltpp, utpp;
	int pnum;
	uint32_t rxq;
	uint32_t txq;

	dev = port->p_dev;
	pnum = port->p_port;
	rxq = port->p_rxq;
	txq = port->p_txq;

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue.  RAM addresses are in units of 8 bytes. */
	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_START), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_END), dev->d_rxqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_WP), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RP), dev->d_rxqstart[pnum] / 8);

	/* Upper/lower flow-control (pause) thresholds. */
	utpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_ULPP) / 8;
	ltpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_LLPP_B) / 8;

	if (dev->d_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (RB_LLPP_B - RB_LLPP_S) / 8;

	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_ENA_OP_MD);
	(void) CSR_READ_1(dev, RB_ADDR(rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_START), dev->d_txqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_END), dev->d_txqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_WP), dev->d_txqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(txq, RB_RP), dev->d_txqstart[pnum] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_OP_MD);
	(void) CSR_READ_1(dev, RB_ADDR(txq, RB_CTRL));
}

/*
 * Initialize a queue's list-element prefetch unit: reset it, point it
 * at the DMA ring, set the last index and switch it on.
 */
static void
yge_set_prefetch(yge_dev_t *dev, int qaddr, yge_ring_t *ring)
{
	/* Reset the prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    YGE_ADDR_LO(ring->r_paddr));
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    YGE_ADDR_HI(ring->r_paddr));
	/* Set the list last index. */
	CSR_WRITE_2(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    ring->r_num - 1);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	(void) CSR_READ_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

/*
 * Quiesce a port: mask its interrupts, disable the MAC, stop and
 * reset the TX/RX BMUs, prefetch units, RAM buffers and MAC FIFOs.
 */
static void
yge_stop_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int pnum = port->p_port;
	uint32_t txq = port->p_txq;
	uint32_t rxq = port->p_rxq;
	uint32_t val;
	int i;

	dev = port->p_dev;

	/*
	 * shutdown timeout
	 */
	port->p_tx_wdog = 0;

	/* Disable interrupts.
	 */
	if (pnum == YGE_PORT_A) {
		dev->d_intrmask &= ~Y2_IS_PORT_A;
		dev->d_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		dev->d_intrmask &= ~Y2_IS_PORT_B;
		dev->d_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
	(void) CSR_READ_4(dev, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, val);
	/* Read again to ensure writing. */
	(void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);

	/* Update stats and clear counters. */
	yge_stats_update(port);

	/* Stop Tx BMU, polling (up to YGE_TIMEOUT usec) until it halts. */
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
	for (i = 0; i < YGE_TIMEOUT; i += 10) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP);
			val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR));
		} else
			break;
		drv_usecwait(10);
	}
	/* This is probably fairly catastrophic. */
	if ((val & (BMU_STOP | BMU_IDLE)) == 0)
		yge_error(NULL, port, "Tx BMU stop failed");

	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupt. */
	CSR_WRITE_1(dev, MR_ADDR(pnum, GMAC_IRQ_MSK), 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue */
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet and since we can't make sure that we have
	 * incoming data, we must reset the BMU while it is not during a DMA
	 * transfer. Since it is possible that the Rx path is still active,
	 * the Rx RAM buffer will be stopped first, so any possible incoming
	 * data will not trigger a DMA. After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress is ended and only then it
	 * will be reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
	/* Wait for read and write pointers to converge (queue drained). */
	for (i = 0; i < YGE_TIMEOUT; i += 10) {
		if (CSR_READ_1(dev, RB_ADDR(rxq, Q_RSL)) ==
		    CSR_READ_1(dev, RB_ADDR(rxq, Q_RL)))
			break;
		drv_usecwait(10);
	}
	/* This is probably nearly a fatal error. */
	if (i == YGE_TIMEOUT)
		yge_error(NULL, port, "Rx BMU stop failed");

	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
}

/*
 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower
 * counter clears high 16 bits of the counter such that accessing
 * lower 16 bits should be the last operation.
 */
#define	YGE_READ_MIB32(x, y) \
	GMAC_READ_4(dev, x, y)

/* A 64-bit MIB counter is two 32-bit halves, low word at offset y. */
#define	YGE_READ_MIB64(x, y) \
	((((uint64_t)YGE_READ_MIB32(x, (y) + 8)) << 32) + \
	(uint64_t)YGE_READ_MIB32(x, y))

/*
 * Zero the port's hardware MIB counters by reading them all with
 * GM_PAR_MIB_CLR (clear-on-read mode) enabled.
 */
static void
yge_stats_clear(yge_port_t *port)
{
	yge_dev_t *dev;
	uint16_t gmac;
	int32_t pnum;

	pnum = port->p_port;
	dev = port->p_dev;

	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (int i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += 4)
		(void) YGE_READ_MIB32(pnum, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
}

/*
 * Fold the port's hardware MIB counters into the software totals in
 * port->p_stats, clearing the hardware counters as they are read.
 * Skipped while the device is suspended or the port is not running.
 */
static void
yge_stats_update(yge_port_t *port)
{
	yge_dev_t *dev;
	struct yge_hw_stats *stats;
	uint16_t gmac;
	int32_t pnum;

	dev = port->p_dev;
	pnum = port->p_port;

	if (dev->d_suspended || !port->p_running) {
		return;
	}
	stats = &port->p_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats.
	 */
	stats->rx_ucast_frames += YGE_READ_MIB32(pnum, GM_RXF_UC_OK);
	stats->rx_bcast_frames += YGE_READ_MIB32(pnum, GM_RXF_BC_OK);
	stats->rx_pause_frames += YGE_READ_MIB32(pnum, GM_RXF_MPAUSE);
	stats->rx_mcast_frames += YGE_READ_MIB32(pnum, GM_RXF_MC_OK);
	stats->rx_crc_errs += YGE_READ_MIB32(pnum, GM_RXF_FCS_ERR);
	/* Spare counters are read (to clear) but discarded. */
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE1);
	stats->rx_good_octets += YGE_READ_MIB64(pnum, GM_RXO_OK_LO);
	stats->rx_bad_octets += YGE_READ_MIB64(pnum, GM_RXO_ERR_LO);
	stats->rx_runts += YGE_READ_MIB32(pnum, GM_RXF_SHT);
	stats->rx_runt_errs += YGE_READ_MIB32(pnum, GM_RXE_FRAG);
	stats->rx_pkts_64 += YGE_READ_MIB32(pnum, GM_RXF_64B);
	stats->rx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_RXF_127B);
	stats->rx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_RXF_255B);
	stats->rx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_RXF_511B);
	stats->rx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_RXF_1518B);
	stats->rx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long += YGE_READ_MIB32(pnum, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers += YGE_READ_MIB32(pnum, GM_RXF_JAB_PKT);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE2);
	stats->rx_fifo_oflows += YGE_READ_MIB32(pnum, GM_RXE_FIFO_OV);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames += YGE_READ_MIB32(pnum, GM_TXF_UC_OK);
	stats->tx_bcast_frames += YGE_READ_MIB32(pnum, GM_TXF_BC_OK);
	stats->tx_pause_frames += YGE_READ_MIB32(pnum, GM_TXF_MPAUSE);
	stats->tx_mcast_frames += YGE_READ_MIB32(pnum, GM_TXF_MC_OK);
	stats->tx_octets += YGE_READ_MIB64(pnum, GM_TXO_OK_LO);
	stats->tx_pkts_64 += YGE_READ_MIB32(pnum, GM_TXF_64B);
	stats->tx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_TXF_127B);
	stats->tx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_TXF_255B);
	stats->tx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_TXF_511B);
	stats->tx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_TXF_1518B);
	stats->tx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_TXF_MAX_SZ);
	(void) YGE_READ_MIB32(pnum, GM_TXF_SPARE1);
	stats->tx_colls += YGE_READ_MIB32(pnum, GM_TXF_COL);
	stats->tx_late_colls += YGE_READ_MIB32(pnum, GM_TXF_LAT_COL);
	stats->tx_excess_colls += YGE_READ_MIB32(pnum, GM_TXF_ABO_COL);
	stats->tx_multi_colls += YGE_READ_MIB32(pnum, GM_TXF_MUL_COL);
	stats->tx_single_colls += YGE_READ_MIB32(pnum, GM_TXF_SNG_COL);
	stats->tx_underflows += YGE_READ_MIB32(pnum, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
}

#undef	YGE_READ_MIB32
#undef	YGE_READ_MIB64

/*
 * Compute the multicast filter hash bit (0-63) for an Ethernet
 * address: the low 6 bits of a big-endian CRC-32 over the 6 bytes.
 */
uint32_t
yge_hashbit(const uint8_t *addr)
{
	int idx;
	int bit;
	uint_t data;
	uint32_t crc;
#define	POLY_BE	0x04c11db7

	crc = 0xffffffff;
	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			crc = (crc << 1)
			    ^ ((((crc >> 31) ^ data) & 1) ? POLY_BE : 0);
		}
	}
#undef	POLY_BE

	return (crc % 64);
}

/*
 * GLDv3 m_stat(9E) entry point: report a single statistic.  MII link
 * statistics are answered by the common MII layer; everything else
 * comes from the software copies of the MIB counters.
 */
int
yge_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	yge_port_t *port = arg;
	struct yge_hw_stats *stats = &port->p_stats;

	if (stat == MAC_STAT_IFSPEED) {
		/*
		 * This is the first stat we are asked about. We update only
		 * for this stat, to avoid paying the hefty cost of the update
		 * once for each stat.
		 */
		DEV_LOCK(port->p_dev);
		yge_stats_update(port);
		DEV_UNLOCK(port->p_dev);
	}

	if (mii_m_getstat(port->p_mii, stat, val) == 0) {
		return (0);
	}

	switch (stat) {
	case MAC_STAT_MULTIRCV:
		*val = stats->rx_mcast_frames;
		break;

	case MAC_STAT_BRDCSTRCV:
		*val = stats->rx_bcast_frames;
		break;

	case MAC_STAT_MULTIXMT:
		*val = stats->tx_mcast_frames;
		break;

	case MAC_STAT_BRDCSTXMT:
		*val = stats->tx_bcast_frames;
		break;

	case MAC_STAT_IPACKETS:
		*val = stats->rx_ucast_frames;
		break;

	case MAC_STAT_RBYTES:
		*val = stats->rx_good_octets;
		break;

	case MAC_STAT_OPACKETS:
		*val = stats->tx_ucast_frames;
		break;

	case MAC_STAT_OBYTES:
		*val = stats->tx_octets;
		break;

	case MAC_STAT_NORCVBUF:
		*val = stats->rx_nobuf;
		break;

	case MAC_STAT_COLLISIONS:
		*val = stats->tx_colls;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		*val = stats->rx_runt_errs;
		break;

	case ETHER_STAT_FCS_ERRORS:
		*val = stats->rx_crc_errs;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		*val = stats->tx_single_colls;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		*val = stats->tx_multi_colls;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		*val = stats->tx_late_colls;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		*val = stats->tx_excess_colls;
		break;

	case
	    ETHER_STAT_TOOLONG_ERRORS:
		*val = stats->rx_pkts_too_long;
		break;

	case MAC_STAT_OVERFLOWS:
		*val = stats->rx_fifo_oflows;
		break;

	case MAC_STAT_UNDERFLOWS:
		*val = stats->tx_underflows;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		*val = stats->rx_runts;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		*val = stats->rx_pkts_jabbers;
		break;

	default:
		return (ENOTSUP);
	}
	return (0);
}

/*
 * GLDv3 m_start(9E) entry point: allocate DMA resources, program the
 * hardware (unless suspended), mark the port running and kick off the
 * MII link state machine.  Returns 0 or ENOMEM.
 */
int
yge_m_start(void *arg)
{
	yge_port_t *port = arg;

	DEV_LOCK(port->p_dev);

	/*
	 * We defer resource allocation to this point, because we
	 * don't want to waste DMA resources that might better be used
	 * elsewhere, if the port is not actually being used.
	 *
	 * Furthermore, this gives us a more graceful handling of dynamic
	 * MTU modification.
	 */
	if (yge_txrx_dma_alloc(port) != DDI_SUCCESS) {
		/* Make sure we free up partially allocated resources. */
		yge_txrx_dma_free(port);
		DEV_UNLOCK(port->p_dev);
		return (ENOMEM);
	}

	if (!port->p_dev->d_suspended)
		yge_start_port(port);
	port->p_running = B_TRUE;
	DEV_UNLOCK(port->p_dev);

	mii_start(port->p_mii);

	return (0);
}

/*
 * GLDv3 m_stop(9E) entry point: quiesce the hardware, mark the port
 * stopped and release its DMA resources.
 */
void
yge_m_stop(void *arg)
{
	yge_port_t *port = arg;
	yge_dev_t *dev = port->p_dev;

	DEV_LOCK(dev);
	if (!dev->d_suspended)
		yge_stop_port(port);

	port->p_running = B_FALSE;

	/* Release resources we don't need */
	yge_txrx_dma_free(port);
	DEV_UNLOCK(dev);
}

/*
 * GLDv3 m_setpromisc(9E) entry point: record the requested promiscuous
 * state and reprogram the receive filter.
 */
int
yge_m_promisc(void *arg, boolean_t on)
{
	yge_port_t *port = arg;

	DEV_LOCK(port->p_dev);

	/* Save current promiscuous mode.
*/ 3118 port->p_promisc = on; 3119 yge_setrxfilt(port); 3120 3121 DEV_UNLOCK(port->p_dev); 3122 3123 return (0); 3124 } 3125 3126 int 3127 yge_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 3128 { 3129 yge_port_t *port = arg; 3130 int bit; 3131 boolean_t update; 3132 3133 bit = yge_hashbit(addr); 3134 ASSERT(bit < 64); 3135 3136 DEV_LOCK(port->p_dev); 3137 if (add) { 3138 if (port->p_mccount[bit] == 0) { 3139 /* Set the corresponding bit in the hash table. */ 3140 port->p_mchash[bit / 32] |= (1 << (bit % 32)); 3141 update = B_TRUE; 3142 } 3143 port->p_mccount[bit]++; 3144 } else { 3145 ASSERT(port->p_mccount[bit] > 0); 3146 port->p_mccount[bit]--; 3147 if (port->p_mccount[bit] == 0) { 3148 port->p_mchash[bit / 32] &= ~(1 << (bit % 32)); 3149 update = B_TRUE; 3150 } 3151 } 3152 3153 if (update) { 3154 yge_setrxfilt(port); 3155 } 3156 DEV_UNLOCK(port->p_dev); 3157 return (0); 3158 } 3159 3160 int 3161 yge_m_unicst(void *arg, const uint8_t *macaddr) 3162 { 3163 yge_port_t *port = arg; 3164 3165 DEV_LOCK(port->p_dev); 3166 3167 bcopy(macaddr, port->p_curraddr, ETHERADDRL); 3168 yge_setrxfilt(port); 3169 3170 DEV_UNLOCK(port->p_dev); 3171 3172 return (0); 3173 } 3174 3175 mblk_t * 3176 yge_m_tx(void *arg, mblk_t *mp) 3177 { 3178 yge_port_t *port = arg; 3179 mblk_t *nmp; 3180 int enq = 0; 3181 uint32_t ridx; 3182 int idx; 3183 boolean_t resched = B_FALSE; 3184 3185 TX_LOCK(port->p_dev); 3186 3187 if (port->p_dev->d_suspended) { 3188 3189 TX_UNLOCK(port->p_dev); 3190 3191 while ((nmp = mp) != NULL) { 3192 /* carrier_errors++; */ 3193 mp = mp->b_next; 3194 freemsg(nmp); 3195 } 3196 return (NULL); 3197 } 3198 3199 /* attempt a reclaim */ 3200 ridx = port->p_port == YGE_PORT_A ? 
3201 STAT_TXA1_RIDX : STAT_TXA2_RIDX; 3202 idx = CSR_READ_2(port->p_dev, ridx); 3203 if (port->p_tx_cons != idx) 3204 resched = yge_txeof_locked(port, idx); 3205 3206 while (mp != NULL) { 3207 nmp = mp->b_next; 3208 mp->b_next = NULL; 3209 3210 if (!yge_send(port, mp)) { 3211 mp->b_next = nmp; 3212 break; 3213 } 3214 enq++; 3215 mp = nmp; 3216 3217 } 3218 if (enq > 0) { 3219 /* Transmit */ 3220 CSR_WRITE_2(port->p_dev, 3221 Y2_PREF_Q_ADDR(port->p_txq, PREF_UNIT_PUT_IDX_REG), 3222 port->p_tx_prod); 3223 } 3224 3225 TX_UNLOCK(port->p_dev); 3226 3227 if (resched) 3228 mac_tx_update(port->p_mh); 3229 3230 return (mp); 3231 } 3232 3233 void 3234 yge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3235 { 3236 #ifdef YGE_MII_LOOPBACK 3237 /* LINTED E_FUNC_SET_NOT_USED */ 3238 yge_port_t *port = arg; 3239 3240 /* 3241 * Right now, the MII common layer does not properly handle 3242 * loopback on these PHYs. Fixing this should be done at some 3243 * point in the future. 3244 */ 3245 if (mii_m_loop_ioctl(port->p_mii, wq, mp)) 3246 return; 3247 #else 3248 _NOTE(ARGUNUSED(arg)); 3249 #endif 3250 3251 miocnak(wq, mp, 0, EINVAL); 3252 } 3253 3254 int 3255 yge_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3256 uint_t pr_valsize, const void *pr_val) 3257 { 3258 yge_port_t *port = arg; 3259 uint32_t new_mtu; 3260 int err = 0; 3261 3262 err = mii_m_setprop(port->p_mii, pr_name, pr_num, pr_valsize, pr_val); 3263 if (err != ENOTSUP) { 3264 return (err); 3265 } 3266 3267 DEV_LOCK(port->p_dev); 3268 3269 switch (pr_num) { 3270 case MAC_PROP_MTU: 3271 if (pr_valsize < sizeof (new_mtu)) { 3272 err = EINVAL; 3273 break; 3274 } 3275 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3276 if (new_mtu == port->p_mtu) { 3277 /* no change */ 3278 err = 0; 3279 break; 3280 } 3281 if (new_mtu < ETHERMTU) { 3282 yge_error(NULL, port, 3283 "Maximum MTU size too small: %d", new_mtu); 3284 err = EINVAL; 3285 break; 3286 } 3287 if (new_mtu > (port->p_flags & PORT_FLAG_NOJUMBO ? 
3288 ETHERMTU : YGE_JUMBO_MTU)) { 3289 yge_error(NULL, port, 3290 "Maximum MTU size too big: %d", new_mtu); 3291 err = EINVAL; 3292 break; 3293 } 3294 if (port->p_running) { 3295 yge_error(NULL, port, 3296 "Unable to change maximum MTU while running"); 3297 err = EBUSY; 3298 break; 3299 } 3300 3301 3302 /* 3303 * NB: It would probably be better not to hold the 3304 * DEVLOCK, but releasing it creates a potential race 3305 * if m_start is called concurrently. 3306 * 3307 * It turns out that the MAC layer guarantees safety 3308 * for us here by using a cut out for this kind of 3309 * notification call back anyway. 3310 * 3311 * See R8. and R14. in mac.c locking comments, which read 3312 * as follows: 3313 * 3314 * R8. Since it is not guaranteed (see R14) that 3315 * drivers won't hold locks across mac driver 3316 * interfaces, the MAC layer must provide a cut out 3317 * for control interfaces like upcall notifications 3318 * and start them in a separate thread. 3319 * 3320 * R14. It would be preferable if MAC drivers don't 3321 * hold any locks across any mac call. However at a 3322 * minimum they must not hold any locks across data 3323 * upcalls. They must also make sure that all 3324 * references to mac data structures are cleaned up 3325 * and that it is single threaded at mac_unregister 3326 * time. 3327 */ 3328 err = mac_maxsdu_update(port->p_mh, new_mtu); 3329 if (err != 0) { 3330 /* This should never occur! 
*/ 3331 yge_error(NULL, port, 3332 "Failed notifying GLDv3 of new maximum MTU"); 3333 } else { 3334 port->p_mtu = new_mtu; 3335 } 3336 break; 3337 3338 default: 3339 err = ENOTSUP; 3340 break; 3341 } 3342 3343 err: 3344 DEV_UNLOCK(port->p_dev); 3345 3346 return (err); 3347 } 3348 3349 int 3350 yge_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3351 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 3352 { 3353 yge_port_t *port = arg; 3354 mac_propval_range_t range; 3355 int err; 3356 3357 err = mii_m_getprop(port->p_mii, pr_name, pr_num, pr_flags, 3358 pr_valsize, pr_val, perm); 3359 if (err != ENOTSUP) { 3360 return (err); 3361 } 3362 3363 if (pr_valsize == 0) 3364 return (EINVAL); 3365 3366 bzero(pr_val, pr_valsize); 3367 *perm = MAC_PROP_PERM_RW; 3368 3369 switch (pr_num) { 3370 case MAC_PROP_MTU: 3371 if (!(pr_flags & MAC_PROP_POSSIBLE)) { 3372 err = ENOTSUP; 3373 break; 3374 } 3375 if (pr_valsize < sizeof (mac_propval_range_t)) 3376 return (EINVAL); 3377 range.mpr_count = 1; 3378 range.mpr_type = MAC_PROPVAL_UINT32; 3379 range.range_uint32[0].mpur_min = ETHERMTU; 3380 range.range_uint32[0].mpur_max = 3381 port->p_flags & PORT_FLAG_NOJUMBO ? 3382 ETHERMTU : YGE_JUMBO_MTU; 3383 bcopy(&range, pr_val, sizeof (range)); 3384 err = 0; 3385 break; 3386 3387 default: 3388 err = ENOTSUP; 3389 break; 3390 } 3391 return (err); 3392 } 3393 3394 void 3395 yge_dispatch(yge_dev_t *dev, int flag) 3396 { 3397 TASK_LOCK(dev); 3398 dev->d_task_flags |= flag; 3399 TASK_SIGNAL(dev); 3400 TASK_UNLOCK(dev); 3401 } 3402 3403 void 3404 yge_task(void *arg) 3405 { 3406 yge_dev_t *dev = arg; 3407 int flags; 3408 3409 for (;;) { 3410 3411 TASK_LOCK(dev); 3412 while ((flags = dev->d_task_flags) == 0) 3413 TASK_WAIT(dev); 3414 3415 dev->d_task_flags = 0; 3416 TASK_UNLOCK(dev); 3417 3418 /* 3419 * This should be the first thing after the sleep so if we are 3420 * requested to exit we do that and not waste time doing work 3421 * we will then abandone. 
3422 */ 3423 if (flags & YGE_TASK_EXIT) 3424 break; 3425 3426 /* all processing done without holding locks */ 3427 if (flags & YGE_TASK_RESTART) 3428 yge_restart_task(dev); 3429 } 3430 } 3431 3432 void 3433 yge_error(yge_dev_t *dev, yge_port_t *port, char *fmt, ...) 3434 { 3435 va_list ap; 3436 char buf[256]; 3437 dev_info_t *dip; 3438 3439 va_start(ap, fmt); 3440 (void) vsnprintf(buf, sizeof (buf), fmt, ap); 3441 va_end(ap); 3442 3443 if (dev == NULL) 3444 dev = port->p_dev; 3445 dip = dev->d_dip; 3446 cmn_err(CE_WARN, "%s%d: %s", 3447 ddi_driver_name(dip), 3448 ddi_get_instance(dip) + port ? port->p_ppa : 0, 3449 buf); 3450 } 3451 3452 static int 3453 yge_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3454 { 3455 yge_dev_t *dev; 3456 int rv; 3457 3458 switch (cmd) { 3459 case DDI_ATTACH: 3460 dev = kmem_zalloc(sizeof (*dev), KM_SLEEP); 3461 dev->d_port[0] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP); 3462 dev->d_port[1] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP); 3463 dev->d_dip = dip; 3464 ddi_set_driver_private(dip, dev); 3465 3466 dev->d_port[0]->p_port = 0; 3467 dev->d_port[0]->p_dev = dev; 3468 dev->d_port[1]->p_port = 0; 3469 dev->d_port[1]->p_dev = dev; 3470 3471 rv = yge_attach(dev); 3472 if (rv != DDI_SUCCESS) { 3473 ddi_set_driver_private(dip, 0); 3474 kmem_free(dev->d_port[1], sizeof (yge_port_t)); 3475 kmem_free(dev->d_port[0], sizeof (yge_port_t)); 3476 kmem_free(dev, sizeof (*dev)); 3477 } 3478 return (rv); 3479 3480 case DDI_RESUME: 3481 dev = ddi_get_driver_private(dip); 3482 ASSERT(dev != NULL); 3483 return (yge_resume(dev)); 3484 3485 default: 3486 return (DDI_FAILURE); 3487 } 3488 } 3489 3490 static int 3491 yge_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 3492 { 3493 yge_dev_t *dev; 3494 int rv; 3495 3496 switch (cmd) { 3497 case DDI_DETACH: 3498 3499 dev = ddi_get_driver_private(dip); 3500 3501 /* attempt to unregister MACs from Nemo */ 3502 for (int i = 0; i < dev->d_num_port; i++) { 3503 rv = yge_unregister_port(dev->d_port[i]); 
3504 if (rv != DDI_SUCCESS) { 3505 return (DDI_FAILURE); 3506 } 3507 } 3508 3509 ASSERT(dip == dev->d_dip); 3510 yge_detach(dev); 3511 ddi_set_driver_private(dip, 0); 3512 kmem_free(dev->d_port[1], sizeof (yge_port_t)); 3513 kmem_free(dev->d_port[0], sizeof (yge_port_t)); 3514 kmem_free(dev, sizeof (*dev)); 3515 return (DDI_SUCCESS); 3516 3517 case DDI_SUSPEND: 3518 dev = ddi_get_driver_private(dip); 3519 ASSERT(dev != NULL); 3520 return (yge_suspend(dev)); 3521 3522 default: 3523 return (DDI_FAILURE); 3524 } 3525 } 3526 3527 static int 3528 yge_quiesce(dev_info_t *dip) 3529 { 3530 yge_dev_t *dev; 3531 3532 dev = ddi_get_driver_private(dip); 3533 ASSERT(dev != NULL); 3534 3535 /* NB: No locking! We are called in single threaded context */ 3536 for (int i = 0; i < dev->d_num_port; i++) { 3537 yge_port_t *port = dev->d_port[i]; 3538 if (port->p_running) 3539 yge_stop_port(port); 3540 } 3541 3542 /* Disable all interrupts. */ 3543 CSR_WRITE_4(dev, B0_IMSK, 0); 3544 (void) CSR_READ_4(dev, B0_IMSK); 3545 CSR_WRITE_4(dev, B0_HWE_IMSK, 0); 3546 (void) CSR_READ_4(dev, B0_HWE_IMSK); 3547 3548 /* Put hardware into reset. */ 3549 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET); 3550 3551 return (DDI_SUCCESS); 3552 } 3553 3554 /* 3555 * Stream information 3556 */ 3557 DDI_DEFINE_STREAM_OPS(yge_devops, nulldev, nulldev, yge_ddi_attach, 3558 yge_ddi_detach, nodev, NULL, D_MP, NULL, yge_quiesce); 3559 3560 /* 3561 * Module linkage information. 3562 */ 3563 3564 static struct modldrv yge_modldrv = { 3565 &mod_driverops, /* drv_modops */ 3566 "Yukon 2 Ethernet", /* drv_linkinfo */ 3567 &yge_devops /* drv_dev_ops */ 3568 }; 3569 3570 static struct modlinkage yge_modlinkage = { 3571 MODREV_1, /* ml_rev */ 3572 &yge_modldrv, /* ml_linkage */ 3573 NULL 3574 }; 3575 3576 /* 3577 * DDI entry points. 
3578 */ 3579 int 3580 _init(void) 3581 { 3582 int rv; 3583 mac_init_ops(&yge_devops, "yge"); 3584 if ((rv = mod_install(&yge_modlinkage)) != DDI_SUCCESS) { 3585 mac_fini_ops(&yge_devops); 3586 } 3587 return (rv); 3588 } 3589 3590 int 3591 _fini(void) 3592 { 3593 int rv; 3594 if ((rv = mod_remove(&yge_modlinkage)) == DDI_SUCCESS) { 3595 mac_fini_ops(&yge_devops); 3596 } 3597 return (rv); 3598 } 3599 3600 int 3601 _info(struct modinfo *modinfop) 3602 { 3603 return (mod_info(&yge_modlinkage, modinfop)); 3604 } 3605