1 /* 2 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 3 * Use is subject to license terms. 4 */ 5 6 /* 7 * This driver was derived from the FreeBSD if_msk.c driver, which 8 * bears the following copyright attributions and licenses. 9 */ 10 11 /* 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 */ 47 /* 48 * Copyright (c) 1997, 1998, 1999, 2000 49 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 50 * 51 * Redistribution and use in source and binary forms, with or without 52 * modification, are permitted provided that the following conditions 53 * are met: 54 * 1. Redistributions of source code must retain the above copyright 55 * notice, this list of conditions and the following disclaimer. 56 * 2. Redistributions in binary form must reproduce the above copyright 57 * notice, this list of conditions and the following disclaimer in the 58 * documentation and/or other materials provided with the distribution. 59 * 3. All advertising materials mentioning features or use of this software 60 * must display the following acknowledgement: 61 * This product includes software developed by Bill Paul. 62 * 4. Neither the name of the author nor the names of any co-contributors 63 * may be used to endorse or promote products derived from this software 64 * without specific prior written permission. 65 * 66 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 69 * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 70 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 71 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 72 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 73 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 74 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 75 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 76 * THE POSSIBILITY OF SUCH DAMAGE. 77 */ 78 /* 79 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 80 * 81 * Permission to use, copy, modify, and distribute this software for any 82 * purpose with or without fee is hereby granted, provided that the above 83 * copyright notice and this permission notice appear in all copies. 84 * 85 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 86 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 87 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 88 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 89 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 90 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 91 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

#include <sys/varargs.h>
#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/cmn_err.h>
#include <sys/ethernet.h>
#include <sys/kmem.h>
#include <sys/time.h>
#include <sys/pci.h>
#include <sys/mii.h>
#include <sys/miiregs.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <sys/mac_provider.h>
#include <sys/debug.h>
#include <sys/note.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/vlan.h>

#include "yge.h"

/*
 * Access attributes for device registers: little-endian structural
 * access, strictly ordered, with DDI error reporting enabled.
 */
static struct ddi_device_acc_attr yge_regs_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Access attributes for descriptor rings (little-endian, as the
 * Yukon 2 hardware expects).
 */
static struct ddi_device_acc_attr yge_ring_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Access attributes for packet buffers (raw bytes, never swapped). */
static struct ddi_device_acc_attr yge_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

/* Descriptor rings must be aligned on a 4 KB boundary. */
#define	DESC_ALIGN	0x1000

/*
 * DMA attributes for descriptor rings: 32-bit addressable, a single
 * contiguous cookie, 4 KB aligned.
 */
static ddi_dma_attr_t yge_ring_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x00000000ffffffffull,	/* dma_attr_addr_hi */
	0x00000000ffffffffull,	/* dma_attr_count_max */
	DESC_ALIGN,		/* dma_attr_align */
	0x000007fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000000ffffffffull,	/* dma_attr_maxxfer */
	0x00000000ffffffffull,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * DMA attributes for packet buffers: 32-bit addressable, up to 8
 * scatter/gather segments, 64 KB maximum transfer.
 */
static ddi_dma_attr_t yge_buf_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0x00000000ffffffffull,	/* dma_attr_addr_hi */
	0x00000000ffffffffull,	/* dma_attr_count_max */
	1,			/* dma_attr_align */
	0x0000fffc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x000000000000ffffull,	/* dma_attr_maxxfer */
	0x00000000ffffffffull,	/* dma_attr_seg */
	8,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};


/* Driver lifecycle (attach/detach/power management). */
static int yge_attach(yge_dev_t *);
static void yge_detach(yge_dev_t *);
static int yge_suspend(yge_dev_t *);
static int yge_resume(yge_dev_t *);

static void yge_reset(yge_dev_t *);
static void yge_setup_rambuffer(yge_dev_t *);

/* Per-port setup and teardown. */
static int yge_init_port(yge_port_t *);
static void yge_uninit_port(yge_port_t *);
static int yge_register_port(yge_port_t *);
static int yge_unregister_port(yge_port_t *);

/* Interrupt and event handling. */
static void yge_tick(void *);
static uint_t yge_intr(caddr_t, caddr_t);
static int yge_intr_gmac(yge_port_t *);
static void yge_intr_enable(yge_dev_t *);
static void yge_intr_disable(yge_dev_t *);
static boolean_t yge_handle_events(yge_dev_t *, mblk_t **, mblk_t **, int *);
static void yge_handle_hwerr(yge_port_t *, uint32_t);
static void yge_intr_hwerr(yge_dev_t *);
static mblk_t *yge_rxeof(yge_port_t *, uint32_t, int);
static void yge_txeof(yge_port_t *, int);
static boolean_t yge_send(yge_port_t *, mblk_t *);
static void yge_set_prefetch(yge_dev_t *, int, yge_ring_t *);
static void yge_set_rambuffer(yge_port_t *);
static void yge_start_port(yge_port_t *);
static void yge_stop_port(yge_port_t *);
static void yge_phy_power(yge_dev_t *, boolean_t);
static int yge_alloc_ring(yge_port_t *, yge_dev_t *, yge_ring_t *, uint32_t);
static void yge_free_ring(yge_ring_t *);
static uint8_t yge_find_capability(yge_dev_t *, uint8_t);

/* DMA and descriptor ring management. */
static int yge_txrx_dma_alloc(yge_port_t *);
static void yge_txrx_dma_free(yge_port_t *);
static void yge_init_rx_ring(yge_port_t *);
static void yge_init_tx_ring(yge_port_t *);

/* Low-level PHY register access. */
static uint16_t yge_mii_readreg(yge_port_t *, uint8_t, uint8_t);
static void yge_mii_writereg(yge_port_t *, uint8_t, uint8_t, uint16_t);

/* MII framework entry points. */
static uint16_t yge_mii_read(void *, uint8_t, uint8_t);
static void yge_mii_write(void *, uint8_t, uint8_t, uint16_t);
static void yge_mii_notify(void *, link_state_t);

static void yge_setrxfilt(yge_port_t *);
static void yge_restart_task(yge_dev_t *);
static void yge_task(void *);
static void yge_dispatch(yge_dev_t *, int);

static void yge_stats_clear(yge_port_t *);
static void yge_stats_update(yge_port_t *);
static uint32_t yge_hashbit(const uint8_t *);

/* GLDv3 (mac) entry points. */
static int yge_m_unicst(void *, const uint8_t *);
static int yge_m_multicst(void *, boolean_t, const uint8_t *);
static int yge_m_promisc(void *, boolean_t);
static mblk_t *yge_m_tx(void *, mblk_t *);
static int yge_m_stat(void *, uint_t, uint64_t *);
static int yge_m_start(void *);
static void yge_m_stop(void *);
static int yge_m_getprop(void *, const char *, mac_prop_id_t, uint_t,
    uint_t, void *, uint_t *);
static int yge_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
    const void *);
static void yge_m_ioctl(void *, queue_t *, mblk_t *);

void yge_error(yge_dev_t *, yge_port_t *, char *, ...);
extern void yge_phys_update(yge_port_t *);
extern int yge_phys_restart(yge_port_t *, boolean_t);
extern int yge_phys_init(yge_port_t *, phy_readreg_t, phy_writereg_t);

/* GLDv3 callback vector registered with mac_register(). */
static mac_callbacks_t yge_m_callbacks = {
	MC_IOCTL | MC_SETPROP | MC_GETPROP,
	yge_m_stat,
	yge_m_start,
	yge_m_stop,
	yge_m_promisc,
	yge_m_multicst,
	yge_m_unicst,
	yge_m_tx,
	yge_m_ioctl,
	NULL,		/* mc_getcapab */
	NULL,		/* mc_open */
	NULL,		/* mc_close */
	yge_m_setprop,
	yge_m_getprop,
};

/* Operations vector handed to the common MII framework. */
static mii_ops_t yge_mii_ops = {
	MII_OPS_VERSION,
	yge_mii_read,
	yge_mii_write,
	yge_mii_notify,
	NULL		/* reset */
};

/*
 * This is the low level interface routine to read from the PHY
 * MII registers.  There are multiple steps to these accesses.
First 271 * the register number is written to an address register. Then after 272 * a specified delay status is checked until the data is present. 273 */ 274 static uint16_t 275 yge_mii_readreg(yge_port_t *port, uint8_t phy, uint8_t reg) 276 { 277 yge_dev_t *dev = port->p_dev; 278 int pnum = port->p_port; 279 uint16_t val; 280 281 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL, 282 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 283 284 for (int i = 0; i < YGE_TIMEOUT; i += 10) { 285 drv_usecwait(10); 286 val = GMAC_READ_2(dev, pnum, GM_SMI_CTRL); 287 if ((val & GM_SMI_CT_RD_VAL) != 0) { 288 val = GMAC_READ_2(dev, pnum, GM_SMI_DATA); 289 return (val); 290 } 291 } 292 293 return (0xffff); 294 } 295 296 /* 297 * This is the low level interface routine to write to the PHY 298 * MII registers. There is multiple steps to these accesses. The 299 * data and the target registers address are written to the PHY. 300 * Then the PHY is polled until it is done with the write. Note 301 * that the delays are specified and required! 
302 */ 303 static void 304 yge_mii_writereg(yge_port_t *port, uint8_t phy, uint8_t reg, uint16_t val) 305 { 306 yge_dev_t *dev = port->p_dev; 307 int pnum = port->p_port; 308 309 GMAC_WRITE_2(dev, pnum, GM_SMI_DATA, val); 310 GMAC_WRITE_2(dev, pnum, GM_SMI_CTRL, 311 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 312 313 for (int i = 0; i < YGE_TIMEOUT; i += 10) { 314 drv_usecwait(10); 315 if ((GMAC_READ_2(dev, pnum, GM_SMI_CTRL) & GM_SMI_CT_BUSY) == 0) 316 return; 317 } 318 319 yge_error(NULL, port, "phy write timeout"); 320 } 321 322 static uint16_t 323 yge_mii_read(void *arg, uint8_t phy, uint8_t reg) 324 { 325 yge_port_t *port = arg; 326 uint16_t rv; 327 328 PHY_LOCK(port->p_dev); 329 rv = yge_mii_readreg(port, phy, reg); 330 PHY_UNLOCK(port->p_dev); 331 return (rv); 332 } 333 334 static void 335 yge_mii_write(void *arg, uint8_t phy, uint8_t reg, uint16_t val) 336 { 337 yge_port_t *port = arg; 338 339 PHY_LOCK(port->p_dev); 340 yge_mii_writereg(port, phy, reg, val); 341 PHY_UNLOCK(port->p_dev); 342 } 343 344 /* 345 * The MII common code calls this function to let the MAC driver 346 * know when there has been a change in status. 347 */ 348 void 349 yge_mii_notify(void *arg, link_state_t link) 350 { 351 yge_port_t *port = arg; 352 yge_dev_t *dev = port->p_dev; 353 uint32_t gmac; 354 uint32_t gpcr; 355 link_flowctrl_t fc; 356 link_duplex_t duplex; 357 int speed; 358 359 fc = mii_get_flowctrl(port->p_mii); 360 duplex = mii_get_duplex(port->p_mii); 361 speed = mii_get_speed(port->p_mii); 362 363 DEV_LOCK(dev); 364 365 if (link == LINK_STATE_UP) { 366 367 /* Enable Tx FIFO Underrun. 
*/ 368 CSR_WRITE_1(dev, MR_ADDR(port->p_port, GMAC_IRQ_MSK), 369 GM_IS_TX_FF_UR | /* TX FIFO underflow */ 370 GM_IS_RX_FF_OR); /* RX FIFO overflow */ 371 372 gpcr = GM_GPCR_AU_ALL_DIS; 373 374 switch (fc) { 375 case LINK_FLOWCTRL_BI: 376 gmac = GMC_PAUSE_ON; 377 gpcr &= ~(GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS); 378 break; 379 case LINK_FLOWCTRL_TX: 380 gmac = GMC_PAUSE_ON; 381 gpcr |= GM_GPCR_FC_RX_DIS; 382 break; 383 case LINK_FLOWCTRL_RX: 384 gmac = GMC_PAUSE_ON; 385 gpcr |= GM_GPCR_FC_TX_DIS; 386 break; 387 case LINK_FLOWCTRL_NONE: 388 default: 389 gmac = GMC_PAUSE_OFF; 390 gpcr |= GM_GPCR_FC_RX_DIS; 391 gpcr |= GM_GPCR_FC_TX_DIS; 392 break; 393 } 394 395 gpcr &= ~((GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100)); 396 switch (speed) { 397 case 1000: 398 gpcr |= GM_GPCR_SPEED_1000; 399 break; 400 case 100: 401 gpcr |= GM_GPCR_SPEED_100; 402 break; 403 case 10: 404 default: 405 break; 406 } 407 408 if (duplex == LINK_DUPLEX_FULL) { 409 gpcr |= GM_GPCR_DUP_FULL; 410 } else { 411 gpcr &= ~(GM_GPCR_DUP_FULL); 412 gmac = GMC_PAUSE_OFF; 413 gpcr |= GM_GPCR_FC_RX_DIS; 414 gpcr |= GM_GPCR_FC_TX_DIS; 415 } 416 417 gpcr |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 418 GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr); 419 420 /* Read again to ensure writing. */ 421 (void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL); 422 423 /* write out the flow control gmac setting */ 424 CSR_WRITE_4(dev, MR_ADDR(port->p_port, GMAC_CTRL), gmac); 425 426 } else { 427 /* Disable Rx/Tx MAC. */ 428 gpcr = GMAC_READ_2(dev, port->p_port, GM_GP_CTRL); 429 gpcr &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 430 GMAC_WRITE_2(dev, port->p_port, GM_GP_CTRL, gpcr); 431 432 /* Read again to ensure writing. 
*/ 433 (void) GMAC_READ_2(dev, port->p_port, GM_GP_CTRL); 434 } 435 436 DEV_UNLOCK(dev); 437 438 mac_link_update(port->p_mh, link); 439 440 if (port->p_running && (link == LINK_STATE_UP)) { 441 mac_tx_update(port->p_mh); 442 } 443 } 444 445 static void 446 yge_setrxfilt(yge_port_t *port) 447 { 448 yge_dev_t *dev; 449 uint16_t mode; 450 uint8_t *ea; 451 uint32_t *mchash; 452 int pnum; 453 454 dev = port->p_dev; 455 pnum = port->p_port; 456 ea = port->p_curraddr; 457 mchash = port->p_mchash; 458 459 if (dev->d_suspended) 460 return; 461 462 /* Set station address. */ 463 for (int i = 0; i < (ETHERADDRL / 2); i++) { 464 GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_1L + i * 4, 465 ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8))); 466 } 467 for (int i = 0; i < (ETHERADDRL / 2); i++) { 468 GMAC_WRITE_2(dev, pnum, GM_SRC_ADDR_2L + i * 4, 469 ((uint16_t)ea[i * 2] | ((uint16_t)ea[(i * 2) + 1] << 8))); 470 } 471 472 /* Figure out receive filtering mode. */ 473 mode = GMAC_READ_2(dev, pnum, GM_RX_CTRL); 474 if (port->p_promisc) { 475 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 476 } else { 477 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 478 } 479 /* Write the multicast filter. */ 480 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H1, mchash[0] & 0xffff); 481 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H2, (mchash[0] >> 16) & 0xffff); 482 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H3, mchash[1] & 0xffff); 483 GMAC_WRITE_2(dev, pnum, GM_MC_ADDR_H4, (mchash[1] >> 16) & 0xffff); 484 /* Write the receive filtering mode. */ 485 GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, mode); 486 } 487 488 static void 489 yge_init_rx_ring(yge_port_t *port) 490 { 491 yge_buf_t *rxb; 492 yge_ring_t *ring; 493 int prod; 494 495 port->p_rx_cons = 0; 496 port->p_rx_putwm = YGE_PUT_WM; 497 ring = &port->p_rx_ring; 498 499 /* ala bzero, but uses safer acch access */ 500 CLEARRING(ring); 501 502 for (prod = 0; prod < YGE_RX_RING_CNT; prod++) { 503 /* Hang out receive buffers. 
*/ 504 rxb = &port->p_rx_buf[prod]; 505 506 PUTADDR(ring, prod, rxb->b_paddr); 507 PUTCTRL(ring, prod, port->p_framesize | OP_PACKET | HW_OWNER); 508 } 509 510 SYNCRING(ring, DDI_DMA_SYNC_FORDEV); 511 512 yge_set_prefetch(port->p_dev, port->p_rxq, ring); 513 514 /* Update prefetch unit. */ 515 CSR_WRITE_2(port->p_dev, 516 Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG), 517 YGE_RX_RING_CNT - 1); 518 } 519 520 static void 521 yge_init_tx_ring(yge_port_t *port) 522 { 523 yge_ring_t *ring = &port->p_tx_ring; 524 525 port->p_tx_prod = 0; 526 port->p_tx_cons = 0; 527 port->p_tx_cnt = 0; 528 529 CLEARRING(ring); 530 SYNCRING(ring, DDI_DMA_SYNC_FORDEV); 531 532 yge_set_prefetch(port->p_dev, port->p_txq, ring); 533 } 534 535 static void 536 yge_setup_rambuffer(yge_dev_t *dev) 537 { 538 int next; 539 int i; 540 541 /* Get adapter SRAM size. */ 542 dev->d_ramsize = CSR_READ_1(dev, B2_E_0) * 4; 543 if (dev->d_ramsize == 0) 544 return; 545 546 dev->d_pflags |= PORT_FLAG_RAMBUF; 547 /* 548 * Give receiver 2/3 of memory and round down to the multiple 549 * of 1024. Tx/Rx RAM buffer size of Yukon 2 should be multiple 550 * of 1024. 551 */ 552 dev->d_rxqsize = (((dev->d_ramsize * 1024 * 2) / 3) & ~(1024 - 1)); 553 dev->d_txqsize = (dev->d_ramsize * 1024) - dev->d_rxqsize; 554 555 for (i = 0, next = 0; i < dev->d_num_port; i++) { 556 dev->d_rxqstart[i] = next; 557 dev->d_rxqend[i] = next + dev->d_rxqsize - 1; 558 next = dev->d_rxqend[i] + 1; 559 dev->d_txqstart[i] = next; 560 dev->d_txqend[i] = next + dev->d_txqsize - 1; 561 next = dev->d_txqend[i] + 1; 562 } 563 } 564 565 static void 566 yge_phy_power(yge_dev_t *dev, boolean_t powerup) 567 { 568 uint32_t val; 569 int i; 570 571 if (powerup) { 572 /* Switch power to VCC (WA for VAUX problem). */ 573 CSR_WRITE_1(dev, B0_POWER_CTRL, 574 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 575 /* Disable Core Clock Division, set Clock Select to 0. 
*/ 576 CSR_WRITE_4(dev, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 577 578 val = 0; 579 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 580 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 581 /* Enable bits are inverted. */ 582 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 583 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 584 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 585 } 586 /* 587 * Enable PCI & Core Clock, enable clock gating for both Links. 588 */ 589 CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val); 590 591 val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1); 592 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 593 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 594 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 595 /* Deassert Low Power for 1st PHY. */ 596 val |= PCI_Y2_PHY1_COMA; 597 if (dev->d_num_port > 1) 598 val |= PCI_Y2_PHY2_COMA; 599 } 600 601 /* Release PHY from PowerDown/COMA mode. */ 602 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val); 603 604 switch (dev->d_hw_id) { 605 case CHIP_ID_YUKON_EC_U: 606 case CHIP_ID_YUKON_EX: 607 case CHIP_ID_YUKON_FE_P: { 608 uint32_t our; 609 610 CSR_WRITE_2(dev, B0_CTST, Y2_HW_WOL_OFF); 611 612 /* Enable all clocks. */ 613 pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0); 614 615 our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_4); 616 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN| 617 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST); 618 /* Set all bits to 0 except bits 15..12. */ 619 pci_config_put32(dev->d_pcih, PCI_OUR_REG_4, our); 620 621 /* Set to default value. 
*/ 622 our = pci_config_get32(dev->d_pcih, PCI_OUR_REG_5); 623 our &= P_CTL_TIM_VMAIN_AV_MSK; 624 pci_config_put32(dev->d_pcih, PCI_OUR_REG_5, our); 625 626 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, 0); 627 628 /* 629 * Enable workaround for dev 4.107 on Yukon-Ultra 630 * and Extreme 631 */ 632 our = CSR_READ_4(dev, B2_GP_IO); 633 our |= GLB_GPIO_STAT_RACE_DIS; 634 CSR_WRITE_4(dev, B2_GP_IO, our); 635 636 (void) CSR_READ_4(dev, B2_GP_IO); 637 break; 638 } 639 default: 640 break; 641 } 642 643 for (i = 0; i < dev->d_num_port; i++) { 644 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL), 645 GMLC_RST_SET); 646 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_LINK_CTRL), 647 GMLC_RST_CLR); 648 } 649 } else { 650 val = pci_config_get32(dev->d_pcih, PCI_OUR_REG_1); 651 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 652 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 653 val &= ~PCI_Y2_PHY1_COMA; 654 if (dev->d_num_port > 1) 655 val &= ~PCI_Y2_PHY2_COMA; 656 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 657 } else { 658 val |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 659 } 660 pci_config_put32(dev->d_pcih, PCI_OUR_REG_1, val); 661 662 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 663 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 664 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 665 if (dev->d_hw_id == CHIP_ID_YUKON_XL && 666 dev->d_hw_rev > CHIP_REV_YU_XL_A1) { 667 /* Enable bits are inverted. */ 668 val = 0; 669 } 670 /* 671 * Disable PCI & Core Clock, disable clock gating for 672 * both Links. 
673 */ 674 CSR_WRITE_1(dev, B2_Y2_CLK_GATE, val); 675 CSR_WRITE_1(dev, B0_POWER_CTRL, 676 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 677 } 678 } 679 680 static void 681 yge_reset(yge_dev_t *dev) 682 { 683 uint64_t addr; 684 uint16_t status; 685 uint32_t val; 686 int i; 687 ddi_acc_handle_t pcih = dev->d_pcih; 688 689 /* Turn off ASF */ 690 if (dev->d_hw_id == CHIP_ID_YUKON_EX) { 691 status = CSR_READ_2(dev, B28_Y2_ASF_STAT_CMD); 692 /* Clear AHB bridge & microcontroller reset */ 693 status &= ~Y2_ASF_CPU_MODE; 694 status &= ~Y2_ASF_AHB_RST; 695 /* Clear ASF microcontroller state */ 696 status &= ~Y2_ASF_STAT_MSK; 697 CSR_WRITE_2(dev, B28_Y2_ASF_STAT_CMD, status); 698 } else { 699 CSR_WRITE_1(dev, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 700 } 701 CSR_WRITE_2(dev, B0_CTST, Y2_ASF_DISABLE); 702 703 /* 704 * Since we disabled ASF, S/W reset is required for Power Management. 705 */ 706 CSR_WRITE_1(dev, B0_CTST, CS_RST_SET); 707 CSR_WRITE_1(dev, B0_CTST, CS_RST_CLR); 708 709 /* Allow writes to PCI config space */ 710 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON); 711 712 /* Clear all error bits in the PCI status register. */ 713 status = pci_config_get16(pcih, PCI_CONF_STAT); 714 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON); 715 716 status |= (PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB | 717 PCI_STAT_R_TARG_AB | PCI_STAT_PERROR); 718 pci_config_put16(pcih, PCI_CONF_STAT, status); 719 720 CSR_WRITE_1(dev, B0_CTST, CS_MRST_CLR); 721 722 switch (dev->d_bustype) { 723 case PEX_BUS: 724 /* Clear all PEX errors. */ 725 CSR_PCI_WRITE_4(dev, Y2_CFG_AER + AER_UNCOR_ERR, 0xffffffff); 726 727 /* is error bit status stuck? */ 728 val = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT); 729 if ((val & PEX_RX_OV) != 0) { 730 dev->d_intrmask &= ~Y2_IS_HW_ERR; 731 dev->d_intrhwemask &= ~Y2_IS_PCI_EXP; 732 } 733 break; 734 case PCI_BUS: 735 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. 
*/ 736 if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0) 737 pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2); 738 break; 739 case PCIX_BUS: 740 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */ 741 if (pci_config_get8(pcih, PCI_CONF_CACHE_LINESZ) == 0) 742 pci_config_put16(pcih, PCI_CONF_CACHE_LINESZ, 2); 743 744 /* Set Cache Line Size opt. */ 745 val = pci_config_get32(pcih, PCI_OUR_REG_1); 746 val |= PCI_CLS_OPT; 747 pci_config_put32(pcih, PCI_OUR_REG_1, val); 748 break; 749 } 750 751 /* Set PHY power state. */ 752 yge_phy_power(dev, B_TRUE); 753 754 /* Reset GPHY/GMAC Control */ 755 for (i = 0; i < dev->d_num_port; i++) { 756 /* GPHY Control reset. */ 757 CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 758 CSR_WRITE_4(dev, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 759 /* GMAC Control reset. */ 760 CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 761 CSR_WRITE_4(dev, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 762 if (dev->d_hw_id == CHIP_ID_YUKON_EX || 763 dev->d_hw_id == CHIP_ID_YUKON_SUPR) { 764 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), 765 (GMC_BYP_RETR_ON | GMC_BYP_MACSECRX_ON | 766 GMC_BYP_MACSECTX_ON)); 767 } 768 CSR_WRITE_2(dev, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 769 770 } 771 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 772 773 /* LED On. */ 774 CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_ON); 775 776 /* Clear TWSI IRQ. */ 777 CSR_WRITE_4(dev, B2_I2C_IRQ, I2C_CLR_IRQ); 778 779 /* Turn off hardware timer. */ 780 CSR_WRITE_1(dev, B2_TI_CTRL, TIM_STOP); 781 CSR_WRITE_1(dev, B2_TI_CTRL, TIM_CLR_IRQ); 782 783 /* Turn off descriptor polling. */ 784 CSR_WRITE_1(dev, B28_DPT_CTRL, DPT_STOP); 785 786 /* Turn off time stamps. 
*/ 787 CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_STOP); 788 CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 789 790 /* Don't permit config space writing */ 791 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 792 793 /* enable TX Arbiters */ 794 for (i = 0; i < dev->d_num_port; i++) 795 CSR_WRITE_1(dev, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB); 796 797 /* Configure timeout values. */ 798 for (i = 0; i < dev->d_num_port; i++) { 799 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR); 800 801 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), RI_TO_53); 802 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), RI_TO_53); 803 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), RI_TO_53); 804 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), RI_TO_53); 805 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), RI_TO_53); 806 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), RI_TO_53); 807 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), RI_TO_53); 808 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), RI_TO_53); 809 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), RI_TO_53); 810 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), RI_TO_53); 811 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), RI_TO_53); 812 CSR_WRITE_1(dev, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), RI_TO_53); 813 } 814 815 /* Disable all interrupts. */ 816 CSR_WRITE_4(dev, B0_HWE_IMSK, 0); 817 (void) CSR_READ_4(dev, B0_HWE_IMSK); 818 CSR_WRITE_4(dev, B0_IMSK, 0); 819 (void) CSR_READ_4(dev, B0_IMSK); 820 821 /* 822 * On dual port PCI-X card, there is an problem where status 823 * can be received out of order due to split transactions. 824 */ 825 if (dev->d_bustype == PCIX_BUS && dev->d_num_port > 1) { 826 int pcix; 827 uint16_t pcix_cmd; 828 829 if ((pcix = yge_find_capability(dev, PCI_CAP_ID_PCIX)) != 0) { 830 pcix_cmd = pci_config_get16(pcih, pcix + 2); 831 /* Clear Max Outstanding Split Transactions. 
*/ 832 pcix_cmd &= ~0x70; 833 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON); 834 pci_config_put16(pcih, pcix + 2, pcix_cmd); 835 CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 836 } 837 } 838 if (dev->d_bustype == PEX_BUS) { 839 uint16_t v, width; 840 841 v = pci_config_get16(pcih, PEX_DEV_CTRL); 842 /* Change Max. Read Request Size to 4096 bytes. */ 843 v &= ~PEX_DC_MAX_RRS_MSK; 844 v |= PEX_DC_MAX_RD_RQ_SIZE(5); 845 pci_config_put16(pcih, PEX_DEV_CTRL, v); 846 width = pci_config_get16(pcih, PEX_LNK_STAT); 847 width = (width & PEX_LS_LINK_WI_MSK) >> 4; 848 v = pci_config_get16(pcih, PEX_LNK_CAP); 849 v = (v & PEX_LS_LINK_WI_MSK) >> 4; 850 if (v != width) 851 yge_error(dev, NULL, 852 "Negotiated width of PCIe link(x%d) != " 853 "max. width of link(x%d)\n", width, v); 854 } 855 856 /* Clear status list. */ 857 CLEARRING(&dev->d_status_ring); 858 SYNCRING(&dev->d_status_ring, DDI_DMA_SYNC_FORDEV); 859 860 dev->d_stat_cons = 0; 861 862 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_SET); 863 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_RST_CLR); 864 865 /* Set the status list base address. */ 866 addr = dev->d_status_ring.r_paddr; 867 CSR_WRITE_4(dev, STAT_LIST_ADDR_LO, YGE_ADDR_LO(addr)); 868 CSR_WRITE_4(dev, STAT_LIST_ADDR_HI, YGE_ADDR_HI(addr)); 869 870 /* Set the status list last index. */ 871 CSR_WRITE_2(dev, STAT_LAST_IDX, YGE_STAT_RING_CNT - 1); 872 CSR_WRITE_2(dev, STAT_PUT_IDX, 0); 873 874 if (dev->d_hw_id == CHIP_ID_YUKON_EC && 875 dev->d_hw_rev == CHIP_REV_YU_EC_A1) { 876 /* WA for dev. 
#4.3 */
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev #4.18 */
		CSR_WRITE_1(dev, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 7);
	} else {
		CSR_WRITE_2(dev, STAT_TX_IDX_TH, 10);
		CSR_WRITE_1(dev, STAT_FIFO_WM, 16);

		/* ISR status FIFO watermark */
		if (dev->d_hw_id == CHIP_ID_YUKON_XL &&
		    dev->d_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 4);
		else
			CSR_WRITE_1(dev, STAT_FIFO_ISR_WM, 16);

		CSR_WRITE_4(dev, STAT_ISR_TIMER_INI, 0x0190);
	}

	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(dev, STAT_TX_TIMER_INI, YGE_USECS(dev, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_OP_ON);

	/* Start the status unit timers. */
	CSR_WRITE_1(dev, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(dev, STAT_ISR_TIMER_CTRL, TIM_START);
}

/*
 * yge_init_port: prepare per-port software state ahead of MAC
 * registration.  Allocates the Tx/Rx buffer descriptor arrays, records
 * the hardware queue register offsets for the port, reads the factory
 * station address from the chip, and fills in (but does not register)
 * the mac_register_t.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
yge_init_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int i;
	mac_register_t *macp;

	port->p_flags = dev->d_pflags;
	/* Derive a unique PPA: driver instance plus 100 per port index. */
	port->p_ppa = ddi_get_instance(dev->d_dip) + (port->p_port * 100);

	port->p_tx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_TX_RING_CNT,
	    KM_SLEEP);
	port->p_rx_buf = kmem_zalloc(sizeof (yge_buf_t) * YGE_RX_RING_CNT,
	    KM_SLEEP);

	/* Setup Tx/Rx queue register offsets. */
	if (port->p_port == YGE_PORT_A) {
		port->p_txq = Q_XA1;
		port->p_txsq = Q_XS1;
		port->p_rxq = Q_R1;
	} else {
		port->p_txq = Q_XA2;
		port->p_txsq = Q_XS2;
		port->p_rxq = Q_R2;
	}

	/* Disable jumbo frame for Yukon FE. */
	if (dev->d_hw_id == CHIP_ID_YUKON_FE)
		port->p_flags |= PORT_FLAG_NOJUMBO;

	/*
	 * Start out assuming a regular MTU.  Users can change this
	 * with dladm.  The dladm daemon is supposed to issue commands
	 * to change the default MTU using m_setprop during early boot
	 * (before the interface is plumbed) if the user has so
	 * requested.
	 */
	port->p_mtu = ETHERMTU;

	port->p_mii = mii_alloc(port, dev->d_dip, &yge_mii_ops);
	if (port->p_mii == NULL) {
		yge_error(NULL, port, "MII handle allocation failed");
		return (DDI_FAILURE);
	}
	/* We assume all parts support asymmetric pause */
	mii_set_pauseable(port->p_mii, B_TRUE, B_TRUE);

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHERADDRL; i++) {
		port->p_curraddr[i] =
		    CSR_READ_1(dev, B2_MAC_1 + (port->p_port * 8) + i);
	}

	/* Register with Nemo. */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		yge_error(NULL, port, "MAC handle allocation failed");
		return (DDI_FAILURE);
	}
	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = port;
	macp->m_dip = dev->d_dip;
	macp->m_src_addr = port->p_curraddr;
	macp->m_callbacks = &yge_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = port->p_mtu;
	macp->m_instance = port->p_ppa;
	macp->m_margin = VLAN_TAGSZ;

	/* The actual mac_register() happens later in yge_register_port(). */
	port->p_mreg = macp;

	return (DDI_SUCCESS);
}

/*
 * yge_add_intr: allocate and install a single interrupt of the given
 * type (MSI-X, MSI or fixed), record its priority and capabilities,
 * and create the driver mutexes at that interrupt priority.  All
 * partially acquired resources are released on failure.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
yge_add_intr(yge_dev_t *dev, int intr_type)
{
	dev_info_t *dip;
	int count;
	int actual;
	int rv;
	int i, j;

	dip = dev->d_dip;

	rv = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((rv != DDI_SUCCESS) || (count == 0)) {
		yge_error(dev, NULL,
		    "ddi_intr_get_nintrs failed, rv %d, count %d", rv, count);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate the interrupt.  Note that we only bother with a single
	 * interrupt.  One could argue that for MSI devices with dual ports,
	 * it would be nice to have a separate interrupt per port.  But right
	 * now I don't know how to configure that, so we'll just settle for
	 * a single interrupt.
	 */
	dev->d_intrcnt = 1;

	dev->d_intrsize = count * sizeof (ddi_intr_handle_t);
	dev->d_intrh = kmem_zalloc(dev->d_intrsize, KM_SLEEP);
	/* NOTE(review): KM_SLEEP allocations do not fail; check is defensive. */
	if (dev->d_intrh == NULL) {
		yge_error(dev, NULL, "Unable to allocate interrupt handle");
		return (DDI_FAILURE);
	}

	rv = ddi_intr_alloc(dip, dev->d_intrh, intr_type, 0, dev->d_intrcnt,
	    &actual, DDI_INTR_ALLOC_STRICT);
	if ((rv != DDI_SUCCESS) || (actual == 0)) {
		yge_error(dev, NULL,
		    "Unable to allocate interrupt, %d, count %d",
		    rv, actual);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	if ((rv = ddi_intr_get_pri(dev->d_intrh[0], &dev->d_intrpri)) !=
	    DDI_SUCCESS) {
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_free(dev->d_intrh[i]);
		yge_error(dev, NULL,
		    "Unable to get interrupt priority, %d", rv);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	if ((rv = ddi_intr_get_cap(dev->d_intrh[0], &dev->d_intrcap)) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL,
		    "Unable to get interrupt capabilities, %d", rv);
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_free(dev->d_intrh[i]);
		kmem_free(dev->d_intrh, dev->d_intrsize);
		return (DDI_FAILURE);
	}

	/* register interrupt handler to kernel */
	for (i = 0; i < dev->d_intrcnt; i++) {
		if ((rv = ddi_intr_add_handler(dev->d_intrh[i], yge_intr,
		    dev, NULL)) != DDI_SUCCESS) {
			yge_error(dev, NULL,
			    "Unable to add interrupt handler, %d", rv);
			/* Unwind handlers added so far, then free them all. */
			for (j = 0; j < i; j++)
				(void) ddi_intr_remove_handler(dev->d_intrh[j]);
			for (i = 0; i < dev->d_intrcnt; i++)
				(void) ddi_intr_free(dev->d_intrh[i]);
			kmem_free(dev->d_intrh, dev->d_intrsize);
			return (DDI_FAILURE);
		}
	}

	/*
	 * Create all driver locks at the interrupt priority so they can
	 * be acquired from the interrupt handler.
	 */
	mutex_init(&dev->d_rxlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_phylock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));
	mutex_init(&dev->d_task_mtx, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(dev->d_intrpri));

	return (DDI_SUCCESS);
}

/*
 * yge_attach_intr: select an interrupt type, preferring MSI-X, then
 * MSI, then fixed interrupts.  MSI/MSI-X are disabled unless the
 * "msi_enable" property is set, because some device/motherboard
 * combinations misbehave with MSI.  Returns DDI_SUCCESS once any one
 * type attaches, DDI_FAILURE if none can be configured.
 */
static int
yge_attach_intr(yge_dev_t *dev)
{
	dev_info_t *dip = dev->d_dip;
	int intr_types;
	int rv;

	/* Allocate IRQ resources. */
	rv = ddi_intr_get_supported_types(dip, &intr_types);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, NULL,
		    "Unable to determine supported interrupt types, %d", rv);
		return (DDI_FAILURE);
	}

	/*
	 * We default to not supporting MSI.  We've found some device
	 * and motherboard combinations don't always work well with
	 * MSI interrupts.  Users may override this if they choose.
	 */
	if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "msi_enable", 0) == 0) {
		/* If msi disable property present, disable both msix/msi. */
		if (intr_types & DDI_INTR_TYPE_FIXED) {
			intr_types &= ~(DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX);
		}
	}

	if (intr_types & DDI_INTR_TYPE_MSIX) {
		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSIX)) ==
		    DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	if (intr_types & DDI_INTR_TYPE_MSI) {
		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_MSI)) ==
		    DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if ((rv = yge_add_intr(dev, DDI_INTR_TYPE_FIXED)) ==
		    DDI_SUCCESS)
			return (DDI_SUCCESS);
	}

	yge_error(dev, NULL, "Unable to configure any interrupts");
	return (DDI_FAILURE);
}

/*
 * yge_intr_enable: enable the allocated interrupt(s), using block
 * enable when the interrupt implementation supports it.
 */
static void
yge_intr_enable(yge_dev_t *dev)
{
	int i;
	if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI interrupts */
		(void) ddi_intr_block_enable(dev->d_intrh, dev->d_intrcnt);
	} else {
		/* Call ddi_intr_enable for FIXED
		    interrupts */
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_enable(dev->d_intrh[i]);
	}
}

/*
 * yge_intr_disable: disable the allocated interrupt(s); mirror image
 * of yge_intr_enable().
 */
void
yge_intr_disable(yge_dev_t *dev)
{
	int i;

	if (dev->d_intrcap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(dev->d_intrh, dev->d_intrcnt);
	} else {
		for (i = 0; i < dev->d_intrcnt; i++)
			(void) ddi_intr_disable(dev->d_intrh[i]);
	}
}

/*
 * yge_find_capability: walk the PCI capability list looking for the
 * given capability id.  Returns the configuration-space offset of the
 * capability, or 0 if the device does not advertise it.
 */
static uint8_t
yge_find_capability(yge_dev_t *dev, uint8_t cap)
{
	uint8_t ptr;
	uint16_t capit;
	ddi_acc_handle_t pcih = dev->d_pcih;

	if ((pci_config_get16(pcih, PCI_CONF_STAT) & PCI_STAT_CAP) == 0) {
		return (0);
	}
	/* This assumes PCI, and not CardBus. */
	ptr = pci_config_get8(pcih, PCI_CONF_CAP_PTR);
	while (ptr != 0) {
		capit = pci_config_get8(pcih, ptr + PCI_CAP_ID);
		if (capit == cap) {
			return (ptr);
		}
		ptr = pci_config_get8(pcih, ptr + PCI_CAP_NEXT_PTR);
	}
	return (0);
}

/*
 * yge_attach: main device attach path.  Maps config and register
 * space, forces the device to D0, identifies the chip and bus type,
 * allocates the status ring, taskq and interrupts, resets the chip,
 * initializes and registers each port, and starts the periodic timer.
 * On any failure it branches to "fail", which calls yge_detach() to
 * unwind whatever was set up.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
yge_attach(yge_dev_t *dev)
{
	dev_info_t *dip = dev->d_dip;
	int rv;
	int nattached;
	uint8_t pm_cap;

	if (pci_config_setup(dip, &dev->d_pcih) != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map PCI configuration space");
		goto fail;
	}

	/*
	 * Map control/status registers.
	 */

	/* ensure the pmcsr status is D0 state */
	pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM);
	if (pm_cap != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);


	/* Allocate I/O resource */
	rv = ddi_regs_map_setup(dip, 1, &dev->d_regs, 0, 0, &yge_regs_attr,
	    &dev->d_regsh);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, NULL, "Unable to map device registers");
		goto fail;
	}


	/* Enable all clocks. */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	dev->d_hw_id = CSR_READ_1(dev, B2_CHIP_ID);
	dev->d_hw_rev = (CSR_READ_1(dev, B2_MAC_CFG) >> 4) & 0x0f;


	/*
	 * Bail out if chip is not recognized.  Note that we only enforce
	 * this in production builds.  The Ultra-2 (88e8057) has a problem
	 * right now where TX works fine, but RX seems not to.  So we've
	 * disabled that for now.
	 */
	if (dev->d_hw_id < CHIP_ID_YUKON_XL ||
	    dev->d_hw_id >= CHIP_ID_YUKON_UL_2) {
		yge_error(dev, NULL, "Unknown device: id=0x%02x, rev=0x%02x",
		    dev->d_hw_id, dev->d_hw_rev);
#ifndef DEBUG
		goto fail;
#endif
	}

	/* Soft reset. */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(dev, B0_CTST, CS_RST_CLR);
	/* PMD type 'L', 'S' or 'P' indicates fiber; anything else copper. */
	dev->d_pmd = CSR_READ_1(dev, B2_PMD_TYP);
	if (dev->d_pmd == 'L' || dev->d_pmd == 'S' || dev->d_pmd == 'P')
		dev->d_coppertype = 0;
	else
		dev->d_coppertype = 1;
	/* Check number of MACs. */
	dev->d_num_port = 1;
	if ((CSR_READ_1(dev, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(dev, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			dev->d_num_port++;
	}

	/* Check bus type. */
	if (yge_find_capability(dev, PCI_CAP_ID_PCI_E) != 0) {
		dev->d_bustype = PEX_BUS;
	} else if (yge_find_capability(dev, PCI_CAP_ID_PCIX) != 0) {
		dev->d_bustype = PCIX_BUS;
	} else {
		dev->d_bustype = PCI_BUS;
	}

	/* Core clock in MHz, used by YGE_USECS() timer conversions. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_UL_2:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_SUPR:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EC_U:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_EX:
		dev->d_clock = 125;	/* 125 Mhz */
		break;
	case CHIP_ID_YUKON_FE:
		dev->d_clock = 100;	/* 100 Mhz */
		break;
	case CHIP_ID_YUKON_FE_P:
		dev->d_clock = 50;	/* 50 Mhz */
		break;
	case CHIP_ID_YUKON_XL:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	default:
		dev->d_clock = 156;	/* 156 Mhz */
		break;
	}

	dev->d_process_limit = YGE_RX_RING_CNT/2;

	rv = yge_alloc_ring(NULL, dev, &dev->d_status_ring, YGE_STAT_RING_CNT);
	if (rv != DDI_SUCCESS)
		goto fail;

	/* Setup event taskq. */
	dev->d_task_q = ddi_taskq_create(dip, "tq", 1, TASKQ_DEFAULTPRI, 0);
	if (dev->d_task_q == NULL) {
		yge_error(dev, NULL, "failed to create taskq");
		goto fail;
	}

	/* Init the condition variable */
	cv_init(&dev->d_task_cv, NULL, CV_DRIVER, NULL);

	/* Allocate IRQ resources. */
	if ((rv = yge_attach_intr(dev)) != DDI_SUCCESS) {
		goto fail;
	}

	/* Set base interrupt mask. */
	dev->d_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	dev->d_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	yge_reset(dev);

	yge_setup_rambuffer(dev);

	nattached = 0;
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_init_port(port) != DDI_SUCCESS) {
			goto fail;
		}
	}

	yge_intr_enable(dev);

	/* set up the periodic to run once per second */
	dev->d_periodic = ddi_periodic_add(yge_tick, dev, 1000000000, 0);

	/* Attach succeeds as long as at least one port registers. */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		if (yge_register_port(port) == DDI_SUCCESS) {
			nattached++;
		}
	}

	if (nattached == 0) {
		goto fail;
	}

	/* Dispatch the taskq */
	if (ddi_taskq_dispatch(dev->d_task_q, yge_task, dev, DDI_SLEEP) !=
	    DDI_SUCCESS) {
		yge_error(dev, NULL, "failed to start taskq");
		goto fail;
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail:
	yge_detach(dev);
	return (DDI_FAILURE);
}

/*
 * yge_register_port: register one port with the MAC (GLDv3) layer.
 */
static int
yge_register_port(yge_port_t *port)
{
	if (mac_register(port->p_mreg, &port->p_mh) != DDI_SUCCESS) {
		yge_error(NULL, port, "MAC registration failed");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * yge_unregister_port: undo yge_register_port(); a no-op if the port
 * was never registered.
 */
static int
yge_unregister_port(yge_port_t *port)
{
	if ((port->p_mh) && (mac_unregister(port->p_mh) != 0)) {
		return (DDI_FAILURE);
	}
	port->p_mh = NULL;
	return (DDI_SUCCESS);
}

/*
 * Free up port specific resources.  This is called only when the
 * port is not registered (and hence not running).
 */
static void
yge_uninit_port(yge_port_t *port)
{
	ASSERT(!port->p_running);

	if (port->p_mreg)
		mac_free(port->p_mreg);

	if (port->p_mii)
		mii_free(port->p_mii);

	yge_txrx_dma_free(port);

	if (port->p_tx_buf)
		kmem_free(port->p_tx_buf,
		    sizeof (yge_buf_t) * YGE_TX_RING_CNT);
	if (port->p_rx_buf)
		kmem_free(port->p_rx_buf,
		    sizeof (yge_buf_t) * YGE_RX_RING_CNT);
}

/*
 * yge_detach: tear down device state.  Also used as the failure path
 * of yge_attach(), so every step tolerates resources that were never
 * set up.
 */
static void
yge_detach(yge_dev_t *dev)
{
	/*
	 * Turn off the periodic.
	 */
	if (dev->d_periodic)
		ddi_periodic_delete(dev->d_periodic);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_uninit_port(dev->d_port[i]);
	}

	/*
	 * Make sure all interrupts are disabled.
	 */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(dev, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset.
 */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);

	yge_free_ring(&dev->d_status_ring);

	if (dev->d_task_q != NULL) {
		yge_dispatch(dev, YGE_TASK_EXIT);
		ddi_taskq_destroy(dev->d_task_q);
		dev->d_task_q = NULL;
	}

	cv_destroy(&dev->d_task_cv);

	yge_intr_disable(dev);

	if (dev->d_intrh != NULL) {
		for (int i = 0; i < dev->d_intrcnt; i++) {
			(void) ddi_intr_remove_handler(dev->d_intrh[i]);
			(void) ddi_intr_free(dev->d_intrh[i]);
		}
		kmem_free(dev->d_intrh, dev->d_intrsize);
		/* Mutexes exist iff interrupts were set up (yge_add_intr). */
		mutex_destroy(&dev->d_phylock);
		mutex_destroy(&dev->d_txlock);
		mutex_destroy(&dev->d_rxlock);
		mutex_destroy(&dev->d_task_mtx);
	}
	if (dev->d_regsh != NULL)
		ddi_regs_map_free(&dev->d_regsh);

	if (dev->d_pcih != NULL)
		pci_config_teardown(&dev->d_pcih);
}

/*
 * yge_alloc_ring: allocate, zero and DMA-bind a descriptor ring of
 * "num" entries.  Exactly one of "port" or "dev" must be supplied;
 * when only "port" is given the device is taken from it.  On failure
 * partially allocated resources are left for yge_free_ring() to
 * reclaim.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
yge_alloc_ring(yge_port_t *port, yge_dev_t *dev, yge_ring_t *ring, uint32_t num)
{
	dev_info_t *dip;
	caddr_t kaddr;
	size_t len;
	int rv;
	ddi_dma_cookie_t dmac;
	unsigned ndmac;

	if (port && !dev)
		dev = port->p_dev;
	dip = dev->d_dip;

	ring->r_num = num;

	rv = ddi_dma_alloc_handle(dip, &yge_ring_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &ring->r_dmah);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, port, "Unable to allocate ring DMA handle");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_mem_alloc(ring->r_dmah, num * sizeof (yge_desc_t),
	    &yge_ring_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
	    &kaddr, &len, &ring->r_acch);
	if (rv != DDI_SUCCESS) {
		yge_error(dev, port, "Unable to allocate ring DMA memory");
		return (DDI_FAILURE);
	}
	ring->r_size = len;
	ring->r_kaddr = (void *)kaddr;

	bzero(kaddr, len);

	rv = ddi_dma_addr_bind_handle(ring->r_dmah, NULL, kaddr,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dmac, &ndmac);
	if (rv != DDI_DMA_MAPPED) {
		yge_error(dev, port, "Unable to bind ring DMA handle");
		return (DDI_FAILURE);
	}
	/* DMA attributes restrict rings to a single cookie. */
	ASSERT(ndmac == 1);
	ring->r_paddr = dmac.dmac_address;

	return (DDI_SUCCESS);
}

/*
 * yge_free_ring: undo yge_alloc_ring(); tolerates a ring that was
 * only partially allocated.
 */
static void
yge_free_ring(yge_ring_t *ring)
{
	if (ring->r_paddr)
		(void) ddi_dma_unbind_handle(ring->r_dmah);
	ring->r_paddr = 0;
	if (ring->r_acch)
		ddi_dma_mem_free(&ring->r_acch);
	ring->r_kaddr = NULL;
	ring->r_acch = NULL;
	if (ring->r_dmah)
		ddi_dma_free_handle(&ring->r_dmah);
	ring->r_dmah = NULL;
}

/*
 * yge_alloc_buf: allocate, map and DMA-bind one packet buffer (used
 * for both Tx and Rx rings; "flag" carries the direction and caching
 * flags).  Partial allocations are reclaimed later by yge_free_buf().
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
yge_alloc_buf(yge_port_t *port, yge_buf_t *b, size_t bufsz, int flag)
{
	yge_dev_t *dev = port->p_dev;
	size_t l;
	int sflag;
	int rv;
	ddi_dma_cookie_t dmac;
	unsigned ndmac;

	sflag = flag & (DDI_DMA_STREAMING | DDI_DMA_CONSISTENT);

	/* Allocate a DMA handle for this buffer. */
	rv = ddi_dma_alloc_handle(dev->d_dip, &yge_buf_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &b->b_dmah);
	if (rv != DDI_SUCCESS) {
		yge_error(NULL, port, "Unable to alloc DMA handle for buffer");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_mem_alloc(b->b_dmah, bufsz, &yge_buf_attr,
	    sflag, DDI_DMA_DONTWAIT, NULL, &b->b_buf, &l, &b->b_acch);
	if (rv != DDI_SUCCESS) {
		yge_error(NULL, port, "Unable to alloc DMA memory for buffer");
		return (DDI_FAILURE);
	}

	rv = ddi_dma_addr_bind_handle(b->b_dmah, NULL, b->b_buf, l, flag,
	    DDI_DMA_DONTWAIT, NULL, &dmac, &ndmac);
	if (rv != DDI_DMA_MAPPED) {
		yge_error(NULL, port, "Unable to bind DMA handle for buffer");
		return (DDI_FAILURE);
	}
	ASSERT(ndmac == 1);
	b->b_paddr = dmac.dmac_address;
	return (DDI_SUCCESS);
}

/*
 * yge_free_buf: undo yge_alloc_buf(); tolerates partial allocation.
 */
static void
yge_free_buf(yge_buf_t *b)
{
	if (b->b_paddr)
		(void) ddi_dma_unbind_handle(b->b_dmah);
	b->b_paddr = 0;
	if (b->b_acch)
		ddi_dma_mem_free(&b->b_acch);
	b->b_buf = NULL;
	b->b_acch = NULL;
	if (b->b_dmah)
		ddi_dma_free_handle(&b->b_dmah);
	b->b_dmah = NULL;
}

/*
 * yge_txrx_dma_alloc: allocate the Tx and Rx descriptor rings and one
 * DMA buffer per ring entry for a port.  On failure the caller is
 * expected to call yge_txrx_dma_free() to reclaim what was allocated.
 */
static int
yge_txrx_dma_alloc(yge_port_t *port)
{
	uint32_t bufsz;
	int rv;
	int i;
	yge_buf_t *b;

	/*
	 * It seems that Yukon II supports full 64 bit DMA operations.
	 * But we limit it to 32 bits only for now.  The 64 bit
	 * operation would require substantially more complex
	 * descriptor handling, since in such a case we would need two
	 * LEs to represent a single physical address.
	 *
	 * If we find that this is limiting us, then we should go back
	 * and re-examine it.
	 */

	/* Note our preferred buffer size. */
	bufsz = port->p_mtu;

	/* Allocate Tx ring. */
	rv = yge_alloc_ring(port, NULL, &port->p_tx_ring, YGE_TX_RING_CNT);
	if (rv != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Now allocate Tx buffers. */
	b = port->p_tx_buf;
	for (i = 0; i < YGE_TX_RING_CNT; i++) {
		rv = yge_alloc_buf(port, b, bufsz,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE);
		if (rv != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		b++;
	}

	/* Allocate Rx ring. */
	rv = yge_alloc_ring(port, NULL, &port->p_rx_ring, YGE_RX_RING_CNT);
	if (rv != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/* Now allocate Rx buffers. */
	b = port->p_rx_buf;
	for (i = 0; i < YGE_RX_RING_CNT; i++) {
		rv = yge_alloc_buf(port, b, bufsz,
		    DDI_DMA_STREAMING | DDI_DMA_READ);
		if (rv != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
		b++;
	}

	return (DDI_SUCCESS);
}

/*
 * yge_txrx_dma_free: release everything yge_txrx_dma_alloc() set up.
 */
static void
yge_txrx_dma_free(yge_port_t *port)
{
	yge_buf_t *b;

	/* Tx ring. */
	yge_free_ring(&port->p_tx_ring);

	/* Rx ring. */
	yge_free_ring(&port->p_rx_ring);

	/* Tx buffers. */
	b = port->p_tx_buf;
	for (int i = 0; i < YGE_TX_RING_CNT; i++, b++) {
		yge_free_buf(b);
	}
	/* Rx buffers. */
	b = port->p_rx_buf;
	for (int i = 0; i < YGE_RX_RING_CNT; i++, b++) {
		yge_free_buf(b);
	}
}

/*
 * yge_send: copy one outbound message into the next free Tx list
 * element and hand it to the hardware.  Returns B_TRUE if the message
 * was consumed (queued, or dropped because it was oversized), B_FALSE
 * if the ring is full and the caller should retry later — in that
 * case p_wantw is set so the Tx reclaim path issues mac_tx_update().
 */
boolean_t
yge_send(yge_port_t *port, mblk_t *mp)
{
	yge_ring_t *ring = &port->p_tx_ring;
	yge_buf_t *txb;
	int16_t prod;
	size_t len;

	/*
	 * For now we're not going to support checksum offload or LSO.
	 */

	len = msgsize(mp);
	if (len > port->p_framesize) {
		/* too big! */
		freemsg(mp);
		return (B_TRUE);
	}

	/* Check number of available descriptors. */
	if (port->p_tx_cnt + 1 >=
	    (YGE_TX_RING_CNT - YGE_RESERVED_TX_DESC_CNT)) {
		port->p_wantw = B_TRUE;
		return (B_FALSE);
	}

	prod = port->p_tx_prod;

	txb = &port->p_tx_buf[prod];
	mcopymsg(mp, txb->b_buf);	/* note: mcopymsg consumes mp */
	SYNCBUF(txb, DDI_DMA_SYNC_FORDEV);

	PUTADDR(ring, prod, txb->b_paddr);
	PUTCTRL(ring, prod, len | OP_PACKET | HW_OWNER | EOP);
	SYNCENTRY(ring, prod, DDI_DMA_SYNC_FORDEV);
	port->p_tx_cnt++;

	YGE_INC(prod, YGE_TX_RING_CNT);

	/* Update producer index. */
	port->p_tx_prod = prod;

	return (B_TRUE);
}

/*
 * yge_suspend: quiesce both ports, mask all interrupts, power down
 * the PHY and hold the chip in reset for system suspend.
 */
static int
yge_suspend(yge_dev_t *dev)
{
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];
		mii_suspend(port->p_mii);
	}


	DEV_LOCK(dev);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			yge_stop_port(port);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	(void) CSR_READ_4(dev, B0_IMSK);
	CSR_WRITE_4(dev, B0_HWE_IMSK, 0);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);

	yge_phy_power(dev, B_FALSE);

	/* Put hardware reset.
 */
	CSR_WRITE_2(dev, B0_CTST, CS_RST_SET);
	dev->d_suspended = B_TRUE;

	DEV_UNLOCK(dev);

	return (DDI_SUCCESS);
}

/*
 * yge_resume: reverse of yge_suspend().  Restores D0 power state,
 * re-enables PCI access and clocks, resets the chip, reprograms the
 * interrupt masks, and restarts any port that was running before the
 * suspension.  MII state is resumed after the device lock is dropped.
 */
static int
yge_resume(yge_dev_t *dev)
{
	uint8_t pm_cap;

	DEV_LOCK(dev);

	/* ensure the pmcsr status is D0 state */
	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	if ((pm_cap = yge_find_capability(dev, PCI_CAP_ID_PM)) != 0) {
		uint16_t pmcsr;
		pmcsr = pci_config_get16(dev->d_pcih, pm_cap + PCI_PMCSR);
		pmcsr &= ~PCI_PMCSR_STATE_MASK;
		pci_config_put16(dev->d_pcih, pm_cap + PCI_PMCSR,
		    pmcsr | PCI_PMCSR_D0);
	}

	/* Enable PCI access and bus master. */
	pci_config_put16(dev->d_pcih, PCI_CONF_COMM,
	    pci_config_get16(dev->d_pcih, PCI_CONF_COMM) |
	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME);

	/* Enable all clocks. */
	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EX:
	case CHIP_ID_YUKON_EC_U:
	case CHIP_ID_YUKON_FE_P:
		pci_config_put32(dev->d_pcih, PCI_OUR_REG_3, 0);
		break;
	}

	CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	yge_reset(dev);

	/* Make sure interrupts are reenabled */
	CSR_WRITE_4(dev, B0_IMSK, 0);
	CSR_WRITE_4(dev, B0_IMSK, Y2_IS_HW_ERR | Y2_IS_STAT_BMU);
	CSR_WRITE_4(dev, B0_HWE_IMSK,
	    Y2_IS_TIST_OV | Y2_IS_MST_ERR |
	    Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP);

	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port != NULL && port->p_running) {
			yge_start_port(port);
		}
	}
	dev->d_suspended = B_FALSE;

	DEV_UNLOCK(dev);

	/* Reset MII layer */
	for (int i = 0; i < dev->d_num_port; i++) {
		yge_port_t *port = dev->d_port[i];

		if (port->p_running) {
			mii_resume(port->p_mii);
			mac_tx_update(port->p_mh);
		}
	}

	return (DDI_SUCCESS);
}

/*
 * yge_rxeof: process one received frame at the current Rx consumer
 * index.  Copies good frames into a freshly allocated mblk (the DMA
 * buffer itself is immediately recycled back to the hardware), and
 * silently recycles bad or oversized frames.  Returns the mblk for
 * delivery to MAC, or NULL.  Caller must hold d_rxlock.
 */
static mblk_t *
yge_rxeof(yge_port_t *port, uint32_t status, int len)
{
	yge_dev_t *dev = port->p_dev;
	mblk_t *mp;
	int cons, rxlen;
	yge_buf_t *rxb;
	yge_ring_t *ring;

	ASSERT(mutex_owned(&dev->d_rxlock));

	if (!port->p_running)
		return (NULL);

	ring = &port->p_rx_ring;
	cons = port->p_rx_cons;
	/* The upper 16 status bits carry the frame length from the MAC. */
	rxlen = status >> 16;
	rxb = &port->p_rx_buf[cons];
	mp = NULL;


	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/*
		 * Apparently the status for this chip is not reliable.
		 * Only perform minimal consistency checking; the MAC
		 * and upper protocols will have to filter any garbage.
		 */
		if ((len > port->p_framesize) || (rxlen != len)) {
			goto bad;
		}
	} else {
		if ((len > port->p_framesize) || (rxlen != len) ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0)) {
			goto bad;
		}
	}

	if ((mp = allocb(len + YGE_HEADROOM, BPRI_HI)) != NULL) {

		/* good packet - yay */
		mp->b_rptr += YGE_HEADROOM;
		SYNCBUF(rxb, DDI_DMA_SYNC_FORKERNEL);
		bcopy(rxb->b_buf, mp->b_rptr, len);
		mp->b_wptr = mp->b_rptr + len;
	} else {
		/* Allocation failed; count the drop and recycle the slot. */
		port->p_stats.rx_nobuf++;
	}

bad:

	/* Hand the list element back to the hardware. */
	PUTCTRL(ring, cons, port->p_framesize | OP_PACKET | HW_OWNER);
	SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);

	CSR_WRITE_2(dev,
	    Y2_PREF_Q_ADDR(port->p_rxq, PREF_UNIT_PUT_IDX_REG),
	    cons);

	YGE_INC(port->p_rx_cons, YGE_RX_RING_CNT);

	return (mp);
}

/*
 * yge_txeof_locked: reclaim completed Tx descriptors up to "idx".
 * Returns B_TRUE if a blocked sender (p_wantw) should be rescheduled
 * via mac_tx_update().  Caller must hold the Tx lock.
 */
static boolean_t
yge_txeof_locked(yge_port_t *port, int idx)
{
	int prog;
	int16_t cons;
	boolean_t resched;

	if (!port->p_running) {
		return (B_FALSE);
	}

	cons = port->p_tx_cons;
	prog = 0;
	for (; cons != idx; YGE_INC(cons, YGE_TX_RING_CNT)) {
		if (port->p_tx_cnt <= 0)
			break;
		prog++;
		port->p_tx_cnt--;
		/* No need to sync LEs as we didn't update LEs. */
	}

	port->p_tx_cons = cons;

	if (prog > 0) {
		/* Progress was made: pet the Tx watchdog. */
		resched = port->p_wantw;
		port->p_tx_wdog = 0;
		port->p_wantw = B_FALSE;
		return (resched);
	} else {
		return (B_FALSE);
	}
}

/*
 * yge_txeof: locked wrapper for yge_txeof_locked() that notifies MAC
 * when blocked transmits can be retried.
 */
static void
yge_txeof(yge_port_t *port, int idx)
{
	boolean_t resched;

	TX_LOCK(port->p_dev);

	resched = yge_txeof_locked(port, idx);

	TX_UNLOCK(port->p_dev);

	if (resched && port->p_running) {
		mac_tx_update(port->p_mh);
	}
}

/*
 * yge_restart_task: taskq context full restart — stop running ports,
 * reset the chip, restart the ports, then reset MII and kick MAC Tx
 * outside the device lock.
 */
static void
yge_restart_task(yge_dev_t *dev)
{
	yge_port_t *port;

	DEV_LOCK(dev);

	/* Cancel pending I/O and free all Rx/Tx buffers. */
	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];
		if (port->p_running)
			yge_stop_port(dev->d_port[i]);
	}
	yge_reset(dev);
	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		if (port->p_running)
			yge_start_port(port);
	}

	DEV_UNLOCK(dev);

	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		mii_reset(port->p_mii);
		if (port->p_running)
			mac_tx_update(port->p_mh);
	}
}

/*
 * yge_tick: once-per-second periodic.  Reclaims Tx descriptors (in
 * case a completion interrupt was lost) and watches for Tx hangs,
 * dispatching a full restart when one is detected.
 */
static void
yge_tick(void *arg)
{
	yge_dev_t *dev = arg;
	yge_port_t *port;
	boolean_t restart = B_FALSE;
	boolean_t resched = B_FALSE;
	int idx;

	DEV_LOCK(dev);

	if (dev->d_suspended) {
		DEV_UNLOCK(dev);
		return;
	}

	for (int i = 0; i < dev->d_num_port; i++) {
		port = dev->d_port[i];

		if (!port->p_running)
			continue;

		if (port->p_tx_cnt) {
			uint32_t ridx;

			/*
			 * Reclaim first as there is a possibility of losing
			 * Tx completion interrupts.
			 */
			ridx = port->p_port == YGE_PORT_A ?
			    STAT_TXA1_RIDX : STAT_TXA2_RIDX;
			idx = CSR_READ_2(dev, ridx);
			if (port->p_tx_cons != idx) {
				resched = yge_txeof_locked(port, idx);

			} else {

				/* detect TX hang */
				port->p_tx_wdog++;
				if (port->p_tx_wdog > YGE_TX_TIMEOUT) {
					port->p_tx_wdog = 0;
					yge_error(NULL, port,
					    "TX hang detected!");
					restart = B_TRUE;
				}
			}
		}
	}

	DEV_UNLOCK(dev);
	if (restart) {
		/* Restart must run in taskq context, not periodic context. */
		yge_dispatch(dev, YGE_TASK_RESTART);
	} else {
		if (resched) {
			for (int i = 0; i < dev->d_num_port; i++) {
				port = dev->d_port[i];

				if (port->p_running)
					mac_tx_update(port->p_mh);
			}
		}
	}
}

/*
 * yge_intr_gmac: service per-port GMAC interrupt sources (FIFO
 * overrun/underrun).  Returns a mask of taskq work to dispatch
 * (YGE_TASK_RESTART on Rx FIFO overrun, 0 otherwise).
 */
static int
yge_intr_gmac(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	int pnum = port->p_port;
	uint8_t status;
	int dispatch_wrk = 0;

	status = CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0) {
		CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
		yge_error(NULL, port, "Rx FIFO overrun!");
		dispatch_wrk |= YGE_TASK_RESTART;
	}
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
		yge_error(NULL, port, "Tx FIFO underrun!");
		/*
		 * In case of Tx underrun, we may need to flush/reset
		 * Tx MAC but that would also require
		 * resynchronization with status LEs.  Reinitializing
		 * status LEs would affect the other port in dual MAC
		 * configuration so it should be avoided if we can.
		 * Due to lack of documentation it's all vague guess
		 * but it needs more investigation.
2057 */ 2058 } 2059 return (dispatch_wrk); 2060 } 2061 2062 static void 2063 yge_handle_hwerr(yge_port_t *port, uint32_t status) 2064 { 2065 yge_dev_t *dev = port->p_dev; 2066 2067 if ((status & Y2_IS_PAR_RD1) != 0) { 2068 yge_error(NULL, port, "RAM buffer read parity error"); 2069 /* Clear IRQ. */ 2070 CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL), 2071 RI_CLR_RD_PERR); 2072 } 2073 if ((status & Y2_IS_PAR_WR1) != 0) { 2074 yge_error(NULL, port, "RAM buffer write parity error"); 2075 /* Clear IRQ. */ 2076 CSR_WRITE_2(dev, SELECT_RAM_BUFFER(port->p_port, B3_RI_CTRL), 2077 RI_CLR_WR_PERR); 2078 } 2079 if ((status & Y2_IS_PAR_MAC1) != 0) { 2080 yge_error(NULL, port, "Tx MAC parity error"); 2081 /* Clear IRQ. */ 2082 CSR_WRITE_4(dev, MR_ADDR(port->p_port, TX_GMF_CTRL_T), 2083 GMF_CLI_TX_PE); 2084 } 2085 if ((status & Y2_IS_PAR_RX1) != 0) { 2086 yge_error(NULL, port, "Rx parity error"); 2087 /* Clear IRQ. */ 2088 CSR_WRITE_4(dev, Q_ADDR(port->p_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 2089 } 2090 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 2091 yge_error(NULL, port, "TCP segmentation error"); 2092 /* Clear IRQ. */ 2093 CSR_WRITE_4(dev, Q_ADDR(port->p_txq, Q_CSR), BMU_CLR_IRQ_TCP); 2094 } 2095 } 2096 2097 static void 2098 yge_intr_hwerr(yge_dev_t *dev) 2099 { 2100 uint32_t status; 2101 uint32_t tlphead[4]; 2102 2103 status = CSR_READ_4(dev, B0_HWE_ISRC); 2104 /* Time Stamp timer overflow. */ 2105 if ((status & Y2_IS_TIST_OV) != 0) 2106 CSR_WRITE_1(dev, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 2107 if ((status & Y2_IS_PCI_NEXP) != 0) { 2108 /* 2109 * PCI Express Error occurred which is not described in PEX 2110 * spec. 2111 * This error is also mapped either to Master Abort( 2112 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 2113 * can only be cleared there. 
		 */
		yge_error(dev, NULL, "PCI Express protocol violation error");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_IRQ_STAT) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Status error");
		if ((status & Y2_IS_MST_ERR) != 0)
			yge_error(dev, NULL, "Unexpected IRQ Master error");
		/* Reset all bits in the PCI status register. */
		v16 = pci_config_get16(dev->d_pcih, PCI_CONF_STAT);
		/* Config-space status writes require the test-control gate. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_config_put16(dev->d_pcih, PCI_CONF_STAT, v16 |
		    PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR | PCI_STAT_R_MAST_AB |
		    PCI_STAT_R_TARG_AB | PCI_STAT_PERROR);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On PCI Express bus bridges are called root complexes (RC).
		 * PCI Express errors are recognized by the root complex too,
		 * which requests the system to handle the problem. After
		 * error occurrence it may be that no access to the adapter
		 * may be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(dev, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request error. */
			yge_error(dev, NULL,
			    "Uncorrectable PCI Express error");
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header form Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(dev,
				    PEX_HEADER_LOG + i * 4);
			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				/*
				 * Fatal/poisoned TLP that is not the known
				 * vendor broadcast: mask further PEX error
				 * interrupts to avoid an interrupt storm.
				 */
				dev->d_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(dev, B0_HWE_IMSK,
				    dev->d_intrhwemask);
				(void) CSR_READ_4(dev, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(dev, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(dev, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Delegate per-port error bits; port B's bits live in the high byte. */
	if ((status & Y2_HWE_L1_MASK) != 0 && dev->d_port[YGE_PORT_A] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && dev->d_port[YGE_PORT_B] != NULL)
		yge_handle_hwerr(dev->d_port[YGE_PORT_B], status >> 8);
}

/*
 * Drain the shared status ring, collecting received mblks (per port)
 * into heads[]/tails[] chains and recording the latest TX consumer
 * indices in txindex[] (-1 means "no TX completion seen").
 *
 * Returns B_TRUE if there is potentially more work to do.
 */
static boolean_t
yge_handle_events(yge_dev_t *dev, mblk_t **heads, mblk_t **tails, int *txindex)
{
	yge_port_t *port;
	yge_ring_t *ring;
	uint32_t control, status;
	int cons, idx, len, pnum;
	mblk_t *mp;
	uint32_t rxprogs[2];

	rxprogs[0] = rxprogs[1] = 0;

	idx = CSR_READ_2(dev, STAT_PUT_IDX);
	if (idx == dev->d_stat_cons) {
		/* Hardware has produced nothing new. */
		return (B_FALSE);
	}

	ring = &dev->d_status_ring;

	for (cons = dev->d_stat_cons; cons != idx; ) {
		/* Sync status LE.
		 */
		SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORKERNEL);
		control = GETCTRL(ring, cons);
		if ((control & HW_OWNER) == 0) {
			/* Entry not owned by hardware yet: ring corruption. */
			yge_error(dev, NULL, "Status descriptor error: "
			    "index %d, control %x", cons, control);
			break;
		}

		status = GETSTAT(ring, cons);

		control &= ~HW_OWNER;
		len = control & STLE_LEN_MASK;
		/* Bit 16 of control selects port A (0) or port B (1). */
		pnum = ((control >> 16) & 0x01);
		port = dev->d_port[pnum];
		if (port == NULL) {
			yge_error(dev, NULL, "Invalid port opcode: 0x%08x",
			    control & STLE_OP_MASK);
			goto finish;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXSTAT:
			/* Completed receive: append mblk to the port chain. */
			mp = yge_rxeof(port, status, len);
			if (mp != NULL) {
				if (heads[pnum] == NULL)
					heads[pnum] = mp;
				else
					tails[pnum]->b_next = mp;
				tails[pnum] = mp;
			}

			rxprogs[pnum]++;
			break;

		case OP_TXINDEXLE:
			/*
			 * One LE carries TX consumer indices for both
			 * ports; port B's index is split across status
			 * and length fields.
			 */
			txindex[0] = status & STLE_TXA1_MSKL;
			txindex[1] =
			    ((status & STLE_TXA2_MSKL) >> STLE_TXA2_SHIFTL) |
			    ((len & STLE_TXA2_MSKH) << STLE_TXA2_SHIFTH);
			break;
		default:
			yge_error(dev, NULL, "Unhandled opcode: 0x%08x",
			    control & STLE_OP_MASK);
			break;
		}
finish:

		/* Give it back to HW. */
		PUTCTRL(ring, cons, control);
		SYNCENTRY(ring, cons, DDI_DMA_SYNC_FORDEV);

		YGE_INC(cons, YGE_STAT_RING_CNT);
		/* Bound per-call RX work so we don't starve the system. */
		if (rxprogs[pnum] > dev->d_process_limit) {
			break;
		}
	}

	dev->d_stat_cons = cons;
	if (dev->d_stat_cons != CSR_READ_2(dev, STAT_PUT_IDX))
		return (B_TRUE);
	else
		return (B_FALSE);
}

/*
 * Primary interrupt service routine.  Claims the interrupt, handles
 * hardware/GMAC errors, drains the status ring under RX_LOCK, and then
 * delivers RX chains and TX completions to MAC outside the lock.
 */
/*ARGSUSED1*/
static uint_t
yge_intr(caddr_t arg1, caddr_t arg2)
{
	yge_dev_t *dev;
	yge_port_t *port1;
	yge_port_t *port2;
	uint32_t status;
	mblk_t *heads[2], *tails[2];
	int txindex[2];
	int dispatch_wrk;

	dev = (void *)arg1;

	heads[0] = heads[1] = NULL;
	tails[0] = tails[1] = NULL;
	txindex[0] = txindex[1] = -1;
	dispatch_wrk = 0;

	port1 = dev->d_port[YGE_PORT_A];
	port2 = dev->d_port[YGE_PORT_B];

	RX_LOCK(dev);

	if (dev->d_suspended) {
		RX_UNLOCK(dev);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Get interrupt source. */
	status = CSR_READ_4(dev, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff ||
	    (status & dev->d_intrmask) == 0) { /* Stray interrupt ? */
		/* Reenable interrupts. */
		CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2);
		RX_UNLOCK(dev);
		return (DDI_INTR_UNCLAIMED);
	}

	if ((status & Y2_IS_HW_ERR) != 0) {
		yge_intr_hwerr(dev);
	}

	if (status & Y2_IS_IRQ_MAC1) {
		dispatch_wrk |= yge_intr_gmac(port1);
	}
	if (status & Y2_IS_IRQ_MAC2) {
		dispatch_wrk |= yge_intr_gmac(port2);
	}

	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		/* Rx descriptor errors are not recoverable: mask them off. */
		yge_error(NULL, status & Y2_IS_CHK_RX1 ? port1 : port2,
		    "Rx descriptor error");
		dev->d_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
		(void) CSR_READ_4(dev, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		yge_error(NULL, status & Y2_IS_CHK_TXA1 ?
port1 : port2, 2326 "Tx descriptor error"); 2327 dev->d_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 2328 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask); 2329 (void) CSR_READ_4(dev, B0_IMSK); 2330 } 2331 2332 /* handle events until it returns false */ 2333 while (yge_handle_events(dev, heads, tails, txindex)) 2334 /* NOP */; 2335 2336 /* Do receive/transmit events */ 2337 if ((status & Y2_IS_STAT_BMU)) { 2338 CSR_WRITE_4(dev, STAT_CTRL, SC_STAT_CLR_IRQ); 2339 } 2340 2341 /* Reenable interrupts. */ 2342 CSR_WRITE_4(dev, B0_Y2_SP_ICR, 2); 2343 2344 RX_UNLOCK(dev); 2345 2346 if (dispatch_wrk) { 2347 yge_dispatch(dev, dispatch_wrk); 2348 } 2349 2350 if (port1->p_running) { 2351 if (txindex[0] >= 0) { 2352 yge_txeof(port1, txindex[0]); 2353 } 2354 if (heads[0]) 2355 mac_rx(port1->p_mh, NULL, heads[0]); 2356 } else { 2357 if (heads[0]) { 2358 mblk_t *mp; 2359 while ((mp = heads[0]) != NULL) { 2360 heads[0] = mp->b_next; 2361 freemsg(mp); 2362 } 2363 } 2364 } 2365 2366 if (port2->p_running) { 2367 if (txindex[1] >= 0) { 2368 yge_txeof(port2, txindex[1]); 2369 } 2370 if (heads[1]) 2371 mac_rx(port2->p_mh, NULL, heads[1]); 2372 } else { 2373 if (heads[1]) { 2374 mblk_t *mp; 2375 while ((mp = heads[1]) != NULL) { 2376 heads[1] = mp->b_next; 2377 freemsg(mp); 2378 } 2379 } 2380 } 2381 2382 return (DDI_INTR_CLAIMED); 2383 } 2384 2385 static void 2386 yge_set_tx_stfwd(yge_port_t *port) 2387 { 2388 yge_dev_t *dev = port->p_dev; 2389 int pnum = port->p_port; 2390 2391 switch (dev->d_hw_id) { 2392 case CHIP_ID_YUKON_EX: 2393 if (dev->d_hw_rev == CHIP_REV_YU_EX_A0) 2394 goto yukon_ex_workaround; 2395 2396 if (port->p_mtu > ETHERMTU) 2397 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), 2398 TX_JUMBO_ENA | TX_STFW_ENA); 2399 else 2400 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), 2401 TX_JUMBO_DIS | TX_STFW_ENA); 2402 break; 2403 default: 2404 yukon_ex_workaround: 2405 if (port->p_mtu > ETHERMTU) { 2406 /* Set Tx GMAC FIFO Almost Empty Threshold. 
			 */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_ENA | TX_STFW_DIS);
		} else {
			/* Enable Store & Forward mode for Tx. */
			CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T),
			    TX_JUMBO_DIS | TX_STFW_ENA);
		}
		break;
	}
}

/*
 * Bring one port fully up: reset and program the GMAC, the Rx/Tx MAC
 * FIFOs, the RAM buffers, and the Rx/Tx BMU queues; initialize the
 * descriptor rings; and finally enable interrupts and the Rx/Tx MAC.
 * The register write ordering below follows the hardware bring-up
 * sequence and must not be rearranged.
 */
static void
yge_start_port(yge_port_t *port)
{
	yge_dev_t *dev = port->p_dev;
	uint16_t gmac;
	int32_t pnum;
	int32_t rxq;
	int32_t txq;
	uint32_t reg;

	pnum = port->p_port;
	txq = port->p_txq;
	rxq = port->p_rxq;

	/* Frame size is at least standard MTU plus VLAN header room. */
	if (port->p_mtu < ETHERMTU)
		port->p_framesize = ETHERMTU;
	else
		port->p_framesize = port->p_mtu;
	port->p_framesize += sizeof (struct ether_vlan_header);

	/*
	 * Note for the future, if we enable offloads:
	 * In Yukon EC Ultra, TSO & checksum offload is not
	 * supported for jumbo frame.
	 */

	/* GMAC Control reset */
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_RST_CLR);
	CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_F_LOOPB_OFF);
	if (dev->d_hw_id == CHIP_ID_YUKON_EX)
		CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL),
		    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
		    GMC_BYP_RETR_ON);
	/*
	 * Initialize GMAC first such that speed/duplex/flow-control
	 * parameters are renegotiated with the interface is brought up.
	 */
	GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, 0);

	/* Dummy read the Interrupt Source Register. */
	(void) CSR_READ_1(dev, MR_ADDR(pnum, GMAC_IRQ_SRC));

	/* Clear MIB stats. */
	yge_stats_clear(port);

	/* Disable FCS. */
	GMAC_WRITE_2(dev, pnum, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(dev, pnum, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (port->p_mtu > ETHERMTU)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(dev, pnum, GM_SERIAL_MODE, gmac);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(dev, pnum, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(dev, pnum, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(dev, pnum, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_CLR);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
	if (dev->d_hw_id == CHIP_ID_YUKON_FE_P ||
	    dev->d_hw_id == CHIP_ID_YUKON_EX)
		reg |= GMF_RX_OVER_ON;
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), reg);

	/* Set receive filter. */
	yge_setrxfilt(port);

	/* Flush Rx MAC FIFO on any flow control or error. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to hardware hang on receipt of pause frames.
	 */
	reg = RX_GMF_FL_THR_DEF + 1;
	/* FE+ magic */
	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0))
		reg = 0x178;

	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_FL_THR), reg);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Disable hardware VLAN tag insertion/stripping. */
	CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
	CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
		    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
			    MSK_ECU_LLPP);
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
			    MSK_FEP_ULPP);
		} else {
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_LP_THR),
			    MSK_ECU_LLPP);
			CSR_WRITE_1(dev, MR_ADDR(pnum, RX_GMF_UP_THR),
			    MSK_ECU_ULPP);
		}
		/* Configure store-and-forward for TX */
		yge_set_tx_stfwd(port);
	}

	if ((dev->d_hw_id == CHIP_ID_YUKON_FE_P) &&
	    (dev->d_hw_rev == CHIP_REV_YU_FE2_A0)) {
		/* Disable dynamic watermark */
		reg = CSR_READ_4(dev, MR_ADDR(pnum, TX_GMF_EA));
		reg &= ~TX_DYN_WM_ENA;
		CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_EA), reg);
	}

	/*
	 * Disable Force Sync bit and Alloc bit in Tx RAM interface
	 * arbiter as we don't use Sync Tx queue.
	 */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	yge_set_rambuffer(port);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(dev, RB_ADDR(port->p_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(dev, Q_ADDR(txq, Q_WM), MSK_BMU_TX_WM);

	switch (dev->d_hw_id) {
	case CHIP_ID_YUKON_EC_U:
		if (dev->d_hw_rev == CHIP_REV_YU_EC_U_A0) {
			/* Fix for Yukon-EC Ultra: set BMU FIFO level */
			CSR_WRITE_2(dev, Q_ADDR(txq, Q_AL), MSK_ECU_TXFF_LEV);
		}
		break;
	case CHIP_ID_YUKON_EX:
		/*
		 * Yukon Extreme seems to have silicon bug for
		 * automatic Tx checksum calculation capability.
		 */
		if (dev->d_hw_rev == CHIP_REV_YU_EX_B0)
			CSR_WRITE_4(dev, Q_ADDR(txq, Q_F), F_TX_CHK_AUTO_OFF);
		break;
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_FIFO_OP_ON);
	if (dev->d_bustype == PEX_BUS) {
		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), 0x80);
	} else {
		CSR_WRITE_2(dev, Q_ADDR(rxq, Q_WM), MSK_BMU_RX_WM);
	}
	if (dev->d_hw_id == CHIP_ID_YUKON_EC_U &&
	    dev->d_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(dev, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
	}

	yge_init_tx_ring(port);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);

	yge_init_rx_ring(port);

	/* Configure interrupt handling. */
	if (port == dev->d_port[YGE_PORT_A]) {
		dev->d_intrmask |= Y2_IS_PORT_A;
		dev->d_intrhwemask |= Y2_HWE_L1_MASK;
	} else if (port == dev->d_port[YGE_PORT_B]) {
		dev->d_intrmask |= Y2_IS_PORT_B;
		dev->d_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask);
	(void) CSR_READ_4(dev, B0_HWE_IMSK);
	CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask);
	(void) CSR_READ_4(dev, B0_IMSK);

	/* Enable RX/TX GMAC */
	gmac = GMAC_READ_2(dev, pnum, GM_GP_CTRL);
	gmac |= (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(port->p_dev, port->p_port, GM_GP_CTRL, gmac);
	/* Read again to ensure writing. */
	(void) GMAC_READ_2(dev, pnum, GM_GP_CTRL);

	/* Reset TX timer */
	port->p_tx_wdog = 0;
}

/*
 * Program the on-chip RAM buffer windows and pause thresholds for one
 * port's Rx and Tx queues.  No-op unless the port uses RAM buffering
 * (PORT_FLAG_RAMBUF).  All offsets are written in units of 8 bytes.
 */
static void
yge_set_rambuffer(yge_port_t *port)
{
	yge_dev_t *dev;
	int ltpp, utpp;
	int pnum;
	uint32_t rxq;
	uint32_t txq;

	dev = port->p_dev;
	pnum = port->p_port;
	rxq = port->p_rxq;
	txq = port->p_txq;

	if ((port->p_flags & PORT_FLAG_RAMBUF) == 0)
		return;

	/* Setup Rx Queue. */
	CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_START), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_END), dev->d_rxqend[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_WP), dev->d_rxqstart[pnum] / 8);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RP), dev->d_rxqstart[pnum] / 8);

	/* Upper/lower pause thresholds, relative to the buffer window. */
	utpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_ULPP) / 8;
	ltpp =
	    (dev->d_rxqend[pnum] + 1 - dev->d_rxqstart[pnum] - RB_LLPP_B) / 8;

	if (dev->d_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (RB_LLPP_B - RB_LLPP_S) / 8;

	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(dev, RB_ADDR(rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority(RB_RX_UTHP/RB_RX_LTHP) thresholds?
*/ 2675 2676 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_ENA_OP_MD); 2677 (void) CSR_READ_1(dev, RB_ADDR(rxq, RB_CTRL)); 2678 2679 /* Setup Tx Queue. */ 2680 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_CLR); 2681 CSR_WRITE_4(dev, RB_ADDR(txq, RB_START), dev->d_txqstart[pnum] / 8); 2682 CSR_WRITE_4(dev, RB_ADDR(txq, RB_END), dev->d_txqend[pnum] / 8); 2683 CSR_WRITE_4(dev, RB_ADDR(txq, RB_WP), dev->d_txqstart[pnum] / 8); 2684 CSR_WRITE_4(dev, RB_ADDR(txq, RB_RP), dev->d_txqstart[pnum] / 8); 2685 /* Enable Store & Forward for Tx side. */ 2686 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_STFWD); 2687 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_ENA_OP_MD); 2688 (void) CSR_READ_1(dev, RB_ADDR(txq, RB_CTRL)); 2689 } 2690 2691 static void 2692 yge_set_prefetch(yge_dev_t *dev, int qaddr, yge_ring_t *ring) 2693 { 2694 /* Reset the prefetch unit. */ 2695 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 2696 PREF_UNIT_RST_SET); 2697 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 2698 PREF_UNIT_RST_CLR); 2699 /* Set LE base address. */ 2700 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG), 2701 YGE_ADDR_LO(ring->r_paddr)); 2702 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG), 2703 YGE_ADDR_HI(ring->r_paddr)); 2704 /* Set the list last index. */ 2705 CSR_WRITE_2(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG), 2706 ring->r_num - 1); 2707 /* Turn on prefetch unit. */ 2708 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG), 2709 PREF_UNIT_OP_ON); 2710 /* Dummy read to ensure write. */ 2711 (void) CSR_READ_4(dev, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG)); 2712 } 2713 2714 static void 2715 yge_stop_port(yge_port_t *port) 2716 { 2717 yge_dev_t *dev = port->p_dev; 2718 int pnum = port->p_port; 2719 uint32_t txq = port->p_txq; 2720 uint32_t rxq = port->p_rxq; 2721 uint32_t val; 2722 int i; 2723 2724 dev = port->p_dev; 2725 2726 /* 2727 * shutdown timeout 2728 */ 2729 port->p_tx_wdog = 0; 2730 2731 /* Disable interrupts. 
*/ 2732 if (pnum == YGE_PORT_A) { 2733 dev->d_intrmask &= ~Y2_IS_PORT_A; 2734 dev->d_intrhwemask &= ~Y2_HWE_L1_MASK; 2735 } else { 2736 dev->d_intrmask &= ~Y2_IS_PORT_B; 2737 dev->d_intrhwemask &= ~Y2_HWE_L2_MASK; 2738 } 2739 CSR_WRITE_4(dev, B0_HWE_IMSK, dev->d_intrhwemask); 2740 (void) CSR_READ_4(dev, B0_HWE_IMSK); 2741 CSR_WRITE_4(dev, B0_IMSK, dev->d_intrmask); 2742 (void) CSR_READ_4(dev, B0_IMSK); 2743 2744 /* Disable Tx/Rx MAC. */ 2745 val = GMAC_READ_2(dev, pnum, GM_GP_CTRL); 2746 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 2747 GMAC_WRITE_2(dev, pnum, GM_GP_CTRL, val); 2748 /* Read again to ensure writing. */ 2749 (void) GMAC_READ_2(dev, pnum, GM_GP_CTRL); 2750 2751 /* Update stats and clear counters. */ 2752 yge_stats_update(port); 2753 2754 /* Stop Tx BMU. */ 2755 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP); 2756 val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR)); 2757 for (i = 0; i < YGE_TIMEOUT; i += 10) { 2758 if ((val & (BMU_STOP | BMU_IDLE)) == 0) { 2759 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_STOP); 2760 val = CSR_READ_4(dev, Q_ADDR(txq, Q_CSR)); 2761 } else 2762 break; 2763 drv_usecwait(10); 2764 } 2765 /* This is probably fairly catastrophic. */ 2766 if ((val & (BMU_STOP | BMU_IDLE)) == 0) 2767 yge_error(NULL, port, "Tx BMU stop failed"); 2768 2769 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET | RB_DIS_OP_MD); 2770 2771 /* Disable all GMAC interrupt. */ 2772 CSR_WRITE_1(dev, MR_ADDR(pnum, GMAC_IRQ_MSK), 0); 2773 2774 /* Disable the RAM Interface Arbiter. */ 2775 CSR_WRITE_1(dev, MR_ADDR(pnum, TXA_CTRL), TXA_DIS_ARB); 2776 2777 /* Reset the PCI FIFO of the async Tx queue */ 2778 CSR_WRITE_4(dev, Q_ADDR(txq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); 2779 2780 /* Reset the Tx prefetch units. */ 2781 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(txq, PREF_UNIT_CTRL_REG), 2782 PREF_UNIT_RST_SET); 2783 2784 /* Reset the RAM Buffer async Tx queue. */ 2785 CSR_WRITE_1(dev, RB_ADDR(txq, RB_CTRL), RB_RST_SET); 2786 2787 /* Reset Tx MAC FIFO. 
*/ 2788 CSR_WRITE_4(dev, MR_ADDR(pnum, TX_GMF_CTRL_T), GMF_RST_SET); 2789 /* Set Pause Off. */ 2790 CSR_WRITE_4(dev, MR_ADDR(pnum, GMAC_CTRL), GMC_PAUSE_OFF); 2791 2792 /* 2793 * The Rx Stop command will not work for Yukon-2 if the BMU does not 2794 * reach the end of packet and since we can't make sure that we have 2795 * incoming data, we must reset the BMU while it is not during a DMA 2796 * transfer. Since it is possible that the Rx path is still active, 2797 * the Rx RAM buffer will be stopped first, so any possible incoming 2798 * data will not trigger a DMA. After the RAM buffer is stopped, the 2799 * BMU is polled until any DMA in progress is ended and only then it 2800 * will be reset. 2801 */ 2802 2803 /* Disable the RAM Buffer receive queue. */ 2804 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); 2805 for (i = 0; i < YGE_TIMEOUT; i += 10) { 2806 if (CSR_READ_1(dev, RB_ADDR(rxq, Q_RSL)) == 2807 CSR_READ_1(dev, RB_ADDR(rxq, Q_RL))) 2808 break; 2809 drv_usecwait(10); 2810 } 2811 /* This is probably nearly a fatal error. */ 2812 if (i == YGE_TIMEOUT) 2813 yge_error(NULL, port, "Rx BMU stop failed"); 2814 2815 CSR_WRITE_4(dev, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); 2816 /* Reset the Rx prefetch unit. */ 2817 CSR_WRITE_4(dev, Y2_PREF_Q_ADDR(rxq, PREF_UNIT_CTRL_REG), 2818 PREF_UNIT_RST_SET); 2819 /* Reset the RAM Buffer receive queue. */ 2820 CSR_WRITE_1(dev, RB_ADDR(rxq, RB_CTRL), RB_RST_SET); 2821 /* Reset Rx MAC FIFO. */ 2822 CSR_WRITE_4(dev, MR_ADDR(pnum, RX_GMF_CTRL_T), GMF_RST_SET); 2823 } 2824 2825 /* 2826 * When GM_PAR_MIB_CLR bit of GM_PHY_ADDR is set, reading lower 2827 * counter clears high 16 bits of the counter such that accessing 2828 * lower 16 bits should be the last operation. 
 */
#define	YGE_READ_MIB32(x, y) \
	GMAC_READ_4(dev, x, y)

/* 64-bit MIB counters are a 32-bit pair; the high half lives at y + 8. */
#define	YGE_READ_MIB64(x, y) \
	((((uint64_t)YGE_READ_MIB32(x, (y) + 8)) << 32) + \
	(uint64_t)YGE_READ_MIB32(x, y))

/*
 * Zero all hardware MIB counters for a port by reading them with the
 * GM_PAR_MIB_CLR (read-to-clear) mode enabled.
 */
static void
yge_stats_clear(yge_port_t *port)
{
	yge_dev_t *dev;
	uint16_t gmac;
	int32_t pnum;

	pnum = port->p_port;
	dev = port->p_dev;

	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (int i = GM_RXF_UC_OK; i <= GM_TXE_FIFO_UR; i += 4)
		(void) YGE_READ_MIB32(pnum, i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
}

/*
 * Accumulate the hardware MIB counters into the port's soft statistics
 * (port->p_stats).  Reads are done in clear-on-read mode so each call
 * adds only the delta since the previous update.  Skipped while the
 * device is suspended or the port is not running.
 */
static void
yge_stats_update(yge_port_t *port)
{
	yge_dev_t *dev;
	struct yge_hw_stats *stats;
	uint16_t gmac;
	int32_t pnum;

	dev = port->p_dev;
	pnum = port->p_port;

	if (dev->d_suspended || !port->p_running) {
		return;
	}
	stats = &port->p_stats;
	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(dev, pnum, GM_PHY_ADDR);
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);

	/* Rx stats.  Spare counters are read (and cleared) but discarded. */
	stats->rx_ucast_frames += YGE_READ_MIB32(pnum, GM_RXF_UC_OK);
	stats->rx_bcast_frames += YGE_READ_MIB32(pnum, GM_RXF_BC_OK);
	stats->rx_pause_frames += YGE_READ_MIB32(pnum, GM_RXF_MPAUSE);
	stats->rx_mcast_frames += YGE_READ_MIB32(pnum, GM_RXF_MC_OK);
	stats->rx_crc_errs += YGE_READ_MIB32(pnum, GM_RXF_FCS_ERR);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE1);
	stats->rx_good_octets += YGE_READ_MIB64(pnum, GM_RXO_OK_LO);
	stats->rx_bad_octets += YGE_READ_MIB64(pnum, GM_RXO_ERR_LO);
	stats->rx_runts += YGE_READ_MIB32(pnum, GM_RXF_SHT);
	stats->rx_runt_errs += YGE_READ_MIB32(pnum, GM_RXE_FRAG);
	stats->rx_pkts_64 += YGE_READ_MIB32(pnum, GM_RXF_64B);
	stats->rx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_RXF_127B);
	stats->rx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_RXF_255B);
	stats->rx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_RXF_511B);
	stats->rx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_RXF_1023B);
	stats->rx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_RXF_1518B);
	stats->rx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_RXF_MAX_SZ);
	stats->rx_pkts_too_long += YGE_READ_MIB32(pnum, GM_RXF_LNG_ERR);
	stats->rx_pkts_jabbers += YGE_READ_MIB32(pnum, GM_RXF_JAB_PKT);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE2);
	stats->rx_fifo_oflows += YGE_READ_MIB32(pnum, GM_RXE_FIFO_OV);
	(void) YGE_READ_MIB32(pnum, GM_RXF_SPARE3);

	/* Tx stats. */
	stats->tx_ucast_frames += YGE_READ_MIB32(pnum, GM_TXF_UC_OK);
	stats->tx_bcast_frames += YGE_READ_MIB32(pnum, GM_TXF_BC_OK);
	stats->tx_pause_frames += YGE_READ_MIB32(pnum, GM_TXF_MPAUSE);
	stats->tx_mcast_frames += YGE_READ_MIB32(pnum, GM_TXF_MC_OK);
	stats->tx_octets += YGE_READ_MIB64(pnum, GM_TXO_OK_LO);
	stats->tx_pkts_64 += YGE_READ_MIB32(pnum, GM_TXF_64B);
	stats->tx_pkts_65_127 += YGE_READ_MIB32(pnum, GM_TXF_127B);
	stats->tx_pkts_128_255 += YGE_READ_MIB32(pnum, GM_TXF_255B);
	stats->tx_pkts_256_511 += YGE_READ_MIB32(pnum, GM_TXF_511B);
	stats->tx_pkts_512_1023 += YGE_READ_MIB32(pnum, GM_TXF_1023B);
	stats->tx_pkts_1024_1518 += YGE_READ_MIB32(pnum, GM_TXF_1518B);
	stats->tx_pkts_1519_max += YGE_READ_MIB32(pnum, GM_TXF_MAX_SZ);
	(void) YGE_READ_MIB32(pnum, GM_TXF_SPARE1);
	stats->tx_colls += YGE_READ_MIB32(pnum, GM_TXF_COL);
	stats->tx_late_colls += YGE_READ_MIB32(pnum, GM_TXF_LAT_COL);
	stats->tx_excess_colls += YGE_READ_MIB32(pnum, GM_TXF_ABO_COL);
	stats->tx_multi_colls += YGE_READ_MIB32(pnum, GM_TXF_MUL_COL);
	stats->tx_single_colls += YGE_READ_MIB32(pnum, GM_TXF_SNG_COL);
	stats->tx_underflows += YGE_READ_MIB32(pnum, GM_TXE_FIFO_UR);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(dev, pnum, GM_PHY_ADDR, gmac);
}

#undef	YGE_READ_MIB32
#undef	YGE_READ_MIB64

/*
 * Compute the multicast hash-filter bucket (0-63) for a 6-byte MAC
 * address using a bitwise big-endian CRC-32 over the address.
 */
uint32_t
yge_hashbit(const uint8_t *addr)
{
	int idx;
	int bit;
	uint_t data;
	uint32_t crc;
#define	POLY_BE	0x04c11db7

	crc = 0xffffffff;
	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
			crc = (crc << 1)
			    ^ ((((crc >> 31) ^ data) & 1) ?
POLY_BE : 0); 2943 } 2944 } 2945 #undef POLY_BE 2946 2947 return (crc % 64); 2948 } 2949 2950 int 2951 yge_m_stat(void *arg, uint_t stat, uint64_t *val) 2952 { 2953 yge_port_t *port = arg; 2954 struct yge_hw_stats *stats = &port->p_stats; 2955 2956 if (stat == MAC_STAT_IFSPEED) { 2957 /* 2958 * This is the first stat we are asked about. We update only 2959 * for this stat, to avoid paying the hefty cost of the update 2960 * once for each stat. 2961 */ 2962 DEV_LOCK(port->p_dev); 2963 yge_stats_update(port); 2964 DEV_UNLOCK(port->p_dev); 2965 } 2966 2967 if (mii_m_getstat(port->p_mii, stat, val) == 0) { 2968 return (0); 2969 } 2970 2971 switch (stat) { 2972 case MAC_STAT_MULTIRCV: 2973 *val = stats->rx_mcast_frames; 2974 break; 2975 2976 case MAC_STAT_BRDCSTRCV: 2977 *val = stats->rx_bcast_frames; 2978 break; 2979 2980 case MAC_STAT_MULTIXMT: 2981 *val = stats->tx_mcast_frames; 2982 break; 2983 2984 case MAC_STAT_BRDCSTXMT: 2985 *val = stats->tx_bcast_frames; 2986 break; 2987 2988 case MAC_STAT_IPACKETS: 2989 *val = stats->rx_ucast_frames; 2990 break; 2991 2992 case MAC_STAT_RBYTES: 2993 *val = stats->rx_good_octets; 2994 break; 2995 2996 case MAC_STAT_OPACKETS: 2997 *val = stats->tx_ucast_frames; 2998 break; 2999 3000 case MAC_STAT_OBYTES: 3001 *val = stats->tx_octets; 3002 break; 3003 3004 case MAC_STAT_NORCVBUF: 3005 *val = stats->rx_nobuf; 3006 break; 3007 3008 case MAC_STAT_COLLISIONS: 3009 *val = stats->tx_colls; 3010 break; 3011 3012 case ETHER_STAT_ALIGN_ERRORS: 3013 *val = stats->rx_runt_errs; 3014 break; 3015 3016 case ETHER_STAT_FCS_ERRORS: 3017 *val = stats->rx_crc_errs; 3018 break; 3019 3020 case ETHER_STAT_FIRST_COLLISIONS: 3021 *val = stats->tx_single_colls; 3022 break; 3023 3024 case ETHER_STAT_MULTI_COLLISIONS: 3025 *val = stats->tx_multi_colls; 3026 break; 3027 3028 case ETHER_STAT_TX_LATE_COLLISIONS: 3029 *val = stats->tx_late_colls; 3030 break; 3031 3032 case ETHER_STAT_EX_COLLISIONS: 3033 *val = stats->tx_excess_colls; 3034 break; 3035 3036 case 
ETHER_STAT_TOOLONG_ERRORS: 3037 *val = stats->rx_pkts_too_long; 3038 break; 3039 3040 case MAC_STAT_OVERFLOWS: 3041 *val = stats->rx_fifo_oflows; 3042 break; 3043 3044 case MAC_STAT_UNDERFLOWS: 3045 *val = stats->tx_underflows; 3046 break; 3047 3048 case ETHER_STAT_TOOSHORT_ERRORS: 3049 *val = stats->rx_runts; 3050 break; 3051 3052 case ETHER_STAT_JABBER_ERRORS: 3053 *val = stats->rx_pkts_jabbers; 3054 break; 3055 3056 default: 3057 return (ENOTSUP); 3058 } 3059 return (0); 3060 } 3061 3062 int 3063 yge_m_start(void *arg) 3064 { 3065 yge_port_t *port = arg; 3066 3067 DEV_LOCK(port->p_dev); 3068 3069 /* 3070 * We defer resource allocation to this point, because we 3071 * don't want to waste DMA resources that might better be used 3072 * elsewhere, if the port is not actually being used. 3073 * 3074 * Furthermore, this gives us a more graceful handling of dynamic 3075 * MTU modification. 3076 */ 3077 if (yge_txrx_dma_alloc(port) != DDI_SUCCESS) { 3078 /* Make sure we free up partially allocated resources. */ 3079 yge_txrx_dma_free(port); 3080 DEV_UNLOCK(port->p_dev); 3081 return (ENOMEM); 3082 } 3083 3084 if (!port->p_dev->d_suspended) 3085 yge_start_port(port); 3086 port->p_running = B_TRUE; 3087 DEV_UNLOCK(port->p_dev); 3088 3089 mii_start(port->p_mii); 3090 3091 return (0); 3092 } 3093 3094 void 3095 yge_m_stop(void *arg) 3096 { 3097 yge_port_t *port = arg; 3098 yge_dev_t *dev = port->p_dev; 3099 3100 DEV_LOCK(dev); 3101 if (!dev->d_suspended) 3102 yge_stop_port(port); 3103 3104 port->p_running = B_FALSE; 3105 3106 /* Release resources we don't need */ 3107 yge_txrx_dma_free(port); 3108 DEV_UNLOCK(dev); 3109 } 3110 3111 int 3112 yge_m_promisc(void *arg, boolean_t on) 3113 { 3114 yge_port_t *port = arg; 3115 3116 DEV_LOCK(port->p_dev); 3117 3118 /* Save current promiscuous mode. 
*/ 3119 port->p_promisc = on; 3120 yge_setrxfilt(port); 3121 3122 DEV_UNLOCK(port->p_dev); 3123 3124 return (0); 3125 } 3126 3127 int 3128 yge_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 3129 { 3130 yge_port_t *port = arg; 3131 int bit; 3132 boolean_t update; 3133 3134 bit = yge_hashbit(addr); 3135 ASSERT(bit < 64); 3136 3137 DEV_LOCK(port->p_dev); 3138 if (add) { 3139 if (port->p_mccount[bit] == 0) { 3140 /* Set the corresponding bit in the hash table. */ 3141 port->p_mchash[bit / 32] |= (1 << (bit % 32)); 3142 update = B_TRUE; 3143 } 3144 port->p_mccount[bit]++; 3145 } else { 3146 ASSERT(port->p_mccount[bit] > 0); 3147 port->p_mccount[bit]--; 3148 if (port->p_mccount[bit] == 0) { 3149 port->p_mchash[bit / 32] &= ~(1 << (bit % 32)); 3150 update = B_TRUE; 3151 } 3152 } 3153 3154 if (update) { 3155 yge_setrxfilt(port); 3156 } 3157 DEV_UNLOCK(port->p_dev); 3158 return (0); 3159 } 3160 3161 int 3162 yge_m_unicst(void *arg, const uint8_t *macaddr) 3163 { 3164 yge_port_t *port = arg; 3165 3166 DEV_LOCK(port->p_dev); 3167 3168 bcopy(macaddr, port->p_curraddr, ETHERADDRL); 3169 yge_setrxfilt(port); 3170 3171 DEV_UNLOCK(port->p_dev); 3172 3173 return (0); 3174 } 3175 3176 mblk_t * 3177 yge_m_tx(void *arg, mblk_t *mp) 3178 { 3179 yge_port_t *port = arg; 3180 mblk_t *nmp; 3181 int enq = 0; 3182 uint32_t ridx; 3183 int idx; 3184 boolean_t resched = B_FALSE; 3185 3186 TX_LOCK(port->p_dev); 3187 3188 if (port->p_dev->d_suspended) { 3189 3190 TX_UNLOCK(port->p_dev); 3191 3192 while ((nmp = mp) != NULL) { 3193 /* carrier_errors++; */ 3194 mp = mp->b_next; 3195 freemsg(nmp); 3196 } 3197 return (NULL); 3198 } 3199 3200 /* attempt a reclaim */ 3201 ridx = port->p_port == YGE_PORT_A ? 
3202 STAT_TXA1_RIDX : STAT_TXA2_RIDX; 3203 idx = CSR_READ_2(port->p_dev, ridx); 3204 if (port->p_tx_cons != idx) 3205 resched = yge_txeof_locked(port, idx); 3206 3207 while (mp != NULL) { 3208 nmp = mp->b_next; 3209 mp->b_next = NULL; 3210 3211 if (!yge_send(port, mp)) { 3212 mp->b_next = nmp; 3213 break; 3214 } 3215 enq++; 3216 mp = nmp; 3217 3218 } 3219 if (enq > 0) { 3220 /* Transmit */ 3221 CSR_WRITE_2(port->p_dev, 3222 Y2_PREF_Q_ADDR(port->p_txq, PREF_UNIT_PUT_IDX_REG), 3223 port->p_tx_prod); 3224 } 3225 3226 TX_UNLOCK(port->p_dev); 3227 3228 if (resched) 3229 mac_tx_update(port->p_mh); 3230 3231 return (mp); 3232 } 3233 3234 void 3235 yge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp) 3236 { 3237 #ifdef YGE_MII_LOOPBACK 3238 /* LINTED E_FUNC_SET_NOT_USED */ 3239 yge_port_t *port = arg; 3240 3241 /* 3242 * Right now, the MII common layer does not properly handle 3243 * loopback on these PHYs. Fixing this should be done at some 3244 * point in the future. 3245 */ 3246 if (mii_m_loop_ioctl(port->p_mii, wq, mp)) 3247 return; 3248 #else 3249 _NOTE(ARGUNUSED(arg)); 3250 #endif 3251 3252 miocnak(wq, mp, 0, EINVAL); 3253 } 3254 3255 int 3256 yge_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3257 uint_t pr_valsize, const void *pr_val) 3258 { 3259 yge_port_t *port = arg; 3260 uint32_t new_mtu; 3261 int err = 0; 3262 3263 err = mii_m_setprop(port->p_mii, pr_name, pr_num, pr_valsize, pr_val); 3264 if (err != ENOTSUP) { 3265 return (err); 3266 } 3267 3268 DEV_LOCK(port->p_dev); 3269 3270 switch (pr_num) { 3271 case MAC_PROP_MTU: 3272 if (pr_valsize < sizeof (new_mtu)) { 3273 err = EINVAL; 3274 break; 3275 } 3276 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3277 if (new_mtu == port->p_mtu) { 3278 /* no change */ 3279 err = 0; 3280 break; 3281 } 3282 if (new_mtu < ETHERMTU) { 3283 yge_error(NULL, port, 3284 "Maximum MTU size too small: %d", new_mtu); 3285 err = EINVAL; 3286 break; 3287 } 3288 if (new_mtu > (port->p_flags & PORT_FLAG_NOJUMBO ? 
3289 ETHERMTU : YGE_JUMBO_MTU)) { 3290 yge_error(NULL, port, 3291 "Maximum MTU size too big: %d", new_mtu); 3292 err = EINVAL; 3293 break; 3294 } 3295 if (port->p_running) { 3296 yge_error(NULL, port, 3297 "Unable to change maximum MTU while running"); 3298 err = EBUSY; 3299 break; 3300 } 3301 3302 3303 /* 3304 * NB: It would probably be better not to hold the 3305 * DEVLOCK, but releasing it creates a potential race 3306 * if m_start is called concurrently. 3307 * 3308 * It turns out that the MAC layer guarantees safety 3309 * for us here by using a cut out for this kind of 3310 * notification call back anyway. 3311 * 3312 * See R8. and R14. in mac.c locking comments, which read 3313 * as follows: 3314 * 3315 * R8. Since it is not guaranteed (see R14) that 3316 * drivers won't hold locks across mac driver 3317 * interfaces, the MAC layer must provide a cut out 3318 * for control interfaces like upcall notifications 3319 * and start them in a separate thread. 3320 * 3321 * R14. It would be preferable if MAC drivers don't 3322 * hold any locks across any mac call. However at a 3323 * minimum they must not hold any locks across data 3324 * upcalls. They must also make sure that all 3325 * references to mac data structures are cleaned up 3326 * and that it is single threaded at mac_unregister 3327 * time. 3328 */ 3329 err = mac_maxsdu_update(port->p_mh, new_mtu); 3330 if (err != 0) { 3331 /* This should never occur! 
*/ 3332 yge_error(NULL, port, 3333 "Failed notifying GLDv3 of new maximum MTU"); 3334 } else { 3335 port->p_mtu = new_mtu; 3336 } 3337 break; 3338 3339 default: 3340 err = ENOTSUP; 3341 break; 3342 } 3343 3344 err: 3345 DEV_UNLOCK(port->p_dev); 3346 3347 return (err); 3348 } 3349 3350 int 3351 yge_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3352 uint_t pr_flags, uint_t pr_valsize, void *pr_val, uint_t *perm) 3353 { 3354 yge_port_t *port = arg; 3355 mac_propval_range_t range; 3356 int err; 3357 3358 err = mii_m_getprop(port->p_mii, pr_name, pr_num, pr_flags, 3359 pr_valsize, pr_val, perm); 3360 if (err != ENOTSUP) { 3361 return (err); 3362 } 3363 3364 if (pr_valsize == 0) 3365 return (EINVAL); 3366 3367 bzero(pr_val, pr_valsize); 3368 *perm = MAC_PROP_PERM_RW; 3369 3370 switch (pr_num) { 3371 case MAC_PROP_MTU: 3372 if (!(pr_flags & MAC_PROP_POSSIBLE)) { 3373 err = ENOTSUP; 3374 break; 3375 } 3376 if (pr_valsize < sizeof (mac_propval_range_t)) 3377 return (EINVAL); 3378 range.mpr_count = 1; 3379 range.mpr_type = MAC_PROPVAL_UINT32; 3380 range.range_uint32[0].mpur_min = ETHERMTU; 3381 range.range_uint32[0].mpur_max = 3382 port->p_flags & PORT_FLAG_NOJUMBO ? 3383 ETHERMTU : YGE_JUMBO_MTU; 3384 bcopy(&range, pr_val, sizeof (range)); 3385 err = 0; 3386 break; 3387 3388 default: 3389 err = ENOTSUP; 3390 break; 3391 } 3392 return (err); 3393 } 3394 3395 void 3396 yge_dispatch(yge_dev_t *dev, int flag) 3397 { 3398 TASK_LOCK(dev); 3399 dev->d_task_flags |= flag; 3400 TASK_SIGNAL(dev); 3401 TASK_UNLOCK(dev); 3402 } 3403 3404 void 3405 yge_task(void *arg) 3406 { 3407 yge_dev_t *dev = arg; 3408 int flags; 3409 3410 for (;;) { 3411 3412 TASK_LOCK(dev); 3413 while ((flags = dev->d_task_flags) == 0) 3414 TASK_WAIT(dev); 3415 3416 dev->d_task_flags = 0; 3417 TASK_UNLOCK(dev); 3418 3419 /* 3420 * This should be the first thing after the sleep so if we are 3421 * requested to exit we do that and not waste time doing work 3422 * we will then abandone. 
3423 */ 3424 if (flags & YGE_TASK_EXIT) 3425 break; 3426 3427 /* all processing done without holding locks */ 3428 if (flags & YGE_TASK_RESTART) 3429 yge_restart_task(dev); 3430 } 3431 } 3432 3433 void 3434 yge_error(yge_dev_t *dev, yge_port_t *port, char *fmt, ...) 3435 { 3436 va_list ap; 3437 char buf[256]; 3438 dev_info_t *dip; 3439 3440 va_start(ap, fmt); 3441 (void) vsnprintf(buf, sizeof (buf), fmt, ap); 3442 va_end(ap); 3443 3444 if (dev == NULL) 3445 dev = port->p_dev; 3446 dip = dev->d_dip; 3447 cmn_err(CE_WARN, "%s%d: %s", 3448 ddi_driver_name(dip), 3449 ddi_get_instance(dip) + port ? port->p_ppa : 0, 3450 buf); 3451 } 3452 3453 static int 3454 yge_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3455 { 3456 yge_dev_t *dev; 3457 int rv; 3458 3459 switch (cmd) { 3460 case DDI_ATTACH: 3461 dev = kmem_zalloc(sizeof (*dev), KM_SLEEP); 3462 dev->d_port[0] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP); 3463 dev->d_port[1] = kmem_zalloc(sizeof (yge_port_t), KM_SLEEP); 3464 dev->d_dip = dip; 3465 ddi_set_driver_private(dip, dev); 3466 3467 dev->d_port[0]->p_port = 0; 3468 dev->d_port[0]->p_dev = dev; 3469 dev->d_port[1]->p_port = 0; 3470 dev->d_port[1]->p_dev = dev; 3471 3472 rv = yge_attach(dev); 3473 if (rv != DDI_SUCCESS) { 3474 ddi_set_driver_private(dip, 0); 3475 kmem_free(dev->d_port[1], sizeof (yge_port_t)); 3476 kmem_free(dev->d_port[0], sizeof (yge_port_t)); 3477 kmem_free(dev, sizeof (*dev)); 3478 } 3479 return (rv); 3480 3481 case DDI_RESUME: 3482 dev = ddi_get_driver_private(dip); 3483 ASSERT(dev != NULL); 3484 return (yge_resume(dev)); 3485 3486 default: 3487 return (DDI_FAILURE); 3488 } 3489 } 3490 3491 static int 3492 yge_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 3493 { 3494 yge_dev_t *dev; 3495 int rv; 3496 3497 switch (cmd) { 3498 case DDI_DETACH: 3499 3500 dev = ddi_get_driver_private(dip); 3501 3502 /* attempt to unregister MACs from Nemo */ 3503 for (int i = 0; i < dev->d_num_port; i++) { 3504 rv = yge_unregister_port(dev->d_port[i]); 
3505 if (rv != DDI_SUCCESS) { 3506 return (DDI_FAILURE); 3507 } 3508 } 3509 3510 ASSERT(dip == dev->d_dip); 3511 yge_detach(dev); 3512 ddi_set_driver_private(dip, 0); 3513 kmem_free(dev->d_port[1], sizeof (yge_port_t)); 3514 kmem_free(dev->d_port[0], sizeof (yge_port_t)); 3515 kmem_free(dev, sizeof (*dev)); 3516 return (DDI_SUCCESS); 3517 3518 case DDI_SUSPEND: 3519 dev = ddi_get_driver_private(dip); 3520 ASSERT(dev != NULL); 3521 return (yge_suspend(dev)); 3522 3523 default: 3524 return (DDI_FAILURE); 3525 } 3526 } 3527 3528 static int 3529 yge_quiesce(dev_info_t *dip) 3530 { 3531 yge_dev_t *dev; 3532 3533 dev = ddi_get_driver_private(dip); 3534 ASSERT(dev != NULL); 3535 3536 /* NB: No locking! We are called in single threaded context */ 3537 for (int i = 0; i < dev->d_num_port; i++) { 3538 yge_port_t *port = dev->d_port[i]; 3539 if (port->p_running) 3540 yge_stop_port(port); 3541 } 3542 3543 /* Disable all interrupts. */ 3544 CSR_WRITE_4(dev, B0_IMSK, 0); 3545 (void) CSR_READ_4(dev, B0_IMSK); 3546 CSR_WRITE_4(dev, B0_HWE_IMSK, 0); 3547 (void) CSR_READ_4(dev, B0_HWE_IMSK); 3548 3549 /* Put hardware into reset. */ 3550 CSR_WRITE_2(dev, B0_CTST, CS_RST_SET); 3551 3552 return (DDI_SUCCESS); 3553 } 3554 3555 /* 3556 * Stream information 3557 */ 3558 DDI_DEFINE_STREAM_OPS(yge_devops, nulldev, nulldev, yge_ddi_attach, 3559 yge_ddi_detach, nodev, NULL, D_MP, NULL, yge_quiesce); 3560 3561 /* 3562 * Module linkage information. 3563 */ 3564 3565 static struct modldrv yge_modldrv = { 3566 &mod_driverops, /* drv_modops */ 3567 "Yukon 2 Ethernet", /* drv_linkinfo */ 3568 &yge_devops /* drv_dev_ops */ 3569 }; 3570 3571 static struct modlinkage yge_modlinkage = { 3572 MODREV_1, /* ml_rev */ 3573 &yge_modldrv, /* ml_linkage */ 3574 NULL 3575 }; 3576 3577 /* 3578 * DDI entry points. 
3579 */ 3580 int 3581 _init(void) 3582 { 3583 int rv; 3584 mac_init_ops(&yge_devops, "yge"); 3585 if ((rv = mod_install(&yge_modlinkage)) != DDI_SUCCESS) { 3586 mac_fini_ops(&yge_devops); 3587 } 3588 return (rv); 3589 } 3590 3591 int 3592 _fini(void) 3593 { 3594 int rv; 3595 if ((rv = mod_remove(&yge_modlinkage)) == DDI_SUCCESS) { 3596 mac_fini_ops(&yge_devops); 3597 } 3598 return (rv); 3599 } 3600 3601 int 3602 _info(struct modinfo *modinfop) 3603 { 3604 return (mod_info(&yge_modlinkage, modinfop)); 3605 } 3606