/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
O'Brien #include "miidevs.h" 10995d67482SBill Paul #include <dev/mii/brgphyreg.h> 11095d67482SBill Paul 11108013fd3SMarius Strobl #ifdef __sparc64__ 11208013fd3SMarius Strobl #include <dev/ofw/ofw_bus.h> 11308013fd3SMarius Strobl #include <dev/ofw/openfirm.h> 11408013fd3SMarius Strobl #include <machine/ofw_machdep.h> 11508013fd3SMarius Strobl #include <machine/ver.h> 11608013fd3SMarius Strobl #endif 11708013fd3SMarius Strobl 1184fbd232cSWarner Losh #include <dev/pci/pcireg.h> 1194fbd232cSWarner Losh #include <dev/pci/pcivar.h> 12095d67482SBill Paul 12195d67482SBill Paul #include <dev/bge/if_bgereg.h> 12295d67482SBill Paul 12335f945cdSPyun YongHyeon #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) 124d375e524SGleb Smirnoff #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ 12595d67482SBill Paul 126f246e4a1SMatthew N. Dodd MODULE_DEPEND(bge, pci, 1, 1, 1); 127f246e4a1SMatthew N. Dodd MODULE_DEPEND(bge, ether, 1, 1, 1); 12895d67482SBill Paul MODULE_DEPEND(bge, miibus, 1, 1, 1); 12995d67482SBill Paul 1307b279558SWarner Losh /* "device miibus" required. See GENERIC if you get errors here. */ 13195d67482SBill Paul #include "miibus_if.h" 13295d67482SBill Paul 13395d67482SBill Paul /* 13495d67482SBill Paul * Various supported device vendors/types and their names. Note: the 13595d67482SBill Paul * spec seems to indicate that the hardware still has Alteon's vendor 13695d67482SBill Paul * ID burned into it, though it will always be overriden by the vendor 13795d67482SBill Paul * ID in the EEPROM. Just to be safe, we cover all possibilities. 13895d67482SBill Paul */ 139852c67f9SMarius Strobl static const struct bge_type { 1404c0da0ffSGleb Smirnoff uint16_t bge_vid; 1414c0da0ffSGleb Smirnoff uint16_t bge_did; 1424c0da0ffSGleb Smirnoff } bge_devs[] = { 1434c0da0ffSGleb Smirnoff { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, 1444c0da0ffSGleb Smirnoff { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, 14595d67482SBill Paul 1464c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, 1474c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, 1484c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, 1494c0da0ffSGleb Smirnoff 1504c0da0ffSGleb Smirnoff { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, 1514c0da0ffSGleb Smirnoff 1524c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, 1534c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, 1544c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, 1554c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, 1564c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, 1574c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, 1584c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, 1594c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, 1604c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, 1614c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, 1624c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, 1634c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, 1644c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, 1654c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, 1664c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, 1674c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, 1684c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, 1694c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, 1704c0da0ffSGleb 
Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, 1714c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, 1724c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, 1734c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, 174effef978SRemko Lodder { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, 175a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, 1764c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, 1774c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, 1784c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, 1794c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, 1804c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, 1814c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, 1824c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, 1834c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, 1844c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, 1854c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, 1869e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, 1879e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, 1889e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, 1899e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, 190f7d1b2ebSXin LI { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 }, 191a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, 192a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, 193a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, 194a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, 195a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, 1964c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, 1974c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, 1984c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, 1994c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, 200a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, 201a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, 202a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, 2039e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, 2049e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, 205a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, 2069e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, 2074c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, 2084c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, 2094c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, 2104c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, 2114c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, 21238cc658fSJohn Baldwin { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, 21338cc658fSJohn Baldwin { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, 214a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, 215a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, 216a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, 217a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, 2184c0da0ffSGleb Smirnoff 2194c0da0ffSGleb Smirnoff { SK_VENDORID, SK_DEVICEID_ALTIMA }, 2204c0da0ffSGleb Smirnoff 2214c0da0ffSGleb Smirnoff { TC_VENDORID, TC_DEVICEID_3C996 }, 2224c0da0ffSGleb Smirnoff 223a5779553SStanislav Sedov { FJTSU_VENDORID, 
FJTSU_DEVICEID_PW008GE4 }, 224a5779553SStanislav Sedov { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, 225a5779553SStanislav Sedov { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 }, 226a5779553SStanislav Sedov 2274c0da0ffSGleb Smirnoff { 0, 0 } 22895d67482SBill Paul }; 22995d67482SBill Paul 2304c0da0ffSGleb Smirnoff static const struct bge_vendor { 2314c0da0ffSGleb Smirnoff uint16_t v_id; 2324c0da0ffSGleb Smirnoff const char *v_name; 2334c0da0ffSGleb Smirnoff } bge_vendors[] = { 2344c0da0ffSGleb Smirnoff { ALTEON_VENDORID, "Alteon" }, 2354c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, "Altima" }, 2364c0da0ffSGleb Smirnoff { APPLE_VENDORID, "Apple" }, 2374c0da0ffSGleb Smirnoff { BCOM_VENDORID, "Broadcom" }, 2384c0da0ffSGleb Smirnoff { SK_VENDORID, "SysKonnect" }, 2394c0da0ffSGleb Smirnoff { TC_VENDORID, "3Com" }, 240a5779553SStanislav Sedov { FJTSU_VENDORID, "Fujitsu" }, 2414c0da0ffSGleb Smirnoff 2424c0da0ffSGleb Smirnoff { 0, NULL } 2434c0da0ffSGleb Smirnoff }; 2444c0da0ffSGleb Smirnoff 2454c0da0ffSGleb Smirnoff static const struct bge_revision { 2464c0da0ffSGleb Smirnoff uint32_t br_chipid; 2474c0da0ffSGleb Smirnoff const char *br_name; 2484c0da0ffSGleb Smirnoff } bge_revisions[] = { 2494c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 2504c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 2514c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 2524c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 2534c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 2544c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 2554c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 2564c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 2574c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 2584c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 2594c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 2604c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 2614c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, 2624c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, 2634c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, 2644c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, 2659e86676bSGleb Smirnoff { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, 2664c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 2674c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 2684c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 2694c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 2704c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 2714c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 2724c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 2734c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 2744c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 2754c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 2764c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 2774c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 2784c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 2794c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 2804c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 2814c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 28242787b76SGleb Smirnoff { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 2834c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 
2844c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 2854c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 2864c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 2874c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 2884c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 2894c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 2904c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 2910c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 2920c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 2930c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 2940c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 295bcc20328SJohn Baldwin { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, 296a5779553SStanislav Sedov { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 297a5779553SStanislav Sedov { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 298a5779553SStanislav Sedov { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 299a5779553SStanislav Sedov { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 30081179070SJung-uk Kim /* 5754 and 5787 share the same ASIC ID */ 3016f8718a3SScott Long { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 3026f8718a3SScott Long { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 3036f8718a3SScott Long { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 30438cc658fSJohn Baldwin { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 30538cc658fSJohn Baldwin { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 306a5779553SStanislav Sedov { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 307a5779553SStanislav Sedov { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 3084c0da0ffSGleb Smirnoff 3094c0da0ffSGleb Smirnoff { 0, NULL } 3104c0da0ffSGleb Smirnoff }; 3114c0da0ffSGleb Smirnoff 3124c0da0ffSGleb Smirnoff /* 3134c0da0ffSGleb Smirnoff * Some defaults for major revisions, so that newer steppings 3144c0da0ffSGleb Smirnoff * that we don't know about have a shot at working. 
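/*
 * Two illustrative sketches (hypothetical helpers, not the driver's
 * verbatim code).  First, a PCI probe method typically just walks
 * bge_devs[] above until its { 0, 0 } terminator, matching the vendor
 * and device IDs; the real probe routine additionally builds its
 * description string from the vendor and revision tables.
 */
static int
bge_probe_sketch(device_t dev)
{
	const struct bge_type *t;

	for (t = bge_devs; t->bge_vid != 0; t++) {
		if (pci_get_vendor(dev) == t->bge_vid &&
		    pci_get_device(dev) == t->bge_did) {
			device_set_desc(dev,
			    "Broadcom BCM570x Gigabit Ethernet");
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

/*
 * Second, the fallback described above: try an exact chip-ID match in
 * bge_revisions[], then fall back to the per-ASIC "unknown BCMxxxx"
 * entries in bge_majorrevs[] below.  BGE_ASICREV() is assumed to be the
 * if_bgereg.h macro that extracts the ASIC revision from the chip ID.
 */
static const struct bge_revision *
bge_lookup_rev_sketch(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++)
		if (br->br_chipid == chipid)
			return (br);
	for (br = bge_majorrevs; br->br_name != NULL; br++)
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	return (NULL);
}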
3154c0da0ffSGleb Smirnoff */ 3164c0da0ffSGleb Smirnoff static const struct bge_revision bge_majorrevs[] = { 3179e86676bSGleb Smirnoff { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 3189e86676bSGleb Smirnoff { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 3199e86676bSGleb Smirnoff { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 3209e86676bSGleb Smirnoff { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 3219e86676bSGleb Smirnoff { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 3229e86676bSGleb Smirnoff { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 3239e86676bSGleb Smirnoff { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 3249e86676bSGleb Smirnoff { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 3259e86676bSGleb Smirnoff { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 3269e86676bSGleb Smirnoff { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 3279e86676bSGleb Smirnoff { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 328a5779553SStanislav Sedov { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 329a5779553SStanislav Sedov { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 330a5779553SStanislav Sedov { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 33181179070SJung-uk Kim /* 5754 and 5787 share the same ASIC ID */ 3326f8718a3SScott Long { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 33338cc658fSJohn Baldwin { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 334a5779553SStanislav Sedov { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 3354c0da0ffSGleb Smirnoff 3364c0da0ffSGleb Smirnoff { 0, NULL } 3374c0da0ffSGleb Smirnoff }; 3384c0da0ffSGleb Smirnoff 3390c8aa4eaSJung-uk Kim #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 3400c8aa4eaSJung-uk Kim #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 3410c8aa4eaSJung-uk Kim #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 3420c8aa4eaSJung-uk Kim #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 3430c8aa4eaSJung-uk Kim #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 344a5779553SStanislav Sedov #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) 3454c0da0ffSGleb Smirnoff 3464c0da0ffSGleb Smirnoff const struct bge_revision * bge_lookup_rev(uint32_t); 3474c0da0ffSGleb Smirnoff const struct bge_vendor * bge_lookup_vendor(uint16_t); 34838cc658fSJohn Baldwin 34938cc658fSJohn Baldwin typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 35038cc658fSJohn Baldwin 351e51a25f8SAlfred Perlstein static int bge_probe(device_t); 352e51a25f8SAlfred Perlstein static int bge_attach(device_t); 353e51a25f8SAlfred Perlstein static int bge_detach(device_t); 35414afefa3SPawel Jakub Dawidek static int bge_suspend(device_t); 35514afefa3SPawel Jakub Dawidek static int bge_resume(device_t); 3563f74909aSGleb Smirnoff static void bge_release_resources(struct bge_softc *); 357f41ac2beSBill Paul static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 3585b610048SPyun YongHyeon static int bge_dma_alloc(struct bge_softc *); 359f41ac2beSBill Paul static void bge_dma_free(struct bge_softc *); 3605b610048SPyun YongHyeon static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t, 3615b610048SPyun YongHyeon bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); 362f41ac2beSBill Paul 3635fea260fSMarius Strobl static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); 36438cc658fSJohn Baldwin static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 36538cc658fSJohn Baldwin static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 36638cc658fSJohn Baldwin static int 
bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 36738cc658fSJohn Baldwin static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 36838cc658fSJohn Baldwin 369b9c05fa5SPyun YongHyeon static void bge_txeof(struct bge_softc *, uint16_t); 370dfe0df9aSPyun YongHyeon static int bge_rxeof(struct bge_softc *, uint16_t, int); 37195d67482SBill Paul 3728cb1383cSDoug Ambrisko static void bge_asf_driver_up (struct bge_softc *); 373e51a25f8SAlfred Perlstein static void bge_tick(void *); 374e51a25f8SAlfred Perlstein static void bge_stats_update(struct bge_softc *); 3753f74909aSGleb Smirnoff static void bge_stats_update_regs(struct bge_softc *); 3762e1d4df4SPyun YongHyeon static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *, 3772e1d4df4SPyun YongHyeon uint16_t *); 378676ad2c9SGleb Smirnoff static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 37995d67482SBill Paul 380e51a25f8SAlfred Perlstein static void bge_intr(void *); 381dfe0df9aSPyun YongHyeon static int bge_msi_intr(void *); 382dfe0df9aSPyun YongHyeon static void bge_intr_task(void *, int); 3830f9bd73bSSam Leffler static void bge_start_locked(struct ifnet *); 384e51a25f8SAlfred Perlstein static void bge_start(struct ifnet *); 385e51a25f8SAlfred Perlstein static int bge_ioctl(struct ifnet *, u_long, caddr_t); 3860f9bd73bSSam Leffler static void bge_init_locked(struct bge_softc *); 387e51a25f8SAlfred Perlstein static void bge_init(void *); 388e51a25f8SAlfred Perlstein static void bge_stop(struct bge_softc *); 389b74e67fbSGleb Smirnoff static void bge_watchdog(struct bge_softc *); 390b6c974e8SWarner Losh static int bge_shutdown(device_t); 39167d5e043SOleg Bulyzhin static int bge_ifmedia_upd_locked(struct ifnet *); 392e51a25f8SAlfred Perlstein static int bge_ifmedia_upd(struct ifnet *); 393e51a25f8SAlfred Perlstein static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 39495d67482SBill Paul 39538cc658fSJohn Baldwin static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 39638cc658fSJohn Baldwin static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 39738cc658fSJohn Baldwin 3983f74909aSGleb Smirnoff static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 399e51a25f8SAlfred Perlstein static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 40095d67482SBill Paul 4013e9b1bcaSJung-uk Kim static void bge_setpromisc(struct bge_softc *); 402e51a25f8SAlfred Perlstein static void bge_setmulti(struct bge_softc *); 403cb2eacc7SYaroslav Tykhiy static void bge_setvlan(struct bge_softc *); 40495d67482SBill Paul 405e0b7b101SPyun YongHyeon static __inline void bge_rxreuse_std(struct bge_softc *, int); 406e0b7b101SPyun YongHyeon static __inline void bge_rxreuse_jumbo(struct bge_softc *, int); 407943787f3SPyun YongHyeon static int bge_newbuf_std(struct bge_softc *, int); 408943787f3SPyun YongHyeon static int bge_newbuf_jumbo(struct bge_softc *, int); 409e51a25f8SAlfred Perlstein static int bge_init_rx_ring_std(struct bge_softc *); 410e51a25f8SAlfred Perlstein static void bge_free_rx_ring_std(struct bge_softc *); 411e51a25f8SAlfred Perlstein static int bge_init_rx_ring_jumbo(struct bge_softc *); 412e51a25f8SAlfred Perlstein static void bge_free_rx_ring_jumbo(struct bge_softc *); 413e51a25f8SAlfred Perlstein static void bge_free_tx_ring(struct bge_softc *); 414e51a25f8SAlfred Perlstein static int bge_init_tx_ring(struct bge_softc *); 41595d67482SBill Paul 416e51a25f8SAlfred Perlstein static int bge_chipinit(struct bge_softc *); 417e51a25f8SAlfred Perlstein static int 
bge_blockinit(struct bge_softc *); 41895d67482SBill Paul 4195fea260fSMarius Strobl static int bge_has_eaddr(struct bge_softc *); 4203f74909aSGleb Smirnoff static uint32_t bge_readmem_ind(struct bge_softc *, int); 421e51a25f8SAlfred Perlstein static void bge_writemem_ind(struct bge_softc *, int, int); 42238cc658fSJohn Baldwin static void bge_writembx(struct bge_softc *, int, int); 42395d67482SBill Paul #ifdef notdef 4243f74909aSGleb Smirnoff static uint32_t bge_readreg_ind(struct bge_softc *, int); 42595d67482SBill Paul #endif 4269ba784dbSScott Long static void bge_writemem_direct(struct bge_softc *, int, int); 427e51a25f8SAlfred Perlstein static void bge_writereg_ind(struct bge_softc *, int, int); 42895d67482SBill Paul 429e51a25f8SAlfred Perlstein static int bge_miibus_readreg(device_t, int, int); 430e51a25f8SAlfred Perlstein static int bge_miibus_writereg(device_t, int, int, int); 431e51a25f8SAlfred Perlstein static void bge_miibus_statchg(device_t); 43275719184SGleb Smirnoff #ifdef DEVICE_POLLING 4331abcdbd1SAttilio Rao static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 43475719184SGleb Smirnoff #endif 43595d67482SBill Paul 4368cb1383cSDoug Ambrisko #define BGE_RESET_START 1 4378cb1383cSDoug Ambrisko #define BGE_RESET_STOP 2 4388cb1383cSDoug Ambrisko static void bge_sig_post_reset(struct bge_softc *, int); 4398cb1383cSDoug Ambrisko static void bge_sig_legacy(struct bge_softc *, int); 4408cb1383cSDoug Ambrisko static void bge_sig_pre_reset(struct bge_softc *, int); 441797ab05eSPyun YongHyeon static void bge_stop_fw(struct bge_softc *); 4428cb1383cSDoug Ambrisko static int bge_reset(struct bge_softc *); 443dab5cd05SOleg Bulyzhin static void bge_link_upd(struct bge_softc *); 44495d67482SBill Paul 4456f8718a3SScott Long /* 4466f8718a3SScott Long * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may 4476f8718a3SScott Long * leak information to untrusted users. It is also known to cause alignment 4486f8718a3SScott Long * traps on certain architectures. 
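/*
 * Sketch of the general shape of such a debug handler (illustrative
 * only; the handler name is hypothetical and the real handlers declared
 * below dump chip registers and memory):
 */
#ifdef BGE_REGISTER_DEBUG
static int
bge_sysctl_debug_sketch(SYSCTL_HANDLER_ARGS)
{
	int error, result = 0;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	/* Only a write to the sysctl node would trigger a register dump. */
	return (0);
}
#endif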
4496f8718a3SScott Long */ 4506f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 4516f8718a3SScott Long static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 4526f8718a3SScott Long static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); 4536f8718a3SScott Long static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); 4546f8718a3SScott Long #endif 4556f8718a3SScott Long static void bge_add_sysctls(struct bge_softc *); 456763757b2SScott Long static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); 4576f8718a3SScott Long 45895d67482SBill Paul static device_method_t bge_methods[] = { 45995d67482SBill Paul /* Device interface */ 46095d67482SBill Paul DEVMETHOD(device_probe, bge_probe), 46195d67482SBill Paul DEVMETHOD(device_attach, bge_attach), 46295d67482SBill Paul DEVMETHOD(device_detach, bge_detach), 46395d67482SBill Paul DEVMETHOD(device_shutdown, bge_shutdown), 46414afefa3SPawel Jakub Dawidek DEVMETHOD(device_suspend, bge_suspend), 46514afefa3SPawel Jakub Dawidek DEVMETHOD(device_resume, bge_resume), 46695d67482SBill Paul 46795d67482SBill Paul /* bus interface */ 46895d67482SBill Paul DEVMETHOD(bus_print_child, bus_generic_print_child), 46995d67482SBill Paul DEVMETHOD(bus_driver_added, bus_generic_driver_added), 47095d67482SBill Paul 47195d67482SBill Paul /* MII interface */ 47295d67482SBill Paul DEVMETHOD(miibus_readreg, bge_miibus_readreg), 47395d67482SBill Paul DEVMETHOD(miibus_writereg, bge_miibus_writereg), 47495d67482SBill Paul DEVMETHOD(miibus_statchg, bge_miibus_statchg), 47595d67482SBill Paul 47695d67482SBill Paul { 0, 0 } 47795d67482SBill Paul }; 47895d67482SBill Paul 47995d67482SBill Paul static driver_t bge_driver = { 48095d67482SBill Paul "bge", 48195d67482SBill Paul bge_methods, 48295d67482SBill Paul sizeof(struct bge_softc) 48395d67482SBill Paul }; 48495d67482SBill Paul 48595d67482SBill Paul static devclass_t bge_devclass; 48695d67482SBill Paul 487f246e4a1SMatthew N. 
Dodd DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 48895d67482SBill Paul DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 48995d67482SBill Paul 490f1a7e6d5SScott Long static int bge_allow_asf = 1; 491f1a7e6d5SScott Long 492f1a7e6d5SScott Long TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); 493f1a7e6d5SScott Long 494f1a7e6d5SScott Long SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); 495f1a7e6d5SScott Long SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0, 496f1a7e6d5SScott Long "Allow ASF mode if available"); 497c4529f41SMichael Reifenberger 49808013fd3SMarius Strobl #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" 49908013fd3SMarius Strobl #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" 50008013fd3SMarius Strobl #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" 50108013fd3SMarius Strobl #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" 50208013fd3SMarius Strobl #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" 50308013fd3SMarius Strobl 50408013fd3SMarius Strobl static int 5055fea260fSMarius Strobl bge_has_eaddr(struct bge_softc *sc) 50608013fd3SMarius Strobl { 50708013fd3SMarius Strobl #ifdef __sparc64__ 50808013fd3SMarius Strobl char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; 50908013fd3SMarius Strobl device_t dev; 51008013fd3SMarius Strobl uint32_t subvendor; 51108013fd3SMarius Strobl 51208013fd3SMarius Strobl dev = sc->bge_dev; 51308013fd3SMarius Strobl 51408013fd3SMarius Strobl /* 51508013fd3SMarius Strobl * The on-board BGEs found in sun4u machines aren't fitted with 51608013fd3SMarius Strobl * an EEPROM which means that we have to obtain the MAC address 51708013fd3SMarius Strobl * via OFW and that some tests will always fail. We distinguish 51808013fd3SMarius Strobl * such BGEs by the subvendor ID, which also has to be obtained 51908013fd3SMarius Strobl * from OFW instead of the PCI configuration space as the latter 52008013fd3SMarius Strobl * indicates Broadcom as the subvendor of the netboot interface. 52108013fd3SMarius Strobl * For early Blade 1500 and 2500 we even have to check the OFW 52208013fd3SMarius Strobl * device path as the subvendor ID always defaults to Broadcom 52308013fd3SMarius Strobl * there. 
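/*
 * Illustrative wrapper (hypothetical, not the driver's actual attach
 * logic): a caller fetching the station address would consult
 * bge_has_eaddr() first and lean on a firmware/OFW-supplied address
 * when no usable EEPROM is present.
 */
static int
bge_eaddr_sketch(struct bge_softc *sc, uint8_t eaddr[ETHER_ADDR_LEN])
{

	if (!bge_has_eaddr(sc))
		return (bge_get_eaddr_fw(sc, eaddr));
	return (bge_get_eaddr(sc, eaddr));
}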
52408013fd3SMarius Strobl */ 52508013fd3SMarius Strobl if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, 52608013fd3SMarius Strobl &subvendor, sizeof(subvendor)) == sizeof(subvendor) && 5272d857b9bSMarius Strobl (subvendor == FJTSU_VENDORID || subvendor == SUN_VENDORID)) 52808013fd3SMarius Strobl return (0); 52908013fd3SMarius Strobl memset(buf, 0, sizeof(buf)); 53008013fd3SMarius Strobl if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { 53108013fd3SMarius Strobl if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && 53208013fd3SMarius Strobl strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) 53308013fd3SMarius Strobl return (0); 53408013fd3SMarius Strobl if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && 53508013fd3SMarius Strobl strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) 53608013fd3SMarius Strobl return (0); 53708013fd3SMarius Strobl } 53808013fd3SMarius Strobl #endif 53908013fd3SMarius Strobl return (1); 54008013fd3SMarius Strobl } 54108013fd3SMarius Strobl 5423f74909aSGleb Smirnoff static uint32_t 5433f74909aSGleb Smirnoff bge_readmem_ind(struct bge_softc *sc, int off) 54495d67482SBill Paul { 54595d67482SBill Paul device_t dev; 5466f8718a3SScott Long uint32_t val; 54795d67482SBill Paul 54895d67482SBill Paul dev = sc->bge_dev; 54995d67482SBill Paul 55095d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 5516f8718a3SScott Long val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 5526f8718a3SScott Long pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 5536f8718a3SScott Long return (val); 55495d67482SBill Paul } 55595d67482SBill Paul 55695d67482SBill Paul static void 5573f74909aSGleb Smirnoff bge_writemem_ind(struct bge_softc *sc, int off, int val) 55895d67482SBill Paul { 55995d67482SBill Paul device_t dev; 56095d67482SBill Paul 56195d67482SBill Paul dev = sc->bge_dev; 56295d67482SBill Paul 56395d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 56495d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 5656f8718a3SScott Long pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 56695d67482SBill Paul } 56795d67482SBill Paul 56895d67482SBill Paul #ifdef notdef 5693f74909aSGleb Smirnoff static uint32_t 5703f74909aSGleb Smirnoff bge_readreg_ind(struct bge_softc *sc, int off) 57195d67482SBill Paul { 57295d67482SBill Paul device_t dev; 57395d67482SBill Paul 57495d67482SBill Paul dev = sc->bge_dev; 57595d67482SBill Paul 57695d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 57795d67482SBill Paul return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 57895d67482SBill Paul } 57995d67482SBill Paul #endif 58095d67482SBill Paul 58195d67482SBill Paul static void 5823f74909aSGleb Smirnoff bge_writereg_ind(struct bge_softc *sc, int off, int val) 58395d67482SBill Paul { 58495d67482SBill Paul device_t dev; 58595d67482SBill Paul 58695d67482SBill Paul dev = sc->bge_dev; 58795d67482SBill Paul 58895d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 58995d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 59095d67482SBill Paul } 59195d67482SBill Paul 5926f8718a3SScott Long static void 5936f8718a3SScott Long bge_writemem_direct(struct bge_softc *sc, int off, int val) 5946f8718a3SScott Long { 5956f8718a3SScott Long CSR_WRITE_4(sc, off, val); 5966f8718a3SScott Long } 5976f8718a3SScott Long 59838cc658fSJohn Baldwin static void 59938cc658fSJohn Baldwin bge_writembx(struct bge_softc *sc, int off, int val) 60038cc658fSJohn Baldwin { 60138cc658fSJohn Baldwin if 
(sc->bge_asicrev == BGE_ASICREV_BCM5906) 60238cc658fSJohn Baldwin off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 60338cc658fSJohn Baldwin 60438cc658fSJohn Baldwin CSR_WRITE_4(sc, off, val); 60538cc658fSJohn Baldwin } 60638cc658fSJohn Baldwin 607f41ac2beSBill Paul /* 608f41ac2beSBill Paul * Map a single buffer address. 609f41ac2beSBill Paul */ 610f41ac2beSBill Paul 611f41ac2beSBill Paul static void 6123f74909aSGleb Smirnoff bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 613f41ac2beSBill Paul { 614f41ac2beSBill Paul struct bge_dmamap_arg *ctx; 615f41ac2beSBill Paul 616f41ac2beSBill Paul if (error) 617f41ac2beSBill Paul return; 618f41ac2beSBill Paul 6195b610048SPyun YongHyeon KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); 6205b610048SPyun YongHyeon 621f41ac2beSBill Paul ctx = arg; 622f41ac2beSBill Paul ctx->bge_busaddr = segs->ds_addr; 623f41ac2beSBill Paul } 624f41ac2beSBill Paul 62538cc658fSJohn Baldwin static uint8_t 62638cc658fSJohn Baldwin bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 62738cc658fSJohn Baldwin { 62838cc658fSJohn Baldwin uint32_t access, byte = 0; 62938cc658fSJohn Baldwin int i; 63038cc658fSJohn Baldwin 63138cc658fSJohn Baldwin /* Lock. */ 63238cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 63338cc658fSJohn Baldwin for (i = 0; i < 8000; i++) { 63438cc658fSJohn Baldwin if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 63538cc658fSJohn Baldwin break; 63638cc658fSJohn Baldwin DELAY(20); 63738cc658fSJohn Baldwin } 63838cc658fSJohn Baldwin if (i == 8000) 63938cc658fSJohn Baldwin return (1); 64038cc658fSJohn Baldwin 64138cc658fSJohn Baldwin /* Enable access. */ 64238cc658fSJohn Baldwin access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 64338cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 64438cc658fSJohn Baldwin 64538cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 64638cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 64738cc658fSJohn Baldwin for (i = 0; i < BGE_TIMEOUT * 10; i++) { 64838cc658fSJohn Baldwin DELAY(10); 64938cc658fSJohn Baldwin if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 65038cc658fSJohn Baldwin DELAY(10); 65138cc658fSJohn Baldwin break; 65238cc658fSJohn Baldwin } 65338cc658fSJohn Baldwin } 65438cc658fSJohn Baldwin 65538cc658fSJohn Baldwin if (i == BGE_TIMEOUT * 10) { 65638cc658fSJohn Baldwin if_printf(sc->bge_ifp, "nvram read timed out\n"); 65738cc658fSJohn Baldwin return (1); 65838cc658fSJohn Baldwin } 65938cc658fSJohn Baldwin 66038cc658fSJohn Baldwin /* Get result. */ 66138cc658fSJohn Baldwin byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 66238cc658fSJohn Baldwin 66338cc658fSJohn Baldwin *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 66438cc658fSJohn Baldwin 66538cc658fSJohn Baldwin /* Disable access. */ 66638cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 66738cc658fSJohn Baldwin 66838cc658fSJohn Baldwin /* Unlock. */ 66938cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 67038cc658fSJohn Baldwin CSR_READ_4(sc, BGE_NVRAM_SWARB); 67138cc658fSJohn Baldwin 67238cc658fSJohn Baldwin return (0); 67338cc658fSJohn Baldwin } 67438cc658fSJohn Baldwin 67538cc658fSJohn Baldwin /* 67638cc658fSJohn Baldwin * Read a sequence of bytes from NVRAM. 
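/*
 * Usage sketch for the single-address callback above (bge_dma_map_addr).
 * This is illustrative; the ring-allocation helpers in this driver follow
 * the same shape: the callback stashes the bus address of the loaded ring
 * in the bge_dmamap_arg that the caller passes to bus_dmamap_load().
 */
static int
bge_load_ring_sketch(bus_dma_tag_t tag, bus_dmamap_t map, void *ring,
    bus_size_t len, bus_addr_t *paddr)
{
	struct bge_dmamap_arg ctx;
	int error;

	ctx.bge_busaddr = 0;
	error = bus_dmamap_load(tag, map, ring, len, bge_dma_map_addr,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	*paddr = ctx.bge_busaddr;
	return (0);
}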
67738cc658fSJohn Baldwin */ 67838cc658fSJohn Baldwin static int 67938cc658fSJohn Baldwin bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) 68038cc658fSJohn Baldwin { 68138cc658fSJohn Baldwin int err = 0, i; 68238cc658fSJohn Baldwin uint8_t byte = 0; 68338cc658fSJohn Baldwin 68438cc658fSJohn Baldwin if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 68538cc658fSJohn Baldwin return (1); 68638cc658fSJohn Baldwin 68738cc658fSJohn Baldwin for (i = 0; i < cnt; i++) { 68838cc658fSJohn Baldwin err = bge_nvram_getbyte(sc, off + i, &byte); 68938cc658fSJohn Baldwin if (err) 69038cc658fSJohn Baldwin break; 69138cc658fSJohn Baldwin *(dest + i) = byte; 69238cc658fSJohn Baldwin } 69338cc658fSJohn Baldwin 69438cc658fSJohn Baldwin return (err ? 1 : 0); 69538cc658fSJohn Baldwin } 69638cc658fSJohn Baldwin 69795d67482SBill Paul /* 69895d67482SBill Paul * Read a byte of data stored in the EEPROM at address 'addr.' The 69995d67482SBill Paul * BCM570x supports both the traditional bitbang interface and an 70095d67482SBill Paul * auto access interface for reading the EEPROM. We use the auto 70195d67482SBill Paul * access method. 70295d67482SBill Paul */ 7033f74909aSGleb Smirnoff static uint8_t 7043f74909aSGleb Smirnoff bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 70595d67482SBill Paul { 70695d67482SBill Paul int i; 7073f74909aSGleb Smirnoff uint32_t byte = 0; 70895d67482SBill Paul 70995d67482SBill Paul /* 71095d67482SBill Paul * Enable use of auto EEPROM access so we can avoid 71195d67482SBill Paul * having to use the bitbang method. 71295d67482SBill Paul */ 71395d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 71495d67482SBill Paul 71595d67482SBill Paul /* Reset the EEPROM, load the clock period. */ 71695d67482SBill Paul CSR_WRITE_4(sc, BGE_EE_ADDR, 71795d67482SBill Paul BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); 71895d67482SBill Paul DELAY(20); 71995d67482SBill Paul 72095d67482SBill Paul /* Issue the read EEPROM command. */ 72195d67482SBill Paul CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); 72295d67482SBill Paul 72395d67482SBill Paul /* Wait for completion */ 72495d67482SBill Paul for(i = 0; i < BGE_TIMEOUT * 10; i++) { 72595d67482SBill Paul DELAY(10); 72695d67482SBill Paul if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) 72795d67482SBill Paul break; 72895d67482SBill Paul } 72995d67482SBill Paul 730d5d23857SJung-uk Kim if (i == BGE_TIMEOUT * 10) { 731fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "EEPROM read timed out\n"); 732f6789fbaSPyun YongHyeon return (1); 73395d67482SBill Paul } 73495d67482SBill Paul 73595d67482SBill Paul /* Get result. */ 73695d67482SBill Paul byte = CSR_READ_4(sc, BGE_EE_DATA); 73795d67482SBill Paul 7380c8aa4eaSJung-uk Kim *dest = (byte >> ((addr % 4) * 8)) & 0xFF; 73995d67482SBill Paul 74095d67482SBill Paul return (0); 74195d67482SBill Paul } 74295d67482SBill Paul 74395d67482SBill Paul /* 74495d67482SBill Paul * Read a sequence of bytes from the EEPROM. 
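/*
 * Usage sketch for the two sequence readers (the NVRAM reader above and
 * the EEPROM reader that follows).  Illustrative only: the wrapper name
 * is hypothetical and both offsets are assumptions rather than values
 * taken from the chip documentation.
 */
static int
bge_read_eaddr_sketch(struct bge_softc *sc, uint8_t eaddr[ETHER_ADDR_LEN])
{

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		/* 0x10 is an illustrative NVRAM offset. */
		return (bge_read_nvram(sc, (caddr_t)eaddr, 0x10,
		    ETHER_ADDR_LEN) ? ENXIO : 0);
	}
	/* BGE_EE_MAC_OFFSET is assumed to be provided by if_bgereg.h. */
	return (bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET,
	    ETHER_ADDR_LEN) ? ENXIO : 0);
}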
74595d67482SBill Paul */ 74695d67482SBill Paul static int 7473f74909aSGleb Smirnoff bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) 74895d67482SBill Paul { 7493f74909aSGleb Smirnoff int i, error = 0; 7503f74909aSGleb Smirnoff uint8_t byte = 0; 75195d67482SBill Paul 75295d67482SBill Paul for (i = 0; i < cnt; i++) { 7533f74909aSGleb Smirnoff error = bge_eeprom_getbyte(sc, off + i, &byte); 7543f74909aSGleb Smirnoff if (error) 75595d67482SBill Paul break; 75695d67482SBill Paul *(dest + i) = byte; 75795d67482SBill Paul } 75895d67482SBill Paul 7593f74909aSGleb Smirnoff return (error ? 1 : 0); 76095d67482SBill Paul } 76195d67482SBill Paul 76295d67482SBill Paul static int 7633f74909aSGleb Smirnoff bge_miibus_readreg(device_t dev, int phy, int reg) 76495d67482SBill Paul { 76595d67482SBill Paul struct bge_softc *sc; 7663f74909aSGleb Smirnoff uint32_t val, autopoll; 76795d67482SBill Paul int i; 76895d67482SBill Paul 76995d67482SBill Paul sc = device_get_softc(dev); 77095d67482SBill Paul 7710434d1b8SBill Paul /* 7720434d1b8SBill Paul * Broadcom's own driver always assumes the internal 7730434d1b8SBill Paul * PHY is at GMII address 1. On some chips, the PHY responds 7740434d1b8SBill Paul * to accesses at all addresses, which could cause us to 7750434d1b8SBill Paul * bogusly attach the PHY 32 times at probe type. Always 7760434d1b8SBill Paul * restricting the lookup to address 1 is simpler than 7770434d1b8SBill Paul * trying to figure out which chips revisions should be 7780434d1b8SBill Paul * special-cased. 7790434d1b8SBill Paul */ 780b1265c1aSJohn Polstra if (phy != 1) 78198b28ee5SBill Paul return (0); 78298b28ee5SBill Paul 78337ceeb4dSPaul Saab /* Reading with autopolling on may trigger PCI errors */ 78437ceeb4dSPaul Saab autopoll = CSR_READ_4(sc, BGE_MI_MODE); 78537ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 78637ceeb4dSPaul Saab BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 78737ceeb4dSPaul Saab DELAY(40); 78837ceeb4dSPaul Saab } 78937ceeb4dSPaul Saab 79095d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 79195d67482SBill Paul BGE_MIPHY(phy) | BGE_MIREG(reg)); 79295d67482SBill Paul 79395d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 794d5d23857SJung-uk Kim DELAY(10); 79595d67482SBill Paul val = CSR_READ_4(sc, BGE_MI_COMM); 79695d67482SBill Paul if (!(val & BGE_MICOMM_BUSY)) 79795d67482SBill Paul break; 79895d67482SBill Paul } 79995d67482SBill Paul 80095d67482SBill Paul if (i == BGE_TIMEOUT) { 8015fea260fSMarius Strobl device_printf(sc->bge_dev, 8025fea260fSMarius Strobl "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", 8035fea260fSMarius Strobl phy, reg, val); 80437ceeb4dSPaul Saab val = 0; 80537ceeb4dSPaul Saab goto done; 80695d67482SBill Paul } 80795d67482SBill Paul 80838cc658fSJohn Baldwin DELAY(5); 80995d67482SBill Paul val = CSR_READ_4(sc, BGE_MI_COMM); 81095d67482SBill Paul 81137ceeb4dSPaul Saab done: 81237ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 81337ceeb4dSPaul Saab BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 81437ceeb4dSPaul Saab DELAY(40); 81537ceeb4dSPaul Saab } 81637ceeb4dSPaul Saab 81795d67482SBill Paul if (val & BGE_MICOMM_READFAIL) 81895d67482SBill Paul return (0); 81995d67482SBill Paul 8200c8aa4eaSJung-uk Kim return (val & 0xFFFF); 82195d67482SBill Paul } 82295d67482SBill Paul 82395d67482SBill Paul static int 8243f74909aSGleb Smirnoff bge_miibus_writereg(device_t dev, int phy, int reg, int val) 82595d67482SBill Paul { 82695d67482SBill Paul struct bge_softc *sc; 8273f74909aSGleb Smirnoff 
uint32_t autopoll; 82895d67482SBill Paul int i; 82995d67482SBill Paul 83095d67482SBill Paul sc = device_get_softc(dev); 83195d67482SBill Paul 83238cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 83338cc658fSJohn Baldwin (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) 83438cc658fSJohn Baldwin return (0); 83538cc658fSJohn Baldwin 83637ceeb4dSPaul Saab /* Reading with autopolling on may trigger PCI errors */ 83737ceeb4dSPaul Saab autopoll = CSR_READ_4(sc, BGE_MI_MODE); 83837ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 83937ceeb4dSPaul Saab BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 84037ceeb4dSPaul Saab DELAY(40); 84137ceeb4dSPaul Saab } 84237ceeb4dSPaul Saab 84395d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 84495d67482SBill Paul BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 84595d67482SBill Paul 84695d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 847d5d23857SJung-uk Kim DELAY(10); 84838cc658fSJohn Baldwin if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 84938cc658fSJohn Baldwin DELAY(5); 85038cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 85195d67482SBill Paul break; 852d5d23857SJung-uk Kim } 85338cc658fSJohn Baldwin } 854d5d23857SJung-uk Kim 855d5d23857SJung-uk Kim if (i == BGE_TIMEOUT) { 85638cc658fSJohn Baldwin device_printf(sc->bge_dev, 85738cc658fSJohn Baldwin "PHY write timed out (phy %d, reg %d, val %d)\n", 85838cc658fSJohn Baldwin phy, reg, val); 859d5d23857SJung-uk Kim return (0); 86095d67482SBill Paul } 86195d67482SBill Paul 86237ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 86337ceeb4dSPaul Saab BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 86437ceeb4dSPaul Saab DELAY(40); 86537ceeb4dSPaul Saab } 86637ceeb4dSPaul Saab 86795d67482SBill Paul return (0); 86895d67482SBill Paul } 86995d67482SBill Paul 87095d67482SBill Paul static void 8713f74909aSGleb Smirnoff bge_miibus_statchg(device_t dev) 87295d67482SBill Paul { 87395d67482SBill Paul struct bge_softc *sc; 87495d67482SBill Paul struct mii_data *mii; 87595d67482SBill Paul sc = device_get_softc(dev); 87695d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 87795d67482SBill Paul 87895d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); 879ea3b4127SPyun YongHyeon if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 880ea3b4127SPyun YongHyeon IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 88195d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); 8823f74909aSGleb Smirnoff else 88395d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); 88495d67482SBill Paul 8853f74909aSGleb Smirnoff if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 88695d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 8873f74909aSGleb Smirnoff else 88895d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); 88995d67482SBill Paul } 89095d67482SBill Paul 89195d67482SBill Paul /* 89295d67482SBill Paul * Intialize a standard receive ring descriptor. 
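/*
 * The MII read/write routines above temporarily clear BGE_MIMODE_AUTOPOLL
 * because accessing PHY registers with autopolling enabled may trigger
 * PCI errors.  A minimal sketch of that save/restore pattern, factored
 * into hypothetical helpers (not part of the driver):
 */
static uint32_t
bge_mi_autopoll_pause_sketch(struct bge_softc *sc)
{
	uint32_t autopoll;

	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}
	return (autopoll);
}

static void
bge_mi_autopoll_resume_sketch(struct bge_softc *sc, uint32_t autopoll)
{

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}
}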
89395d67482SBill Paul */ 89495d67482SBill Paul static int 895943787f3SPyun YongHyeon bge_newbuf_std(struct bge_softc *sc, int i) 89695d67482SBill Paul { 897943787f3SPyun YongHyeon struct mbuf *m; 89895d67482SBill Paul struct bge_rx_bd *r; 899a23634a1SPyun YongHyeon bus_dma_segment_t segs[1]; 900943787f3SPyun YongHyeon bus_dmamap_t map; 901a23634a1SPyun YongHyeon int error, nsegs; 90295d67482SBill Paul 903943787f3SPyun YongHyeon m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 904943787f3SPyun YongHyeon if (m == NULL) 90595d67482SBill Paul return (ENOBUFS); 906943787f3SPyun YongHyeon m->m_len = m->m_pkthdr.len = MCLBYTES; 907652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 908943787f3SPyun YongHyeon m_adj(m, ETHER_ALIGN); 909943787f3SPyun YongHyeon 9100ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag, 911943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0); 912a23634a1SPyun YongHyeon if (error != 0) { 913943787f3SPyun YongHyeon m_freem(m); 914a23634a1SPyun YongHyeon return (error); 915f41ac2beSBill Paul } 916943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 917943787f3SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 918943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); 919943787f3SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 920943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i]); 921943787f3SPyun YongHyeon } 922943787f3SPyun YongHyeon map = sc->bge_cdata.bge_rx_std_dmamap[i]; 923943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap; 924943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap = map; 925943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_chain[i] = m; 926e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len; 927943787f3SPyun YongHyeon r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; 928a23634a1SPyun YongHyeon r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 929a23634a1SPyun YongHyeon r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 930e907febfSPyun YongHyeon r->bge_flags = BGE_RXBDFLAG_END; 931a23634a1SPyun YongHyeon r->bge_len = segs[0].ds_len; 932e907febfSPyun YongHyeon r->bge_idx = i; 933f41ac2beSBill Paul 9340ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 935943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); 93695d67482SBill Paul 93795d67482SBill Paul return (0); 93895d67482SBill Paul } 93995d67482SBill Paul 94095d67482SBill Paul /* 94195d67482SBill Paul * Initialize a jumbo receive ring descriptor. This allocates 94295d67482SBill Paul * a jumbo buffer from the pool managed internally by the driver. 
94395d67482SBill Paul */ 94495d67482SBill Paul static int 945943787f3SPyun YongHyeon bge_newbuf_jumbo(struct bge_softc *sc, int i) 94695d67482SBill Paul { 9471be6acb7SGleb Smirnoff bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 948943787f3SPyun YongHyeon bus_dmamap_t map; 9491be6acb7SGleb Smirnoff struct bge_extrx_bd *r; 950943787f3SPyun YongHyeon struct mbuf *m; 951943787f3SPyun YongHyeon int error, nsegs; 95295d67482SBill Paul 953943787f3SPyun YongHyeon MGETHDR(m, M_DONTWAIT, MT_DATA); 954943787f3SPyun YongHyeon if (m == NULL) 95595d67482SBill Paul return (ENOBUFS); 95695d67482SBill Paul 957943787f3SPyun YongHyeon m_cljget(m, M_DONTWAIT, MJUM9BYTES); 958943787f3SPyun YongHyeon if (!(m->m_flags & M_EXT)) { 959943787f3SPyun YongHyeon m_freem(m); 96095d67482SBill Paul return (ENOBUFS); 96195d67482SBill Paul } 962943787f3SPyun YongHyeon m->m_len = m->m_pkthdr.len = MJUM9BYTES; 963652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 964943787f3SPyun YongHyeon m_adj(m, ETHER_ALIGN); 9651be6acb7SGleb Smirnoff 9661be6acb7SGleb Smirnoff error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 967943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); 968943787f3SPyun YongHyeon if (error != 0) { 969943787f3SPyun YongHyeon m_freem(m); 9701be6acb7SGleb Smirnoff return (error); 971f7cea149SGleb Smirnoff } 9721be6acb7SGleb Smirnoff 973943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_chain[i] == NULL) { 974943787f3SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 975943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); 976943787f3SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 977943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 978943787f3SPyun YongHyeon } 979943787f3SPyun YongHyeon map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; 980943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i] = 981943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap; 982943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap = map; 983943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_chain[i] = m; 984e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0; 985e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0; 986e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0; 987e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0; 988e0b7b101SPyun YongHyeon 9891be6acb7SGleb Smirnoff /* 9901be6acb7SGleb Smirnoff * Fill in the extended RX buffer descriptor. 
9911be6acb7SGleb Smirnoff */ 992943787f3SPyun YongHyeon r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; 9934e7ba1abSGleb Smirnoff r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; 9944e7ba1abSGleb Smirnoff r->bge_idx = i; 9954e7ba1abSGleb Smirnoff r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; 9964e7ba1abSGleb Smirnoff switch (nsegs) { 9974e7ba1abSGleb Smirnoff case 4: 9984e7ba1abSGleb Smirnoff r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); 9994e7ba1abSGleb Smirnoff r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); 10004e7ba1abSGleb Smirnoff r->bge_len3 = segs[3].ds_len; 1001e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len; 10024e7ba1abSGleb Smirnoff case 3: 1003e907febfSPyun YongHyeon r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); 1004e907febfSPyun YongHyeon r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); 1005e907febfSPyun YongHyeon r->bge_len2 = segs[2].ds_len; 1006e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len; 10074e7ba1abSGleb Smirnoff case 2: 10084e7ba1abSGleb Smirnoff r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); 10094e7ba1abSGleb Smirnoff r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); 10104e7ba1abSGleb Smirnoff r->bge_len1 = segs[1].ds_len; 1011e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len; 10124e7ba1abSGleb Smirnoff case 1: 10134e7ba1abSGleb Smirnoff r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); 10144e7ba1abSGleb Smirnoff r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); 10154e7ba1abSGleb Smirnoff r->bge_len0 = segs[0].ds_len; 1016e0b7b101SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len; 10174e7ba1abSGleb Smirnoff break; 10184e7ba1abSGleb Smirnoff default: 10194e7ba1abSGleb Smirnoff panic("%s: %d segments\n", __func__, nsegs); 10204e7ba1abSGleb Smirnoff } 1021f41ac2beSBill Paul 1022a41504a9SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 1023943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); 102495d67482SBill Paul 102595d67482SBill Paul return (0); 102695d67482SBill Paul } 102795d67482SBill Paul 102895d67482SBill Paul static int 10293f74909aSGleb Smirnoff bge_init_rx_ring_std(struct bge_softc *sc) 103095d67482SBill Paul { 10313ee5d7daSPyun YongHyeon int error, i; 103295d67482SBill Paul 1033e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 103403e78bd0SPyun YongHyeon sc->bge_std = 0; 1035e0b7b101SPyun YongHyeon for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1036943787f3SPyun YongHyeon if ((error = bge_newbuf_std(sc, i)) != 0) 10373ee5d7daSPyun YongHyeon return (error); 103803e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 103995d67482SBill Paul }; 104095d67482SBill Paul 1041f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1042d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 1043f41ac2beSBill Paul 1044e0b7b101SPyun YongHyeon sc->bge_std = 0; 1045e0b7b101SPyun YongHyeon bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); 104695d67482SBill Paul 104795d67482SBill Paul return (0); 104895d67482SBill Paul } 104995d67482SBill Paul 105095d67482SBill Paul static void 10513f74909aSGleb Smirnoff bge_free_rx_ring_std(struct bge_softc *sc) 105295d67482SBill Paul { 105395d67482SBill Paul int i; 105495d67482SBill Paul 105595d67482SBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 105695d67482SBill Paul if 
(sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 10570ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 1058e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], 1059e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 10600ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1061f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 1062e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1063e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_chain[i] = NULL; 106495d67482SBill Paul } 1065f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 106695d67482SBill Paul sizeof(struct bge_rx_bd)); 106795d67482SBill Paul } 106895d67482SBill Paul } 106995d67482SBill Paul 107095d67482SBill Paul static int 10713f74909aSGleb Smirnoff bge_init_rx_ring_jumbo(struct bge_softc *sc) 107295d67482SBill Paul { 107395d67482SBill Paul struct bge_rcb *rcb; 10743ee5d7daSPyun YongHyeon int error, i; 107595d67482SBill Paul 1076e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); 107703e78bd0SPyun YongHyeon sc->bge_jumbo = 0; 107895d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1079943787f3SPyun YongHyeon if ((error = bge_newbuf_jumbo(sc, i)) != 0) 10803ee5d7daSPyun YongHyeon return (error); 108103e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 108295d67482SBill Paul }; 108395d67482SBill Paul 1084f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1085d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 1086f41ac2beSBill Paul 1087e0b7b101SPyun YongHyeon sc->bge_jumbo = 0; 108895d67482SBill Paul 1089f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 10901be6acb7SGleb Smirnoff rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 10911be6acb7SGleb Smirnoff BGE_RCB_FLAG_USE_EXT_RX_BD); 109267111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 109395d67482SBill Paul 1094e0b7b101SPyun YongHyeon bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1); 109595d67482SBill Paul 109695d67482SBill Paul return (0); 109795d67482SBill Paul } 109895d67482SBill Paul 109995d67482SBill Paul static void 11003f74909aSGleb Smirnoff bge_free_rx_ring_jumbo(struct bge_softc *sc) 110195d67482SBill Paul { 110295d67482SBill Paul int i; 110395d67482SBill Paul 110495d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 110595d67482SBill Paul if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1106e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 1107e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], 1108e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 1109f41ac2beSBill Paul bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 1110f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1111e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1112e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 111395d67482SBill Paul } 1114f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 11151be6acb7SGleb Smirnoff sizeof(struct bge_extrx_bd)); 111695d67482SBill Paul } 111795d67482SBill Paul } 111895d67482SBill Paul 111995d67482SBill Paul static void 11203f74909aSGleb Smirnoff bge_free_tx_ring(struct bge_softc *sc) 112195d67482SBill Paul { 112295d67482SBill Paul int i; 112395d67482SBill Paul 1124f41ac2beSBill Paul if (sc->bge_ldata.bge_tx_ring == NULL) 112595d67482SBill Paul return; 112695d67482SBill Paul 112795d67482SBill Paul for (i = 0; 
i < BGE_TX_RING_CNT; i++) { 112895d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 11290ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 1130e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[i], 1131e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 11320ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 1133f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 1134e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[i]); 1135e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[i] = NULL; 113695d67482SBill Paul } 1137f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 113895d67482SBill Paul sizeof(struct bge_tx_bd)); 113995d67482SBill Paul } 114095d67482SBill Paul } 114195d67482SBill Paul 114295d67482SBill Paul static int 11433f74909aSGleb Smirnoff bge_init_tx_ring(struct bge_softc *sc) 114495d67482SBill Paul { 114595d67482SBill Paul sc->bge_txcnt = 0; 114695d67482SBill Paul sc->bge_tx_saved_considx = 0; 11473927098fSPaul Saab 1148e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1149e6bf277eSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 11505c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 1151e6bf277eSPyun YongHyeon 115214bbd30fSGleb Smirnoff /* Initialize transmit producer index for host-memory send ring. */ 115314bbd30fSGleb Smirnoff sc->bge_tx_prodidx = 0; 115438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 115514bbd30fSGleb Smirnoff 11563927098fSPaul Saab /* 5700 b2 errata */ 1157e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 115838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 11593927098fSPaul Saab 116014bbd30fSGleb Smirnoff /* NIC-memory send ring not used; initialize to zero. */ 116138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 11623927098fSPaul Saab /* 5700 b2 errata */ 1163e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 116438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 116595d67482SBill Paul 116695d67482SBill Paul return (0); 116795d67482SBill Paul } 116895d67482SBill Paul 116995d67482SBill Paul static void 11703e9b1bcaSJung-uk Kim bge_setpromisc(struct bge_softc *sc) 11713e9b1bcaSJung-uk Kim { 11723e9b1bcaSJung-uk Kim struct ifnet *ifp; 11733e9b1bcaSJung-uk Kim 11743e9b1bcaSJung-uk Kim BGE_LOCK_ASSERT(sc); 11753e9b1bcaSJung-uk Kim 11763e9b1bcaSJung-uk Kim ifp = sc->bge_ifp; 11773e9b1bcaSJung-uk Kim 117845ee6ab3SJung-uk Kim /* Enable or disable promiscuous mode as needed. 
*/ 11793e9b1bcaSJung-uk Kim if (ifp->if_flags & IFF_PROMISC) 118045ee6ab3SJung-uk Kim BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 11813e9b1bcaSJung-uk Kim else 118245ee6ab3SJung-uk Kim BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 11833e9b1bcaSJung-uk Kim } 11843e9b1bcaSJung-uk Kim 11853e9b1bcaSJung-uk Kim static void 11863f74909aSGleb Smirnoff bge_setmulti(struct bge_softc *sc) 118795d67482SBill Paul { 118895d67482SBill Paul struct ifnet *ifp; 118995d67482SBill Paul struct ifmultiaddr *ifma; 11903f74909aSGleb Smirnoff uint32_t hashes[4] = { 0, 0, 0, 0 }; 119195d67482SBill Paul int h, i; 119295d67482SBill Paul 11930f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 11940f9bd73bSSam Leffler 1195fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 119695d67482SBill Paul 119795d67482SBill Paul if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 119895d67482SBill Paul for (i = 0; i < 4; i++) 11990c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 120095d67482SBill Paul return; 120195d67482SBill Paul } 120295d67482SBill Paul 120395d67482SBill Paul /* First, zot all the existing filters. */ 120495d67482SBill Paul for (i = 0; i < 4; i++) 120595d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 120695d67482SBill Paul 120795d67482SBill Paul /* Now program new ones. */ 1208eb956cd0SRobert Watson if_maddr_rlock(ifp); 120995d67482SBill Paul TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 121095d67482SBill Paul if (ifma->ifma_addr->sa_family != AF_LINK) 121195d67482SBill Paul continue; 12120e939c0cSChristian Weisgerber h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 12130c8aa4eaSJung-uk Kim ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 12140c8aa4eaSJung-uk Kim hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 121595d67482SBill Paul } 1216eb956cd0SRobert Watson if_maddr_runlock(ifp); 121795d67482SBill Paul 121895d67482SBill Paul for (i = 0; i < 4; i++) 121995d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 122095d67482SBill Paul } 122195d67482SBill Paul 12228cb1383cSDoug Ambrisko static void 1223cb2eacc7SYaroslav Tykhiy bge_setvlan(struct bge_softc *sc) 1224cb2eacc7SYaroslav Tykhiy { 1225cb2eacc7SYaroslav Tykhiy struct ifnet *ifp; 1226cb2eacc7SYaroslav Tykhiy 1227cb2eacc7SYaroslav Tykhiy BGE_LOCK_ASSERT(sc); 1228cb2eacc7SYaroslav Tykhiy 1229cb2eacc7SYaroslav Tykhiy ifp = sc->bge_ifp; 1230cb2eacc7SYaroslav Tykhiy 1231cb2eacc7SYaroslav Tykhiy /* Enable or disable VLAN tag stripping as needed. 
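 * When IFCAP_VLAN_HWTAGGING is enabled the KEEP_VLAN_DIAG bit is
 * cleared below so the controller strips the 802.1Q tag itself;
 * otherwise the bit is set and tags are left in the received frame.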
*/ 1232cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1233cb2eacc7SYaroslav Tykhiy BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1234cb2eacc7SYaroslav Tykhiy else 1235cb2eacc7SYaroslav Tykhiy BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1236cb2eacc7SYaroslav Tykhiy } 1237cb2eacc7SYaroslav Tykhiy 1238cb2eacc7SYaroslav Tykhiy static void 1239797ab05eSPyun YongHyeon bge_sig_pre_reset(struct bge_softc *sc, int type) 12408cb1383cSDoug Ambrisko { 1241797ab05eSPyun YongHyeon 12428cb1383cSDoug Ambrisko /* 12438cb1383cSDoug Ambrisko * Some chips don't like this so only do this if ASF is enabled 12448cb1383cSDoug Ambrisko */ 12458cb1383cSDoug Ambrisko if (sc->bge_asf_mode) 12468cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 12478cb1383cSDoug Ambrisko 12488cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12498cb1383cSDoug Ambrisko switch (type) { 12508cb1383cSDoug Ambrisko case BGE_RESET_START: 12518cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 12528cb1383cSDoug Ambrisko break; 12538cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12548cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 12558cb1383cSDoug Ambrisko break; 12568cb1383cSDoug Ambrisko } 12578cb1383cSDoug Ambrisko } 12588cb1383cSDoug Ambrisko } 12598cb1383cSDoug Ambrisko 12608cb1383cSDoug Ambrisko static void 1261797ab05eSPyun YongHyeon bge_sig_post_reset(struct bge_softc *sc, int type) 12628cb1383cSDoug Ambrisko { 1263797ab05eSPyun YongHyeon 12648cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12658cb1383cSDoug Ambrisko switch (type) { 12668cb1383cSDoug Ambrisko case BGE_RESET_START: 12678cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001); 12688cb1383cSDoug Ambrisko /* START DONE */ 12698cb1383cSDoug Ambrisko break; 12708cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12718cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); 12728cb1383cSDoug Ambrisko break; 12738cb1383cSDoug Ambrisko } 12748cb1383cSDoug Ambrisko } 12758cb1383cSDoug Ambrisko } 12768cb1383cSDoug Ambrisko 12778cb1383cSDoug Ambrisko static void 1278797ab05eSPyun YongHyeon bge_sig_legacy(struct bge_softc *sc, int type) 12798cb1383cSDoug Ambrisko { 1280797ab05eSPyun YongHyeon 12818cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 12828cb1383cSDoug Ambrisko switch (type) { 12838cb1383cSDoug Ambrisko case BGE_RESET_START: 12848cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 12858cb1383cSDoug Ambrisko break; 12868cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12878cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 12888cb1383cSDoug Ambrisko break; 12898cb1383cSDoug Ambrisko } 12908cb1383cSDoug Ambrisko } 12918cb1383cSDoug Ambrisko } 12928cb1383cSDoug Ambrisko 1293797ab05eSPyun YongHyeon static void 1294797ab05eSPyun YongHyeon bge_stop_fw(struct bge_softc *sc) 12958cb1383cSDoug Ambrisko { 12968cb1383cSDoug Ambrisko int i; 12978cb1383cSDoug Ambrisko 12988cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 12998cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); 13008cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 130139153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 13028cb1383cSDoug Ambrisko 13038cb1383cSDoug Ambrisko for (i = 0; i < 100; i++ ) { 13048cb1383cSDoug Ambrisko if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) 13058cb1383cSDoug Ambrisko break; 13068cb1383cSDoug Ambrisko 
DELAY(10); 13078cb1383cSDoug Ambrisko } 13088cb1383cSDoug Ambrisko } 13098cb1383cSDoug Ambrisko } 13108cb1383cSDoug Ambrisko 131195d67482SBill Paul /* 1312c9ffd9f0SMarius Strobl * Do endian, PCI and DMA initialization. 131395d67482SBill Paul */ 131495d67482SBill Paul static int 13153f74909aSGleb Smirnoff bge_chipinit(struct bge_softc *sc) 131695d67482SBill Paul { 13173f74909aSGleb Smirnoff uint32_t dma_rw_ctl; 1318fbc374afSPyun YongHyeon uint16_t val; 131995d67482SBill Paul int i; 132095d67482SBill Paul 13218cb1383cSDoug Ambrisko /* Set endianness before we access any non-PCI registers. */ 1322e907febfSPyun YongHyeon pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 132395d67482SBill Paul 132495d67482SBill Paul /* Clear the MAC control register */ 132595d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 132695d67482SBill Paul 132795d67482SBill Paul /* 132895d67482SBill Paul * Clear the MAC statistics block in the NIC's 132995d67482SBill Paul * internal memory. 133095d67482SBill Paul */ 133195d67482SBill Paul for (i = BGE_STATS_BLOCK; 13323f74909aSGleb Smirnoff i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 133395d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 133495d67482SBill Paul 133595d67482SBill Paul for (i = BGE_STATUS_BLOCK; 13363f74909aSGleb Smirnoff i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 133795d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 133895d67482SBill Paul 1339fbc374afSPyun YongHyeon if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { 1340fbc374afSPyun YongHyeon /* 1341d896b3feSPyun YongHyeon * Fix data corruption caused by non-qword write with WB. 1342fbc374afSPyun YongHyeon * Fix master abort in PCI mode. 1343fbc374afSPyun YongHyeon * Fix PCI latency timer. 1344fbc374afSPyun YongHyeon */ 1345fbc374afSPyun YongHyeon val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); 1346fbc374afSPyun YongHyeon val |= (1 << 10) | (1 << 12) | (1 << 13); 1347fbc374afSPyun YongHyeon pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); 1348fbc374afSPyun YongHyeon } 1349fbc374afSPyun YongHyeon 1350186f842bSJung-uk Kim /* 1351186f842bSJung-uk Kim * Set up the PCI DMA control register. 1352186f842bSJung-uk Kim */ 1353186f842bSJung-uk Kim dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | 1354186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); 1355652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 1356186f842bSJung-uk Kim /* Read watermark not used, 128 bytes for write. */ 1357186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1358652ae483SGleb Smirnoff } else if (sc->bge_flags & BGE_FLAG_PCIX) { 13594c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) { 1360186f842bSJung-uk Kim /* 256 bytes for read and write. */ 1361186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 1362186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 1363186f842bSJung-uk Kim dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? 1364186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : 1365186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1366cbb2b2feSPyun YongHyeon } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { 1367cbb2b2feSPyun YongHyeon /* 1368cbb2b2feSPyun YongHyeon * In the BCM5703, the DMA read watermark should 1369cbb2b2feSPyun YongHyeon * be set to less than or equal to the maximum 1370cbb2b2feSPyun YongHyeon * memory read byte count of the PCI-X command 1371cbb2b2feSPyun YongHyeon * register. 
1372cbb2b2feSPyun YongHyeon */
1373cbb2b2feSPyun YongHyeon dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1374cbb2b2feSPyun YongHyeon BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1375186f842bSJung-uk Kim } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1376186f842bSJung-uk Kim /* 1536 bytes for read, 384 bytes for write. */
1377186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1378186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1379186f842bSJung-uk Kim } else {
1380186f842bSJung-uk Kim /* 384 bytes for read and write. */
1381186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1382186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
13830c8aa4eaSJung-uk Kim 0x0F;
1384186f842bSJung-uk Kim }
1385e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1386e0ced696SPaul Saab sc->bge_asicrev == BGE_ASICREV_BCM5704) {
13873f74909aSGleb Smirnoff uint32_t tmp;
13885cba12d3SPaul Saab
1389186f842bSJung-uk Kim /* Set ONE_DMA_AT_ONCE for hardware workaround. */
13900c8aa4eaSJung-uk Kim tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1391186f842bSJung-uk Kim if (tmp == 6 || tmp == 7)
1392186f842bSJung-uk Kim dma_rw_ctl |=
1393186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
13945cba12d3SPaul Saab
1395186f842bSJung-uk Kim /* Set PCI-X DMA write workaround. */
1396186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1397186f842bSJung-uk Kim }
1398186f842bSJung-uk Kim } else {
1399186f842bSJung-uk Kim /* Conventional PCI bus: 256 bytes for read and write. */
1400186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1401186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1402186f842bSJung-uk Kim
1403186f842bSJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1404186f842bSJung-uk Kim sc->bge_asicrev != BGE_ASICREV_BCM5750)
1405186f842bSJung-uk Kim dma_rw_ctl |= 0x0F;
1406186f842bSJung-uk Kim }
1407186f842bSJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1408186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5701)
1409186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1410186f842bSJung-uk Kim BGE_PCIDMARWCTL_ASRT_ALL_BE;
1411e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1412186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5704)
14135cba12d3SPaul Saab dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
14145cba12d3SPaul Saab pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
141595d67482SBill Paul
141695d67482SBill Paul /*
141795d67482SBill Paul * Set up general mode register.
141895d67482SBill Paul */
1419e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
142095d67482SBill Paul BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1421ee7ef91cSOleg Bulyzhin BGE_MODECTL_TX_NO_PHDR_CSUM);
142295d67482SBill Paul
142395d67482SBill Paul /*
142490447aadSMarius Strobl * The BCM5701 B5 has a bug causing data corruption when using
142590447aadSMarius Strobl * 64-bit DMA reads, which can be terminated early and then
142690447aadSMarius Strobl * completed later as 32-bit accesses, in combination with
142790447aadSMarius Strobl * certain bridges.
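 * The workaround below forces the affected revision into 32-bit PCI
 * mode with BGE_MODECTL_FORCE_PCI32.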
142890447aadSMarius Strobl */ 142990447aadSMarius Strobl if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 143090447aadSMarius Strobl sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 143190447aadSMarius Strobl BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32); 143290447aadSMarius Strobl 143390447aadSMarius Strobl /* 14348cb1383cSDoug Ambrisko * Tell the firmware the driver is running 14358cb1383cSDoug Ambrisko */ 14368cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 14378cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 14388cb1383cSDoug Ambrisko 14398cb1383cSDoug Ambrisko /* 1440ea13bdd5SJohn Polstra * Disable memory write invalidate. Apparently it is not supported 1441c9ffd9f0SMarius Strobl * properly by these devices. Also ensure that INTx isn't disabled, 1442c9ffd9f0SMarius Strobl * as these chips need it even when using MSI. 144395d67482SBill Paul */ 1444c9ffd9f0SMarius Strobl PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1445c9ffd9f0SMarius Strobl PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4); 144695d67482SBill Paul 144795d67482SBill Paul /* Set the timer prescaler (always 66Mhz) */ 14480c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 144995d67482SBill Paul 145038cc658fSJohn Baldwin /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */ 145138cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 145238cc658fSJohn Baldwin DELAY(40); /* XXX */ 145338cc658fSJohn Baldwin 145438cc658fSJohn Baldwin /* Put PHY into ready state */ 145538cc658fSJohn Baldwin BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 145638cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 145738cc658fSJohn Baldwin DELAY(40); 145838cc658fSJohn Baldwin } 145938cc658fSJohn Baldwin 146095d67482SBill Paul return (0); 146195d67482SBill Paul } 146295d67482SBill Paul 146395d67482SBill Paul static int 14643f74909aSGleb Smirnoff bge_blockinit(struct bge_softc *sc) 146595d67482SBill Paul { 146695d67482SBill Paul struct bge_rcb *rcb; 1467e907febfSPyun YongHyeon bus_size_t vrcb; 1468e907febfSPyun YongHyeon bge_hostaddr taddr; 14696f8718a3SScott Long uint32_t val; 147095d67482SBill Paul int i; 147195d67482SBill Paul 147295d67482SBill Paul /* 147395d67482SBill Paul * Initialize the memory window pointer register so that 147495d67482SBill Paul * we can access the first 32K of internal NIC RAM. This will 147595d67482SBill Paul * allow us to set up the TX send ring RCBs and the RX return 147695d67482SBill Paul * ring RCBs, plus other things which live in NIC memory. 147795d67482SBill Paul */ 147895d67482SBill Paul CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 147995d67482SBill Paul 1480822f63fcSBill Paul /* Note: the BCM5704 has a smaller mbuf space than other chips. 
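 * That is why the 5704's mbuf pool length is set to 0x10000 below
 * while the other non-5705-class chips use 0x18000.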
*/ 1481822f63fcSBill Paul 14827ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 148395d67482SBill Paul /* Configure mbuf memory pool */ 14840dae9719SJung-uk Kim CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1485822f63fcSBill Paul if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1486822f63fcSBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1487822f63fcSBill Paul else 148895d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 148995d67482SBill Paul 149095d67482SBill Paul /* Configure DMA resource pool */ 14910434d1b8SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 14920434d1b8SBill Paul BGE_DMA_DESCRIPTORS); 149395d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 14940434d1b8SBill Paul } 149595d67482SBill Paul 149695d67482SBill Paul /* Configure mbuf pool watermarks */ 149738cc658fSJohn Baldwin if (!BGE_IS_5705_PLUS(sc)) { 1498fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1499fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1500fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 150138cc658fSJohn Baldwin } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 150238cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 150338cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 150438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 150538cc658fSJohn Baldwin } else { 150638cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 150738cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 150838cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 150938cc658fSJohn Baldwin } 151095d67482SBill Paul 151195d67482SBill Paul /* Configure DMA resource watermarks */ 151295d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 151395d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 151495d67482SBill Paul 151595d67482SBill Paul /* Enable buffer manager */ 15167ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 151795d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_MODE, 151895d67482SBill Paul BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 151995d67482SBill Paul 152095d67482SBill Paul /* Poll for buffer manager start indication */ 152195d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1522d5d23857SJung-uk Kim DELAY(10); 15230c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 152495d67482SBill Paul break; 152595d67482SBill Paul } 152695d67482SBill Paul 152795d67482SBill Paul if (i == BGE_TIMEOUT) { 1528fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1529fe806fdaSPyun YongHyeon "buffer manager failed to start\n"); 153095d67482SBill Paul return (ENXIO); 153195d67482SBill Paul } 15320434d1b8SBill Paul } 153395d67482SBill Paul 153495d67482SBill Paul /* Enable flow-through queues */ 15350c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 153695d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 153795d67482SBill Paul 153895d67482SBill Paul /* Wait until queue initialization is complete */ 153995d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1540d5d23857SJung-uk Kim DELAY(10); 154195d67482SBill Paul if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 154295d67482SBill Paul break; 154395d67482SBill Paul } 154495d67482SBill Paul 154595d67482SBill Paul if (i == BGE_TIMEOUT) { 1546fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "flow-through queue init failed\n"); 154795d67482SBill Paul return (ENXIO); 
154895d67482SBill Paul }
154995d67482SBill Paul
155095d67482SBill Paul /* Initialize the standard RX ring control block */
1551f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
1552f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_lo =
1553f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
1554f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_hi =
1555f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
1556f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1557f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
15587ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc))
15590434d1b8SBill Paul rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
15600434d1b8SBill Paul else
15610434d1b8SBill Paul rcb->bge_maxlen_flags =
15620434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
156395d67482SBill Paul rcb->bge_nicaddr = BGE_STD_RX_RINGS;
15640c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
15650c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1566f41ac2beSBill Paul
156767111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
156867111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
156995d67482SBill Paul
157095d67482SBill Paul /*
157195d67482SBill Paul * Initialize the jumbo RX ring control block.
157295d67482SBill Paul * We set the 'ring disabled' bit in the flags
157395d67482SBill Paul * field until we're actually ready to start
157495d67482SBill Paul * using this ring (i.e. once we set the MTU
157595d67482SBill Paul * high enough to require it).
157695d67482SBill Paul */
15774c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) {
1578f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1579f41ac2beSBill Paul
1580f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_lo =
1581f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1582f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_hi =
1583f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1584f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1585f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map,
1586f41ac2beSBill Paul BUS_DMASYNC_PREREAD);
15871be6acb7SGleb Smirnoff rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
15881be6acb7SGleb Smirnoff BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
158995d67482SBill Paul rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
159067111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
159167111612SJohn Polstra rcb->bge_hostaddr.bge_addr_hi);
159267111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
159367111612SJohn Polstra rcb->bge_hostaddr.bge_addr_lo);
1594f41ac2beSBill Paul
15950434d1b8SBill Paul CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
15960434d1b8SBill Paul rcb->bge_maxlen_flags);
159767111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
159895d67482SBill Paul
159995d67482SBill Paul /* Set up dummy disabled mini ring RCB */
1600f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
160167111612SJohn Polstra rcb->bge_maxlen_flags =
160267111612SJohn Polstra BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
16030434d1b8SBill Paul CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
16040434d1b8SBill Paul rcb->bge_maxlen_flags);
16050434d1b8SBill Paul }
160695d67482SBill Paul
160795d67482SBill Paul /*
160895d67482SBill Paul * Set the BD ring replenish
thresholds. The recommended 160995d67482SBill Paul * values are 1/8th the number of descriptors allocated to 161095d67482SBill Paul * each ring. 16119ba784dbSScott Long * XXX The 5754 requires a lower threshold, so it might be a 16129ba784dbSScott Long * requirement of all 575x family chips. The Linux driver sets 16139ba784dbSScott Long * the lower threshold for all 5705 family chips as well, but there 16149ba784dbSScott Long * are reports that it might not need to be so strict. 161538cc658fSJohn Baldwin * 161638cc658fSJohn Baldwin * XXX Linux does some extra fiddling here for the 5906 parts as 161738cc658fSJohn Baldwin * well. 161895d67482SBill Paul */ 16195345bad0SScott Long if (BGE_IS_5705_PLUS(sc)) 16206f8718a3SScott Long val = 8; 16216f8718a3SScott Long else 16226f8718a3SScott Long val = BGE_STD_RX_RING_CNT / 8; 16236f8718a3SScott Long CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); 16242a141b94SPyun YongHyeon if (BGE_IS_JUMBO_CAPABLE(sc)) 16252a141b94SPyun YongHyeon CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, 16262a141b94SPyun YongHyeon BGE_JUMBO_RX_RING_CNT/8); 162795d67482SBill Paul 162895d67482SBill Paul /* 162995d67482SBill Paul * Disable all unused send rings by setting the 'ring disabled' 163095d67482SBill Paul * bit in the flags field of all the TX send ring control blocks. 163195d67482SBill Paul * These are located in NIC memory. 163295d67482SBill Paul */ 1633e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 163495d67482SBill Paul for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1635e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1636e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1637e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1638e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 163995d67482SBill Paul } 164095d67482SBill Paul 164195d67482SBill Paul /* Configure TX RCB 0 (we use only the first ring) */ 1642e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1643e907febfSPyun YongHyeon BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1644e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1645e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1646e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1647e907febfSPyun YongHyeon BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 16487ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 1649e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1650e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 165195d67482SBill Paul 165295d67482SBill Paul /* Disable all unused RX return rings */ 1653e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 165495d67482SBill Paul for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1655e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1656e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1657e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 16580434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1659e907febfSPyun YongHyeon BGE_RCB_FLAG_RING_DISABLED)); 1660e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 166138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 16623f74909aSGleb Smirnoff (i * (sizeof(uint64_t))), 0); 1663e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 166495d67482SBill Paul } 166595d67482SBill Paul 166695d67482SBill Paul /* Initialize RX ring indexes */ 
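/*
 * The RX producer mailboxes start out at zero here; bge_init_rx_ring_std()
 * and bge_init_rx_ring_jumbo() advance them to ring_cnt - 1 once the
 * rings have actually been filled with mbufs.
 */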
166738cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
16682a141b94SPyun YongHyeon if (BGE_IS_JUMBO_CAPABLE(sc))
166938cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
16702a141b94SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
167138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
167295d67482SBill Paul
167395d67482SBill Paul /*
167495d67482SBill Paul * Set up RX return ring 0
167595d67482SBill Paul * Note that the NIC address for RX return rings is 0x00000000.
167695d67482SBill Paul * The return rings live entirely within the host, so the
167795d67482SBill Paul * nicaddr field in the RCB isn't used.
167895d67482SBill Paul */
1679e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1680e907febfSPyun YongHyeon BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1681e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1682e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1683e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1684e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1685e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
168695d67482SBill Paul
168795d67482SBill Paul /* Set random backoff seed for TX */
168895d67482SBill Paul CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
16894a0d6638SRuslan Ermilov IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
16904a0d6638SRuslan Ermilov IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
16914a0d6638SRuslan Ermilov IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
169295d67482SBill Paul BGE_TX_BACKOFF_SEED_MASK);
169395d67482SBill Paul
169495d67482SBill Paul /* Set inter-packet gap */
169595d67482SBill Paul CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
169695d67482SBill Paul
169795d67482SBill Paul /*
169895d67482SBill Paul * Specify which ring to use for packets that don't match
169995d67482SBill Paul * any RX rules.
170095d67482SBill Paul */
170195d67482SBill Paul CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
170295d67482SBill Paul
170395d67482SBill Paul /*
170495d67482SBill Paul * Configure number of RX lists. One interrupt distribution
170595d67482SBill Paul * list, sixteen active lists, one bad frames class.
170695d67482SBill Paul */
170795d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
170895d67482SBill Paul
170995d67482SBill Paul /* Initialize RX list placement stats mask. */
17100c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
171195d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
171295d67482SBill Paul
171395d67482SBill Paul /* Disable host coalescing until we get it set up */
171495d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
171595d67482SBill Paul
171695d67482SBill Paul /* Poll to make sure it's shut down.
*/ 171795d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1718d5d23857SJung-uk Kim DELAY(10); 171995d67482SBill Paul if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 172095d67482SBill Paul break; 172195d67482SBill Paul } 172295d67482SBill Paul 172395d67482SBill Paul if (i == BGE_TIMEOUT) { 1724fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1725fe806fdaSPyun YongHyeon "host coalescing engine failed to idle\n"); 172695d67482SBill Paul return (ENXIO); 172795d67482SBill Paul } 172895d67482SBill Paul 172995d67482SBill Paul /* Set up host coalescing defaults */ 173095d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 173195d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 173295d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 173395d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 17347ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 173595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 173695d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 17370434d1b8SBill Paul } 1738b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 1739b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 174095d67482SBill Paul 174195d67482SBill Paul /* Set up address of statistics block */ 17427ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 1743f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1744f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 174595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1746f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 17470434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 174895d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 17490434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 17500434d1b8SBill Paul } 17510434d1b8SBill Paul 17520434d1b8SBill Paul /* Set up address of status block */ 1753f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1754f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 175595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1756f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 175795d67482SBill Paul 175830f57f61SPyun YongHyeon /* Set up status block size. 
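 * BCM5700 revisions other than C0 use the full-sized status block, so
 * the whole BGE_STATUS_BLK_SZ area is cleared; every other chip only
 * uses the first 32 bytes and gets BGE_STATBLKSZ_32BYTE.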
*/ 175930f57f61SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 1760864104feSPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { 176130f57f61SPyun YongHyeon val = BGE_STATBLKSZ_FULL; 1762864104feSPyun YongHyeon bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 1763864104feSPyun YongHyeon } else { 176430f57f61SPyun YongHyeon val = BGE_STATBLKSZ_32BYTE; 1765864104feSPyun YongHyeon bzero(sc->bge_ldata.bge_status_block, 32); 1766864104feSPyun YongHyeon } 1767864104feSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 1768864104feSPyun YongHyeon sc->bge_cdata.bge_status_map, 1769864104feSPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 177030f57f61SPyun YongHyeon 177195d67482SBill Paul /* Turn on host coalescing state machine */ 177230f57f61SPyun YongHyeon CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 177395d67482SBill Paul 177495d67482SBill Paul /* Turn on RX BD completion state machine and enable attentions */ 177595d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDC_MODE, 177695d67482SBill Paul BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 177795d67482SBill Paul 177895d67482SBill Paul /* Turn on RX list placement state machine */ 177995d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 178095d67482SBill Paul 178195d67482SBill Paul /* Turn on RX list selector state machine. */ 17827ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 178395d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 178495d67482SBill Paul 1785ea3b4127SPyun YongHyeon val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | 1786ea3b4127SPyun YongHyeon BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | 1787ea3b4127SPyun YongHyeon BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | 1788ea3b4127SPyun YongHyeon BGE_MACMODE_FRMHDR_DMA_ENB; 1789ea3b4127SPyun YongHyeon 1790ea3b4127SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TBI) 1791ea3b4127SPyun YongHyeon val |= BGE_PORTMODE_TBI; 1792ea3b4127SPyun YongHyeon else if (sc->bge_flags & BGE_FLAG_MII_SERDES) 1793ea3b4127SPyun YongHyeon val |= BGE_PORTMODE_GMII; 1794ea3b4127SPyun YongHyeon else 1795ea3b4127SPyun YongHyeon val |= BGE_PORTMODE_MII; 1796ea3b4127SPyun YongHyeon 179795d67482SBill Paul /* Turn on DMA, clear stats */ 1798ea3b4127SPyun YongHyeon CSR_WRITE_4(sc, BGE_MAC_MODE, val); 179995d67482SBill Paul 180095d67482SBill Paul /* Set misc. local control, enable interrupts on attentions */ 180195d67482SBill Paul CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 180295d67482SBill Paul 180395d67482SBill Paul #ifdef notdef 180495d67482SBill Paul /* Assert GPIO pins for PHY reset */ 180595d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | 180695d67482SBill Paul BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); 180795d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | 180895d67482SBill Paul BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); 180995d67482SBill Paul #endif 181095d67482SBill Paul 181195d67482SBill Paul /* Turn on DMA completion state machine */ 18127ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 181395d67482SBill Paul CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 181495d67482SBill Paul 18156f8718a3SScott Long val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 18166f8718a3SScott Long 18176f8718a3SScott Long /* Enable host coalescing bug fix. 
*/ 1818a5779553SStanislav Sedov if (BGE_IS_5755_PLUS(sc)) 18193889907fSStanislav Sedov val |= BGE_WDMAMODE_STATUS_TAG_FIX; 18206f8718a3SScott Long 182195d67482SBill Paul /* Turn on write DMA state machine */ 18226f8718a3SScott Long CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 18234f09c4c7SMarius Strobl DELAY(40); 182495d67482SBill Paul 182595d67482SBill Paul /* Turn on read DMA state machine */ 18264f09c4c7SMarius Strobl val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1827a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1828a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1829a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM57780) 1830a5779553SStanislav Sedov val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1831a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1832a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 18334f09c4c7SMarius Strobl if (sc->bge_flags & BGE_FLAG_PCIE) 18344f09c4c7SMarius Strobl val |= BGE_RDMAMODE_FIFO_LONG_BURST; 183555a24a05SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) { 1836ca3f1187SPyun YongHyeon val |= BGE_RDMAMODE_TSO4_ENABLE; 183755a24a05SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5785 || 183855a24a05SPyun YongHyeon sc->bge_asicrev == BGE_ASICREV_BCM57780) 183955a24a05SPyun YongHyeon val |= BGE_RDMAMODE_TSO6_ENABLE; 184055a24a05SPyun YongHyeon } 18414f09c4c7SMarius Strobl CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 18424f09c4c7SMarius Strobl DELAY(40); 184395d67482SBill Paul 184495d67482SBill Paul /* Turn on RX data completion state machine */ 184595d67482SBill Paul CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 184695d67482SBill Paul 184795d67482SBill Paul /* Turn on RX BD initiator state machine */ 184895d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 184995d67482SBill Paul 185095d67482SBill Paul /* Turn on RX data and RX BD initiator state machine */ 185195d67482SBill Paul CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 185295d67482SBill Paul 185395d67482SBill Paul /* Turn on Mbuf cluster free state machine */ 18547ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 185595d67482SBill Paul CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 185695d67482SBill Paul 185795d67482SBill Paul /* Turn on send BD completion state machine */ 185895d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 185995d67482SBill Paul 186095d67482SBill Paul /* Turn on send data completion state machine */ 1861a5779553SStanislav Sedov val = BGE_SDCMODE_ENABLE; 1862a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 1863a5779553SStanislav Sedov val |= BGE_SDCMODE_CDELAY; 1864a5779553SStanislav Sedov CSR_WRITE_4(sc, BGE_SDC_MODE, val); 186595d67482SBill Paul 186695d67482SBill Paul /* Turn on send data initiator state machine */ 1867ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) 1868ca3f1187SPyun YongHyeon CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1869ca3f1187SPyun YongHyeon else 187095d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 187195d67482SBill Paul 187295d67482SBill Paul /* Turn on send BD initiator state machine */ 187395d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 187495d67482SBill Paul 187595d67482SBill Paul /* Turn on send BD selector state machine */ 187695d67482SBill Paul CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 187795d67482SBill Paul 18780c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 187995d67482SBill Paul CSR_WRITE_4(sc, 
BGE_SDI_STATS_CTL, 188095d67482SBill Paul BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 188195d67482SBill Paul 188295d67482SBill Paul /* ack/clear link change events */ 188395d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 18840434d1b8SBill Paul BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 18850434d1b8SBill Paul BGE_MACSTAT_LINK_CHANGED); 1886f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_MI_STS, 0); 188795d67482SBill Paul 188895d67482SBill Paul /* Enable PHY auto polling (for MII/GMII only) */ 1889652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 189095d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1891a1d52896SBill Paul } else { 18926098821cSJung-uk Kim BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 18931f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 18944c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 1895a1d52896SBill Paul CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1896a1d52896SBill Paul BGE_EVTENB_MI_INTERRUPT); 1897a1d52896SBill Paul } 189895d67482SBill Paul 18991f313773SOleg Bulyzhin /* 19001f313773SOleg Bulyzhin * Clear any pending link state attention. 19011f313773SOleg Bulyzhin * Otherwise some link state change events may be lost until attention 19021f313773SOleg Bulyzhin * is cleared by bge_intr() -> bge_link_upd() sequence. 19031f313773SOleg Bulyzhin * It's not necessary on newer BCM chips - perhaps enabling link 19041f313773SOleg Bulyzhin * state change attentions implies clearing pending attention. 19051f313773SOleg Bulyzhin */ 19061f313773SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 19071f313773SOleg Bulyzhin BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 19081f313773SOleg Bulyzhin BGE_MACSTAT_LINK_CHANGED); 19091f313773SOleg Bulyzhin 191095d67482SBill Paul /* Enable link state change attentions. */ 191195d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 191295d67482SBill Paul 191395d67482SBill Paul return (0); 191495d67482SBill Paul } 191595d67482SBill Paul 19164c0da0ffSGleb Smirnoff const struct bge_revision * 19174c0da0ffSGleb Smirnoff bge_lookup_rev(uint32_t chipid) 19184c0da0ffSGleb Smirnoff { 19194c0da0ffSGleb Smirnoff const struct bge_revision *br; 19204c0da0ffSGleb Smirnoff 19214c0da0ffSGleb Smirnoff for (br = bge_revisions; br->br_name != NULL; br++) { 19224c0da0ffSGleb Smirnoff if (br->br_chipid == chipid) 19234c0da0ffSGleb Smirnoff return (br); 19244c0da0ffSGleb Smirnoff } 19254c0da0ffSGleb Smirnoff 19264c0da0ffSGleb Smirnoff for (br = bge_majorrevs; br->br_name != NULL; br++) { 19274c0da0ffSGleb Smirnoff if (br->br_chipid == BGE_ASICREV(chipid)) 19284c0da0ffSGleb Smirnoff return (br); 19294c0da0ffSGleb Smirnoff } 19304c0da0ffSGleb Smirnoff 19314c0da0ffSGleb Smirnoff return (NULL); 19324c0da0ffSGleb Smirnoff } 19334c0da0ffSGleb Smirnoff 19344c0da0ffSGleb Smirnoff const struct bge_vendor * 19354c0da0ffSGleb Smirnoff bge_lookup_vendor(uint16_t vid) 19364c0da0ffSGleb Smirnoff { 19374c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19384c0da0ffSGleb Smirnoff 19394c0da0ffSGleb Smirnoff for (v = bge_vendors; v->v_name != NULL; v++) 19404c0da0ffSGleb Smirnoff if (v->v_id == vid) 19414c0da0ffSGleb Smirnoff return (v); 19424c0da0ffSGleb Smirnoff 19434c0da0ffSGleb Smirnoff panic("%s: unknown vendor %d", __func__, vid); 19444c0da0ffSGleb Smirnoff return (NULL); 19454c0da0ffSGleb Smirnoff } 19464c0da0ffSGleb Smirnoff 194795d67482SBill Paul /* 194895d67482SBill Paul * Probe for a Broadcom chip. 
Check the PCI vendor and device IDs 19494c0da0ffSGleb Smirnoff * against our list and return its name if we find a match. 19504c0da0ffSGleb Smirnoff * 19514c0da0ffSGleb Smirnoff * Note that since the Broadcom controller contains VPD support, we 19527c929cf9SJung-uk Kim * try to get the device name string from the controller itself instead 19537c929cf9SJung-uk Kim * of the compiled-in string. It guarantees we'll always announce the 19547c929cf9SJung-uk Kim * right product name. We fall back to the compiled-in string when 19557c929cf9SJung-uk Kim * VPD is unavailable or corrupt. 195695d67482SBill Paul */ 195795d67482SBill Paul static int 19583f74909aSGleb Smirnoff bge_probe(device_t dev) 195995d67482SBill Paul { 1960852c67f9SMarius Strobl const struct bge_type *t = bge_devs; 19614c0da0ffSGleb Smirnoff struct bge_softc *sc = device_get_softc(dev); 19627c929cf9SJung-uk Kim uint16_t vid, did; 196395d67482SBill Paul 196495d67482SBill Paul sc->bge_dev = dev; 19657c929cf9SJung-uk Kim vid = pci_get_vendor(dev); 19667c929cf9SJung-uk Kim did = pci_get_device(dev); 19674c0da0ffSGleb Smirnoff while(t->bge_vid != 0) { 19687c929cf9SJung-uk Kim if ((vid == t->bge_vid) && (did == t->bge_did)) { 19697c929cf9SJung-uk Kim char model[64], buf[96]; 19704c0da0ffSGleb Smirnoff const struct bge_revision *br; 19714c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19724c0da0ffSGleb Smirnoff uint32_t id; 19734c0da0ffSGleb Smirnoff 1974a5779553SStanislav Sedov id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1975a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 1976a5779553SStanislav Sedov if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) 1977a5779553SStanislav Sedov id = pci_read_config(dev, 1978a5779553SStanislav Sedov BGE_PCI_PRODID_ASICREV, 4); 19794c0da0ffSGleb Smirnoff br = bge_lookup_rev(id); 19807c929cf9SJung-uk Kim v = bge_lookup_vendor(vid); 19814e35d186SJung-uk Kim { 19824e35d186SJung-uk Kim #if __FreeBSD_version > 700024 19834e35d186SJung-uk Kim const char *pname; 19844e35d186SJung-uk Kim 1985852c67f9SMarius Strobl if (bge_has_eaddr(sc) && 1986852c67f9SMarius Strobl pci_get_vpd_ident(dev, &pname) == 0) 19874e35d186SJung-uk Kim snprintf(model, 64, "%s", pname); 19884e35d186SJung-uk Kim else 19894e35d186SJung-uk Kim #endif 19907c929cf9SJung-uk Kim snprintf(model, 64, "%s %s", 19917c929cf9SJung-uk Kim v->v_name, 19927c929cf9SJung-uk Kim br != NULL ? br->br_name : 19937c929cf9SJung-uk Kim "NetXtreme Ethernet Controller"); 19944e35d186SJung-uk Kim } 1995a5779553SStanislav Sedov snprintf(buf, 96, "%s, %sASIC rev. %#08x", model, 1996a5779553SStanislav Sedov br != NULL ? "" : "unknown ", id); 19974c0da0ffSGleb Smirnoff device_set_desc_copy(dev, buf); 199895d67482SBill Paul return (0); 199995d67482SBill Paul } 200095d67482SBill Paul t++; 200195d67482SBill Paul } 200295d67482SBill Paul 200395d67482SBill Paul return (ENXIO); 200495d67482SBill Paul } 200595d67482SBill Paul 2006f41ac2beSBill Paul static void 20073f74909aSGleb Smirnoff bge_dma_free(struct bge_softc *sc) 2008f41ac2beSBill Paul { 2009f41ac2beSBill Paul int i; 2010f41ac2beSBill Paul 20113f74909aSGleb Smirnoff /* Destroy DMA maps for RX buffers. 
*/ 2012f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 2013f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_dmamap[i]) 20140ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2015f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 2016f41ac2beSBill Paul } 2017943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_sparemap) 2018943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2019943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap); 2020f41ac2beSBill Paul 20213f74909aSGleb Smirnoff /* Destroy DMA maps for jumbo RX buffers. */ 2022f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2023f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 2024f41ac2beSBill Paul bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2025f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2026f41ac2beSBill Paul } 2027943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_sparemap) 2028943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2029943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap); 2030f41ac2beSBill Paul 20313f74909aSGleb Smirnoff /* Destroy DMA maps for TX buffers. */ 2032f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 2033f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_dmamap[i]) 20340ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 2035f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 2036f41ac2beSBill Paul } 2037f41ac2beSBill Paul 20380ac56796SPyun YongHyeon if (sc->bge_cdata.bge_rx_mtag) 20390ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 20400ac56796SPyun YongHyeon if (sc->bge_cdata.bge_tx_mtag) 20410ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 2042f41ac2beSBill Paul 2043f41ac2beSBill Paul 20443f74909aSGleb Smirnoff /* Destroy standard RX ring. */ 2045e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map) 2046e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 2047e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map); 2048e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 2049f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 2050f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring, 2051f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map); 2052f41ac2beSBill Paul 2053f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_ring_tag) 2054f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 2055f41ac2beSBill Paul 20563f74909aSGleb Smirnoff /* Destroy jumbo RX ring. */ 2057e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map) 2058e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2059e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map); 2060e65bed95SPyun YongHyeon 2061e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map && 2062e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_jumbo_ring) 2063f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2064f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, 2065f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map); 2066f41ac2beSBill Paul 2067f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 2068f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 2069f41ac2beSBill Paul 20703f74909aSGleb Smirnoff /* Destroy RX return ring. 
*/ 2071e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map) 2072e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 2073e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map); 2074e65bed95SPyun YongHyeon 2075e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map && 2076e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_return_ring) 2077f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 2078f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, 2079f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map); 2080f41ac2beSBill Paul 2081f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_return_ring_tag) 2082f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 2083f41ac2beSBill Paul 20843f74909aSGleb Smirnoff /* Destroy TX ring. */ 2085e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map) 2086e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 2087e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_ring_map); 2088e65bed95SPyun YongHyeon 2089e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 2090f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 2091f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring, 2092f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map); 2093f41ac2beSBill Paul 2094f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_ring_tag) 2095f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 2096f41ac2beSBill Paul 20973f74909aSGleb Smirnoff /* Destroy status block. */ 2098e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map) 2099e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 2100e65bed95SPyun YongHyeon sc->bge_cdata.bge_status_map); 2101e65bed95SPyun YongHyeon 2102e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 2103f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_status_tag, 2104f41ac2beSBill Paul sc->bge_ldata.bge_status_block, 2105f41ac2beSBill Paul sc->bge_cdata.bge_status_map); 2106f41ac2beSBill Paul 2107f41ac2beSBill Paul if (sc->bge_cdata.bge_status_tag) 2108f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 2109f41ac2beSBill Paul 21103f74909aSGleb Smirnoff /* Destroy statistics block. */ 2111e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map) 2112e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 2113e65bed95SPyun YongHyeon sc->bge_cdata.bge_stats_map); 2114e65bed95SPyun YongHyeon 2115e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 2116f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 2117f41ac2beSBill Paul sc->bge_ldata.bge_stats, 2118f41ac2beSBill Paul sc->bge_cdata.bge_stats_map); 2119f41ac2beSBill Paul 2120f41ac2beSBill Paul if (sc->bge_cdata.bge_stats_tag) 2121f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 2122f41ac2beSBill Paul 21235b610048SPyun YongHyeon if (sc->bge_cdata.bge_buffer_tag) 21245b610048SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag); 21255b610048SPyun YongHyeon 21263f74909aSGleb Smirnoff /* Destroy the parent tag. 
*/ 2127f41ac2beSBill Paul if (sc->bge_cdata.bge_parent_tag) 2128f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 2129f41ac2beSBill Paul } 2130f41ac2beSBill Paul 2131f41ac2beSBill Paul static int 21325b610048SPyun YongHyeon bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment, 21335b610048SPyun YongHyeon bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, 21345b610048SPyun YongHyeon bus_addr_t *paddr, const char *msg) 2135f41ac2beSBill Paul { 21363f74909aSGleb Smirnoff struct bge_dmamap_arg ctx; 2137f681b29aSPyun YongHyeon bus_addr_t lowaddr; 21385b610048SPyun YongHyeon bus_size_t ring_end; 21395b610048SPyun YongHyeon int error; 2140f41ac2beSBill Paul 21415b610048SPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR; 21425b610048SPyun YongHyeon again: 21435b610048SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 21445b610048SPyun YongHyeon alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, 21455b610048SPyun YongHyeon NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag); 21465b610048SPyun YongHyeon if (error != 0) { 21475b610048SPyun YongHyeon device_printf(sc->bge_dev, 21485b610048SPyun YongHyeon "could not create %s dma tag\n", msg); 21495b610048SPyun YongHyeon return (ENOMEM); 21505b610048SPyun YongHyeon } 21515b610048SPyun YongHyeon /* Allocate DMA'able memory for ring. */ 21525b610048SPyun YongHyeon error = bus_dmamem_alloc(*tag, (void **)ring, 21535b610048SPyun YongHyeon BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); 21545b610048SPyun YongHyeon if (error != 0) { 21555b610048SPyun YongHyeon device_printf(sc->bge_dev, 21565b610048SPyun YongHyeon "could not allocate DMA'able memory for %s\n", msg); 21575b610048SPyun YongHyeon return (ENOMEM); 21585b610048SPyun YongHyeon } 21595b610048SPyun YongHyeon /* Load the address of the ring. */ 21605b610048SPyun YongHyeon ctx.bge_busaddr = 0; 21615b610048SPyun YongHyeon error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr, 21625b610048SPyun YongHyeon &ctx, BUS_DMA_NOWAIT); 21635b610048SPyun YongHyeon if (error != 0) { 21645b610048SPyun YongHyeon device_printf(sc->bge_dev, 21655b610048SPyun YongHyeon "could not load DMA'able memory for %s\n", msg); 21665b610048SPyun YongHyeon return (ENOMEM); 21675b610048SPyun YongHyeon } 21685b610048SPyun YongHyeon *paddr = ctx.bge_busaddr; 21695b610048SPyun YongHyeon ring_end = *paddr + maxsize; 21705b610048SPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 && 21715b610048SPyun YongHyeon BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) { 21725b610048SPyun YongHyeon /* 21735b610048SPyun YongHyeon * 4GB boundary crossed. Limit maximum allowable DMA 21745b610048SPyun YongHyeon * address space to 32bit and try again. 
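	 *
	 * For illustration (values are made up, not from the driver): an
	 * 8KB ring loaded at bus address 0x0000_0000_FFFF_F000 would end
	 * at 0x0000_0001_0000_1000, so BGE_ADDR_HI(*paddr) is 0 while
	 * BGE_ADDR_HI(ring_end) is 1 and this path is taken; once lowaddr
	 * is clamped to BUS_SPACE_MAXADDR_32BIT the retried allocation
	 * lives entirely below 4GB and cannot straddle the boundary.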
21755b610048SPyun YongHyeon */ 21765b610048SPyun YongHyeon bus_dmamap_unload(*tag, *map); 21775b610048SPyun YongHyeon bus_dmamem_free(*tag, *ring, *map); 21785b610048SPyun YongHyeon bus_dma_tag_destroy(*tag); 21795b610048SPyun YongHyeon if (bootverbose) 21805b610048SPyun YongHyeon device_printf(sc->bge_dev, "4GB boundary crossed, " 21815b610048SPyun YongHyeon "limit DMA address space to 32bit for %s\n", msg); 21825b610048SPyun YongHyeon *ring = NULL; 21835b610048SPyun YongHyeon *tag = NULL; 21845b610048SPyun YongHyeon *map = NULL; 21855b610048SPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR_32BIT; 21865b610048SPyun YongHyeon goto again; 21875b610048SPyun YongHyeon } 21885b610048SPyun YongHyeon return (0); 21895b610048SPyun YongHyeon } 21905b610048SPyun YongHyeon 21915b610048SPyun YongHyeon static int 21925b610048SPyun YongHyeon bge_dma_alloc(struct bge_softc *sc) 21935b610048SPyun YongHyeon { 21945b610048SPyun YongHyeon bus_addr_t lowaddr; 21955b610048SPyun YongHyeon bus_size_t boundary, sbsz, txsegsz, txmaxsegsz; 21965b610048SPyun YongHyeon int i, error; 2197f41ac2beSBill Paul 2198f681b29aSPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR; 2199f681b29aSPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) 2200f681b29aSPyun YongHyeon lowaddr = BGE_DMA_MAXADDR; 2201f41ac2beSBill Paul /* 2202f41ac2beSBill Paul * Allocate the parent bus DMA tag appropriate for PCI. 2203f41ac2beSBill Paul */ 22044eee14cbSMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 2205f681b29aSPyun YongHyeon 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, 22064eee14cbSMarius Strobl NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 22074eee14cbSMarius Strobl 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); 2208e65bed95SPyun YongHyeon if (error != 0) { 2209fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2210fe806fdaSPyun YongHyeon "could not allocate parent dma tag\n"); 2211e65bed95SPyun YongHyeon return (ENOMEM); 2212e65bed95SPyun YongHyeon } 2213e65bed95SPyun YongHyeon 22145b610048SPyun YongHyeon /* Create tag for standard RX ring. */ 22155b610048SPyun YongHyeon error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ, 22165b610048SPyun YongHyeon &sc->bge_cdata.bge_rx_std_ring_tag, 22175b610048SPyun YongHyeon (uint8_t **)&sc->bge_ldata.bge_rx_std_ring, 22185b610048SPyun YongHyeon &sc->bge_cdata.bge_rx_std_ring_map, 22195b610048SPyun YongHyeon &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring"); 22205b610048SPyun YongHyeon if (error) 22215b610048SPyun YongHyeon return (error); 22225b610048SPyun YongHyeon 22235b610048SPyun YongHyeon /* Create tag for RX return ring. */ 22245b610048SPyun YongHyeon error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc), 22255b610048SPyun YongHyeon &sc->bge_cdata.bge_rx_return_ring_tag, 22265b610048SPyun YongHyeon (uint8_t **)&sc->bge_ldata.bge_rx_return_ring, 22275b610048SPyun YongHyeon &sc->bge_cdata.bge_rx_return_ring_map, 22285b610048SPyun YongHyeon &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring"); 22295b610048SPyun YongHyeon if (error) 22305b610048SPyun YongHyeon return (error); 22315b610048SPyun YongHyeon 22325b610048SPyun YongHyeon /* Create tag for TX ring. 
*/ 22335b610048SPyun YongHyeon error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ, 22345b610048SPyun YongHyeon &sc->bge_cdata.bge_tx_ring_tag, 22355b610048SPyun YongHyeon (uint8_t **)&sc->bge_ldata.bge_tx_ring, 22365b610048SPyun YongHyeon &sc->bge_cdata.bge_tx_ring_map, 22375b610048SPyun YongHyeon &sc->bge_ldata.bge_tx_ring_paddr, "TX ring"); 22385b610048SPyun YongHyeon if (error) 22395b610048SPyun YongHyeon return (error); 22405b610048SPyun YongHyeon 2241f41ac2beSBill Paul /* 22425b610048SPyun YongHyeon * Create tag for status block. 22435b610048SPyun YongHyeon * Because we only use single Tx/Rx/Rx return ring, use 22445b610048SPyun YongHyeon * minimum status block size except BCM5700 AX/BX which 22455b610048SPyun YongHyeon * seems to want to see full status block size regardless 22465b610048SPyun YongHyeon * of configured number of ring. 2247f41ac2beSBill Paul */ 22485b610048SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 22495b610048SPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5700_C0) 22505b610048SPyun YongHyeon sbsz = BGE_STATUS_BLK_SZ; 22515b610048SPyun YongHyeon else 22525b610048SPyun YongHyeon sbsz = 32; 22535b610048SPyun YongHyeon error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz, 22545b610048SPyun YongHyeon &sc->bge_cdata.bge_status_tag, 22555b610048SPyun YongHyeon (uint8_t **)&sc->bge_ldata.bge_status_block, 22565b610048SPyun YongHyeon &sc->bge_cdata.bge_status_map, 22575b610048SPyun YongHyeon &sc->bge_ldata.bge_status_block_paddr, "status block"); 22585b610048SPyun YongHyeon if (error) 22595b610048SPyun YongHyeon return (error); 22605b610048SPyun YongHyeon 226112c65daeSPyun YongHyeon /* Create tag for statistics block. */ 226212c65daeSPyun YongHyeon error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ, 226312c65daeSPyun YongHyeon &sc->bge_cdata.bge_stats_tag, 226412c65daeSPyun YongHyeon (uint8_t **)&sc->bge_ldata.bge_stats, 226512c65daeSPyun YongHyeon &sc->bge_cdata.bge_stats_map, 226612c65daeSPyun YongHyeon &sc->bge_ldata.bge_stats_paddr, "statistics block"); 226712c65daeSPyun YongHyeon if (error) 226812c65daeSPyun YongHyeon return (error); 226912c65daeSPyun YongHyeon 22705b610048SPyun YongHyeon /* Create tag for jumbo RX ring. */ 22715b610048SPyun YongHyeon if (BGE_IS_JUMBO_CAPABLE(sc)) { 22725b610048SPyun YongHyeon error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ, 22735b610048SPyun YongHyeon &sc->bge_cdata.bge_rx_jumbo_ring_tag, 22745b610048SPyun YongHyeon (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring, 22755b610048SPyun YongHyeon &sc->bge_cdata.bge_rx_jumbo_ring_map, 22765b610048SPyun YongHyeon &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring"); 22775b610048SPyun YongHyeon if (error) 22785b610048SPyun YongHyeon return (error); 22795b610048SPyun YongHyeon } 22805b610048SPyun YongHyeon 22815b610048SPyun YongHyeon /* Create parent tag for buffers. 
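	 * Unlike the ring tags above, this parent tag may be created with
	 * a bus_dma boundary: when BGE_FLAG_4G_BNDRY_BUG is set,
	 * BGE_DMA_BNDRY is passed to bus_dma_tag_create() below so that
	 * busdma never hands out a buffer segment crossing that boundary,
	 * instead of the allocate-and-retry scheme used for the rings
	 * (assuming BGE_DMA_BNDRY names the 4GB boundary value).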
*/ 22825b610048SPyun YongHyeon boundary = 0; 22835b610048SPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) 228438cc6151SPyun YongHyeon boundary = BGE_DMA_BNDRY; 22855b610048SPyun YongHyeon error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 22865b610048SPyun YongHyeon 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL, 22875b610048SPyun YongHyeon NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 22885b610048SPyun YongHyeon 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag); 22895b610048SPyun YongHyeon if (error != 0) { 22905b610048SPyun YongHyeon device_printf(sc->bge_dev, 22915b610048SPyun YongHyeon "could not allocate buffer dma tag\n"); 22925b610048SPyun YongHyeon return (ENOMEM); 22935b610048SPyun YongHyeon } 22945b610048SPyun YongHyeon /* Create tag for Tx mbufs. */ 2295ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) { 2296ca3f1187SPyun YongHyeon txsegsz = BGE_TSOSEG_SZ; 2297ca3f1187SPyun YongHyeon txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); 2298ca3f1187SPyun YongHyeon } else { 2299ca3f1187SPyun YongHyeon txsegsz = MCLBYTES; 2300ca3f1187SPyun YongHyeon txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; 2301ca3f1187SPyun YongHyeon } 23025b610048SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 2303ca3f1187SPyun YongHyeon 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2304ca3f1187SPyun YongHyeon txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, 2305ca3f1187SPyun YongHyeon &sc->bge_cdata.bge_tx_mtag); 2306f41ac2beSBill Paul 2307f41ac2beSBill Paul if (error) { 23080ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); 23090ac56796SPyun YongHyeon return (ENOMEM); 23100ac56796SPyun YongHyeon } 23110ac56796SPyun YongHyeon 23125b610048SPyun YongHyeon /* Create tag for Rx mbufs. */ 23135b610048SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, 23140ac56796SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 2315ca3f1187SPyun YongHyeon MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); 23160ac56796SPyun YongHyeon 23170ac56796SPyun YongHyeon if (error) { 23180ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); 2319f41ac2beSBill Paul return (ENOMEM); 2320f41ac2beSBill Paul } 2321f41ac2beSBill Paul 23223f74909aSGleb Smirnoff /* Create DMA maps for RX buffers. */ 2323943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2324943787f3SPyun YongHyeon &sc->bge_cdata.bge_rx_std_sparemap); 2325943787f3SPyun YongHyeon if (error) { 2326943787f3SPyun YongHyeon device_printf(sc->bge_dev, 2327943787f3SPyun YongHyeon "can't create spare DMA map for RX\n"); 2328943787f3SPyun YongHyeon return (ENOMEM); 2329943787f3SPyun YongHyeon } 2330f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 23310ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2332f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_dmamap[i]); 2333f41ac2beSBill Paul if (error) { 2334fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2335fe806fdaSPyun YongHyeon "can't create DMA map for RX\n"); 2336f41ac2beSBill Paul return (ENOMEM); 2337f41ac2beSBill Paul } 2338f41ac2beSBill Paul } 2339f41ac2beSBill Paul 23403f74909aSGleb Smirnoff /* Create DMA maps for TX buffers. 
*/ 2341f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 23420ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, 2343f41ac2beSBill Paul &sc->bge_cdata.bge_tx_dmamap[i]); 2344f41ac2beSBill Paul if (error) { 2345fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 23460ac56796SPyun YongHyeon "can't create DMA map for TX\n"); 2347f41ac2beSBill Paul return (ENOMEM); 2348f41ac2beSBill Paul } 2349f41ac2beSBill Paul } 2350f41ac2beSBill Paul 23515b610048SPyun YongHyeon /* Create tags for jumbo RX buffers. */ 23524c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) { 23535b610048SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 23548a2e22deSScott Long 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 23551be6acb7SGleb Smirnoff NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 23561be6acb7SGleb Smirnoff 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 2357f41ac2beSBill Paul if (error) { 2358fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 23593f74909aSGleb Smirnoff "could not allocate jumbo dma tag\n"); 2360f41ac2beSBill Paul return (ENOMEM); 2361f41ac2beSBill Paul } 23623f74909aSGleb Smirnoff /* Create DMA maps for jumbo RX buffers. */ 2363943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2364943787f3SPyun YongHyeon 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); 2365943787f3SPyun YongHyeon if (error) { 2366943787f3SPyun YongHyeon device_printf(sc->bge_dev, 23671b90d0bdSPyun YongHyeon "can't create spare DMA map for jumbo RX\n"); 2368943787f3SPyun YongHyeon return (ENOMEM); 2369943787f3SPyun YongHyeon } 2370f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2371f41ac2beSBill Paul error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2372f41ac2beSBill Paul 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2373f41ac2beSBill Paul if (error) { 2374fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 23753f74909aSGleb Smirnoff "can't create DMA map for jumbo RX\n"); 2376f41ac2beSBill Paul return (ENOMEM); 2377f41ac2beSBill Paul } 2378f41ac2beSBill Paul } 2379f41ac2beSBill Paul } 2380f41ac2beSBill Paul 2381f41ac2beSBill Paul return (0); 2382f41ac2beSBill Paul } 2383f41ac2beSBill Paul 2384bf6ef57aSJohn Polstra /* 2385bf6ef57aSJohn Polstra * Return true if this device has more than one port. 2386bf6ef57aSJohn Polstra */ 2387bf6ef57aSJohn Polstra static int 2388bf6ef57aSJohn Polstra bge_has_multiple_ports(struct bge_softc *sc) 2389bf6ef57aSJohn Polstra { 2390bf6ef57aSJohn Polstra device_t dev = sc->bge_dev; 239155aaf894SMarius Strobl u_int b, d, f, fscan, s; 2392bf6ef57aSJohn Polstra 239355aaf894SMarius Strobl d = pci_get_domain(dev); 2394bf6ef57aSJohn Polstra b = pci_get_bus(dev); 2395bf6ef57aSJohn Polstra s = pci_get_slot(dev); 2396bf6ef57aSJohn Polstra f = pci_get_function(dev); 2397bf6ef57aSJohn Polstra for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) 239855aaf894SMarius Strobl if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) 2399bf6ef57aSJohn Polstra return (1); 2400bf6ef57aSJohn Polstra return (0); 2401bf6ef57aSJohn Polstra } 2402bf6ef57aSJohn Polstra 2403bf6ef57aSJohn Polstra /* 2404bf6ef57aSJohn Polstra * Return true if MSI can be used with this device. 
2405bf6ef57aSJohn Polstra */ 2406bf6ef57aSJohn Polstra static int 2407bf6ef57aSJohn Polstra bge_can_use_msi(struct bge_softc *sc) 2408bf6ef57aSJohn Polstra { 2409bf6ef57aSJohn Polstra int can_use_msi = 0; 2410bf6ef57aSJohn Polstra 2411bf6ef57aSJohn Polstra switch (sc->bge_asicrev) { 2412a8376f70SMarius Strobl case BGE_ASICREV_BCM5714_A0: 2413bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5714: 2414bf6ef57aSJohn Polstra /* 2415a8376f70SMarius Strobl * Apparently, MSI doesn't work when these chips are 2416a8376f70SMarius Strobl * configured in single-port mode. 2417bf6ef57aSJohn Polstra */ 2418bf6ef57aSJohn Polstra if (bge_has_multiple_ports(sc)) 2419bf6ef57aSJohn Polstra can_use_msi = 1; 2420bf6ef57aSJohn Polstra break; 2421bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5750: 2422bf6ef57aSJohn Polstra if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && 2423bf6ef57aSJohn Polstra sc->bge_chiprev != BGE_CHIPREV_5750_BX) 2424bf6ef57aSJohn Polstra can_use_msi = 1; 2425bf6ef57aSJohn Polstra break; 2426a8376f70SMarius Strobl default: 2427a8376f70SMarius Strobl if (BGE_IS_575X_PLUS(sc)) 2428bf6ef57aSJohn Polstra can_use_msi = 1; 2429bf6ef57aSJohn Polstra } 2430bf6ef57aSJohn Polstra return (can_use_msi); 2431bf6ef57aSJohn Polstra } 2432bf6ef57aSJohn Polstra 243395d67482SBill Paul static int 24343f74909aSGleb Smirnoff bge_attach(device_t dev) 243595d67482SBill Paul { 243695d67482SBill Paul struct ifnet *ifp; 243795d67482SBill Paul struct bge_softc *sc; 24384f0794ffSBjoern A. Zeeb uint32_t hwcfg = 0, misccfg; 243908013fd3SMarius Strobl u_char eaddr[ETHER_ADDR_LEN]; 2440d648358bSPyun YongHyeon int error, msicount, reg, rid, trys; 244195d67482SBill Paul 244295d67482SBill Paul sc = device_get_softc(dev); 244395d67482SBill Paul sc->bge_dev = dev; 244495d67482SBill Paul 2445dfe0df9aSPyun YongHyeon TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); 2446dfe0df9aSPyun YongHyeon 244795d67482SBill Paul /* 244895d67482SBill Paul * Map control/status registers. 244995d67482SBill Paul */ 245095d67482SBill Paul pci_enable_busmaster(dev); 245195d67482SBill Paul 2452736b9319SPyun YongHyeon rid = PCIR_BAR(0); 24535f96beb9SNate Lawson sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 245444f8f2fcSMarius Strobl RF_ACTIVE); 245595d67482SBill Paul 245695d67482SBill Paul if (sc->bge_res == NULL) { 2457fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, "couldn't map memory\n"); 245895d67482SBill Paul error = ENXIO; 245995d67482SBill Paul goto fail; 246095d67482SBill Paul } 246195d67482SBill Paul 24624f09c4c7SMarius Strobl /* Save various chip information. */ 2463e53d81eeSPaul Saab sc->bge_chipid = 2464a5779553SStanislav Sedov pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2465a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 2466a5779553SStanislav Sedov if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) 2467a5779553SStanislav Sedov sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 2468a5779553SStanislav Sedov 4); 2469e53d81eeSPaul Saab sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2470e53d81eeSPaul Saab sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2471e53d81eeSPaul Saab 247286543395SJung-uk Kim /* 247338cc658fSJohn Baldwin * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the 247486543395SJung-uk Kim * 5705 A0 and A1 chips. 
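	 * (As far as I know, Ethernet@WireSpeed is Broadcom's name for
	 * keeping a link up at a reduced speed over marginal or damaged
	 * cabling; BGE_FLAG_WIRESPEED set here is presumably consulted
	 * later when the PHY is configured.)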
247586543395SJung-uk Kim */ 247686543395SJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && 247738cc658fSJohn Baldwin sc->bge_asicrev != BGE_ASICREV_BCM5906 && 247886543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 247986543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A1) 248086543395SJung-uk Kim sc->bge_flags |= BGE_FLAG_WIRESPEED; 248186543395SJung-uk Kim 24825fea260fSMarius Strobl if (bge_has_eaddr(sc)) 24835fea260fSMarius Strobl sc->bge_flags |= BGE_FLAG_EADDR; 248408013fd3SMarius Strobl 24850dae9719SJung-uk Kim /* Save chipset family. */ 24860dae9719SJung-uk Kim switch (sc->bge_asicrev) { 2487a5779553SStanislav Sedov case BGE_ASICREV_BCM5755: 2488a5779553SStanislav Sedov case BGE_ASICREV_BCM5761: 2489a5779553SStanislav Sedov case BGE_ASICREV_BCM5784: 2490a5779553SStanislav Sedov case BGE_ASICREV_BCM5785: 2491a5779553SStanislav Sedov case BGE_ASICREV_BCM5787: 2492a5779553SStanislav Sedov case BGE_ASICREV_BCM57780: 2493a5779553SStanislav Sedov sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | 2494a5779553SStanislav Sedov BGE_FLAG_5705_PLUS; 2495a5779553SStanislav Sedov break; 24960dae9719SJung-uk Kim case BGE_ASICREV_BCM5700: 24970dae9719SJung-uk Kim case BGE_ASICREV_BCM5701: 24980dae9719SJung-uk Kim case BGE_ASICREV_BCM5703: 24990dae9719SJung-uk Kim case BGE_ASICREV_BCM5704: 25007ee00338SJung-uk Kim sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; 25010dae9719SJung-uk Kim break; 25020dae9719SJung-uk Kim case BGE_ASICREV_BCM5714_A0: 25030dae9719SJung-uk Kim case BGE_ASICREV_BCM5780: 25040dae9719SJung-uk Kim case BGE_ASICREV_BCM5714: 25057ee00338SJung-uk Kim sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */; 25069fe569d8SXin LI /* FALLTHROUGH */ 25070dae9719SJung-uk Kim case BGE_ASICREV_BCM5750: 25080dae9719SJung-uk Kim case BGE_ASICREV_BCM5752: 250938cc658fSJohn Baldwin case BGE_ASICREV_BCM5906: 25100dae9719SJung-uk Kim sc->bge_flags |= BGE_FLAG_575X_PLUS; 25119fe569d8SXin LI /* FALLTHROUGH */ 25120dae9719SJung-uk Kim case BGE_ASICREV_BCM5705: 25130dae9719SJung-uk Kim sc->bge_flags |= BGE_FLAG_5705_PLUS; 25140dae9719SJung-uk Kim break; 25150dae9719SJung-uk Kim } 25160dae9719SJung-uk Kim 25175ee49a3aSJung-uk Kim /* Set various bug flags. 
 */
25181ec4c3a8SJung-uk Kim 	if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
25191ec4c3a8SJung-uk Kim 	    sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
25201ec4c3a8SJung-uk Kim 		sc->bge_flags |= BGE_FLAG_CRC_BUG;
25215ee49a3aSJung-uk Kim 	if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
25225ee49a3aSJung-uk Kim 	    sc->bge_chiprev == BGE_CHIPREV_5704_AX)
25235ee49a3aSJung-uk Kim 		sc->bge_flags |= BGE_FLAG_ADC_BUG;
25245ee49a3aSJung-uk Kim 	if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
25255ee49a3aSJung-uk Kim 		sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
25264150ce6fSPyun YongHyeon 	if (pci_get_subvendor(dev) == DELL_VENDORID)
25274150ce6fSPyun YongHyeon 		sc->bge_flags |= BGE_FLAG_NO_3LED;
25284150ce6fSPyun YongHyeon 	if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
25294150ce6fSPyun YongHyeon 		sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
253008bf8bb7SJung-uk Kim 	if (BGE_IS_5705_PLUS(sc) &&
253108bf8bb7SJung-uk Kim 	    !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
25325ee49a3aSJung-uk Kim 		if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2533a5779553SStanislav Sedov 		    sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2534a5779553SStanislav Sedov 		    sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
25354fcf220bSJohn Baldwin 		    sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2536f7d1b2ebSXin LI 			if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
2537f7d1b2ebSXin LI 			    pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
25385ee49a3aSJung-uk Kim 				sc->bge_flags |= BGE_FLAG_JITTER_BUG;
253938cc658fSJohn Baldwin 		} else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
25405ee49a3aSJung-uk Kim 			sc->bge_flags |= BGE_FLAG_BER_BUG;
25415ee49a3aSJung-uk Kim 	}
25425ee49a3aSJung-uk Kim 
2543f681b29aSPyun YongHyeon 	/*
2544f681b29aSPyun YongHyeon 	 * All controllers that are not 5755 or higher have the 4GB
2545f681b29aSPyun YongHyeon 	 * boundary DMA bug.
2546f681b29aSPyun YongHyeon 	 * Whenever an address crosses a multiple of the 4GB boundary
2547f681b29aSPyun YongHyeon 	 * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2548f681b29aSPyun YongHyeon 	 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2549f681b29aSPyun YongHyeon 	 * state machine will lock up and cause the device to hang.
2550f681b29aSPyun YongHyeon 	 */
2551f681b29aSPyun YongHyeon 	if (BGE_IS_5755_PLUS(sc) == 0)
2552f681b29aSPyun YongHyeon 		sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
25534f0794ffSBjoern A. Zeeb 
25544f0794ffSBjoern A. Zeeb 	/*
25554f0794ffSBjoern A. Zeeb 	 * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
25564f0794ffSBjoern A. Zeeb 	 * but I do not know the DEVICEID for the 5788M.
25574f0794ffSBjoern A. Zeeb 	 */
25584f0794ffSBjoern A. Zeeb 	misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
25594f0794ffSBjoern A. Zeeb 	if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
25604f0794ffSBjoern A. Zeeb 	    misccfg == BGE_MISCCFG_BOARD_ID_5788M)
25614f0794ffSBjoern A. Zeeb 		sc->bge_flags |= BGE_FLAG_5788;
25624f0794ffSBjoern A. Zeeb 
2563e53d81eeSPaul Saab 	/*
2564ca3f1187SPyun YongHyeon 	 * Some controllers seem to require special firmware to use
2565ca3f1187SPyun YongHyeon 	 * TSO. But that firmware is not available to FreeBSD, and Linux
2566ca3f1187SPyun YongHyeon 	 * claims that the TSO performed by the firmware is slower than
2567ca3f1187SPyun YongHyeon 	 * hardware-based TSO. Moreover, the firmware-based TSO has one
2568ca3f1187SPyun YongHyeon 	 * known bug: it cannot handle TSO if the ethernet header plus
2569ca3f1187SPyun YongHyeon 	 * IP/TCP header is greater than 80 bytes. A workaround for the
2570ca3f1187SPyun YongHyeon 	 * TSO bug exists, but it seems to be more expensive than not
2571ca3f1187SPyun YongHyeon 	 * using TSO at all. Some hardware revisions also have the TSO
2572ca3f1187SPyun YongHyeon 	 * bug, so limit TSO to the controllers that are not affected
2573ca3f1187SPyun YongHyeon 	 * by TSO issues (e.g. 5755 or higher).
2574ca3f1187SPyun YongHyeon 	 */
25754f4a16e1SPyun YongHyeon 	if (BGE_IS_5755_PLUS(sc)) {
25764f4a16e1SPyun YongHyeon 		/*
25774f4a16e1SPyun YongHyeon 		 * BCM5754 and BCM5787 share the same ASIC id, so an
25784f4a16e1SPyun YongHyeon 		 * explicit device id check is required.
2579be95548dSPyun YongHyeon 		 * For an unknown reason, TSO does not work on BCM5755M.
25804f4a16e1SPyun YongHyeon 		 */
25814f4a16e1SPyun YongHyeon 		if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
2582be95548dSPyun YongHyeon 		    pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
2583be95548dSPyun YongHyeon 		    pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
2584ca3f1187SPyun YongHyeon 			sc->bge_flags |= BGE_FLAG_TSO;
25854f4a16e1SPyun YongHyeon 	}
2586ca3f1187SPyun YongHyeon 
2587ca3f1187SPyun YongHyeon 	/*
25886f8718a3SScott Long 	 * Check if this is a PCI-X or PCI Express device.
2589e53d81eeSPaul Saab 	 */
25906f8718a3SScott Long 	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
25914c0da0ffSGleb Smirnoff 		/*
25926f8718a3SScott Long 		 * Found a PCI Express capabilities register; this
25936f8718a3SScott Long 		 * must be a PCI Express device.
25946f8718a3SScott Long 		 */
25956f8718a3SScott Long 		sc->bge_flags |= BGE_FLAG_PCIE;
25960aaf1057SPyun YongHyeon 		sc->bge_expcap = reg;
2597d2b6e9a0SPyun YongHyeon 		if (pci_get_max_read_req(dev) != 4096)
2598d2b6e9a0SPyun YongHyeon 			pci_set_max_read_req(dev, 4096);
25996f8718a3SScott Long 	} else {
26006f8718a3SScott Long 		/*
26016f8718a3SScott Long 		 * Check if the device is in PCI-X Mode.
26026f8718a3SScott Long 		 * (This bit is not valid on PCI Express controllers.)
26034c0da0ffSGleb Smirnoff 		 */
26040aaf1057SPyun YongHyeon 		if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
26050aaf1057SPyun YongHyeon 			sc->bge_pcixcap = reg;
260690447aadSMarius Strobl 		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
26074c0da0ffSGleb Smirnoff 		    BGE_PCISTATE_PCI_BUSMODE) == 0)
2608652ae483SGleb Smirnoff 			sc->bge_flags |= BGE_FLAG_PCIX;
26096f8718a3SScott Long 	}
26104c0da0ffSGleb Smirnoff 
2611bf6ef57aSJohn Polstra 	/*
2612fd4d32feSPyun YongHyeon 	 * The 40bit DMA bug applies to the 5714/5715 controllers and is
2613fd4d32feSPyun YongHyeon 	 * not actually a MAC controller bug but an issue with the embedded
2614fd4d32feSPyun YongHyeon 	 * PCIe to PCI-X bridge in the device. Use the 40bit DMA workaround.
2615fd4d32feSPyun YongHyeon 	 */
2616fd4d32feSPyun YongHyeon 	if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2617fd4d32feSPyun YongHyeon 		sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2618fd4d32feSPyun YongHyeon 	/*
2619bf6ef57aSJohn Polstra 	 * Allocate the interrupt, using MSI if possible. These devices
2620bf6ef57aSJohn Polstra 	 * support 8 MSI messages, but only the first one is used in
2621bf6ef57aSJohn Polstra 	 * normal operation.
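	 * rid 0 below names the legacy INTx resource; if exactly one MSI
	 * message can be allocated, rid is switched to 1 (the first MSI
	 * vector) and BGE_FLAG_MSI is set, otherwise the driver falls
	 * back to the shared INTx line.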
2622bf6ef57aSJohn Polstra 	 */
26230aaf1057SPyun YongHyeon 	rid = 0;
26246a15578dSPyun YongHyeon 	if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
26250aaf1057SPyun YongHyeon 		sc->bge_msicap = reg;
2626bf6ef57aSJohn Polstra 		if (bge_can_use_msi(sc)) {
2627bf6ef57aSJohn Polstra 			msicount = pci_msi_count(dev);
2628bf6ef57aSJohn Polstra 			if (msicount > 1)
2629bf6ef57aSJohn Polstra 				msicount = 1;
2630bf6ef57aSJohn Polstra 		} else
2631bf6ef57aSJohn Polstra 			msicount = 0;
2632bf6ef57aSJohn Polstra 		if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2633bf6ef57aSJohn Polstra 			rid = 1;
2634bf6ef57aSJohn Polstra 			sc->bge_flags |= BGE_FLAG_MSI;
26350aaf1057SPyun YongHyeon 		}
26360aaf1057SPyun YongHyeon 	}
2637bf6ef57aSJohn Polstra 
2638bf6ef57aSJohn Polstra 	sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2639bf6ef57aSJohn Polstra 	    RF_SHAREABLE | RF_ACTIVE);
2640bf6ef57aSJohn Polstra 
2641bf6ef57aSJohn Polstra 	if (sc->bge_irq == NULL) {
2642bf6ef57aSJohn Polstra 		device_printf(sc->bge_dev, "couldn't map interrupt\n");
2643bf6ef57aSJohn Polstra 		error = ENXIO;
2644bf6ef57aSJohn Polstra 		goto fail;
2645bf6ef57aSJohn Polstra 	}
2646bf6ef57aSJohn Polstra 
26474f09c4c7SMarius Strobl 	device_printf(dev,
26484f09c4c7SMarius Strobl 	    "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
26494f09c4c7SMarius Strobl 	    sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
26504f09c4c7SMarius Strobl 	    (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
26514f09c4c7SMarius Strobl 	    ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
26524f09c4c7SMarius Strobl 
2653bf6ef57aSJohn Polstra 	BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2654bf6ef57aSJohn Polstra 
265595d67482SBill Paul 	/* Try to reset the chip. */
26568cb1383cSDoug Ambrisko 	if (bge_reset(sc)) {
26578cb1383cSDoug Ambrisko 		device_printf(sc->bge_dev, "chip reset failed\n");
26588cb1383cSDoug Ambrisko 		error = ENXIO;
26598cb1383cSDoug Ambrisko 		goto fail;
26608cb1383cSDoug Ambrisko 	}
26618cb1383cSDoug Ambrisko 
26628cb1383cSDoug Ambrisko 	sc->bge_asf_mode = 0;
2663f1a7e6d5SScott Long 	if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2664f1a7e6d5SScott Long 	    == BGE_MAGIC_NUMBER)) {
26658cb1383cSDoug Ambrisko 		if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
26668cb1383cSDoug Ambrisko 		    & BGE_HWCFG_ASF) {
26678cb1383cSDoug Ambrisko 			sc->bge_asf_mode |= ASF_ENABLE;
26688cb1383cSDoug Ambrisko 			sc->bge_asf_mode |= ASF_STACKUP;
2669d67eba2fSPyun YongHyeon 			if (BGE_IS_575X_PLUS(sc))
26708cb1383cSDoug Ambrisko 				sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
26718cb1383cSDoug Ambrisko 		}
26728cb1383cSDoug Ambrisko 	}
26738cb1383cSDoug Ambrisko 
26748cb1383cSDoug Ambrisko 	/* Try to reset the chip again the nice way.
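	 * Reading the surrounding code, the reset above was issued before
	 * we knew whether ASF/IPMI firmware is running; now that
	 * bge_asf_mode is known we stop the firmware and perform the
	 * pre-reset handshake first, so this second reset does not leave
	 * that firmware wedged.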
*/ 26758cb1383cSDoug Ambrisko bge_stop_fw(sc); 26768cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_STOP); 26778cb1383cSDoug Ambrisko if (bge_reset(sc)) { 26788cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "chip reset failed\n"); 26798cb1383cSDoug Ambrisko error = ENXIO; 26808cb1383cSDoug Ambrisko goto fail; 26818cb1383cSDoug Ambrisko } 26828cb1383cSDoug Ambrisko 26838cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 26848cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 268595d67482SBill Paul 268695d67482SBill Paul if (bge_chipinit(sc)) { 2687fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "chip initialization failed\n"); 268895d67482SBill Paul error = ENXIO; 268995d67482SBill Paul goto fail; 269095d67482SBill Paul } 269195d67482SBill Paul 269238cc658fSJohn Baldwin error = bge_get_eaddr(sc, eaddr); 269338cc658fSJohn Baldwin if (error) { 269408013fd3SMarius Strobl device_printf(sc->bge_dev, 269508013fd3SMarius Strobl "failed to read station address\n"); 269695d67482SBill Paul error = ENXIO; 269795d67482SBill Paul goto fail; 269895d67482SBill Paul } 269995d67482SBill Paul 2700f41ac2beSBill Paul /* 5705 limits RX return ring to 512 entries. */ 27017ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 2702f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2703f41ac2beSBill Paul else 2704f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2705f41ac2beSBill Paul 27065b610048SPyun YongHyeon if (bge_dma_alloc(sc)) { 2707fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2708fe806fdaSPyun YongHyeon "failed to allocate DMA resources\n"); 2709f41ac2beSBill Paul error = ENXIO; 2710f41ac2beSBill Paul goto fail; 2711f41ac2beSBill Paul } 2712f41ac2beSBill Paul 271335f945cdSPyun YongHyeon bge_add_sysctls(sc); 271435f945cdSPyun YongHyeon 271595d67482SBill Paul /* Set default tuneable values. */ 271695d67482SBill Paul sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 271795d67482SBill Paul sc->bge_rx_coal_ticks = 150; 271895d67482SBill Paul sc->bge_tx_coal_ticks = 150; 27196f8718a3SScott Long sc->bge_rx_max_coal_bds = 10; 27206f8718a3SScott Long sc->bge_tx_max_coal_bds = 10; 272195d67482SBill Paul 272235f945cdSPyun YongHyeon /* Initialize checksum features to use. 
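	 * BGE_CSUM_FEATURES presumably covers the IP/TCP transmit
	 * checksum offloads; UDP checksum offload is added only when the
	 * forced_udpcsum knob (apparently exposed through
	 * bge_add_sysctls() above) is set, presumably because UDP offload
	 * is problematic on some of these controllers.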
*/ 272335f945cdSPyun YongHyeon sc->bge_csum_features = BGE_CSUM_FEATURES; 272435f945cdSPyun YongHyeon if (sc->bge_forced_udpcsum != 0) 272535f945cdSPyun YongHyeon sc->bge_csum_features |= CSUM_UDP; 272635f945cdSPyun YongHyeon 272795d67482SBill Paul /* Set up ifnet structure */ 2728fc74a9f9SBrooks Davis ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2729fc74a9f9SBrooks Davis if (ifp == NULL) { 2730fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2731fc74a9f9SBrooks Davis error = ENXIO; 2732fc74a9f9SBrooks Davis goto fail; 2733fc74a9f9SBrooks Davis } 273495d67482SBill Paul ifp->if_softc = sc; 27359bf40edeSBrooks Davis if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 273695d67482SBill Paul ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 273795d67482SBill Paul ifp->if_ioctl = bge_ioctl; 273895d67482SBill Paul ifp->if_start = bge_start; 273995d67482SBill Paul ifp->if_init = bge_init; 27404d665c4dSDag-Erling Smørgrav ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 27414d665c4dSDag-Erling Smørgrav IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 27424d665c4dSDag-Erling Smørgrav IFQ_SET_READY(&ifp->if_snd); 274335f945cdSPyun YongHyeon ifp->if_hwassist = sc->bge_csum_features; 2744d375e524SGleb Smirnoff ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 27454e35d186SJung-uk Kim IFCAP_VLAN_MTU; 2746ca3f1187SPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_TSO) != 0) { 2747ca3f1187SPyun YongHyeon ifp->if_hwassist |= CSUM_TSO; 274804bde852SPyun YongHyeon ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO; 2749ca3f1187SPyun YongHyeon } 27504e35d186SJung-uk Kim #ifdef IFCAP_VLAN_HWCSUM 27514e35d186SJung-uk Kim ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 27524e35d186SJung-uk Kim #endif 275395d67482SBill Paul ifp->if_capenable = ifp->if_capabilities; 275475719184SGleb Smirnoff #ifdef DEVICE_POLLING 275575719184SGleb Smirnoff ifp->if_capabilities |= IFCAP_POLLING; 275675719184SGleb Smirnoff #endif 275795d67482SBill Paul 2758a1d52896SBill Paul /* 2759d375e524SGleb Smirnoff * 5700 B0 chips do not support checksumming correctly due 2760d375e524SGleb Smirnoff * to hardware bugs. 2761d375e524SGleb Smirnoff */ 2762d375e524SGleb Smirnoff if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2763d375e524SGleb Smirnoff ifp->if_capabilities &= ~IFCAP_HWCSUM; 27644d3a629cSPyun YongHyeon ifp->if_capenable &= ~IFCAP_HWCSUM; 2765d375e524SGleb Smirnoff ifp->if_hwassist = 0; 2766d375e524SGleb Smirnoff } 2767d375e524SGleb Smirnoff 2768d375e524SGleb Smirnoff /* 2769a1d52896SBill Paul * Figure out what sort of media we have by checking the 277041abcc1bSPaul Saab * hardware config word in the first 32k of NIC internal memory, 277141abcc1bSPaul Saab * or fall back to examining the EEPROM if necessary. 277241abcc1bSPaul Saab * Note: on some BCM5700 cards, this value appears to be unset. 277341abcc1bSPaul Saab * If that's the case, we have to rely on identifying the NIC 277441abcc1bSPaul Saab * by its PCI subsystem ID, as we do below for the SysKonnect 277541abcc1bSPaul Saab * SK-9D41. 
2776a1d52896SBill Paul */ 277741abcc1bSPaul Saab if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 277841abcc1bSPaul Saab hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 27795fea260fSMarius Strobl else if ((sc->bge_flags & BGE_FLAG_EADDR) && 27805fea260fSMarius Strobl (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 2781f6789fbaSPyun YongHyeon if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2782f6789fbaSPyun YongHyeon sizeof(hwcfg))) { 2783fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to read EEPROM\n"); 2784f6789fbaSPyun YongHyeon error = ENXIO; 2785f6789fbaSPyun YongHyeon goto fail; 2786f6789fbaSPyun YongHyeon } 278741abcc1bSPaul Saab hwcfg = ntohl(hwcfg); 278841abcc1bSPaul Saab } 278941abcc1bSPaul Saab 279095d67482SBill Paul /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2791ea3b4127SPyun YongHyeon if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == 2792ea3b4127SPyun YongHyeon SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { 2793ea3b4127SPyun YongHyeon if (BGE_IS_5714_FAMILY(sc)) 2794ea3b4127SPyun YongHyeon sc->bge_flags |= BGE_FLAG_MII_SERDES; 2795ea3b4127SPyun YongHyeon else 2796652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_TBI; 2797ea3b4127SPyun YongHyeon } 279895d67482SBill Paul 2799652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 28000c8aa4eaSJung-uk Kim ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 28010c8aa4eaSJung-uk Kim bge_ifmedia_sts); 28020c8aa4eaSJung-uk Kim ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); 28036098821cSJung-uk Kim ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 28046098821cSJung-uk Kim 0, NULL); 280595d67482SBill Paul ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 280695d67482SBill Paul ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 2807da3003f0SBill Paul sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 280895d67482SBill Paul } else { 280995d67482SBill Paul /* 28108cb1383cSDoug Ambrisko * Do transceiver setup and tell the firmware the 28118cb1383cSDoug Ambrisko * driver is down so we can try to get access the 28128cb1383cSDoug Ambrisko * probe if ASF is running. Retry a couple of times 28138cb1383cSDoug Ambrisko * if we get a conflict with the ASF firmware accessing 28148cb1383cSDoug Ambrisko * the PHY. 
281595d67482SBill Paul */ 28164012d104SMarius Strobl trys = 0; 28178cb1383cSDoug Ambrisko BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 28188cb1383cSDoug Ambrisko again: 28198cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 28208cb1383cSDoug Ambrisko 282195d67482SBill Paul if (mii_phy_probe(dev, &sc->bge_miibus, 282295d67482SBill Paul bge_ifmedia_upd, bge_ifmedia_sts)) { 28238cb1383cSDoug Ambrisko if (trys++ < 4) { 28248cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "Try again\n"); 28254e35d186SJung-uk Kim bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, 28264e35d186SJung-uk Kim BMCR_RESET); 28278cb1383cSDoug Ambrisko goto again; 28288cb1383cSDoug Ambrisko } 28298cb1383cSDoug Ambrisko 2830fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "MII without any PHY!\n"); 283195d67482SBill Paul error = ENXIO; 283295d67482SBill Paul goto fail; 283395d67482SBill Paul } 28348cb1383cSDoug Ambrisko 28358cb1383cSDoug Ambrisko /* 28368cb1383cSDoug Ambrisko * Now tell the firmware we are going up after probing the PHY 28378cb1383cSDoug Ambrisko */ 28388cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 28398cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 284095d67482SBill Paul } 284195d67482SBill Paul 284295d67482SBill Paul /* 2843e255b776SJohn Polstra * When using the BCM5701 in PCI-X mode, data corruption has 2844e255b776SJohn Polstra * been observed in the first few bytes of some received packets. 2845e255b776SJohn Polstra * Aligning the packet buffer in memory eliminates the corruption. 2846e255b776SJohn Polstra * Unfortunately, this misaligns the packet payloads. On platforms 2847e255b776SJohn Polstra * which do not support unaligned accesses, we will realign the 2848e255b776SJohn Polstra * payloads by copying the received packets. 2849e255b776SJohn Polstra */ 2850652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2851652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_PCIX) 2852652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2853e255b776SJohn Polstra 2854e255b776SJohn Polstra /* 285595d67482SBill Paul * Call MI attach routine. 285695d67482SBill Paul */ 2857fc74a9f9SBrooks Davis ether_ifattach(ifp, eaddr); 2858b74e67fbSGleb Smirnoff callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); 28590f9bd73bSSam Leffler 286061ccb9daSPyun YongHyeon /* Tell upper layer we support long frames. */ 286161ccb9daSPyun YongHyeon ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 286261ccb9daSPyun YongHyeon 28630f9bd73bSSam Leffler /* 28640f9bd73bSSam Leffler * Hookup IRQ last. 28650f9bd73bSSam Leffler */ 28664e35d186SJung-uk Kim #if __FreeBSD_version > 700030 2867dfe0df9aSPyun YongHyeon if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { 2868dfe0df9aSPyun YongHyeon /* Take advantage of single-shot MSI. 
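	 * My reading of single-shot (one-shot) MSI: the controller holds
	 * off further MSI messages until the driver re-arms the
	 * interrupt, so the work can be deferred to the fast taskqueue
	 * created below without risking an interrupt storm; clearing
	 * BGE_MSIMODE_ONE_SHOT_DISABLE in the write that follows enables
	 * that mode.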
*/ 28697e6acdf1SPyun YongHyeon CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & 28707e6acdf1SPyun YongHyeon ~BGE_MSIMODE_ONE_SHOT_DISABLE); 2871dfe0df9aSPyun YongHyeon sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, 2872dfe0df9aSPyun YongHyeon taskqueue_thread_enqueue, &sc->bge_tq); 2873dfe0df9aSPyun YongHyeon if (sc->bge_tq == NULL) { 2874dfe0df9aSPyun YongHyeon device_printf(dev, "could not create taskqueue.\n"); 2875dfe0df9aSPyun YongHyeon ether_ifdetach(ifp); 2876dfe0df9aSPyun YongHyeon error = ENXIO; 2877dfe0df9aSPyun YongHyeon goto fail; 2878dfe0df9aSPyun YongHyeon } 2879dfe0df9aSPyun YongHyeon taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq", 2880dfe0df9aSPyun YongHyeon device_get_nameunit(sc->bge_dev)); 2881dfe0df9aSPyun YongHyeon error = bus_setup_intr(dev, sc->bge_irq, 2882dfe0df9aSPyun YongHyeon INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc, 2883dfe0df9aSPyun YongHyeon &sc->bge_intrhand); 2884dfe0df9aSPyun YongHyeon if (error) 2885dfe0df9aSPyun YongHyeon ether_ifdetach(ifp); 2886dfe0df9aSPyun YongHyeon } else 2887dfe0df9aSPyun YongHyeon error = bus_setup_intr(dev, sc->bge_irq, 2888dfe0df9aSPyun YongHyeon INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, 2889dfe0df9aSPyun YongHyeon &sc->bge_intrhand); 28904e35d186SJung-uk Kim #else 28914e35d186SJung-uk Kim error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 28924e35d186SJung-uk Kim bge_intr, sc, &sc->bge_intrhand); 28934e35d186SJung-uk Kim #endif 28940f9bd73bSSam Leffler 28950f9bd73bSSam Leffler if (error) { 2896fc74a9f9SBrooks Davis bge_detach(dev); 2897fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "couldn't set up irq\n"); 28980f9bd73bSSam Leffler } 289995d67482SBill Paul 290008013fd3SMarius Strobl return (0); 290108013fd3SMarius Strobl 290295d67482SBill Paul fail: 290308013fd3SMarius Strobl bge_release_resources(sc); 290408013fd3SMarius Strobl 290595d67482SBill Paul return (error); 290695d67482SBill Paul } 290795d67482SBill Paul 290895d67482SBill Paul static int 29093f74909aSGleb Smirnoff bge_detach(device_t dev) 291095d67482SBill Paul { 291195d67482SBill Paul struct bge_softc *sc; 291295d67482SBill Paul struct ifnet *ifp; 291395d67482SBill Paul 291495d67482SBill Paul sc = device_get_softc(dev); 2915fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 291695d67482SBill Paul 291775719184SGleb Smirnoff #ifdef DEVICE_POLLING 291875719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) 291975719184SGleb Smirnoff ether_poll_deregister(ifp); 292075719184SGleb Smirnoff #endif 292175719184SGleb Smirnoff 29220f9bd73bSSam Leffler BGE_LOCK(sc); 292395d67482SBill Paul bge_stop(sc); 292495d67482SBill Paul bge_reset(sc); 29250f9bd73bSSam Leffler BGE_UNLOCK(sc); 29260f9bd73bSSam Leffler 29275dda8085SOleg Bulyzhin callout_drain(&sc->bge_stat_ch); 29285dda8085SOleg Bulyzhin 2929dfe0df9aSPyun YongHyeon if (sc->bge_tq) 2930dfe0df9aSPyun YongHyeon taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); 29310f9bd73bSSam Leffler ether_ifdetach(ifp); 293295d67482SBill Paul 2933652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 293495d67482SBill Paul ifmedia_removeall(&sc->bge_ifmedia); 293595d67482SBill Paul } else { 293695d67482SBill Paul bus_generic_detach(dev); 293795d67482SBill Paul device_delete_child(dev, sc->bge_miibus); 293895d67482SBill Paul } 293995d67482SBill Paul 294095d67482SBill Paul bge_release_resources(sc); 294195d67482SBill Paul 294295d67482SBill Paul return (0); 294395d67482SBill Paul } 294495d67482SBill Paul 294595d67482SBill Paul static void 29463f74909aSGleb Smirnoff 
bge_release_resources(struct bge_softc *sc) 294795d67482SBill Paul { 294895d67482SBill Paul device_t dev; 294995d67482SBill Paul 295095d67482SBill Paul dev = sc->bge_dev; 295195d67482SBill Paul 2952dfe0df9aSPyun YongHyeon if (sc->bge_tq != NULL) 2953dfe0df9aSPyun YongHyeon taskqueue_free(sc->bge_tq); 2954dfe0df9aSPyun YongHyeon 295595d67482SBill Paul if (sc->bge_intrhand != NULL) 295695d67482SBill Paul bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 295795d67482SBill Paul 295895d67482SBill Paul if (sc->bge_irq != NULL) 2959724bd939SJohn Polstra bus_release_resource(dev, SYS_RES_IRQ, 2960724bd939SJohn Polstra sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq); 2961724bd939SJohn Polstra 2962724bd939SJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) 2963724bd939SJohn Polstra pci_release_msi(dev); 296495d67482SBill Paul 296595d67482SBill Paul if (sc->bge_res != NULL) 296695d67482SBill Paul bus_release_resource(dev, SYS_RES_MEMORY, 2967736b9319SPyun YongHyeon PCIR_BAR(0), sc->bge_res); 296895d67482SBill Paul 2969ad61f896SRuslan Ermilov if (sc->bge_ifp != NULL) 2970ad61f896SRuslan Ermilov if_free(sc->bge_ifp); 2971ad61f896SRuslan Ermilov 2972f41ac2beSBill Paul bge_dma_free(sc); 297395d67482SBill Paul 29740f9bd73bSSam Leffler if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 29750f9bd73bSSam Leffler BGE_LOCK_DESTROY(sc); 297695d67482SBill Paul } 297795d67482SBill Paul 29788cb1383cSDoug Ambrisko static int 29793f74909aSGleb Smirnoff bge_reset(struct bge_softc *sc) 298095d67482SBill Paul { 298195d67482SBill Paul device_t dev; 29825fea260fSMarius Strobl uint32_t cachesize, command, pcistate, reset, val; 29836f8718a3SScott Long void (*write_op)(struct bge_softc *, int, int); 29840aaf1057SPyun YongHyeon uint16_t devctl; 29855fea260fSMarius Strobl int i; 298695d67482SBill Paul 298795d67482SBill Paul dev = sc->bge_dev; 298895d67482SBill Paul 298938cc658fSJohn Baldwin if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 299038cc658fSJohn Baldwin (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 29916f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) 29926f8718a3SScott Long write_op = bge_writemem_direct; 29936f8718a3SScott Long else 29946f8718a3SScott Long write_op = bge_writemem_ind; 29959ba784dbSScott Long } else 29966f8718a3SScott Long write_op = bge_writereg_ind; 29976f8718a3SScott Long 299895d67482SBill Paul /* Save some important PCI state. */ 299995d67482SBill Paul cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 300095d67482SBill Paul command = pci_read_config(dev, BGE_PCI_CMD, 4); 300195d67482SBill Paul pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 300295d67482SBill Paul 300395d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL, 300495d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3005e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 300695d67482SBill Paul 30076f8718a3SScott Long /* Disable fastboot on controllers that support it. */ 30086f8718a3SScott Long if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 3009a5779553SStanislav Sedov BGE_IS_5755_PLUS(sc)) { 30106f8718a3SScott Long if (bootverbose) 3011333704a3SPyun YongHyeon device_printf(dev, "Disabling fastboot\n"); 30126f8718a3SScott Long CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 30136f8718a3SScott Long } 30146f8718a3SScott Long 30156f8718a3SScott Long /* 30166f8718a3SScott Long * Write the magic number to SRAM at offset 0xB50. 
30176f8718a3SScott Long * When firmware finishes its initialization it will 30186f8718a3SScott Long * write ~BGE_MAGIC_NUMBER to the same location. 30196f8718a3SScott Long */ 30206f8718a3SScott Long bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 30216f8718a3SScott Long 30220c8aa4eaSJung-uk Kim reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 3023e53d81eeSPaul Saab 3024e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3025652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 30260c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ 30270c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, 0x7E2C, 0x20); 3028e53d81eeSPaul Saab if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3029e53d81eeSPaul Saab /* Prevent PCIE link training during global reset */ 30300c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 30310c8aa4eaSJung-uk Kim reset |= 1 << 29; 3032e53d81eeSPaul Saab } 3033e53d81eeSPaul Saab } 3034e53d81eeSPaul Saab 303521c9e407SDavid Christensen /* 30366f8718a3SScott Long * Set GPHY Power Down Override to leave GPHY 30376f8718a3SScott Long * powered up in D0 uninitialized. 30386f8718a3SScott Long */ 30395345bad0SScott Long if (BGE_IS_5705_PLUS(sc)) 3040caf088fcSPyun YongHyeon reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; 30416f8718a3SScott Long 304295d67482SBill Paul /* Issue global reset */ 30436f8718a3SScott Long write_op(sc, BGE_MISC_CFG, reset); 304495d67482SBill Paul 304538cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 30465fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_STATUS); 304738cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_STATUS, 30485fea260fSMarius Strobl val | BGE_VCPU_STATUS_DRV_RESET); 30495fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 305038cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 30515fea260fSMarius Strobl val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 305238cc658fSJohn Baldwin } 305338cc658fSJohn Baldwin 305495d67482SBill Paul DELAY(1000); 305595d67482SBill Paul 3056e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3057652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 3058e53d81eeSPaul Saab if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3059e53d81eeSPaul Saab DELAY(500000); /* wait for link training to complete */ 30605fea260fSMarius Strobl val = pci_read_config(dev, 0xC4, 4); 30615fea260fSMarius Strobl pci_write_config(dev, 0xC4, val | (1 << 15), 4); 3062e53d81eeSPaul Saab } 30630aaf1057SPyun YongHyeon devctl = pci_read_config(dev, 30640aaf1057SPyun YongHyeon sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2); 30650aaf1057SPyun YongHyeon /* Clear enable no snoop and disable relaxed ordering. */ 30669a6e301dSPyun YongHyeon devctl &= ~(PCIM_EXP_CTL_RELAXED_ORD_ENABLE | 30679a6e301dSPyun YongHyeon PCIM_EXP_CTL_NOSNOOP_ENABLE); 30680aaf1057SPyun YongHyeon /* Set PCIE max payload size to 128. */ 30690aaf1057SPyun YongHyeon devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD; 30700aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 30710aaf1057SPyun YongHyeon devctl, 2); 30720aaf1057SPyun YongHyeon /* Clear error status. */ 30730aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA, 30749a6e301dSPyun YongHyeon PCIM_EXP_STA_CORRECTABLE_ERROR | 30759a6e301dSPyun YongHyeon PCIM_EXP_STA_NON_FATAL_ERROR | PCIM_EXP_STA_FATAL_ERROR | 30769a6e301dSPyun YongHyeon PCIM_EXP_STA_UNSUPPORTED_REQ, 2); 3077e53d81eeSPaul Saab } 3078e53d81eeSPaul Saab 30793f74909aSGleb Smirnoff /* Reset some of the PCI state that got zapped by reset. 
*/ 308095d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL, 308195d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3082e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 308395d67482SBill Paul pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 308495d67482SBill Paul pci_write_config(dev, BGE_PCI_CMD, command, 4); 30850c8aa4eaSJung-uk Kim write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 3086cbb2b2feSPyun YongHyeon /* 3087cbb2b2feSPyun YongHyeon * Disable PCI-X relaxed ordering to ensure status block update 3088fa8b4d63SPyun YongHyeon * comes first then packet buffer DMA. Otherwise driver may 3089cbb2b2feSPyun YongHyeon * read stale status block. 3090cbb2b2feSPyun YongHyeon */ 3091cbb2b2feSPyun YongHyeon if (sc->bge_flags & BGE_FLAG_PCIX) { 3092cbb2b2feSPyun YongHyeon devctl = pci_read_config(dev, 3093cbb2b2feSPyun YongHyeon sc->bge_pcixcap + PCIXR_COMMAND, 2); 3094cbb2b2feSPyun YongHyeon devctl &= ~PCIXM_COMMAND_ERO; 3095cbb2b2feSPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { 3096cbb2b2feSPyun YongHyeon devctl &= ~PCIXM_COMMAND_MAX_READ; 3097cbb2b2feSPyun YongHyeon devctl |= PCIXM_COMMAND_MAX_READ_2048; 3098cbb2b2feSPyun YongHyeon } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3099cbb2b2feSPyun YongHyeon devctl &= ~(PCIXM_COMMAND_MAX_SPLITS | 3100cbb2b2feSPyun YongHyeon PCIXM_COMMAND_MAX_READ); 3101cbb2b2feSPyun YongHyeon devctl |= PCIXM_COMMAND_MAX_READ_2048; 3102cbb2b2feSPyun YongHyeon } 3103cbb2b2feSPyun YongHyeon pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, 3104cbb2b2feSPyun YongHyeon devctl, 2); 3105cbb2b2feSPyun YongHyeon } 3106bf6ef57aSJohn Polstra /* Re-enable MSI, if neccesary, and enable the memory arbiter. */ 31074c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) { 3108bf6ef57aSJohn Polstra /* This chip disables MSI on reset. */ 3109bf6ef57aSJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) { 31100aaf1057SPyun YongHyeon val = pci_read_config(dev, 31110aaf1057SPyun YongHyeon sc->bge_msicap + PCIR_MSI_CTRL, 2); 31120aaf1057SPyun YongHyeon pci_write_config(dev, 31130aaf1057SPyun YongHyeon sc->bge_msicap + PCIR_MSI_CTRL, 3114bf6ef57aSJohn Polstra val | PCIM_MSICTRL_MSI_ENABLE, 2); 3115bf6ef57aSJohn Polstra val = CSR_READ_4(sc, BGE_MSI_MODE); 3116bf6ef57aSJohn Polstra CSR_WRITE_4(sc, BGE_MSI_MODE, 3117bf6ef57aSJohn Polstra val | BGE_MSIMODE_ENABLE); 3118bf6ef57aSJohn Polstra } 31194c0da0ffSGleb Smirnoff val = CSR_READ_4(sc, BGE_MARB_MODE); 31204c0da0ffSGleb Smirnoff CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 31214c0da0ffSGleb Smirnoff } else 3122a7b0c314SPaul Saab CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3123a7b0c314SPaul Saab 312438cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 312538cc658fSJohn Baldwin for (i = 0; i < BGE_TIMEOUT; i++) { 312638cc658fSJohn Baldwin val = CSR_READ_4(sc, BGE_VCPU_STATUS); 312738cc658fSJohn Baldwin if (val & BGE_VCPU_STATUS_INIT_DONE) 312838cc658fSJohn Baldwin break; 312938cc658fSJohn Baldwin DELAY(100); 313038cc658fSJohn Baldwin } 313138cc658fSJohn Baldwin if (i == BGE_TIMEOUT) { 3132333704a3SPyun YongHyeon device_printf(dev, "reset timed out\n"); 313338cc658fSJohn Baldwin return (1); 313438cc658fSJohn Baldwin } 313538cc658fSJohn Baldwin } else { 313695d67482SBill Paul /* 31376f8718a3SScott Long * Poll until we see the 1's complement of the magic number. 313808013fd3SMarius Strobl * This indicates that the firmware initialization is complete. 
31395fea260fSMarius Strobl * We expect this to fail if no chip containing the Ethernet 31405fea260fSMarius Strobl * address is fitted though. 314195d67482SBill Paul */ 314295d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 3143d5d23857SJung-uk Kim DELAY(10); 314495d67482SBill Paul val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 314595d67482SBill Paul if (val == ~BGE_MAGIC_NUMBER) 314695d67482SBill Paul break; 314795d67482SBill Paul } 314895d67482SBill Paul 31495fea260fSMarius Strobl if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) 3150333704a3SPyun YongHyeon device_printf(dev, 3151333704a3SPyun YongHyeon "firmware handshake timed out, found 0x%08x\n", 3152333704a3SPyun YongHyeon val); 315338cc658fSJohn Baldwin } 315495d67482SBill Paul 315595d67482SBill Paul /* 315695d67482SBill Paul * XXX Wait for the value of the PCISTATE register to 315795d67482SBill Paul * return to its original pre-reset state. This is a 315895d67482SBill Paul * fairly good indicator of reset completion. If we don't 315995d67482SBill Paul * wait for the reset to fully complete, trying to read 316095d67482SBill Paul * from the device's non-PCI registers may yield garbage 316195d67482SBill Paul * results. 316295d67482SBill Paul */ 316395d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 316495d67482SBill Paul if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 316595d67482SBill Paul break; 316695d67482SBill Paul DELAY(10); 316795d67482SBill Paul } 316895d67482SBill Paul 31693f74909aSGleb Smirnoff /* Fix up byte swapping. */ 3170e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 317195d67482SBill Paul BGE_MODECTL_BYTESWAP_DATA); 317295d67482SBill Paul 31738cb1383cSDoug Ambrisko /* Tell the ASF firmware we are up */ 31748cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 31758cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 31768cb1383cSDoug Ambrisko 317795d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 317895d67482SBill Paul 3179da3003f0SBill Paul /* 3180da3003f0SBill Paul * The 5704 in TBI mode apparently needs some special 3181da3003f0SBill Paul * adjustment to insure the SERDES drive level is set 3182da3003f0SBill Paul * to 1.2V. 3183da3003f0SBill Paul */ 3184652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 3185652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_TBI) { 31865fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_SERDES_CFG); 31875fea260fSMarius Strobl val = (val & ~0xFFF) | 0x880; 31885fea260fSMarius Strobl CSR_WRITE_4(sc, BGE_SERDES_CFG, val); 3189da3003f0SBill Paul } 3190da3003f0SBill Paul 3191e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3192652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE && 3193a5ad2f15SPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && 3194a5ad2f15SPyun YongHyeon sc->bge_asicrev != BGE_ASICREV_BCM5785) { 3195a5ad2f15SPyun YongHyeon /* Enable Data FIFO protection. 
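	 * (This sets bit 25 of the undocumented register 0x7C00; the
	 * value is carried over from the Broadcom Linux driver, per the
	 * XXX note above.)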
*/ 31965fea260fSMarius Strobl val = CSR_READ_4(sc, 0x7C00); 31975fea260fSMarius Strobl CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); 3198e53d81eeSPaul Saab } 319995d67482SBill Paul DELAY(10000); 32008cb1383cSDoug Ambrisko 32018cb1383cSDoug Ambrisko return (0); 320295d67482SBill Paul } 320395d67482SBill Paul 3204e0b7b101SPyun YongHyeon static __inline void 3205e0b7b101SPyun YongHyeon bge_rxreuse_std(struct bge_softc *sc, int i) 3206e0b7b101SPyun YongHyeon { 3207e0b7b101SPyun YongHyeon struct bge_rx_bd *r; 3208e0b7b101SPyun YongHyeon 3209e0b7b101SPyun YongHyeon r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; 3210e0b7b101SPyun YongHyeon r->bge_flags = BGE_RXBDFLAG_END; 3211e0b7b101SPyun YongHyeon r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i]; 3212e0b7b101SPyun YongHyeon r->bge_idx = i; 3213e0b7b101SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3214e0b7b101SPyun YongHyeon } 3215e0b7b101SPyun YongHyeon 3216e0b7b101SPyun YongHyeon static __inline void 3217e0b7b101SPyun YongHyeon bge_rxreuse_jumbo(struct bge_softc *sc, int i) 3218e0b7b101SPyun YongHyeon { 3219e0b7b101SPyun YongHyeon struct bge_extrx_bd *r; 3220e0b7b101SPyun YongHyeon 3221e0b7b101SPyun YongHyeon r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; 3222e0b7b101SPyun YongHyeon r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; 3223e0b7b101SPyun YongHyeon r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0]; 3224e0b7b101SPyun YongHyeon r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1]; 3225e0b7b101SPyun YongHyeon r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2]; 3226e0b7b101SPyun YongHyeon r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3]; 3227e0b7b101SPyun YongHyeon r->bge_idx = i; 3228e0b7b101SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3229e0b7b101SPyun YongHyeon } 3230e0b7b101SPyun YongHyeon 323195d67482SBill Paul /* 323295d67482SBill Paul * Frame reception handling. This is called if there's a frame 323395d67482SBill Paul * on the receive return list. 323495d67482SBill Paul * 323595d67482SBill Paul * Note: we have to be able to handle two possibilities here: 32361be6acb7SGleb Smirnoff * 1) the frame is from the jumbo receive ring 323795d67482SBill Paul * 2) the frame is from the standard receive ring 323895d67482SBill Paul */ 323995d67482SBill Paul 32401abcdbd1SAttilio Rao static int 3241dfe0df9aSPyun YongHyeon bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck) 324295d67482SBill Paul { 324395d67482SBill Paul struct ifnet *ifp; 32441abcdbd1SAttilio Rao int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; 3245b9c05fa5SPyun YongHyeon uint16_t rx_cons; 324695d67482SBill Paul 32477f21e273SStanislav Sedov rx_cons = sc->bge_rx_saved_considx; 32480f9bd73bSSam Leffler 32493f74909aSGleb Smirnoff /* Nothing to do. 
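 * The saved consumer index already matches the producer index taken from the
 * status block, so nothing is waiting on the RX return ring.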
*/ 32507f21e273SStanislav Sedov if (rx_cons == rx_prod) 32511abcdbd1SAttilio Rao return (rx_npkts); 3252cfcb5025SOleg Bulyzhin 3253fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 325495d67482SBill Paul 3255f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 3256e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 3257f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 325815eda801SStanislav Sedov sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); 3259c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 3260c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) 3261f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 326215eda801SStanislav Sedov sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); 3263f41ac2beSBill Paul 32647f21e273SStanislav Sedov while (rx_cons != rx_prod) { 326595d67482SBill Paul struct bge_rx_bd *cur_rx; 32663f74909aSGleb Smirnoff uint32_t rxidx; 326795d67482SBill Paul struct mbuf *m = NULL; 32683f74909aSGleb Smirnoff uint16_t vlan_tag = 0; 326995d67482SBill Paul int have_tag = 0; 327095d67482SBill Paul 327175719184SGleb Smirnoff #ifdef DEVICE_POLLING 327275719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 327375719184SGleb Smirnoff if (sc->rxcycles <= 0) 327475719184SGleb Smirnoff break; 327575719184SGleb Smirnoff sc->rxcycles--; 327675719184SGleb Smirnoff } 327775719184SGleb Smirnoff #endif 327875719184SGleb Smirnoff 32797f21e273SStanislav Sedov cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; 328095d67482SBill Paul 328195d67482SBill Paul rxidx = cur_rx->bge_idx; 32827f21e273SStanislav Sedov BGE_INC(rx_cons, sc->bge_return_ring_cnt); 328395d67482SBill Paul 3284cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING && 3285cb2eacc7SYaroslav Tykhiy cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 328695d67482SBill Paul have_tag = 1; 328795d67482SBill Paul vlan_tag = cur_rx->bge_vlan_tag; 328895d67482SBill Paul } 328995d67482SBill Paul 329095d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 329195d67482SBill Paul jumbocnt++; 3292943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 329395d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3294e0b7b101SPyun YongHyeon bge_rxreuse_jumbo(sc, rxidx); 329595d67482SBill Paul continue; 329695d67482SBill Paul } 3297943787f3SPyun YongHyeon if (bge_newbuf_jumbo(sc, rxidx) != 0) { 3298e0b7b101SPyun YongHyeon bge_rxreuse_jumbo(sc, rxidx); 3299943787f3SPyun YongHyeon ifp->if_iqdrops++; 330095d67482SBill Paul continue; 330195d67482SBill Paul } 330203e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 330395d67482SBill Paul } else { 330495d67482SBill Paul stdcnt++; 3305e0b7b101SPyun YongHyeon m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 330695d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3307e0b7b101SPyun YongHyeon bge_rxreuse_std(sc, rxidx); 330895d67482SBill Paul continue; 330995d67482SBill Paul } 3310943787f3SPyun YongHyeon if (bge_newbuf_std(sc, rxidx) != 0) { 3311e0b7b101SPyun YongHyeon bge_rxreuse_std(sc, rxidx); 3312943787f3SPyun YongHyeon ifp->if_iqdrops++; 331395d67482SBill Paul continue; 331495d67482SBill Paul } 331503e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 331695d67482SBill Paul } 331795d67482SBill Paul 331895d67482SBill Paul ifp->if_ipackets++; 3319e65bed95SPyun YongHyeon #ifndef __NO_STRICT_ALIGNMENT 3320e255b776SJohn Polstra /* 3321e65bed95SPyun YongHyeon * For 
architectures with strict alignment we must make sure 3322e65bed95SPyun YongHyeon * the payload is aligned. 3323e255b776SJohn Polstra */ 3324652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 3325e255b776SJohn Polstra bcopy(m->m_data, m->m_data + ETHER_ALIGN, 3326e255b776SJohn Polstra cur_rx->bge_len); 3327e255b776SJohn Polstra m->m_data += ETHER_ALIGN; 3328e255b776SJohn Polstra } 3329e255b776SJohn Polstra #endif 3330473851baSPaul Saab m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 333195d67482SBill Paul m->m_pkthdr.rcvif = ifp; 333295d67482SBill Paul 3333b874fdd4SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_RXCSUM) { 333478178cd1SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 333595d67482SBill Paul m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 33360c8aa4eaSJung-uk Kim if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) 33370c8aa4eaSJung-uk Kim m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 333878178cd1SGleb Smirnoff } 3339d375e524SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3340d375e524SGleb Smirnoff m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 334195d67482SBill Paul m->m_pkthdr.csum_data = 334295d67482SBill Paul cur_rx->bge_tcp_udp_csum; 3343ee7ef91cSOleg Bulyzhin m->m_pkthdr.csum_flags |= 3344ee7ef91cSOleg Bulyzhin CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 334595d67482SBill Paul } 334695d67482SBill Paul } 334795d67482SBill Paul 334895d67482SBill Paul /* 3349673d9191SSam Leffler * If we received a packet with a vlan tag, 3350673d9191SSam Leffler * attach that information to the packet. 335195d67482SBill Paul */ 3352d147662cSGleb Smirnoff if (have_tag) { 33534e35d186SJung-uk Kim #if __FreeBSD_version > 700022 335478ba57b9SAndre Oppermann m->m_pkthdr.ether_vtag = vlan_tag; 335578ba57b9SAndre Oppermann m->m_flags |= M_VLANTAG; 33564e35d186SJung-uk Kim #else 33574e35d186SJung-uk Kim VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); 33584e35d186SJung-uk Kim if (m == NULL) 33594e35d186SJung-uk Kim continue; 33604e35d186SJung-uk Kim #endif 3361d147662cSGleb Smirnoff } 336295d67482SBill Paul 3363dfe0df9aSPyun YongHyeon if (holdlck != 0) { 33640f9bd73bSSam Leffler BGE_UNLOCK(sc); 3365673d9191SSam Leffler (*ifp->if_input)(ifp, m); 33660f9bd73bSSam Leffler BGE_LOCK(sc); 3367dfe0df9aSPyun YongHyeon } else 3368dfe0df9aSPyun YongHyeon (*ifp->if_input)(ifp, m); 3369d4da719cSAttilio Rao rx_npkts++; 337025e13e68SXin LI 337125e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 33728cf7d13dSAttilio Rao return (rx_npkts); 337395d67482SBill Paul } 337495d67482SBill Paul 337515eda801SStanislav Sedov bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 337615eda801SStanislav Sedov sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); 3377e65bed95SPyun YongHyeon if (stdcnt > 0) 3378f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 3379e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 33804c0da0ffSGleb Smirnoff 3381c215fd77SPyun YongHyeon if (jumbocnt > 0) 3382f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 33834c0da0ffSGleb Smirnoff sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 3384f41ac2beSBill Paul 33857f21e273SStanislav Sedov sc->bge_rx_saved_considx = rx_cons; 338638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 338795d67482SBill Paul if (stdcnt) 3388767c3593SPyun YongHyeon bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std + 3389767c3593SPyun YongHyeon BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT); 339095d67482SBill Paul if (jumbocnt) 
3391767c3593SPyun YongHyeon bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo + 3392767c3593SPyun YongHyeon BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT); 3393f5a034f9SPyun YongHyeon #ifdef notyet 3394f5a034f9SPyun YongHyeon /* 3395f5a034f9SPyun YongHyeon * This register wraps very quickly under heavy packet drops. 3396f5a034f9SPyun YongHyeon * If you need correct statistics, you can enable this check. 3397f5a034f9SPyun YongHyeon */ 3398f5a034f9SPyun YongHyeon if (BGE_IS_5705_PLUS(sc)) 3399f5a034f9SPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3400f5a034f9SPyun YongHyeon #endif 34011abcdbd1SAttilio Rao return (rx_npkts); 340295d67482SBill Paul } 340395d67482SBill Paul 340495d67482SBill Paul static void 3405b9c05fa5SPyun YongHyeon bge_txeof(struct bge_softc *sc, uint16_t tx_cons) 340695d67482SBill Paul { 340795a0a340SPyun YongHyeon struct bge_tx_bd *cur_tx; 340895d67482SBill Paul struct ifnet *ifp; 340995d67482SBill Paul 34100f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 34110f9bd73bSSam Leffler 34123f74909aSGleb Smirnoff /* Nothing to do. */ 3413b9c05fa5SPyun YongHyeon if (sc->bge_tx_saved_considx == tx_cons) 3414cfcb5025SOleg Bulyzhin return; 3415cfcb5025SOleg Bulyzhin 3416fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 341795d67482SBill Paul 3418e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 34195c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); 342095d67482SBill Paul /* 342195d67482SBill Paul * Go through our tx ring and free mbufs for those 342295d67482SBill Paul * frames that have been sent. 342395d67482SBill Paul */ 3424b9c05fa5SPyun YongHyeon while (sc->bge_tx_saved_considx != tx_cons) { 342595a0a340SPyun YongHyeon uint32_t idx; 342695d67482SBill Paul 342795d67482SBill Paul idx = sc->bge_tx_saved_considx; 3428f41ac2beSBill Paul cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 342995d67482SBill Paul if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 343095d67482SBill Paul ifp->if_opackets++; 343195d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 34320ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 3433e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[idx], 3434e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 34350ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 3436f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[idx]); 3437e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[idx]); 3438e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[idx] = NULL; 343995d67482SBill Paul } 344095d67482SBill Paul sc->bge_txcnt--; 344195d67482SBill Paul BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 344295d67482SBill Paul } 344395d67482SBill Paul 344413f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 34455b01e77cSBruce Evans if (sc->bge_txcnt == 0) 34465b01e77cSBruce Evans sc->bge_timer = 0; 344795d67482SBill Paul } 344895d67482SBill Paul 344975719184SGleb Smirnoff #ifdef DEVICE_POLLING 34501abcdbd1SAttilio Rao static int 345175719184SGleb Smirnoff bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 345275719184SGleb Smirnoff { 345375719184SGleb Smirnoff struct bge_softc *sc = ifp->if_softc; 3454b9c05fa5SPyun YongHyeon uint16_t rx_prod, tx_cons; 3455366454f2SOleg Bulyzhin uint32_t statusword; 34561abcdbd1SAttilio Rao int rx_npkts = 0; 345775719184SGleb Smirnoff 34583f74909aSGleb Smirnoff BGE_LOCK(sc); 34593f74909aSGleb Smirnoff if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 34603f74909aSGleb Smirnoff BGE_UNLOCK(sc); 34611abcdbd1SAttilio Rao return 
(rx_npkts); 34623f74909aSGleb Smirnoff } 346375719184SGleb Smirnoff 3464dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3465b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3466b9c05fa5SPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3467b9c05fa5SPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 3468b9c05fa5SPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 3469dab5cd05SOleg Bulyzhin 3470175f8742SPyun YongHyeon statusword = sc->bge_ldata.bge_status_block->bge_status; 3471175f8742SPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0; 3472dab5cd05SOleg Bulyzhin 3473dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3474b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3475b9c05fa5SPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3476366454f2SOleg Bulyzhin 34770c8aa4eaSJung-uk Kim /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */ 3478366454f2SOleg Bulyzhin if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) 3479366454f2SOleg Bulyzhin sc->bge_link_evt++; 3480366454f2SOleg Bulyzhin 3481366454f2SOleg Bulyzhin if (cmd == POLL_AND_CHECK_STATUS) 3482366454f2SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 34834c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3484652ae483SGleb Smirnoff sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) 3485366454f2SOleg Bulyzhin bge_link_upd(sc); 3486366454f2SOleg Bulyzhin 3487366454f2SOleg Bulyzhin sc->rxcycles = count; 3488dfe0df9aSPyun YongHyeon rx_npkts = bge_rxeof(sc, rx_prod, 1); 348925e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 349025e13e68SXin LI BGE_UNLOCK(sc); 34918cf7d13dSAttilio Rao return (rx_npkts); 349225e13e68SXin LI } 3493b9c05fa5SPyun YongHyeon bge_txeof(sc, tx_cons); 3494366454f2SOleg Bulyzhin if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3495366454f2SOleg Bulyzhin bge_start_locked(ifp); 34963f74909aSGleb Smirnoff 34973f74909aSGleb Smirnoff BGE_UNLOCK(sc); 34981abcdbd1SAttilio Rao return (rx_npkts); 349975719184SGleb Smirnoff } 350075719184SGleb Smirnoff #endif /* DEVICE_POLLING */ 350175719184SGleb Smirnoff 3502dfe0df9aSPyun YongHyeon static int 3503dfe0df9aSPyun YongHyeon bge_msi_intr(void *arg) 3504dfe0df9aSPyun YongHyeon { 3505dfe0df9aSPyun YongHyeon struct bge_softc *sc; 3506dfe0df9aSPyun YongHyeon 3507dfe0df9aSPyun YongHyeon sc = (struct bge_softc *)arg; 3508dfe0df9aSPyun YongHyeon /* 3509dfe0df9aSPyun YongHyeon * This interrupt is not shared and controller already 3510dfe0df9aSPyun YongHyeon * disabled further interrupt. 3511dfe0df9aSPyun YongHyeon */ 3512dfe0df9aSPyun YongHyeon taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task); 3513dfe0df9aSPyun YongHyeon return (FILTER_HANDLED); 3514dfe0df9aSPyun YongHyeon } 3515dfe0df9aSPyun YongHyeon 3516dfe0df9aSPyun YongHyeon static void 3517dfe0df9aSPyun YongHyeon bge_intr_task(void *arg, int pending) 3518dfe0df9aSPyun YongHyeon { 3519dfe0df9aSPyun YongHyeon struct bge_softc *sc; 3520dfe0df9aSPyun YongHyeon struct ifnet *ifp; 3521dfe0df9aSPyun YongHyeon uint32_t status; 3522dfe0df9aSPyun YongHyeon uint16_t rx_prod, tx_cons; 3523dfe0df9aSPyun YongHyeon 3524dfe0df9aSPyun YongHyeon sc = (struct bge_softc *)arg; 3525dfe0df9aSPyun YongHyeon ifp = sc->bge_ifp; 3526dfe0df9aSPyun YongHyeon 3527dfe0df9aSPyun YongHyeon if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 3528dfe0df9aSPyun YongHyeon return; 3529dfe0df9aSPyun YongHyeon 3530dfe0df9aSPyun YongHyeon /* Get updated status block. 
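 * Sync the status block for the CPU so the producer/consumer indices read
 * below are the ones the controller most recently wrote.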
*/
3531dfe0df9aSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3532dfe0df9aSPyun YongHyeon sc->bge_cdata.bge_status_map,
3533dfe0df9aSPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3534dfe0df9aSPyun YongHyeon
3535dfe0df9aSPyun YongHyeon /* Save producer/consumer indexes. */
3536dfe0df9aSPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3537dfe0df9aSPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3538dfe0df9aSPyun YongHyeon status = sc->bge_ldata.bge_status_block->bge_status;
3539dfe0df9aSPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0;
3540dfe0df9aSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3541dfe0df9aSPyun YongHyeon sc->bge_cdata.bge_status_map,
3542dfe0df9aSPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3543dfe0df9aSPyun YongHyeon /* Let the controller work. */
3544dfe0df9aSPyun YongHyeon bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3545dfe0df9aSPyun YongHyeon
3546dfe0df9aSPyun YongHyeon if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) {
3547dfe0df9aSPyun YongHyeon BGE_LOCK(sc);
3548dfe0df9aSPyun YongHyeon bge_link_upd(sc);
3549dfe0df9aSPyun YongHyeon BGE_UNLOCK(sc);
3550dfe0df9aSPyun YongHyeon }
3551dfe0df9aSPyun YongHyeon if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3552dfe0df9aSPyun YongHyeon /* Check RX return ring producer/consumer. */
3553dfe0df9aSPyun YongHyeon bge_rxeof(sc, rx_prod, 0);
3554dfe0df9aSPyun YongHyeon }
3555dfe0df9aSPyun YongHyeon if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3556dfe0df9aSPyun YongHyeon BGE_LOCK(sc);
3557dfe0df9aSPyun YongHyeon /* Check TX ring producer/consumer. */
3558dfe0df9aSPyun YongHyeon bge_txeof(sc, tx_cons);
3559dfe0df9aSPyun YongHyeon if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3560dfe0df9aSPyun YongHyeon bge_start_locked(ifp);
3561dfe0df9aSPyun YongHyeon BGE_UNLOCK(sc);
3562dfe0df9aSPyun YongHyeon }
3563dfe0df9aSPyun YongHyeon }
3564dfe0df9aSPyun YongHyeon
356595d67482SBill Paul static void
35663f74909aSGleb Smirnoff bge_intr(void *xsc)
356795d67482SBill Paul {
356895d67482SBill Paul struct bge_softc *sc;
356995d67482SBill Paul struct ifnet *ifp;
3570dab5cd05SOleg Bulyzhin uint32_t statusword;
3571b9c05fa5SPyun YongHyeon uint16_t rx_prod, tx_cons;
357295d67482SBill Paul
357395d67482SBill Paul sc = xsc;
3574f41ac2beSBill Paul
35750f9bd73bSSam Leffler BGE_LOCK(sc);
35760f9bd73bSSam Leffler
3577dab5cd05SOleg Bulyzhin ifp = sc->bge_ifp;
3578dab5cd05SOleg Bulyzhin
357975719184SGleb Smirnoff #ifdef DEVICE_POLLING
358075719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) {
358175719184SGleb Smirnoff BGE_UNLOCK(sc);
358275719184SGleb Smirnoff return;
358375719184SGleb Smirnoff }
358475719184SGleb Smirnoff #endif
358575719184SGleb Smirnoff
3586f30cbfc6SScott Long /*
3587b848e032SBruce Evans * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3588b848e032SBruce Evans * disable interrupts by writing nonzero like we used to, since with
3589b848e032SBruce Evans * our current organization this just gives complications and
3590b848e032SBruce Evans * pessimizations for re-enabling interrupts. We used to have races
Disabling interrupts 3592b848e032SBruce Evans * would just reduce the chance of a status update while we are 3593b848e032SBruce Evans * running (by switching to the interrupt-mode coalescence 3594b848e032SBruce Evans * parameters), but this chance is already very low so it is more 3595b848e032SBruce Evans * efficient to get another interrupt than prevent it. 3596b848e032SBruce Evans * 3597b848e032SBruce Evans * We do the ack first to ensure another interrupt if there is a 3598b848e032SBruce Evans * status update after the ack. We don't check for the status 3599b848e032SBruce Evans * changing later because it is more efficient to get another 3600b848e032SBruce Evans * interrupt than prevent it, not quite as above (not checking is 3601b848e032SBruce Evans * a smaller optimization than not toggling the interrupt enable, 3602b848e032SBruce Evans * since checking doesn't involve PCI accesses and toggling require 3603b848e032SBruce Evans * the status check). So toggling would probably be a pessimization 3604b848e032SBruce Evans * even with MSI. It would only be needed for using a task queue. 3605b848e032SBruce Evans */ 360638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3607b848e032SBruce Evans 3608f584dfd1SPyun YongHyeon /* 3609f584dfd1SPyun YongHyeon * Do the mandatory PCI flush as well as get the link status. 3610f584dfd1SPyun YongHyeon */ 3611f584dfd1SPyun YongHyeon statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; 3612f584dfd1SPyun YongHyeon 3613f584dfd1SPyun YongHyeon /* Make sure the descriptor ring indexes are coherent. */ 3614f584dfd1SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3615f584dfd1SPyun YongHyeon sc->bge_cdata.bge_status_map, 3616f584dfd1SPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3617f584dfd1SPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 3618f584dfd1SPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 3619f584dfd1SPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0; 3620f584dfd1SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3621f584dfd1SPyun YongHyeon sc->bge_cdata.bge_status_map, 3622f584dfd1SPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3623f584dfd1SPyun YongHyeon 36241f313773SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 36254c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3626f30cbfc6SScott Long statusword || sc->bge_link_evt) 3627dab5cd05SOleg Bulyzhin bge_link_upd(sc); 362895d67482SBill Paul 362913f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 36303f74909aSGleb Smirnoff /* Check RX return ring producer/consumer. */ 3631dfe0df9aSPyun YongHyeon bge_rxeof(sc, rx_prod, 1); 363225e13e68SXin LI } 363395d67482SBill Paul 363425e13e68SXin LI if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 36353f74909aSGleb Smirnoff /* Check TX ring producer/consumer. 
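 * bge_txeof() reclaims DMA maps and mbufs for descriptors the chip has
 * finished with and clears IFF_DRV_OACTIVE so queued frames can flow again.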
*/ 3636b9c05fa5SPyun YongHyeon bge_txeof(sc, tx_cons); 363795d67482SBill Paul } 363895d67482SBill Paul 363913f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING && 364013f4c340SRobert Watson !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 36410f9bd73bSSam Leffler bge_start_locked(ifp); 36420f9bd73bSSam Leffler 36430f9bd73bSSam Leffler BGE_UNLOCK(sc); 364495d67482SBill Paul } 364595d67482SBill Paul 364695d67482SBill Paul static void 36478cb1383cSDoug Ambrisko bge_asf_driver_up(struct bge_softc *sc) 36488cb1383cSDoug Ambrisko { 36498cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) { 36508cb1383cSDoug Ambrisko /* Send ASF heartbeat aprox. every 2s */ 36518cb1383cSDoug Ambrisko if (sc->bge_asf_count) 36528cb1383cSDoug Ambrisko sc->bge_asf_count --; 36538cb1383cSDoug Ambrisko else { 3654899d6846SPyun YongHyeon sc->bge_asf_count = 2; 36558cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 36568cb1383cSDoug Ambrisko BGE_FW_DRV_ALIVE); 36578cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 36588cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 36598cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 366039153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 36618cb1383cSDoug Ambrisko } 36628cb1383cSDoug Ambrisko } 36638cb1383cSDoug Ambrisko } 36648cb1383cSDoug Ambrisko 36658cb1383cSDoug Ambrisko static void 3666b74e67fbSGleb Smirnoff bge_tick(void *xsc) 36670f9bd73bSSam Leffler { 3668b74e67fbSGleb Smirnoff struct bge_softc *sc = xsc; 366995d67482SBill Paul struct mii_data *mii = NULL; 367095d67482SBill Paul 36710f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 367295d67482SBill Paul 36735dda8085SOleg Bulyzhin /* Synchronize with possible callout reset/stop. */ 36745dda8085SOleg Bulyzhin if (callout_pending(&sc->bge_stat_ch) || 36755dda8085SOleg Bulyzhin !callout_active(&sc->bge_stat_ch)) 36765dda8085SOleg Bulyzhin return; 36775dda8085SOleg Bulyzhin 36787ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 36790434d1b8SBill Paul bge_stats_update_regs(sc); 36800434d1b8SBill Paul else 368195d67482SBill Paul bge_stats_update(sc); 368295d67482SBill Paul 3683652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 368495d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 368582b67c01SOleg Bulyzhin /* 368682b67c01SOleg Bulyzhin * Do not touch PHY if we have link up. This could break 368782b67c01SOleg Bulyzhin * IPMI/ASF mode or produce extra input errors 368882b67c01SOleg Bulyzhin * (extra errors was reported for bcm5701 & bcm5704). 368982b67c01SOleg Bulyzhin */ 369082b67c01SOleg Bulyzhin if (!sc->bge_link) 369195d67482SBill Paul mii_tick(mii); 36927b97099dSOleg Bulyzhin } else { 36937b97099dSOleg Bulyzhin /* 36947b97099dSOleg Bulyzhin * Since in TBI mode auto-polling can't be used we should poll 36957b97099dSOleg Bulyzhin * link status manually. Here we register pending link event 36967b97099dSOleg Bulyzhin * and trigger interrupt. 36977b97099dSOleg Bulyzhin */ 36987b97099dSOleg Bulyzhin #ifdef DEVICE_POLLING 36993f74909aSGleb Smirnoff /* In polling mode we poll link state in bge_poll(). */ 37007b97099dSOleg Bulyzhin if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 37017b97099dSOleg Bulyzhin #endif 37027b97099dSOleg Bulyzhin { 37037b97099dSOleg Bulyzhin sc->bge_link_evt++; 37044f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 37054f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 37067b97099dSOleg Bulyzhin BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 37074f0794ffSBjoern A. 
Zeeb else 37084f0794ffSBjoern A. Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 37097b97099dSOleg Bulyzhin } 3710dab5cd05SOleg Bulyzhin } 371195d67482SBill Paul 37128cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 3713b74e67fbSGleb Smirnoff bge_watchdog(sc); 37148cb1383cSDoug Ambrisko 3715dab5cd05SOleg Bulyzhin callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 371695d67482SBill Paul } 371795d67482SBill Paul 371895d67482SBill Paul static void 37193f74909aSGleb Smirnoff bge_stats_update_regs(struct bge_softc *sc) 37200434d1b8SBill Paul { 37213f74909aSGleb Smirnoff struct ifnet *ifp; 37220434d1b8SBill Paul 3723fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 37240434d1b8SBill Paul 37256b037352SJung-uk Kim ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 37267e6e2507SJung-uk Kim offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 37277e6e2507SJung-uk Kim 3728e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 37296b037352SJung-uk Kim ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3730e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 37310434d1b8SBill Paul } 37320434d1b8SBill Paul 37330434d1b8SBill Paul static void 37343f74909aSGleb Smirnoff bge_stats_update(struct bge_softc *sc) 373595d67482SBill Paul { 373695d67482SBill Paul struct ifnet *ifp; 3737e907febfSPyun YongHyeon bus_size_t stats; 37387e6e2507SJung-uk Kim uint32_t cnt; /* current register value */ 373995d67482SBill Paul 3740fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 374195d67482SBill Paul 3742e907febfSPyun YongHyeon stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3743e907febfSPyun YongHyeon 3744e907febfSPyun YongHyeon #define READ_STAT(sc, stats, stat) \ 3745e907febfSPyun YongHyeon CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 374695d67482SBill Paul 37478634dfffSJung-uk Kim cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); 37486b037352SJung-uk Kim ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions); 37496fb34dd2SOleg Bulyzhin sc->bge_tx_collisions = cnt; 37506fb34dd2SOleg Bulyzhin 37516fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 37526b037352SJung-uk Kim ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards); 37536fb34dd2SOleg Bulyzhin sc->bge_rx_discards = cnt; 37546fb34dd2SOleg Bulyzhin 37556fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 37566b037352SJung-uk Kim ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards); 37576fb34dd2SOleg Bulyzhin sc->bge_tx_discards = cnt; 375895d67482SBill Paul 3759e907febfSPyun YongHyeon #undef READ_STAT 376095d67482SBill Paul } 376195d67482SBill Paul 376295d67482SBill Paul /* 3763d375e524SGleb Smirnoff * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3764d375e524SGleb Smirnoff * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3765d375e524SGleb Smirnoff * but when such padded frames employ the bge IP/TCP checksum offload, 3766d375e524SGleb Smirnoff * the hardware checksum assist gives incorrect results (possibly 3767d375e524SGleb Smirnoff * from incorporating its own padding into the UDP/TCP checksum; who knows). 3768d375e524SGleb Smirnoff * If we pad such runts with zeros, the onboard checksum comes out correct. 
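 * For example, a 42-byte frame gets ETHER_MIN_NOPAD - 42 = 18 bytes of zeros
 * appended before the controller checksums it (ETHER_MIN_NOPAD is the 64-byte
 * Ethernet minimum frame length less the 4-byte CRC, i.e. 60 bytes).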
3769d375e524SGleb Smirnoff */ 3770d375e524SGleb Smirnoff static __inline int 3771d375e524SGleb Smirnoff bge_cksum_pad(struct mbuf *m) 3772d375e524SGleb Smirnoff { 3773d375e524SGleb Smirnoff int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 3774d375e524SGleb Smirnoff struct mbuf *last; 3775d375e524SGleb Smirnoff 3776d375e524SGleb Smirnoff /* If there's only the packet-header and we can pad there, use it. */ 3777d375e524SGleb Smirnoff if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 3778d375e524SGleb Smirnoff M_TRAILINGSPACE(m) >= padlen) { 3779d375e524SGleb Smirnoff last = m; 3780d375e524SGleb Smirnoff } else { 3781d375e524SGleb Smirnoff /* 3782d375e524SGleb Smirnoff * Walk packet chain to find last mbuf. We will either 3783d375e524SGleb Smirnoff * pad there, or append a new mbuf and pad it. 3784d375e524SGleb Smirnoff */ 3785d375e524SGleb Smirnoff for (last = m; last->m_next != NULL; last = last->m_next); 3786d375e524SGleb Smirnoff if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 3787d375e524SGleb Smirnoff /* Allocate new empty mbuf, pad it. Compact later. */ 3788d375e524SGleb Smirnoff struct mbuf *n; 3789d375e524SGleb Smirnoff 3790d375e524SGleb Smirnoff MGET(n, M_DONTWAIT, MT_DATA); 3791d375e524SGleb Smirnoff if (n == NULL) 3792d375e524SGleb Smirnoff return (ENOBUFS); 3793d375e524SGleb Smirnoff n->m_len = 0; 3794d375e524SGleb Smirnoff last->m_next = n; 3795d375e524SGleb Smirnoff last = n; 3796d375e524SGleb Smirnoff } 3797d375e524SGleb Smirnoff } 3798d375e524SGleb Smirnoff 3799d375e524SGleb Smirnoff /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 3800d375e524SGleb Smirnoff memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3801d375e524SGleb Smirnoff last->m_len += padlen; 3802d375e524SGleb Smirnoff m->m_pkthdr.len += padlen; 3803d375e524SGleb Smirnoff 3804d375e524SGleb Smirnoff return (0); 3805d375e524SGleb Smirnoff } 3806d375e524SGleb Smirnoff 3807ca3f1187SPyun YongHyeon static struct mbuf * 3808ca3f1187SPyun YongHyeon bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss) 3809ca3f1187SPyun YongHyeon { 3810ca3f1187SPyun YongHyeon struct ip *ip; 3811ca3f1187SPyun YongHyeon struct tcphdr *tcp; 3812ca3f1187SPyun YongHyeon struct mbuf *n; 3813ca3f1187SPyun YongHyeon uint16_t hlen; 38145b355c4fSPyun YongHyeon uint32_t poff; 3815ca3f1187SPyun YongHyeon 3816ca3f1187SPyun YongHyeon if (M_WRITABLE(m) == 0) { 3817ca3f1187SPyun YongHyeon /* Get a writable copy. 
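 * m_dup() deep-copies the chain into freshly allocated mbufs, so the IP/TCP
 * header fields patched below cannot scribble on data shared with other
 * mbuf references.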
*/
3818ca3f1187SPyun YongHyeon n = m_dup(m, M_DONTWAIT);
3819ca3f1187SPyun YongHyeon m_freem(m);
3820ca3f1187SPyun YongHyeon if (n == NULL)
3821ca3f1187SPyun YongHyeon return (NULL);
3822ca3f1187SPyun YongHyeon m = n;
3823ca3f1187SPyun YongHyeon }
38245b355c4fSPyun YongHyeon m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
3825ca3f1187SPyun YongHyeon if (m == NULL)
3826ca3f1187SPyun YongHyeon return (NULL);
38275b355c4fSPyun YongHyeon ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
38285b355c4fSPyun YongHyeon poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
3829ca3f1187SPyun YongHyeon m = m_pullup(m, poff + sizeof(struct tcphdr));
3830ca3f1187SPyun YongHyeon if (m == NULL)
3831ca3f1187SPyun YongHyeon return (NULL);
3832ca3f1187SPyun YongHyeon tcp = (struct tcphdr *)(mtod(m, char *) + poff);
38335b355c4fSPyun YongHyeon m = m_pullup(m, poff + (tcp->th_off << 2));
3834ca3f1187SPyun YongHyeon if (m == NULL)
3835ca3f1187SPyun YongHyeon return (NULL);
3836ca3f1187SPyun YongHyeon /*
3837ca3f1187SPyun YongHyeon * It seems the controller doesn't modify the IP length and TCP pseudo
3838ca3f1187SPyun YongHyeon * checksum. These checksums, computed by the upper stack, should be 0.
3839ca3f1187SPyun YongHyeon */
3840ca3f1187SPyun YongHyeon *mss = m->m_pkthdr.tso_segsz;
3841ca3f1187SPyun YongHyeon ip->ip_sum = 0;
3842ca3f1187SPyun YongHyeon ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
3843ca3f1187SPyun YongHyeon /* Clear the pseudo checksum computed by the TCP stack. */
3844ca3f1187SPyun YongHyeon tcp->th_sum = 0;
3845ca3f1187SPyun YongHyeon /*
3846ca3f1187SPyun YongHyeon * Broadcom controllers use different descriptor formats for
3847ca3f1187SPyun YongHyeon * TSO depending on the ASIC revision. Due to the TSO-capable firmware
3848ca3f1187SPyun YongHyeon * license issue and the lower performance of firmware-based TSO,
3849ca3f1187SPyun YongHyeon * we only support hardware-based TSO, which is applicable to
3850ca3f1187SPyun YongHyeon * BCM5755 or newer controllers. Hardware-based TSO uses the lower 11
3851ca3f1187SPyun YongHyeon * bits to store the MSS and the upper 5 bits to store the IP/TCP
3852ca3f1187SPyun YongHyeon * header length (including IP/TCP options). The header length
3853ca3f1187SPyun YongHyeon * is expressed in 32-bit units.
3854ca3f1187SPyun YongHyeon */
3855ca3f1187SPyun YongHyeon hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
3856ca3f1187SPyun YongHyeon *mss |= (hlen << 11);
3857ca3f1187SPyun YongHyeon return (m);
3858ca3f1187SPyun YongHyeon }
3859ca3f1187SPyun YongHyeon
3860d375e524SGleb Smirnoff /*
386195d67482SBill Paul * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
386295d67482SBill Paul * pointers to descriptors.
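 * The chain is mapped with bus_dmamap_load_mbuf_sg() and one bge_tx_bd is
 * filled in per DMA segment; the checksum/TSO flags, VLAN tag and MSS are
 * copied into every descriptor and the final one is marked BGE_TXBDFLAG_END.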
386395d67482SBill Paul */
386495d67482SBill Paul static int
3865676ad2c9SGleb Smirnoff bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
386695d67482SBill Paul {
38677e27542aSGleb Smirnoff bus_dma_segment_t segs[BGE_NSEG_NEW];
3868f41ac2beSBill Paul bus_dmamap_t map;
3869676ad2c9SGleb Smirnoff struct bge_tx_bd *d;
3870676ad2c9SGleb Smirnoff struct mbuf *m = *m_head;
38717e27542aSGleb Smirnoff uint32_t idx = *txidx;
3872ca3f1187SPyun YongHyeon uint16_t csum_flags, mss, vlan_tag;
38737e27542aSGleb Smirnoff int nsegs, i, error;
387495d67482SBill Paul
38756909dc43SGleb Smirnoff csum_flags = 0;
3876ca3f1187SPyun YongHyeon mss = 0;
3877ca3f1187SPyun YongHyeon vlan_tag = 0;
3878ca3f1187SPyun YongHyeon if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
3879ca3f1187SPyun YongHyeon *m_head = m = bge_setup_tso(sc, m, &mss);
3880ca3f1187SPyun YongHyeon if (*m_head == NULL)
3881ca3f1187SPyun YongHyeon return (ENOBUFS);
3882ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
3883ca3f1187SPyun YongHyeon BGE_TXBDFLAG_CPU_POST_DMA;
388435f945cdSPyun YongHyeon } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
38856909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & CSUM_IP)
38866909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_CSUM;
38876909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
38886909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
38896909dc43SGleb Smirnoff if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
38906909dc43SGleb Smirnoff (error = bge_cksum_pad(m)) != 0) {
38916909dc43SGleb Smirnoff m_freem(m);
38926909dc43SGleb Smirnoff *m_head = NULL;
38936909dc43SGleb Smirnoff return (error);
38946909dc43SGleb Smirnoff }
38956909dc43SGleb Smirnoff }
38966909dc43SGleb Smirnoff if (m->m_flags & M_LASTFRAG)
38976909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
38986909dc43SGleb Smirnoff else if (m->m_flags & M_FRAG)
38996909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG;
39006909dc43SGleb Smirnoff }
39016909dc43SGleb Smirnoff
3902d94f2b85SPyun YongHyeon if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3903beaa2ae1SPyun YongHyeon sc->bge_forced_collapse > 0 &&
3904beaa2ae1SPyun YongHyeon (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
3905d94f2b85SPyun YongHyeon /*
3906d94f2b85SPyun YongHyeon * Forcibly collapse mbuf chains to work around a hardware
3907d94f2b85SPyun YongHyeon * limitation that supports only a single outstanding
3908d94f2b85SPyun YongHyeon * DMA read operation.
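 * (m_defrag() copies the whole chain into as few mbufs as possible, while
 * m_collapse() only coalesces until the chain fits the requested segment
 * count, which is cheaper.)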
3909d94f2b85SPyun YongHyeon */ 3910beaa2ae1SPyun YongHyeon if (sc->bge_forced_collapse == 1) 3911d94f2b85SPyun YongHyeon m = m_defrag(m, M_DONTWAIT); 3912d94f2b85SPyun YongHyeon else 3913beaa2ae1SPyun YongHyeon m = m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse); 3914261f04d6SPyun YongHyeon if (m == NULL) 3915261f04d6SPyun YongHyeon m = *m_head; 3916d94f2b85SPyun YongHyeon *m_head = m; 3917d94f2b85SPyun YongHyeon } 3918d94f2b85SPyun YongHyeon 39197e27542aSGleb Smirnoff map = sc->bge_cdata.bge_tx_dmamap[idx]; 39200ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, 3921676ad2c9SGleb Smirnoff &nsegs, BUS_DMA_NOWAIT); 39227e27542aSGleb Smirnoff if (error == EFBIG) { 39234eee14cbSMarius Strobl m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW); 3924676ad2c9SGleb Smirnoff if (m == NULL) { 3925676ad2c9SGleb Smirnoff m_freem(*m_head); 3926676ad2c9SGleb Smirnoff *m_head = NULL; 39277e27542aSGleb Smirnoff return (ENOBUFS); 39287e27542aSGleb Smirnoff } 3929676ad2c9SGleb Smirnoff *m_head = m; 39300ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, 39310ac56796SPyun YongHyeon m, segs, &nsegs, BUS_DMA_NOWAIT); 3932676ad2c9SGleb Smirnoff if (error) { 3933676ad2c9SGleb Smirnoff m_freem(m); 3934676ad2c9SGleb Smirnoff *m_head = NULL; 39357e27542aSGleb Smirnoff return (error); 39367e27542aSGleb Smirnoff } 3937676ad2c9SGleb Smirnoff } else if (error != 0) 3938676ad2c9SGleb Smirnoff return (error); 39397e27542aSGleb Smirnoff 3940167fdb62SPyun YongHyeon /* Check if we have enough free send BDs. */ 3941167fdb62SPyun YongHyeon if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { 39420ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); 394395d67482SBill Paul return (ENOBUFS); 39447e27542aSGleb Smirnoff } 39457e27542aSGleb Smirnoff 39460ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3947e65bed95SPyun YongHyeon 3948ca3f1187SPyun YongHyeon #if __FreeBSD_version > 700022 3949ca3f1187SPyun YongHyeon if (m->m_flags & M_VLANTAG) { 3950ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3951ca3f1187SPyun YongHyeon vlan_tag = m->m_pkthdr.ether_vtag; 3952ca3f1187SPyun YongHyeon } 3953ca3f1187SPyun YongHyeon #else 3954ca3f1187SPyun YongHyeon { 3955ca3f1187SPyun YongHyeon struct m_tag *mtag; 3956ca3f1187SPyun YongHyeon 3957ca3f1187SPyun YongHyeon if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) { 3958ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3959ca3f1187SPyun YongHyeon vlan_tag = VLAN_TAG_VALUE(mtag); 3960ca3f1187SPyun YongHyeon } 3961ca3f1187SPyun YongHyeon } 3962ca3f1187SPyun YongHyeon #endif 39637e27542aSGleb Smirnoff for (i = 0; ; i++) { 39647e27542aSGleb Smirnoff d = &sc->bge_ldata.bge_tx_ring[idx]; 39657e27542aSGleb Smirnoff d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 39667e27542aSGleb Smirnoff d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 39677e27542aSGleb Smirnoff d->bge_len = segs[i].ds_len; 39687e27542aSGleb Smirnoff d->bge_flags = csum_flags; 3969ca3f1187SPyun YongHyeon d->bge_vlan_tag = vlan_tag; 3970ca3f1187SPyun YongHyeon d->bge_mss = mss; 39717e27542aSGleb Smirnoff if (i == nsegs - 1) 39727e27542aSGleb Smirnoff break; 39737e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 39747e27542aSGleb Smirnoff } 39757e27542aSGleb Smirnoff 39767e27542aSGleb Smirnoff /* Mark the last segment as end of packet... 
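 * (Only the descriptor for the final segment carries BGE_TXBDFLAG_END; the
 * chip treats every descriptor since the previous END flag as one frame.)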
*/ 39777e27542aSGleb Smirnoff d->bge_flags |= BGE_TXBDFLAG_END; 3978676ad2c9SGleb Smirnoff 3979f41ac2beSBill Paul /* 3980f41ac2beSBill Paul * Insure that the map for this transmission 3981f41ac2beSBill Paul * is placed at the array index of the last descriptor 3982f41ac2beSBill Paul * in this chain. 3983f41ac2beSBill Paul */ 39847e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 39857e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[idx] = map; 3986676ad2c9SGleb Smirnoff sc->bge_cdata.bge_tx_chain[idx] = m; 39877e27542aSGleb Smirnoff sc->bge_txcnt += nsegs; 398895d67482SBill Paul 39897e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 39907e27542aSGleb Smirnoff *txidx = idx; 399195d67482SBill Paul 399295d67482SBill Paul return (0); 399395d67482SBill Paul } 399495d67482SBill Paul 399595d67482SBill Paul /* 399695d67482SBill Paul * Main transmit routine. To avoid having to do mbuf copies, we put pointers 399795d67482SBill Paul * to the mbuf data regions directly in the transmit descriptors. 399895d67482SBill Paul */ 399995d67482SBill Paul static void 40003f74909aSGleb Smirnoff bge_start_locked(struct ifnet *ifp) 400195d67482SBill Paul { 400295d67482SBill Paul struct bge_softc *sc; 4003167fdb62SPyun YongHyeon struct mbuf *m_head; 400414bbd30fSGleb Smirnoff uint32_t prodidx; 4005167fdb62SPyun YongHyeon int count; 400695d67482SBill Paul 400795d67482SBill Paul sc = ifp->if_softc; 4008167fdb62SPyun YongHyeon BGE_LOCK_ASSERT(sc); 400995d67482SBill Paul 4010167fdb62SPyun YongHyeon if (!sc->bge_link || 4011167fdb62SPyun YongHyeon (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 4012167fdb62SPyun YongHyeon IFF_DRV_RUNNING) 401395d67482SBill Paul return; 401495d67482SBill Paul 401514bbd30fSGleb Smirnoff prodidx = sc->bge_tx_prodidx; 401695d67482SBill Paul 4017167fdb62SPyun YongHyeon for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 4018167fdb62SPyun YongHyeon if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { 4019167fdb62SPyun YongHyeon ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4020167fdb62SPyun YongHyeon break; 4021167fdb62SPyun YongHyeon } 40224d665c4dSDag-Erling Smørgrav IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 402395d67482SBill Paul if (m_head == NULL) 402495d67482SBill Paul break; 402595d67482SBill Paul 402695d67482SBill Paul /* 402795d67482SBill Paul * XXX 4028b874fdd4SYaroslav Tykhiy * The code inside the if() block is never reached since we 4029b874fdd4SYaroslav Tykhiy * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 4030b874fdd4SYaroslav Tykhiy * requests to checksum TCP/UDP in a fragmented packet. 4031b874fdd4SYaroslav Tykhiy * 4032b874fdd4SYaroslav Tykhiy * XXX 403395d67482SBill Paul * safety overkill. If this is a fragmented packet chain 403495d67482SBill Paul * with delayed TCP/UDP checksums, then only encapsulate 403595d67482SBill Paul * it if we have enough descriptors to handle the entire 403695d67482SBill Paul * chain at once. 
403795d67482SBill Paul * (paranoia -- may not actually be needed) 403895d67482SBill Paul */ 403995d67482SBill Paul if (m_head->m_flags & M_FIRSTFRAG && 404095d67482SBill Paul m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 404195d67482SBill Paul if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 404295d67482SBill Paul m_head->m_pkthdr.csum_data + 16) { 40434d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 404413f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 404595d67482SBill Paul break; 404695d67482SBill Paul } 404795d67482SBill Paul } 404895d67482SBill Paul 404995d67482SBill Paul /* 405095d67482SBill Paul * Pack the data into the transmit ring. If we 405195d67482SBill Paul * don't have room, set the OACTIVE flag and wait 405295d67482SBill Paul * for the NIC to drain the ring. 405395d67482SBill Paul */ 4054676ad2c9SGleb Smirnoff if (bge_encap(sc, &m_head, &prodidx)) { 4055676ad2c9SGleb Smirnoff if (m_head == NULL) 4056676ad2c9SGleb Smirnoff break; 40574d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 405813f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 405995d67482SBill Paul break; 406095d67482SBill Paul } 4061303a718cSDag-Erling Smørgrav ++count; 406295d67482SBill Paul 406395d67482SBill Paul /* 406495d67482SBill Paul * If there's a BPF listener, bounce a copy of this frame 406595d67482SBill Paul * to him. 406695d67482SBill Paul */ 40674e35d186SJung-uk Kim #ifdef ETHER_BPF_MTAP 406845ee6ab3SJung-uk Kim ETHER_BPF_MTAP(ifp, m_head); 40694e35d186SJung-uk Kim #else 40704e35d186SJung-uk Kim BPF_MTAP(ifp, m_head); 40714e35d186SJung-uk Kim #endif 407295d67482SBill Paul } 407395d67482SBill Paul 4074167fdb62SPyun YongHyeon if (count > 0) { 4075aa94f333SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 40765c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 40773f74909aSGleb Smirnoff /* Transmit. */ 407838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 40793927098fSPaul Saab /* 5700 b2 errata */ 4080e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 408138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 408295d67482SBill Paul 408314bbd30fSGleb Smirnoff sc->bge_tx_prodidx = prodidx; 408414bbd30fSGleb Smirnoff 408595d67482SBill Paul /* 408695d67482SBill Paul * Set a timeout in case the chip goes out to lunch. 408795d67482SBill Paul */ 4088b74e67fbSGleb Smirnoff sc->bge_timer = 5; 408995d67482SBill Paul } 4090167fdb62SPyun YongHyeon } 409195d67482SBill Paul 40920f9bd73bSSam Leffler /* 40930f9bd73bSSam Leffler * Main transmit routine. To avoid having to do mbuf copies, we put pointers 40940f9bd73bSSam Leffler * to the mbuf data regions directly in the transmit descriptors. 
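 * This is the if_start entry point: it simply takes the driver lock and
 * defers to bge_start_locked() above.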
40950f9bd73bSSam Leffler */ 409695d67482SBill Paul static void 40973f74909aSGleb Smirnoff bge_start(struct ifnet *ifp) 409895d67482SBill Paul { 40990f9bd73bSSam Leffler struct bge_softc *sc; 41000f9bd73bSSam Leffler 41010f9bd73bSSam Leffler sc = ifp->if_softc; 41020f9bd73bSSam Leffler BGE_LOCK(sc); 41030f9bd73bSSam Leffler bge_start_locked(ifp); 41040f9bd73bSSam Leffler BGE_UNLOCK(sc); 41050f9bd73bSSam Leffler } 41060f9bd73bSSam Leffler 41070f9bd73bSSam Leffler static void 41083f74909aSGleb Smirnoff bge_init_locked(struct bge_softc *sc) 41090f9bd73bSSam Leffler { 411095d67482SBill Paul struct ifnet *ifp; 41113f74909aSGleb Smirnoff uint16_t *m; 411295d67482SBill Paul 41130f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 411495d67482SBill Paul 4115fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 411695d67482SBill Paul 411713f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) 411895d67482SBill Paul return; 411995d67482SBill Paul 412095d67482SBill Paul /* Cancel pending I/O and flush buffers. */ 412195d67482SBill Paul bge_stop(sc); 41228cb1383cSDoug Ambrisko 41238cb1383cSDoug Ambrisko bge_stop_fw(sc); 41248cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_START); 412595d67482SBill Paul bge_reset(sc); 41268cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_START); 41278cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_START); 41288cb1383cSDoug Ambrisko 412995d67482SBill Paul bge_chipinit(sc); 413095d67482SBill Paul 413195d67482SBill Paul /* 413295d67482SBill Paul * Init the various state machines, ring 413395d67482SBill Paul * control blocks and firmware. 413495d67482SBill Paul */ 413595d67482SBill Paul if (bge_blockinit(sc)) { 4136fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "initialization failure\n"); 413795d67482SBill Paul return; 413895d67482SBill Paul } 413995d67482SBill Paul 4140fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 414195d67482SBill Paul 414295d67482SBill Paul /* Specify MTU. */ 414395d67482SBill Paul CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 4144cb2eacc7SYaroslav Tykhiy ETHER_HDR_LEN + ETHER_CRC_LEN + 4145cb2eacc7SYaroslav Tykhiy (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); 414695d67482SBill Paul 414795d67482SBill Paul /* Load our MAC address. */ 41483f74909aSGleb Smirnoff m = (uint16_t *)IF_LLADDR(sc->bge_ifp); 414995d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 415095d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 415195d67482SBill Paul 41523e9b1bcaSJung-uk Kim /* Program promiscuous mode. */ 41533e9b1bcaSJung-uk Kim bge_setpromisc(sc); 415495d67482SBill Paul 415595d67482SBill Paul /* Program multicast filter. */ 415695d67482SBill Paul bge_setmulti(sc); 415795d67482SBill Paul 4158cb2eacc7SYaroslav Tykhiy /* Program VLAN tag stripping. */ 4159cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4160cb2eacc7SYaroslav Tykhiy 416135f945cdSPyun YongHyeon /* Override UDP checksum offloading. */ 416235f945cdSPyun YongHyeon if (sc->bge_forced_udpcsum == 0) 416335f945cdSPyun YongHyeon sc->bge_csum_features &= ~CSUM_UDP; 416435f945cdSPyun YongHyeon else 416535f945cdSPyun YongHyeon sc->bge_csum_features |= CSUM_UDP; 416635f945cdSPyun YongHyeon if (ifp->if_capabilities & IFCAP_TXCSUM && 416735f945cdSPyun YongHyeon ifp->if_capenable & IFCAP_TXCSUM) { 416835f945cdSPyun YongHyeon ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP); 416935f945cdSPyun YongHyeon ifp->if_hwassist |= sc->bge_csum_features; 417035f945cdSPyun YongHyeon } 417135f945cdSPyun YongHyeon 417295d67482SBill Paul /* Init RX ring. 
*/ 41733ee5d7daSPyun YongHyeon if (bge_init_rx_ring_std(sc) != 0) { 41743ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); 41753ee5d7daSPyun YongHyeon bge_stop(sc); 41763ee5d7daSPyun YongHyeon return; 41773ee5d7daSPyun YongHyeon } 417895d67482SBill Paul 41790434d1b8SBill Paul /* 41800434d1b8SBill Paul * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 41810434d1b8SBill Paul * memory to insure that the chip has in fact read the first 41820434d1b8SBill Paul * entry of the ring. 41830434d1b8SBill Paul */ 41840434d1b8SBill Paul if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 41853f74909aSGleb Smirnoff uint32_t v, i; 41860434d1b8SBill Paul for (i = 0; i < 10; i++) { 41870434d1b8SBill Paul DELAY(20); 41880434d1b8SBill Paul v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 41890434d1b8SBill Paul if (v == (MCLBYTES - ETHER_ALIGN)) 41900434d1b8SBill Paul break; 41910434d1b8SBill Paul } 41920434d1b8SBill Paul if (i == 10) 4193fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, 4194fe806fdaSPyun YongHyeon "5705 A0 chip failed to load RX ring\n"); 41950434d1b8SBill Paul } 41960434d1b8SBill Paul 419795d67482SBill Paul /* Init jumbo RX ring. */ 4198c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 4199c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) { 42003ee5d7daSPyun YongHyeon if (bge_init_rx_ring_jumbo(sc) != 0) { 4201333704a3SPyun YongHyeon device_printf(sc->bge_dev, 4202b65256d7SPyun YongHyeon "no memory for jumbo Rx buffers.\n"); 42033ee5d7daSPyun YongHyeon bge_stop(sc); 42043ee5d7daSPyun YongHyeon return; 42053ee5d7daSPyun YongHyeon } 42063ee5d7daSPyun YongHyeon } 420795d67482SBill Paul 42083f74909aSGleb Smirnoff /* Init our RX return ring index. */ 420995d67482SBill Paul sc->bge_rx_saved_considx = 0; 421095d67482SBill Paul 42117e6e2507SJung-uk Kim /* Init our RX/TX stat counters. */ 42127e6e2507SJung-uk Kim sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; 42137e6e2507SJung-uk Kim 421495d67482SBill Paul /* Init TX ring. */ 421595d67482SBill Paul bge_init_tx_ring(sc); 421695d67482SBill Paul 42173f74909aSGleb Smirnoff /* Turn on transmitter. */ 421895d67482SBill Paul BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 421995d67482SBill Paul 42203f74909aSGleb Smirnoff /* Turn on receiver. */ 422195d67482SBill Paul BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 422295d67482SBill Paul 4223*dedcdf57SPyun YongHyeon /* 4224*dedcdf57SPyun YongHyeon * Set the number of good frames to receive after RX MBUF 4225*dedcdf57SPyun YongHyeon * Low Watermark has been reached. After the RX MAC receives 4226*dedcdf57SPyun YongHyeon * this number of frames, it will drop subsequent incoming 4227*dedcdf57SPyun YongHyeon * frames until the MBUF High Watermark is reached. 4228*dedcdf57SPyun YongHyeon */ 4229*dedcdf57SPyun YongHyeon CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); 4230*dedcdf57SPyun YongHyeon 423195d67482SBill Paul /* Tell firmware we're alive. */ 423295d67482SBill Paul BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 423395d67482SBill Paul 423475719184SGleb Smirnoff #ifdef DEVICE_POLLING 423575719184SGleb Smirnoff /* Disable interrupts if we are polling. 
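 * Writing a non-zero value to the IRQ0 mailbox while PCI interrupts are
 * masked keeps the chip from interrupting; bge_poll() then drives RX/TX
 * completion instead.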
*/ 423675719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 423775719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 423875719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 423938cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 424075719184SGleb Smirnoff } else 424175719184SGleb Smirnoff #endif 424275719184SGleb Smirnoff 424395d67482SBill Paul /* Enable host interrupts. */ 424475719184SGleb Smirnoff { 424595d67482SBill Paul BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 424695d67482SBill Paul BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 424738cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 424875719184SGleb Smirnoff } 424995d67482SBill Paul 425067d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(ifp); 425195d67482SBill Paul 425213f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 425313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 425495d67482SBill Paul 42550f9bd73bSSam Leffler callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 42560f9bd73bSSam Leffler } 42570f9bd73bSSam Leffler 42580f9bd73bSSam Leffler static void 42593f74909aSGleb Smirnoff bge_init(void *xsc) 42600f9bd73bSSam Leffler { 42610f9bd73bSSam Leffler struct bge_softc *sc = xsc; 42620f9bd73bSSam Leffler 42630f9bd73bSSam Leffler BGE_LOCK(sc); 42640f9bd73bSSam Leffler bge_init_locked(sc); 42650f9bd73bSSam Leffler BGE_UNLOCK(sc); 426695d67482SBill Paul } 426795d67482SBill Paul 426895d67482SBill Paul /* 426995d67482SBill Paul * Set media options. 427095d67482SBill Paul */ 427195d67482SBill Paul static int 42723f74909aSGleb Smirnoff bge_ifmedia_upd(struct ifnet *ifp) 427395d67482SBill Paul { 427467d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 427567d5e043SOleg Bulyzhin int res; 427667d5e043SOleg Bulyzhin 427767d5e043SOleg Bulyzhin BGE_LOCK(sc); 427867d5e043SOleg Bulyzhin res = bge_ifmedia_upd_locked(ifp); 427967d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 428067d5e043SOleg Bulyzhin 428167d5e043SOleg Bulyzhin return (res); 428267d5e043SOleg Bulyzhin } 428367d5e043SOleg Bulyzhin 428467d5e043SOleg Bulyzhin static int 428567d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(struct ifnet *ifp) 428667d5e043SOleg Bulyzhin { 428767d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 428895d67482SBill Paul struct mii_data *mii; 42894f09c4c7SMarius Strobl struct mii_softc *miisc; 429095d67482SBill Paul struct ifmedia *ifm; 429195d67482SBill Paul 429267d5e043SOleg Bulyzhin BGE_LOCK_ASSERT(sc); 429367d5e043SOleg Bulyzhin 429495d67482SBill Paul ifm = &sc->bge_ifmedia; 429595d67482SBill Paul 429695d67482SBill Paul /* If this is a 1000baseX NIC, enable the TBI port. */ 4297652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 429895d67482SBill Paul if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 429995d67482SBill Paul return (EINVAL); 430095d67482SBill Paul switch(IFM_SUBTYPE(ifm->ifm_media)) { 430195d67482SBill Paul case IFM_AUTO: 4302ff50922bSDoug White /* 4303ff50922bSDoug White * The BCM5704 ASIC appears to have a special 4304ff50922bSDoug White * mechanism for programming the autoneg 4305ff50922bSDoug White * advertisement registers in TBI mode. 
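 * (The code below waits for BGE_SGDIGSTS_DONE and then rewrites
 * BGE_SGDIG_CFG with the autoneg and pause bits, pulsing BGE_SGDIGCFG_SEND
 * to push the new advertisement onto the SERDES link.)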
4306ff50922bSDoug White */ 43070f89fde2SJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 4308ff50922bSDoug White uint32_t sgdig; 43090f89fde2SJung-uk Kim sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 43100f89fde2SJung-uk Kim if (sgdig & BGE_SGDIGSTS_DONE) { 4311ff50922bSDoug White CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4312ff50922bSDoug White sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4313ff50922bSDoug White sgdig |= BGE_SGDIGCFG_AUTO | 4314ff50922bSDoug White BGE_SGDIGCFG_PAUSE_CAP | 4315ff50922bSDoug White BGE_SGDIGCFG_ASYM_PAUSE; 4316ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4317ff50922bSDoug White sgdig | BGE_SGDIGCFG_SEND); 4318ff50922bSDoug White DELAY(5); 4319ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4320ff50922bSDoug White } 43210f89fde2SJung-uk Kim } 432295d67482SBill Paul break; 432395d67482SBill Paul case IFM_1000_SX: 432495d67482SBill Paul if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 432595d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, 432695d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 432795d67482SBill Paul } else { 432895d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, 432995d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 433095d67482SBill Paul } 433195d67482SBill Paul break; 433295d67482SBill Paul default: 433395d67482SBill Paul return (EINVAL); 433495d67482SBill Paul } 433595d67482SBill Paul return (0); 433695d67482SBill Paul } 433795d67482SBill Paul 43381493e883SOleg Bulyzhin sc->bge_link_evt++; 433995d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 43404f09c4c7SMarius Strobl if (mii->mii_instance) 43414f09c4c7SMarius Strobl LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 434295d67482SBill Paul mii_phy_reset(miisc); 434395d67482SBill Paul mii_mediachg(mii); 434495d67482SBill Paul 4345902827f6SBjoern A. Zeeb /* 4346902827f6SBjoern A. Zeeb * Force an interrupt so that we will call bge_link_upd 4347902827f6SBjoern A. Zeeb * if needed and clear any pending link state attention. 4348902827f6SBjoern A. Zeeb * Without this we are not getting any further interrupts 4349902827f6SBjoern A. Zeeb * for link state changes and thus will not UP the link and 4350902827f6SBjoern A. Zeeb * not be able to send in bge_start_locked. The only 4351902827f6SBjoern A. Zeeb * way to get things working was to receive a packet and 4352902827f6SBjoern A. Zeeb * get an RX intr. 4353902827f6SBjoern A. Zeeb * bge_tick should help for fiber cards and we might not 4354902827f6SBjoern A. Zeeb * need to do this here if BGE_FLAG_TBI is set but as 4355902827f6SBjoern A. Zeeb * we poll for fiber anyway it should not harm. 4356902827f6SBjoern A. Zeeb */ 43574f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 43584f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 4359902827f6SBjoern A. Zeeb BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 43604f0794ffSBjoern A. Zeeb else 436163ccfe30SBjoern A. Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4362902827f6SBjoern A. Zeeb 436395d67482SBill Paul return (0); 436495d67482SBill Paul } 436595d67482SBill Paul 436695d67482SBill Paul /* 436795d67482SBill Paul * Report current media status. 
436895d67482SBill Paul */ 436995d67482SBill Paul static void 43703f74909aSGleb Smirnoff bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 437195d67482SBill Paul { 437267d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 437395d67482SBill Paul struct mii_data *mii; 437495d67482SBill Paul 437567d5e043SOleg Bulyzhin BGE_LOCK(sc); 437695d67482SBill Paul 4377652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 437895d67482SBill Paul ifmr->ifm_status = IFM_AVALID; 437995d67482SBill Paul ifmr->ifm_active = IFM_ETHER; 438095d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_STS) & 438195d67482SBill Paul BGE_MACSTAT_TBI_PCS_SYNCHED) 438295d67482SBill Paul ifmr->ifm_status |= IFM_ACTIVE; 43834c0da0ffSGleb Smirnoff else { 43844c0da0ffSGleb Smirnoff ifmr->ifm_active |= IFM_NONE; 438567d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 43864c0da0ffSGleb Smirnoff return; 43874c0da0ffSGleb Smirnoff } 438895d67482SBill Paul ifmr->ifm_active |= IFM_1000_SX; 438995d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 439095d67482SBill Paul ifmr->ifm_active |= IFM_HDX; 439195d67482SBill Paul else 439295d67482SBill Paul ifmr->ifm_active |= IFM_FDX; 439367d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 439495d67482SBill Paul return; 439595d67482SBill Paul } 439695d67482SBill Paul 439795d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 439895d67482SBill Paul mii_pollstat(mii); 439995d67482SBill Paul ifmr->ifm_active = mii->mii_media_active; 440095d67482SBill Paul ifmr->ifm_status = mii->mii_media_status; 440167d5e043SOleg Bulyzhin 440267d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 440395d67482SBill Paul } 440495d67482SBill Paul 440595d67482SBill Paul static int 44063f74909aSGleb Smirnoff bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 440795d67482SBill Paul { 440895d67482SBill Paul struct bge_softc *sc = ifp->if_softc; 440995d67482SBill Paul struct ifreq *ifr = (struct ifreq *) data; 441095d67482SBill Paul struct mii_data *mii; 4411f9004b6dSJung-uk Kim int flags, mask, error = 0; 441295d67482SBill Paul 441395d67482SBill Paul switch (command) { 441495d67482SBill Paul case SIOCSIFMTU: 44154c0da0ffSGleb Smirnoff if (ifr->ifr_mtu < ETHERMIN || 44164c0da0ffSGleb Smirnoff ((BGE_IS_JUMBO_CAPABLE(sc)) && 44174c0da0ffSGleb Smirnoff ifr->ifr_mtu > BGE_JUMBO_MTU) || 44184c0da0ffSGleb Smirnoff ((!BGE_IS_JUMBO_CAPABLE(sc)) && 44194c0da0ffSGleb Smirnoff ifr->ifr_mtu > ETHERMTU)) 442095d67482SBill Paul error = EINVAL; 44214c0da0ffSGleb Smirnoff else if (ifp->if_mtu != ifr->ifr_mtu) { 442295d67482SBill Paul ifp->if_mtu = ifr->ifr_mtu; 442313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 442495d67482SBill Paul bge_init(sc); 442595d67482SBill Paul } 442695d67482SBill Paul break; 442795d67482SBill Paul case SIOCSIFFLAGS: 44280f9bd73bSSam Leffler BGE_LOCK(sc); 442995d67482SBill Paul if (ifp->if_flags & IFF_UP) { 443095d67482SBill Paul /* 443195d67482SBill Paul * If only the state of the PROMISC flag changed, 443295d67482SBill Paul * then just use the 'set promisc mode' command 443395d67482SBill Paul * instead of reinitializing the entire NIC. Doing 443495d67482SBill Paul * a full re-init means reloading the firmware and 443595d67482SBill Paul * waiting for it to start up, which may take a 4436d183af7fSRuslan Ermilov * second or two. Similarly for ALLMULTI. 
443795d67482SBill Paul */ 4438f9004b6dSJung-uk Kim if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4439f9004b6dSJung-uk Kim flags = ifp->if_flags ^ sc->bge_if_flags; 44403e9b1bcaSJung-uk Kim if (flags & IFF_PROMISC) 44413e9b1bcaSJung-uk Kim bge_setpromisc(sc); 4442f9004b6dSJung-uk Kim if (flags & IFF_ALLMULTI) 4443d183af7fSRuslan Ermilov bge_setmulti(sc); 444495d67482SBill Paul } else 44450f9bd73bSSam Leffler bge_init_locked(sc); 444695d67482SBill Paul } else { 444713f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 444895d67482SBill Paul bge_stop(sc); 444995d67482SBill Paul } 445095d67482SBill Paul } 445195d67482SBill Paul sc->bge_if_flags = ifp->if_flags; 44520f9bd73bSSam Leffler BGE_UNLOCK(sc); 445395d67482SBill Paul error = 0; 445495d67482SBill Paul break; 445595d67482SBill Paul case SIOCADDMULTI: 445695d67482SBill Paul case SIOCDELMULTI: 445713f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 44580f9bd73bSSam Leffler BGE_LOCK(sc); 445995d67482SBill Paul bge_setmulti(sc); 44600f9bd73bSSam Leffler BGE_UNLOCK(sc); 446195d67482SBill Paul error = 0; 446295d67482SBill Paul } 446395d67482SBill Paul break; 446495d67482SBill Paul case SIOCSIFMEDIA: 446595d67482SBill Paul case SIOCGIFMEDIA: 4466652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 446795d67482SBill Paul error = ifmedia_ioctl(ifp, ifr, 446895d67482SBill Paul &sc->bge_ifmedia, command); 446995d67482SBill Paul } else { 447095d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 447195d67482SBill Paul error = ifmedia_ioctl(ifp, ifr, 447295d67482SBill Paul &mii->mii_media, command); 447395d67482SBill Paul } 447495d67482SBill Paul break; 447595d67482SBill Paul case SIOCSIFCAP: 447695d67482SBill Paul mask = ifr->ifr_reqcap ^ ifp->if_capenable; 447775719184SGleb Smirnoff #ifdef DEVICE_POLLING 447875719184SGleb Smirnoff if (mask & IFCAP_POLLING) { 447975719184SGleb Smirnoff if (ifr->ifr_reqcap & IFCAP_POLLING) { 448075719184SGleb Smirnoff error = ether_poll_register(bge_poll, ifp); 448175719184SGleb Smirnoff if (error) 448275719184SGleb Smirnoff return (error); 448375719184SGleb Smirnoff BGE_LOCK(sc); 448475719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 448575719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 448638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 448775719184SGleb Smirnoff ifp->if_capenable |= IFCAP_POLLING; 448875719184SGleb Smirnoff BGE_UNLOCK(sc); 448975719184SGleb Smirnoff } else { 449075719184SGleb Smirnoff error = ether_poll_deregister(ifp); 449175719184SGleb Smirnoff /* Enable interrupt even in error case */ 449275719184SGleb Smirnoff BGE_LOCK(sc); 449375719184SGleb Smirnoff BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, 449475719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 449538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 449675719184SGleb Smirnoff ifp->if_capenable &= ~IFCAP_POLLING; 449775719184SGleb Smirnoff BGE_UNLOCK(sc); 449875719184SGleb Smirnoff } 449975719184SGleb Smirnoff } 450075719184SGleb Smirnoff #endif 4501d375e524SGleb Smirnoff if (mask & IFCAP_HWCSUM) { 4502d375e524SGleb Smirnoff ifp->if_capenable ^= IFCAP_HWCSUM; 4503d375e524SGleb Smirnoff if (IFCAP_HWCSUM & ifp->if_capenable && 4504d375e524SGleb Smirnoff IFCAP_HWCSUM & ifp->if_capabilities) 450535f945cdSPyun YongHyeon ifp->if_hwassist |= sc->bge_csum_features; 450695d67482SBill Paul else 450735f945cdSPyun YongHyeon ifp->if_hwassist &= ~sc->bge_csum_features; 450895d67482SBill Paul } 4509cb2eacc7SYaroslav Tykhiy 4510ca3f1187SPyun YongHyeon if ((mask & IFCAP_TSO4) != 0 && 4511ca3f1187SPyun 
YongHyeon (ifp->if_capabilities & IFCAP_TSO4) != 0) { 4512ca3f1187SPyun YongHyeon ifp->if_capenable ^= IFCAP_TSO4; 4513ca3f1187SPyun YongHyeon if ((ifp->if_capenable & IFCAP_TSO4) != 0) 4514ca3f1187SPyun YongHyeon ifp->if_hwassist |= CSUM_TSO; 4515ca3f1187SPyun YongHyeon else 4516ca3f1187SPyun YongHyeon ifp->if_hwassist &= ~CSUM_TSO; 4517ca3f1187SPyun YongHyeon } 4518ca3f1187SPyun YongHyeon 4519cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_MTU) { 4520cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_MTU; 4521cb2eacc7SYaroslav Tykhiy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4522cb2eacc7SYaroslav Tykhiy bge_init(sc); 4523cb2eacc7SYaroslav Tykhiy } 4524cb2eacc7SYaroslav Tykhiy 452504bde852SPyun YongHyeon if ((mask & IFCAP_VLAN_HWTSO) != 0 && 452604bde852SPyun YongHyeon (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 452704bde852SPyun YongHyeon ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 452804bde852SPyun YongHyeon if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 452904bde852SPyun YongHyeon (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 4530cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 453104bde852SPyun YongHyeon if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 453204bde852SPyun YongHyeon ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; 4533cb2eacc7SYaroslav Tykhiy BGE_LOCK(sc); 4534cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4535cb2eacc7SYaroslav Tykhiy BGE_UNLOCK(sc); 453604bde852SPyun YongHyeon } 4537cb2eacc7SYaroslav Tykhiy #ifdef VLAN_CAPABILITIES 4538cb2eacc7SYaroslav Tykhiy VLAN_CAPABILITIES(ifp); 4539cb2eacc7SYaroslav Tykhiy #endif 454095d67482SBill Paul break; 454195d67482SBill Paul default: 4542673d9191SSam Leffler error = ether_ioctl(ifp, command, data); 454395d67482SBill Paul break; 454495d67482SBill Paul } 454595d67482SBill Paul 454695d67482SBill Paul return (error); 454795d67482SBill Paul } 454895d67482SBill Paul 454995d67482SBill Paul static void 4550b74e67fbSGleb Smirnoff bge_watchdog(struct bge_softc *sc) 455195d67482SBill Paul { 4552b74e67fbSGleb Smirnoff struct ifnet *ifp; 455395d67482SBill Paul 4554b74e67fbSGleb Smirnoff BGE_LOCK_ASSERT(sc); 4555b74e67fbSGleb Smirnoff 4556b74e67fbSGleb Smirnoff if (sc->bge_timer == 0 || --sc->bge_timer) 4557b74e67fbSGleb Smirnoff return; 4558b74e67fbSGleb Smirnoff 4559b74e67fbSGleb Smirnoff ifp = sc->bge_ifp; 456095d67482SBill Paul 4561fe806fdaSPyun YongHyeon if_printf(ifp, "watchdog timeout -- resetting\n"); 456295d67482SBill Paul 456313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4564426742bfSGleb Smirnoff bge_init_locked(sc); 456595d67482SBill Paul 456695d67482SBill Paul ifp->if_oerrors++; 456795d67482SBill Paul } 456895d67482SBill Paul 456995d67482SBill Paul /* 457095d67482SBill Paul * Stop the adapter and free any mbufs allocated to the 457195d67482SBill Paul * RX and TX lists. 457295d67482SBill Paul */ 457395d67482SBill Paul static void 45743f74909aSGleb Smirnoff bge_stop(struct bge_softc *sc) 457595d67482SBill Paul { 457695d67482SBill Paul struct ifnet *ifp; 457795d67482SBill Paul 45780f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 45790f9bd73bSSam Leffler 4580fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 458195d67482SBill Paul 45820f9bd73bSSam Leffler callout_stop(&sc->bge_stat_ch); 458395d67482SBill Paul 458444b63691SBjoern A. Zeeb /* Disable host interrupts. */ 458544b63691SBjoern A. Zeeb BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 458644b63691SBjoern A. Zeeb bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 458744b63691SBjoern A. Zeeb 458844b63691SBjoern A. Zeeb /* 458944b63691SBjoern A. 
Zeeb * Tell firmware we're shutting down. 459044b63691SBjoern A. Zeeb */ 459144b63691SBjoern A. Zeeb bge_stop_fw(sc); 459244b63691SBjoern A. Zeeb bge_sig_pre_reset(sc, BGE_RESET_STOP); 459344b63691SBjoern A. Zeeb 459495d67482SBill Paul /* 45953f74909aSGleb Smirnoff * Disable all of the receiver blocks. 459695d67482SBill Paul */ 459795d67482SBill Paul BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 459895d67482SBill Paul BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 459995d67482SBill Paul BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 46007ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 460195d67482SBill Paul BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 460295d67482SBill Paul BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 460395d67482SBill Paul BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 460495d67482SBill Paul BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 460595d67482SBill Paul 460695d67482SBill Paul /* 46073f74909aSGleb Smirnoff * Disable all of the transmit blocks. 460895d67482SBill Paul */ 460995d67482SBill Paul BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 461095d67482SBill Paul BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 461195d67482SBill Paul BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 461295d67482SBill Paul BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 461395d67482SBill Paul BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 46147ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 461595d67482SBill Paul BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 461695d67482SBill Paul BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 461795d67482SBill Paul 461895d67482SBill Paul /* 461995d67482SBill Paul * Shut down all of the memory managers and related 462095d67482SBill Paul * state machines. 462195d67482SBill Paul */ 462295d67482SBill Paul BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 462395d67482SBill Paul BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 46247ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 462595d67482SBill Paul BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 46260c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 462795d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 46287ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 462995d67482SBill Paul BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 463095d67482SBill Paul BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 46310434d1b8SBill Paul } 463295d67482SBill Paul 46338cb1383cSDoug Ambrisko bge_reset(sc); 46348cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 46358cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 46368cb1383cSDoug Ambrisko 46378cb1383cSDoug Ambrisko /* 46388cb1383cSDoug Ambrisko * Keep the ASF firmware running if up. 46398cb1383cSDoug Ambrisko */ 46408cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 46418cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 46428cb1383cSDoug Ambrisko else 464395d67482SBill Paul BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 464495d67482SBill Paul 464595d67482SBill Paul /* Free the RX lists. */ 464695d67482SBill Paul bge_free_rx_ring_std(sc); 464795d67482SBill Paul 464895d67482SBill Paul /* Free jumbo RX list. */ 46494c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) 465095d67482SBill Paul bge_free_rx_ring_jumbo(sc); 465195d67482SBill Paul 465295d67482SBill Paul /* Free TX buffers. 
*/ 465395d67482SBill Paul bge_free_tx_ring(sc); 465495d67482SBill Paul 465595d67482SBill Paul sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 465695d67482SBill Paul 46575dda8085SOleg Bulyzhin /* Clear MAC's link state (PHY may still have link UP). */ 46581493e883SOleg Bulyzhin if (bootverbose && sc->bge_link) 46591493e883SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 46601493e883SOleg Bulyzhin sc->bge_link = 0; 466195d67482SBill Paul 46621493e883SOleg Bulyzhin ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 466395d67482SBill Paul } 466495d67482SBill Paul 466595d67482SBill Paul /* 466695d67482SBill Paul * Stop all chip I/O so that the kernel's probe routines don't 466795d67482SBill Paul * get confused by errant DMAs when rebooting. 466895d67482SBill Paul */ 4669b6c974e8SWarner Losh static int 46703f74909aSGleb Smirnoff bge_shutdown(device_t dev) 467195d67482SBill Paul { 467295d67482SBill Paul struct bge_softc *sc; 467395d67482SBill Paul 467495d67482SBill Paul sc = device_get_softc(dev); 46750f9bd73bSSam Leffler BGE_LOCK(sc); 467695d67482SBill Paul bge_stop(sc); 467795d67482SBill Paul bge_reset(sc); 46780f9bd73bSSam Leffler BGE_UNLOCK(sc); 4679b6c974e8SWarner Losh 4680b6c974e8SWarner Losh return (0); 468195d67482SBill Paul } 468214afefa3SPawel Jakub Dawidek 468314afefa3SPawel Jakub Dawidek static int 468414afefa3SPawel Jakub Dawidek bge_suspend(device_t dev) 468514afefa3SPawel Jakub Dawidek { 468614afefa3SPawel Jakub Dawidek struct bge_softc *sc; 468714afefa3SPawel Jakub Dawidek 468814afefa3SPawel Jakub Dawidek sc = device_get_softc(dev); 468914afefa3SPawel Jakub Dawidek BGE_LOCK(sc); 469014afefa3SPawel Jakub Dawidek bge_stop(sc); 469114afefa3SPawel Jakub Dawidek BGE_UNLOCK(sc); 469214afefa3SPawel Jakub Dawidek 469314afefa3SPawel Jakub Dawidek return (0); 469414afefa3SPawel Jakub Dawidek } 469514afefa3SPawel Jakub Dawidek 469614afefa3SPawel Jakub Dawidek static int 469714afefa3SPawel Jakub Dawidek bge_resume(device_t dev) 469814afefa3SPawel Jakub Dawidek { 469914afefa3SPawel Jakub Dawidek struct bge_softc *sc; 470014afefa3SPawel Jakub Dawidek struct ifnet *ifp; 470114afefa3SPawel Jakub Dawidek 470214afefa3SPawel Jakub Dawidek sc = device_get_softc(dev); 470314afefa3SPawel Jakub Dawidek BGE_LOCK(sc); 470414afefa3SPawel Jakub Dawidek ifp = sc->bge_ifp; 470514afefa3SPawel Jakub Dawidek if (ifp->if_flags & IFF_UP) { 470614afefa3SPawel Jakub Dawidek bge_init_locked(sc); 470714afefa3SPawel Jakub Dawidek if (ifp->if_drv_flags & IFF_DRV_RUNNING) 470814afefa3SPawel Jakub Dawidek bge_start_locked(ifp); 470914afefa3SPawel Jakub Dawidek } 471014afefa3SPawel Jakub Dawidek BGE_UNLOCK(sc); 471114afefa3SPawel Jakub Dawidek 471214afefa3SPawel Jakub Dawidek return (0); 471314afefa3SPawel Jakub Dawidek } 4714dab5cd05SOleg Bulyzhin 4715dab5cd05SOleg Bulyzhin static void 47163f74909aSGleb Smirnoff bge_link_upd(struct bge_softc *sc) 4717dab5cd05SOleg Bulyzhin { 47181f313773SOleg Bulyzhin struct mii_data *mii; 47191f313773SOleg Bulyzhin uint32_t link, status; 4720dab5cd05SOleg Bulyzhin 4721dab5cd05SOleg Bulyzhin BGE_LOCK_ASSERT(sc); 47221f313773SOleg Bulyzhin 47233f74909aSGleb Smirnoff /* Clear 'pending link event' flag. */ 47247b97099dSOleg Bulyzhin sc->bge_link_evt = 0; 47257b97099dSOleg Bulyzhin 4726dab5cd05SOleg Bulyzhin /* 4727dab5cd05SOleg Bulyzhin * Process link state changes. 4728dab5cd05SOleg Bulyzhin * Grrr. 
The link status word in the status block does 4729dab5cd05SOleg Bulyzhin * not work correctly on the BCM5700 rev AX and BX chips, 4730dab5cd05SOleg Bulyzhin * according to all available information. Hence, we have 4731dab5cd05SOleg Bulyzhin * to enable MII interrupts in order to properly obtain 4732dab5cd05SOleg Bulyzhin * async link changes. Unfortunately, this also means that 4733dab5cd05SOleg Bulyzhin * we have to read the MAC status register to detect link 4734dab5cd05SOleg Bulyzhin * changes, thereby adding an additional register access to 4735dab5cd05SOleg Bulyzhin * the interrupt handler. 47361f313773SOleg Bulyzhin * 47371f313773SOleg Bulyzhin * XXX: perhaps link state detection procedure used for 47384c0da0ffSGleb Smirnoff * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. 4739dab5cd05SOleg Bulyzhin */ 4740dab5cd05SOleg Bulyzhin 47411f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 47424c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 4743dab5cd05SOleg Bulyzhin status = CSR_READ_4(sc, BGE_MAC_STS); 4744dab5cd05SOleg Bulyzhin if (status & BGE_MACSTAT_MI_INTERRUPT) { 47451f313773SOleg Bulyzhin mii = device_get_softc(sc->bge_miibus); 47465dda8085SOleg Bulyzhin mii_pollstat(mii); 47471f313773SOleg Bulyzhin if (!sc->bge_link && 47481f313773SOleg Bulyzhin mii->mii_media_status & IFM_ACTIVE && 47491f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 47501f313773SOleg Bulyzhin sc->bge_link++; 47511f313773SOleg Bulyzhin if (bootverbose) 47521f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 47531f313773SOleg Bulyzhin } else if (sc->bge_link && 47541f313773SOleg Bulyzhin (!(mii->mii_media_status & IFM_ACTIVE) || 47551f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 47561f313773SOleg Bulyzhin sc->bge_link = 0; 47571f313773SOleg Bulyzhin if (bootverbose) 47581f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 47591f313773SOleg Bulyzhin } 47601f313773SOleg Bulyzhin 47613f74909aSGleb Smirnoff /* Clear the interrupt. 
*/ 4762dab5cd05SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4763dab5cd05SOleg Bulyzhin BGE_EVTENB_MI_INTERRUPT); 4764dab5cd05SOleg Bulyzhin bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4765dab5cd05SOleg Bulyzhin bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 4766dab5cd05SOleg Bulyzhin BRGPHY_INTRS); 4767dab5cd05SOleg Bulyzhin } 4768dab5cd05SOleg Bulyzhin return; 4769dab5cd05SOleg Bulyzhin } 4770dab5cd05SOleg Bulyzhin 4771652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 47721f313773SOleg Bulyzhin status = CSR_READ_4(sc, BGE_MAC_STS); 47737b97099dSOleg Bulyzhin if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 47747b97099dSOleg Bulyzhin if (!sc->bge_link) { 47751f313773SOleg Bulyzhin sc->bge_link++; 47761f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 47771f313773SOleg Bulyzhin BGE_CLRBIT(sc, BGE_MAC_MODE, 47781f313773SOleg Bulyzhin BGE_MACMODE_TBI_SEND_CFGS); 47790c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 47801f313773SOleg Bulyzhin if (bootverbose) 47811f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 47823f74909aSGleb Smirnoff if_link_state_change(sc->bge_ifp, 47833f74909aSGleb Smirnoff LINK_STATE_UP); 47847b97099dSOleg Bulyzhin } 47851f313773SOleg Bulyzhin } else if (sc->bge_link) { 4786dab5cd05SOleg Bulyzhin sc->bge_link = 0; 47871f313773SOleg Bulyzhin if (bootverbose) 47881f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 47897b97099dSOleg Bulyzhin if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); 47901f313773SOleg Bulyzhin } 47911493e883SOleg Bulyzhin } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) { 47921f313773SOleg Bulyzhin /* 47930c8aa4eaSJung-uk Kim * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit 47940c8aa4eaSJung-uk Kim * in status word always set. Workaround this bug by reading 47950c8aa4eaSJung-uk Kim * PHY link status directly. 47961f313773SOleg Bulyzhin */ 47971f313773SOleg Bulyzhin link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0; 47981f313773SOleg Bulyzhin 47991f313773SOleg Bulyzhin if (link != sc->bge_link || 48001f313773SOleg Bulyzhin sc->bge_asicrev == BGE_ASICREV_BCM5700) { 48011f313773SOleg Bulyzhin mii = device_get_softc(sc->bge_miibus); 48025dda8085SOleg Bulyzhin mii_pollstat(mii); 48031f313773SOleg Bulyzhin if (!sc->bge_link && 48041f313773SOleg Bulyzhin mii->mii_media_status & IFM_ACTIVE && 48051f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 48061f313773SOleg Bulyzhin sc->bge_link++; 48071f313773SOleg Bulyzhin if (bootverbose) 48081f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 48091f313773SOleg Bulyzhin } else if (sc->bge_link && 48101f313773SOleg Bulyzhin (!(mii->mii_media_status & IFM_ACTIVE) || 48111f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 48121f313773SOleg Bulyzhin sc->bge_link = 0; 48131f313773SOleg Bulyzhin if (bootverbose) 48141f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 48151f313773SOleg Bulyzhin } 48161f313773SOleg Bulyzhin } 48170c8aa4eaSJung-uk Kim } else { 48180c8aa4eaSJung-uk Kim /* 48190c8aa4eaSJung-uk Kim * Discard link events for MII/GMII controllers 48200c8aa4eaSJung-uk Kim * if MI auto-polling is disabled. 48210c8aa4eaSJung-uk Kim */ 4822dab5cd05SOleg Bulyzhin } 4823dab5cd05SOleg Bulyzhin 48243f74909aSGleb Smirnoff /* Clear the attention. 
*/ 4825dab5cd05SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4826dab5cd05SOleg Bulyzhin BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4827dab5cd05SOleg Bulyzhin BGE_MACSTAT_LINK_CHANGED); 4828dab5cd05SOleg Bulyzhin } 48296f8718a3SScott Long 4830763757b2SScott Long #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ 483106e83c7eSScott Long SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \ 4832763757b2SScott Long sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \ 4833763757b2SScott Long desc) 4834763757b2SScott Long 48356f8718a3SScott Long static void 48366f8718a3SScott Long bge_add_sysctls(struct bge_softc *sc) 48376f8718a3SScott Long { 48386f8718a3SScott Long struct sysctl_ctx_list *ctx; 4839763757b2SScott Long struct sysctl_oid_list *children, *schildren; 4840763757b2SScott Long struct sysctl_oid *tree; 48417e32f79aSPyun YongHyeon char tn[32]; 48427e32f79aSPyun YongHyeon int unit; 48436f8718a3SScott Long 48446f8718a3SScott Long ctx = device_get_sysctl_ctx(sc->bge_dev); 48456f8718a3SScott Long children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); 48466f8718a3SScott Long 48476f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 48486f8718a3SScott Long SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", 48496f8718a3SScott Long CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I", 48506f8718a3SScott Long "Debug Information"); 48516f8718a3SScott Long 48526f8718a3SScott Long SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", 48536f8718a3SScott Long CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I", 48546f8718a3SScott Long "Register Read"); 48556f8718a3SScott Long 48566f8718a3SScott Long SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", 48576f8718a3SScott Long CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I", 48586f8718a3SScott Long "Memory Read"); 48596f8718a3SScott Long 48606f8718a3SScott Long #endif 4861763757b2SScott Long 48627e32f79aSPyun YongHyeon unit = device_get_unit(sc->bge_dev); 4863beaa2ae1SPyun YongHyeon /* 4864beaa2ae1SPyun YongHyeon * A common design characteristic for many Broadcom client controllers 4865beaa2ae1SPyun YongHyeon * is that they only support a single outstanding DMA read operation 4866beaa2ae1SPyun YongHyeon * on the PCIe bus. This means that it will take twice as long to fetch 4867beaa2ae1SPyun YongHyeon * a TX frame that is split into header and payload buffers as it does 4868beaa2ae1SPyun YongHyeon * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For 4869beaa2ae1SPyun YongHyeon * these controllers, coalescing buffers to reduce the number of memory 4870beaa2ae1SPyun YongHyeon * reads is an effective way to get maximum performance (about 940Mbps). 4871beaa2ae1SPyun YongHyeon * Without collapsing TX buffers the maximum TCP bulk transfer 4872beaa2ae1SPyun YongHyeon * performance is about 850Mbps. However, forcing the coalescing of mbufs 4873beaa2ae1SPyun YongHyeon * consumes a lot of CPU cycles, so leave it off by default.
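 *
 * As a rough illustration (a hedged sketch of how the transmit path is
 * expected to consume this knob, not a verbatim copy of bge_start_locked()),
 * a non-zero value asks the driver to squeeze each outgoing chain down to
 * at most that many mbufs before encapsulation:
 *
 *	if (sc->bge_forced_collapse > 0 && m != NULL)
 *		m = (sc->bge_forced_collapse == 1) ?
 *		    m_defrag(m, M_DONTWAIT) :
 *		    m_collapse(m, M_DONTWAIT, sc->bge_forced_collapse);
 *
 * For example, "sysctl dev.bge.0.forced_collapse=2" would limit each
 * transmitted frame to two buffers, i.e. two DMA reads.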
4874beaa2ae1SPyun YongHyeon */ 48757e32f79aSPyun YongHyeon sc->bge_forced_collapse = 0; 48767e32f79aSPyun YongHyeon snprintf(tn, sizeof(tn), "dev.bge.%d.forced_collapse", unit); 48777e32f79aSPyun YongHyeon TUNABLE_INT_FETCH(tn, &sc->bge_forced_collapse); 4878beaa2ae1SPyun YongHyeon SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse", 4879beaa2ae1SPyun YongHyeon CTLFLAG_RW, &sc->bge_forced_collapse, 0, 4880beaa2ae1SPyun YongHyeon "Number of fragmented TX buffers of a frame allowed before " 4881beaa2ae1SPyun YongHyeon "forced collapsing"); 4882beaa2ae1SPyun YongHyeon 488335f945cdSPyun YongHyeon /* 488435f945cdSPyun YongHyeon * It seems all Broadcom controllers have a bug that can generate UDP 488535f945cdSPyun YongHyeon * datagrams with checksum value 0 when TX UDP checksum offloading is 488635f945cdSPyun YongHyeon * enabled. Generating UDP checksum value 0 is an RFC 768 violation. 488735f945cdSPyun YongHyeon * Even though the probability of generating such UDP datagrams is 488835f945cdSPyun YongHyeon * low, I don't want to see FreeBSD boxes injecting such datagrams 488935f945cdSPyun YongHyeon * into the network, so disable UDP checksum offloading by default. Users 489035f945cdSPyun YongHyeon * can still override this behavior by setting a sysctl variable, 489135f945cdSPyun YongHyeon * dev.bge.0.forced_udpcsum. 489235f945cdSPyun YongHyeon */ 489335f945cdSPyun YongHyeon sc->bge_forced_udpcsum = 0; 489435f945cdSPyun YongHyeon snprintf(tn, sizeof(tn), "dev.bge.%d.forced_udpcsum", unit); 489535f945cdSPyun YongHyeon TUNABLE_INT_FETCH(tn, &sc->bge_forced_udpcsum); 489635f945cdSPyun YongHyeon SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum", 489735f945cdSPyun YongHyeon CTLFLAG_RW, &sc->bge_forced_udpcsum, 0, 489835f945cdSPyun YongHyeon "Enable UDP checksum offloading even if controller can " 489935f945cdSPyun YongHyeon "generate UDP checksum value 0"); 490035f945cdSPyun YongHyeon 4901d949071dSJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 4902d949071dSJung-uk Kim return; 4903d949071dSJung-uk Kim 4904763757b2SScott Long tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4905763757b2SScott Long NULL, "BGE Statistics"); 4906763757b2SScott Long schildren = children = SYSCTL_CHILDREN(tree); 4907763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", 4908763757b2SScott Long children, COSFramesDroppedDueToFilters, 4909763757b2SScott Long "FramesDroppedDueToFilters"); 4910763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", 4911763757b2SScott Long children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); 4912763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", 4913763757b2SScott Long children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); 4914763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", 4915763757b2SScott Long children, nicNoMoreRxBDs, "NoMoreRxBDs"); 491606e83c7eSScott Long BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", 491706e83c7eSScott Long children, ifInDiscards, "InputDiscards"); 491806e83c7eSScott Long BGE_SYSCTL_STAT(sc, ctx, "Input Errors", 491906e83c7eSScott Long children, ifInErrors, "InputErrors"); 4920763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", 4921763757b2SScott Long children, nicRecvThresholdHit, "RecvThresholdHit"); 4922763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", 4923763757b2SScott Long children, nicDmaReadQueueFull, "DmaReadQueueFull"); 4924763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read 
High Priority Queue Full", 4925763757b2SScott Long children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); 4926763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", 4927763757b2SScott Long children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); 4928763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", 4929763757b2SScott Long children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); 4930763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", 4931763757b2SScott Long children, nicRingStatusUpdate, "RingStatusUpdate"); 4932763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", 4933763757b2SScott Long children, nicInterrupts, "Interrupts"); 4934763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", 4935763757b2SScott Long children, nicAvoidedInterrupts, "AvoidedInterrupts"); 4936763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", 4937763757b2SScott Long children, nicSendThresholdHit, "SendThresholdHit"); 4938763757b2SScott Long 4939763757b2SScott Long tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, 4940763757b2SScott Long NULL, "BGE RX Statistics"); 4941763757b2SScott Long children = SYSCTL_CHILDREN(tree); 4942763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", 4943763757b2SScott Long children, rxstats.ifHCInOctets, "Octets"); 4944763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Fragments", 4945763757b2SScott Long children, rxstats.etherStatsFragments, "Fragments"); 4946763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", 4947763757b2SScott Long children, rxstats.ifHCInUcastPkts, "UcastPkts"); 4948763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", 4949763757b2SScott Long children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); 4950763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", 4951763757b2SScott Long children, rxstats.dot3StatsFCSErrors, "FCSErrors"); 4952763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", 4953763757b2SScott Long children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); 4954763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", 4955763757b2SScott Long children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); 4956763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", 4957763757b2SScott Long children, rxstats.xoffPauseFramesReceived, 4958763757b2SScott Long "xoffPauseFramesReceived"); 4959763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", 4960763757b2SScott Long children, rxstats.macControlFramesReceived, 4961763757b2SScott Long "ControlFramesReceived"); 4962763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", 4963763757b2SScott Long children, rxstats.xoffStateEntered, "xoffStateEntered"); 4964763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", 4965763757b2SScott Long children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); 4966763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Jabbers", 4967763757b2SScott Long children, rxstats.etherStatsJabbers, "Jabbers"); 4968763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", 4969763757b2SScott Long children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); 4970763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", 497106e83c7eSScott Long children, rxstats.inRangeLengthError, "inRangeLengthError"); 4972763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", 
497306e83c7eSScott Long children, rxstats.outRangeLengthError, "outRangeLengthError"); 4974763757b2SScott Long 4975763757b2SScott Long tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, 4976763757b2SScott Long NULL, "BGE TX Statistics"); 4977763757b2SScott Long children = SYSCTL_CHILDREN(tree); 4978763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", 4979763757b2SScott Long children, txstats.ifHCOutOctets, "Octets"); 4980763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", 4981763757b2SScott Long children, txstats.etherStatsCollisions, "Collisions"); 4982763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XON Sent", 4983763757b2SScott Long children, txstats.outXonSent, "XonSent"); 4984763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", 4985763757b2SScott Long children, txstats.outXoffSent, "XoffSent"); 4986763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", 4987763757b2SScott Long children, txstats.flowControlDone, "flowControlDone"); 4988763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", 4989763757b2SScott Long children, txstats.dot3StatsInternalMacTransmitErrors, 4990763757b2SScott Long "InternalMacTransmitErrors"); 4991763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", 4992763757b2SScott Long children, txstats.dot3StatsSingleCollisionFrames, 4993763757b2SScott Long "SingleCollisionFrames"); 4994763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", 4995763757b2SScott Long children, txstats.dot3StatsMultipleCollisionFrames, 4996763757b2SScott Long "MultipleCollisionFrames"); 4997763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", 4998763757b2SScott Long children, txstats.dot3StatsDeferredTransmissions, 4999763757b2SScott Long "DeferredTransmissions"); 5000763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", 5001763757b2SScott Long children, txstats.dot3StatsExcessiveCollisions, 5002763757b2SScott Long "ExcessiveCollisions"); 5003763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", 500406e83c7eSScott Long children, txstats.dot3StatsLateCollisions, 500506e83c7eSScott Long "LateCollisions"); 5006763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", 5007763757b2SScott Long children, txstats.ifHCOutUcastPkts, "UcastPkts"); 5008763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", 5009763757b2SScott Long children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); 5010763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", 5011763757b2SScott Long children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); 5012763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", 5013763757b2SScott Long children, txstats.dot3StatsCarrierSenseErrors, 5014763757b2SScott Long "CarrierSenseErrors"); 5015763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", 5016763757b2SScott Long children, txstats.ifOutDiscards, "Discards"); 5017763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", 5018763757b2SScott Long children, txstats.ifOutErrors, "Errors"); 5019763757b2SScott Long } 5020763757b2SScott Long 5021763757b2SScott Long static int 5022763757b2SScott Long bge_sysctl_stats(SYSCTL_HANDLER_ARGS) 5023763757b2SScott Long { 5024763757b2SScott Long struct bge_softc *sc; 502506e83c7eSScott Long uint32_t result; 5026d949071dSJung-uk Kim int offset; 5027763757b2SScott Long 5028763757b2SScott Long sc = (struct bge_softc *)arg1; 5029763757b2SScott Long offset = arg2; 
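	/*
	 * 'offset' is the byte offset of the requested counter within
	 * struct bge_stats, wired up through arg2 by the BGE_SYSCTL_STAT()
	 * macro above.  For instance, the RX "Octets" leaf registered in
	 * bge_add_sysctls() expands (roughly) to:
	 *
	 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "Octets",
	 *	    CTLTYPE_UINT | CTLFLAG_RD, sc,
	 *	    offsetof(struct bge_stats, rxstats.ifHCInOctets),
	 *	    bge_sysctl_stats, "IU", "Inbound Octets");
	 *
	 * The read below then fetches the low 32 bits of that counter
	 * directly from the chip's statistics block through the memory
	 * window.
	 */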
5030d949071dSJung-uk Kim result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + 5031d949071dSJung-uk Kim offsetof(bge_hostaddr, bge_addr_lo)); 5032041b706bSDavid Malone return (sysctl_handle_int(oidp, &result, 0, req)); 50336f8718a3SScott Long } 50346f8718a3SScott Long 50356f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 50366f8718a3SScott Long static int 50376f8718a3SScott Long bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 50386f8718a3SScott Long { 50396f8718a3SScott Long struct bge_softc *sc; 50406f8718a3SScott Long uint16_t *sbdata; 50416f8718a3SScott Long int error; 50426f8718a3SScott Long int result; 50436f8718a3SScott Long int i, j; 50446f8718a3SScott Long 50456f8718a3SScott Long result = -1; 50466f8718a3SScott Long error = sysctl_handle_int(oidp, &result, 0, req); 50476f8718a3SScott Long if (error || (req->newptr == NULL)) 50486f8718a3SScott Long return (error); 50496f8718a3SScott Long 50506f8718a3SScott Long if (result == 1) { 50516f8718a3SScott Long sc = (struct bge_softc *)arg1; 50526f8718a3SScott Long 50536f8718a3SScott Long sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; 50546f8718a3SScott Long printf("Status Block:\n"); 50556f8718a3SScott Long for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) { 50566f8718a3SScott Long printf("%06x:", i); 50576f8718a3SScott Long for (j = 0; j < 8; j++) { 50586f8718a3SScott Long printf(" %04x", sbdata[i]); 50596f8718a3SScott Long i += 4; 50606f8718a3SScott Long } 50616f8718a3SScott Long printf("\n"); 50626f8718a3SScott Long } 50636f8718a3SScott Long 50646f8718a3SScott Long printf("Registers:\n"); 50650c8aa4eaSJung-uk Kim for (i = 0x800; i < 0xA00; ) { 50666f8718a3SScott Long printf("%06x:", i); 50676f8718a3SScott Long for (j = 0; j < 8; j++) { 50686f8718a3SScott Long printf(" %08x", CSR_READ_4(sc, i)); 50696f8718a3SScott Long i += 4; 50706f8718a3SScott Long } 50716f8718a3SScott Long printf("\n"); 50726f8718a3SScott Long } 50736f8718a3SScott Long 50746f8718a3SScott Long printf("Hardware Flags:\n"); 5075a5779553SStanislav Sedov if (BGE_IS_5755_PLUS(sc)) 5076a5779553SStanislav Sedov printf(" - 5755 Plus\n"); 50775345bad0SScott Long if (BGE_IS_575X_PLUS(sc)) 50786f8718a3SScott Long printf(" - 575X Plus\n"); 50795345bad0SScott Long if (BGE_IS_5705_PLUS(sc)) 50806f8718a3SScott Long printf(" - 5705 Plus\n"); 50815345bad0SScott Long if (BGE_IS_5714_FAMILY(sc)) 50825345bad0SScott Long printf(" - 5714 Family\n"); 50835345bad0SScott Long if (BGE_IS_5700_FAMILY(sc)) 50845345bad0SScott Long printf(" - 5700 Family\n"); 50856f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_JUMBO) 50866f8718a3SScott Long printf(" - Supports Jumbo Frames\n"); 50876f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIX) 50886f8718a3SScott Long printf(" - PCI-X Bus\n"); 50896f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) 50906f8718a3SScott Long printf(" - PCI Express Bus\n"); 50915ee49a3aSJung-uk Kim if (sc->bge_flags & BGE_FLAG_NO_3LED) 50926f8718a3SScott Long printf(" - No 3 LEDs\n"); 50936f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) 50946f8718a3SScott Long printf(" - RX Alignment Bug\n"); 50956f8718a3SScott Long } 50966f8718a3SScott Long 50976f8718a3SScott Long return (error); 50986f8718a3SScott Long } 50996f8718a3SScott Long 51006f8718a3SScott Long static int 51016f8718a3SScott Long bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 51026f8718a3SScott Long { 51036f8718a3SScott Long struct bge_softc *sc; 51046f8718a3SScott Long int error; 51056f8718a3SScott Long uint16_t result; 51066f8718a3SScott Long uint32_t val; 51076f8718a3SScott Long 51086f8718a3SScott 
Long result = -1; 51096f8718a3SScott Long error = sysctl_handle_int(oidp, &result, 0, req); 51106f8718a3SScott Long if (error || (req->newptr == NULL)) 51116f8718a3SScott Long return (error); 51126f8718a3SScott Long 51136f8718a3SScott Long if (result < 0x8000) { 51146f8718a3SScott Long sc = (struct bge_softc *)arg1; 51156f8718a3SScott Long val = CSR_READ_4(sc, result); 51166f8718a3SScott Long printf("reg 0x%06X = 0x%08X\n", result, val); 51176f8718a3SScott Long } 51186f8718a3SScott Long 51196f8718a3SScott Long return (error); 51206f8718a3SScott Long } 51216f8718a3SScott Long 51226f8718a3SScott Long static int 51236f8718a3SScott Long bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) 51246f8718a3SScott Long { 51256f8718a3SScott Long struct bge_softc *sc; 51266f8718a3SScott Long int error; 51276f8718a3SScott Long uint16_t result; 51286f8718a3SScott Long uint32_t val; 51296f8718a3SScott Long 51306f8718a3SScott Long result = -1; 51316f8718a3SScott Long error = sysctl_handle_int(oidp, &result, 0, req); 51326f8718a3SScott Long if (error || (req->newptr == NULL)) 51336f8718a3SScott Long return (error); 51346f8718a3SScott Long 51356f8718a3SScott Long if (result < 0x8000) { 51366f8718a3SScott Long sc = (struct bge_softc *)arg1; 51376f8718a3SScott Long val = bge_readmem_ind(sc, result); 51386f8718a3SScott Long printf("mem 0x%06X = 0x%08X\n", result, val); 51396f8718a3SScott Long } 51406f8718a3SScott Long 51416f8718a3SScott Long return (error); 51426f8718a3SScott Long } 51436f8718a3SScott Long #endif 514438cc658fSJohn Baldwin 514538cc658fSJohn Baldwin static int 51465fea260fSMarius Strobl bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 51475fea260fSMarius Strobl { 51485fea260fSMarius Strobl 51495fea260fSMarius Strobl if (sc->bge_flags & BGE_FLAG_EADDR) 51505fea260fSMarius Strobl return (1); 51515fea260fSMarius Strobl 51525fea260fSMarius Strobl #ifdef __sparc64__ 51535fea260fSMarius Strobl OF_getetheraddr(sc->bge_dev, ether_addr); 51545fea260fSMarius Strobl return (0); 51555fea260fSMarius Strobl #endif 51565fea260fSMarius Strobl return (1); 51575fea260fSMarius Strobl } 51585fea260fSMarius Strobl 51595fea260fSMarius Strobl static int 516038cc658fSJohn Baldwin bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) 516138cc658fSJohn Baldwin { 516238cc658fSJohn Baldwin uint32_t mac_addr; 516338cc658fSJohn Baldwin 516438cc658fSJohn Baldwin mac_addr = bge_readmem_ind(sc, 0x0c14); 516538cc658fSJohn Baldwin if ((mac_addr >> 16) == 0x484b) { 516638cc658fSJohn Baldwin ether_addr[0] = (uint8_t)(mac_addr >> 8); 516738cc658fSJohn Baldwin ether_addr[1] = (uint8_t)mac_addr; 516838cc658fSJohn Baldwin mac_addr = bge_readmem_ind(sc, 0x0c18); 516938cc658fSJohn Baldwin ether_addr[2] = (uint8_t)(mac_addr >> 24); 517038cc658fSJohn Baldwin ether_addr[3] = (uint8_t)(mac_addr >> 16); 517138cc658fSJohn Baldwin ether_addr[4] = (uint8_t)(mac_addr >> 8); 517238cc658fSJohn Baldwin ether_addr[5] = (uint8_t)mac_addr; 51735fea260fSMarius Strobl return (0); 517438cc658fSJohn Baldwin } 51755fea260fSMarius Strobl return (1); 517638cc658fSJohn Baldwin } 517738cc658fSJohn Baldwin 517838cc658fSJohn Baldwin static int 517938cc658fSJohn Baldwin bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 518038cc658fSJohn Baldwin { 518138cc658fSJohn Baldwin int mac_offset = BGE_EE_MAC_OFFSET; 518238cc658fSJohn Baldwin 518338cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 518438cc658fSJohn Baldwin mac_offset = BGE_EE_MAC_OFFSET_5906; 518538cc658fSJohn Baldwin 51865fea260fSMarius Strobl return (bge_read_nvram(sc, 
ether_addr, mac_offset + 2, 51875fea260fSMarius Strobl ETHER_ADDR_LEN)); 518838cc658fSJohn Baldwin } 518938cc658fSJohn Baldwin 519038cc658fSJohn Baldwin static int 519138cc658fSJohn Baldwin bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 519238cc658fSJohn Baldwin { 519338cc658fSJohn Baldwin 51945fea260fSMarius Strobl if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 51955fea260fSMarius Strobl return (1); 51965fea260fSMarius Strobl 51975fea260fSMarius Strobl return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 51985fea260fSMarius Strobl ETHER_ADDR_LEN)); 519938cc658fSJohn Baldwin } 520038cc658fSJohn Baldwin 520138cc658fSJohn Baldwin static int 520238cc658fSJohn Baldwin bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 520338cc658fSJohn Baldwin { 520438cc658fSJohn Baldwin static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 520538cc658fSJohn Baldwin /* NOTE: Order is critical */ 52065fea260fSMarius Strobl bge_get_eaddr_fw, 520738cc658fSJohn Baldwin bge_get_eaddr_mem, 520838cc658fSJohn Baldwin bge_get_eaddr_nvram, 520938cc658fSJohn Baldwin bge_get_eaddr_eeprom, 521038cc658fSJohn Baldwin NULL 521138cc658fSJohn Baldwin }; 521238cc658fSJohn Baldwin const bge_eaddr_fcn_t *func; 521338cc658fSJohn Baldwin 521438cc658fSJohn Baldwin for (func = bge_eaddr_funcs; *func != NULL; ++func) { 521538cc658fSJohn Baldwin if ((*func)(sc, eaddr) == 0) 521638cc658fSJohn Baldwin break; 521738cc658fSJohn Baldwin } 521838cc658fSJohn Baldwin return (*func == NULL ? ENXIO : 0); 521938cc658fSJohn Baldwin } 5220
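/*
 * Illustrative note (not additional driver logic): bge_get_eaddr() above
 * walks the bge_eaddr_funcs table in order and stops at the first method
 * that returns 0.  On a hypothetical BCM5906 with no firmware-supplied
 * address, for example, bge_get_eaddr_fw() fails (it only succeeds on
 * sparc64 via Open Firmware), bge_get_eaddr_mem() is tried next and only
 * succeeds if the shared memory word at 0x0c14 carries the 0x484b
 * signature, then bge_get_eaddr_nvram() reads BGE_EE_MAC_OFFSET_5906, and
 * bge_get_eaddr_eeprom() is skipped for the 5906.  If every method fails,
 * the caller sees ENXIO.
 */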