/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#ifdef __sparc64__
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <machine/ver.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define	BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define	ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM.  Just to be safe, we cover all possibilities.
 */
static const struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5722 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5723 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761E },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5761SE },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5764 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5784 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5785G },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5906M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57760 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM57790 },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE4 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PW008GE5 },
	{ FJTSU_VENDORID,	FJTSU_DEVICEID_PP250450 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },
	{ FJTSU_VENDORID,	"Fujitsu" },

	{ 0, NULL }
};

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3,	"BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0,	"BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1,	"BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2,	"BCM5755 A2" },
	{ BGE_CHIPID_BCM5722_A0,	"BCM5722 A0" },
	{ BGE_CHIPID_BCM5761_A0,	"BCM5761 A0" },
	{ BGE_CHIPID_BCM5761_A1,	"BCM5761 A1" },
	{ BGE_CHIPID_BCM5784_A0,	"BCM5784 A0" },
	{ BGE_CHIPID_BCM5784_A1,	"BCM5784 A1" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },
	{ BGE_CHIPID_BCM5906_A1,	"BCM5906 A1" },
	{ BGE_CHIPID_BCM5906_A2,	"BCM5906 A2" },
	{ BGE_CHIPID_BCM57780_A0,	"BCM57780 A0" },
	{ BGE_CHIPID_BCM57780_A1,	"BCM57780 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5761,		"unknown BCM5761" },
	{ BGE_ASICREV_BCM5784,		"unknown BCM5784" },
	{ BGE_ASICREV_BCM5785,		"unknown BCM5785" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },
	{ BGE_ASICREV_BCM5906,		"unknown BCM5906" },
	{ BGE_ASICREV_BCM57780,		"unknown BCM57780" },

	{ 0, NULL }
};

#define	BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define	BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define	BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define	BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define	BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)
#define	BGE_IS_5755_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5755_PLUS)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);

typedef int	(*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);

static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
static int bge_get_eaddr(struct bge_softc *, uint8_t[]);

static void bge_txeof(struct bge_softc *, uint16_t);
static int bge_rxeof(struct bge_softc *, uint16_t, int);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
    uint16_t *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static int bge_msi_intr(void *);
static void bge_intr_task(void *, int);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static int bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);
static void bge_setvlan(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int);
static int bge_newbuf_jumbo(struct bge_softc *, int);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static int bge_has_eaddr(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
static void bge_writembx(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define	BGE_RESET_START 1
#define	BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging.  It may
 * leak information to untrusted users.  It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);
static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

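/*
 * Register bge as a child of the PCI bus and hang the miibus framework
 * below it, so that the PHY is probed and attached as a child of bge.
 */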
DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_allow_asf = 1;
/*
 * A common design characteristic for many Broadcom client controllers
 * is that they only support a single outstanding DMA read operation
 * on the PCIe bus. This means that it will take twice as long to fetch
 * a TX frame that is split into header and payload buffers as it does
 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
 * these controllers, coalescing buffers to reduce the number of memory
 * reads is an effective way to get maximum performance (about 940Mbps).
 * Without collapsing TX buffers the maximum TCP bulk transfer
 * performance is about 850Mbps. However, forcing the coalescing of
 * mbufs consumes a lot of CPU cycles, so leave it off by default.
 */
static int bge_forced_collapse = 0;

TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);
TUNABLE_INT("hw.bge.forced_collapse", &bge_forced_collapse);

SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
    "Allow ASF mode if available");
SYSCTL_INT(_hw_bge, OID_AUTO, forced_collapse, CTLFLAG_RD, &bge_forced_collapse,
    0, "Number of fragmented TX buffers of a frame allowed before "
    "forced collapsing");

#define	SPARC64_BLADE_1500_MODEL	"SUNW,Sun-Blade-1500"
#define	SPARC64_BLADE_1500_PATH_BGE	"/pci@1f,700000/network@2"
#define	SPARC64_BLADE_2500_MODEL	"SUNW,Sun-Blade-2500"
#define	SPARC64_BLADE_2500_PATH_BGE	"/pci@1c,600000/network@3"
#define	SPARC64_OFW_SUBVENDOR		"subsystem-vendor-id"

static int
bge_has_eaddr(struct bge_softc *sc)
{
#ifdef __sparc64__
	char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
	device_t dev;
	uint32_t subvendor;

	dev = sc->bge_dev;

	/*
	 * The on-board BGEs found in sun4u machines aren't fitted with
	 * an EEPROM which means that we have to obtain the MAC address
	 * via OFW and that some tests will always fail.  We distinguish
	 * such BGEs by the subvendor ID, which also has to be obtained
	 * from OFW instead of the PCI configuration space as the latter
	 * indicates Broadcom as the subvendor of the netboot interface.
	 * For early Blade 1500 and 2500 we even have to check the OFW
	 * device path as the subvendor ID always defaults to Broadcom
	 * there.
	 */
	if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
	    &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
	    subvendor == SUN_VENDORID)
		return (0);
	memset(buf, 0, sizeof(buf));
	if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
		if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
			return (0);
		if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
			return (0);
	}
#endif
	return (1);
}

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;
	uint32_t val;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}

/*
 * PCI Express only
 */
static void
bge_set_max_readrq(struct bge_softc *sc)
{
	device_t dev;
	uint16_t val;

	dev = sc->bge_dev;

	val = pci_read_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
	if ((val & PCIM_EXP_CTL_MAX_READ_REQUEST) !=
	    BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
		if (bootverbose)
			device_printf(dev, "adjust device control 0x%04x ",
			    val);
		val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
		val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
		pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
		    val, 2);
		if (bootverbose)
			printf("-> 0x%04x\n", val);
	}
}

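/*
 * Indirect register access: the target offset is written to the PCI
 * register window base address in config space and the data is then
 * transferred through the register window data register.
 */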
#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

static void
bge_writembx(struct bge_softc *sc, int off, int val)
{
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;

	CSR_WRITE_4(sc, off, val);
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}

static uint8_t
bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	uint32_t access, byte = 0;
	int i;

	/* Lock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
	for (i = 0; i < 8000; i++) {
		if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
			break;
		DELAY(20);
	}
	if (i == 8000)
		return (1);

	/* Enable access. */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(sc->bge_ifp, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM.  We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
		    phy, reg, val);
		val = 0;
		goto done;
	}

	DELAY(5);
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY write timed out (phy %d, reg %d, val %d)\n",
		    phy, reg, val);
		return (0);
	}

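	/* Turn autopolling back on if we turned it off above. */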
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	struct mbuf *m;
	struct bge_rx_bd *r;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	map = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
	sc->bge_cdata.bge_rx_std_sparemap = map;
	sc->bge_cdata.bge_rx_std_chain[i] = m;
	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = segs[0].ds_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	bus_dmamap_t map;
	struct bge_extrx_bd *r;
	struct mbuf *m;
	int error, nsegs;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	m_cljget(m, M_DONTWAIT, MJUM9BYTES);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
	sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
	    sc->bge_cdata.bge_rx_jumbo_sparemap;
	sc->bge_cdata.bge_rx_jumbo_sparemap = map;
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
	/*
	 * Fill in the extended RX buffer descriptor.
	 */
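	/*
	 * Note: the switch below fills the address/length slots from the
	 * highest-numbered segment down to segment 0 and relies on case
	 * fall-through to do so.
	 */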
	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
106495d67482SBill Paul */ 106595d67482SBill Paul static int 10663f74909aSGleb Smirnoff bge_init_rx_ring_std(struct bge_softc *sc) 106795d67482SBill Paul { 10683ee5d7daSPyun YongHyeon int error, i; 106995d67482SBill Paul 1070e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 107103e78bd0SPyun YongHyeon sc->bge_std = 0; 107295d67482SBill Paul for (i = 0; i < BGE_SSLOTS; i++) { 1073943787f3SPyun YongHyeon if ((error = bge_newbuf_std(sc, i)) != 0) 10743ee5d7daSPyun YongHyeon return (error); 107503e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 107695d67482SBill Paul }; 107795d67482SBill Paul 1078f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1079d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 1080f41ac2beSBill Paul 108195d67482SBill Paul sc->bge_std = i - 1; 108238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 108395d67482SBill Paul 108495d67482SBill Paul return (0); 108595d67482SBill Paul } 108695d67482SBill Paul 108795d67482SBill Paul static void 10883f74909aSGleb Smirnoff bge_free_rx_ring_std(struct bge_softc *sc) 108995d67482SBill Paul { 109095d67482SBill Paul int i; 109195d67482SBill Paul 109295d67482SBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 109395d67482SBill Paul if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 10940ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 1095e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], 1096e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 10970ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1098f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 1099e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1100e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_chain[i] = NULL; 110195d67482SBill Paul } 1102f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 110395d67482SBill Paul sizeof(struct bge_rx_bd)); 110495d67482SBill Paul } 110595d67482SBill Paul } 110695d67482SBill Paul 110795d67482SBill Paul static int 11083f74909aSGleb Smirnoff bge_init_rx_ring_jumbo(struct bge_softc *sc) 110995d67482SBill Paul { 111095d67482SBill Paul struct bge_rcb *rcb; 11113ee5d7daSPyun YongHyeon int error, i; 111295d67482SBill Paul 1113e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); 111403e78bd0SPyun YongHyeon sc->bge_jumbo = 0; 111595d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1116943787f3SPyun YongHyeon if ((error = bge_newbuf_jumbo(sc, i)) != 0) 11173ee5d7daSPyun YongHyeon return (error); 111803e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 111995d67482SBill Paul }; 112095d67482SBill Paul 1121f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1122d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 1123f41ac2beSBill Paul 112495d67482SBill Paul sc->bge_jumbo = i - 1; 112595d67482SBill Paul 1126f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 11271be6acb7SGleb Smirnoff rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 11281be6acb7SGleb Smirnoff BGE_RCB_FLAG_USE_EXT_RX_BD); 112967111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 113095d67482SBill Paul 113138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 113295d67482SBill Paul 113395d67482SBill Paul return (0); 113495d67482SBill Paul } 113595d67482SBill Paul 113695d67482SBill Paul static void 
11373f74909aSGleb Smirnoff bge_free_rx_ring_jumbo(struct bge_softc *sc) 113895d67482SBill Paul { 113995d67482SBill Paul int i; 114095d67482SBill Paul 114195d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 114295d67482SBill Paul if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1143e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 1144e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], 1145e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 1146f41ac2beSBill Paul bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 1147f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1148e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1149e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 115095d67482SBill Paul } 1151f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 11521be6acb7SGleb Smirnoff sizeof(struct bge_extrx_bd)); 115395d67482SBill Paul } 115495d67482SBill Paul } 115595d67482SBill Paul 115695d67482SBill Paul static void 11573f74909aSGleb Smirnoff bge_free_tx_ring(struct bge_softc *sc) 115895d67482SBill Paul { 115995d67482SBill Paul int i; 116095d67482SBill Paul 1161f41ac2beSBill Paul if (sc->bge_ldata.bge_tx_ring == NULL) 116295d67482SBill Paul return; 116395d67482SBill Paul 116495d67482SBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 116595d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 11660ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 1167e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[i], 1168e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 11690ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 1170f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 1171e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[i]); 1172e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[i] = NULL; 117395d67482SBill Paul } 1174f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 117595d67482SBill Paul sizeof(struct bge_tx_bd)); 117695d67482SBill Paul } 117795d67482SBill Paul } 117895d67482SBill Paul 117995d67482SBill Paul static int 11803f74909aSGleb Smirnoff bge_init_tx_ring(struct bge_softc *sc) 118195d67482SBill Paul { 118295d67482SBill Paul sc->bge_txcnt = 0; 118395d67482SBill Paul sc->bge_tx_saved_considx = 0; 11843927098fSPaul Saab 1185e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1186e6bf277eSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 11875c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 1188e6bf277eSPyun YongHyeon 118914bbd30fSGleb Smirnoff /* Initialize transmit producer index for host-memory send ring. */ 119014bbd30fSGleb Smirnoff sc->bge_tx_prodidx = 0; 119138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 119214bbd30fSGleb Smirnoff 11933927098fSPaul Saab /* 5700 b2 errata */ 1194e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 119538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 11963927098fSPaul Saab 119714bbd30fSGleb Smirnoff /* NIC-memory send ring not used; initialize to zero. 
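 * The driver only queues transmit descriptors through the host-memory send
 * ring whose producer index was set just above; the NIC-internal send ring
 * producer is still cleared here, and, as with the host index, the mailbox
 * write is repeated once on BGE_CHIPREV_5700_BX parts to work around the
 * 5700 b2 errata noted below.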
*/ 119838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 11993927098fSPaul Saab /* 5700 b2 errata */ 1200e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 120138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 120295d67482SBill Paul 120395d67482SBill Paul return (0); 120495d67482SBill Paul } 120595d67482SBill Paul 120695d67482SBill Paul static void 12073e9b1bcaSJung-uk Kim bge_setpromisc(struct bge_softc *sc) 12083e9b1bcaSJung-uk Kim { 12093e9b1bcaSJung-uk Kim struct ifnet *ifp; 12103e9b1bcaSJung-uk Kim 12113e9b1bcaSJung-uk Kim BGE_LOCK_ASSERT(sc); 12123e9b1bcaSJung-uk Kim 12133e9b1bcaSJung-uk Kim ifp = sc->bge_ifp; 12143e9b1bcaSJung-uk Kim 121545ee6ab3SJung-uk Kim /* Enable or disable promiscuous mode as needed. */ 12163e9b1bcaSJung-uk Kim if (ifp->if_flags & IFF_PROMISC) 121745ee6ab3SJung-uk Kim BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 12183e9b1bcaSJung-uk Kim else 121945ee6ab3SJung-uk Kim BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 12203e9b1bcaSJung-uk Kim } 12213e9b1bcaSJung-uk Kim 12223e9b1bcaSJung-uk Kim static void 12233f74909aSGleb Smirnoff bge_setmulti(struct bge_softc *sc) 122495d67482SBill Paul { 122595d67482SBill Paul struct ifnet *ifp; 122695d67482SBill Paul struct ifmultiaddr *ifma; 12273f74909aSGleb Smirnoff uint32_t hashes[4] = { 0, 0, 0, 0 }; 122895d67482SBill Paul int h, i; 122995d67482SBill Paul 12300f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 12310f9bd73bSSam Leffler 1232fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 123395d67482SBill Paul 123495d67482SBill Paul if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 123595d67482SBill Paul for (i = 0; i < 4; i++) 12360c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 123795d67482SBill Paul return; 123895d67482SBill Paul } 123995d67482SBill Paul 124095d67482SBill Paul /* First, zot all the existing filters. */ 124195d67482SBill Paul for (i = 0; i < 4; i++) 124295d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 124395d67482SBill Paul 124495d67482SBill Paul /* Now program new ones. */ 1245eb956cd0SRobert Watson if_maddr_rlock(ifp); 124695d67482SBill Paul TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 124795d67482SBill Paul if (ifma->ifma_addr->sa_family != AF_LINK) 124895d67482SBill Paul continue; 12490e939c0cSChristian Weisgerber h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 12500c8aa4eaSJung-uk Kim ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 12510c8aa4eaSJung-uk Kim hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 125295d67482SBill Paul } 1253eb956cd0SRobert Watson if_maddr_runlock(ifp); 125495d67482SBill Paul 125595d67482SBill Paul for (i = 0; i < 4; i++) 125695d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 125795d67482SBill Paul } 125895d67482SBill Paul 12598cb1383cSDoug Ambrisko static void 1260cb2eacc7SYaroslav Tykhiy bge_setvlan(struct bge_softc *sc) 1261cb2eacc7SYaroslav Tykhiy { 1262cb2eacc7SYaroslav Tykhiy struct ifnet *ifp; 1263cb2eacc7SYaroslav Tykhiy 1264cb2eacc7SYaroslav Tykhiy BGE_LOCK_ASSERT(sc); 1265cb2eacc7SYaroslav Tykhiy 1266cb2eacc7SYaroslav Tykhiy ifp = sc->bge_ifp; 1267cb2eacc7SYaroslav Tykhiy 1268cb2eacc7SYaroslav Tykhiy /* Enable or disable VLAN tag stripping as needed. 
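 * BGE_RXMODE_RX_KEEP_VLAN_DIAG makes the MAC leave the 802.1Q tag in the
 * received frame; clearing it lets the hardware strip the tag (which is
 * then presumably reported through the RX BD) so that IFCAP_VLAN_HWTAGGING
 * behaves as the stack expects.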
*/ 1269cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1270cb2eacc7SYaroslav Tykhiy BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1271cb2eacc7SYaroslav Tykhiy else 1272cb2eacc7SYaroslav Tykhiy BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1273cb2eacc7SYaroslav Tykhiy } 1274cb2eacc7SYaroslav Tykhiy 1275cb2eacc7SYaroslav Tykhiy static void 12768cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, type) 12778cb1383cSDoug Ambrisko struct bge_softc *sc; 12788cb1383cSDoug Ambrisko int type; 12798cb1383cSDoug Ambrisko { 12808cb1383cSDoug Ambrisko /* 12818cb1383cSDoug Ambrisko * Some chips don't like this so only do this if ASF is enabled 12828cb1383cSDoug Ambrisko */ 12838cb1383cSDoug Ambrisko if (sc->bge_asf_mode) 12848cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 12858cb1383cSDoug Ambrisko 12868cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12878cb1383cSDoug Ambrisko switch (type) { 12888cb1383cSDoug Ambrisko case BGE_RESET_START: 12898cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 12908cb1383cSDoug Ambrisko break; 12918cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12928cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 12938cb1383cSDoug Ambrisko break; 12948cb1383cSDoug Ambrisko } 12958cb1383cSDoug Ambrisko } 12968cb1383cSDoug Ambrisko } 12978cb1383cSDoug Ambrisko 12988cb1383cSDoug Ambrisko static void 12998cb1383cSDoug Ambrisko bge_sig_post_reset(sc, type) 13008cb1383cSDoug Ambrisko struct bge_softc *sc; 13018cb1383cSDoug Ambrisko int type; 13028cb1383cSDoug Ambrisko { 13038cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 13048cb1383cSDoug Ambrisko switch (type) { 13058cb1383cSDoug Ambrisko case BGE_RESET_START: 13068cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001); 13078cb1383cSDoug Ambrisko /* START DONE */ 13088cb1383cSDoug Ambrisko break; 13098cb1383cSDoug Ambrisko case BGE_RESET_STOP: 13108cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); 13118cb1383cSDoug Ambrisko break; 13128cb1383cSDoug Ambrisko } 13138cb1383cSDoug Ambrisko } 13148cb1383cSDoug Ambrisko } 13158cb1383cSDoug Ambrisko 13168cb1383cSDoug Ambrisko static void 13178cb1383cSDoug Ambrisko bge_sig_legacy(sc, type) 13188cb1383cSDoug Ambrisko struct bge_softc *sc; 13198cb1383cSDoug Ambrisko int type; 13208cb1383cSDoug Ambrisko { 13218cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 13228cb1383cSDoug Ambrisko switch (type) { 13238cb1383cSDoug Ambrisko case BGE_RESET_START: 13248cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 13258cb1383cSDoug Ambrisko break; 13268cb1383cSDoug Ambrisko case BGE_RESET_STOP: 13278cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 13288cb1383cSDoug Ambrisko break; 13298cb1383cSDoug Ambrisko } 13308cb1383cSDoug Ambrisko } 13318cb1383cSDoug Ambrisko } 13328cb1383cSDoug Ambrisko 13338cb1383cSDoug Ambrisko void bge_stop_fw(struct bge_softc *); 13348cb1383cSDoug Ambrisko void 13358cb1383cSDoug Ambrisko bge_stop_fw(sc) 13368cb1383cSDoug Ambrisko struct bge_softc *sc; 13378cb1383cSDoug Ambrisko { 13388cb1383cSDoug Ambrisko int i; 13398cb1383cSDoug Ambrisko 13408cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 13418cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); 13428cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 134339153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 13448cb1383cSDoug Ambrisko 
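		/*
		 * Wait for the firmware to acknowledge the pause request;
		 * bit 14 of BGE_CPU_EVENT is presumably cleared by the ASF
		 * firmware once it has seen BGE_FW_PAUSE. The loop below
		 * allows roughly 100 * 10us = 1ms before giving up silently.
		 */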
13458cb1383cSDoug Ambrisko for (i = 0; i < 100; i++ ) { 13468cb1383cSDoug Ambrisko if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) 13478cb1383cSDoug Ambrisko break; 13488cb1383cSDoug Ambrisko DELAY(10); 13498cb1383cSDoug Ambrisko } 13508cb1383cSDoug Ambrisko } 13518cb1383cSDoug Ambrisko } 13528cb1383cSDoug Ambrisko 135395d67482SBill Paul /* 1354c9ffd9f0SMarius Strobl * Do endian, PCI and DMA initialization. 135595d67482SBill Paul */ 135695d67482SBill Paul static int 13573f74909aSGleb Smirnoff bge_chipinit(struct bge_softc *sc) 135895d67482SBill Paul { 13593f74909aSGleb Smirnoff uint32_t dma_rw_ctl; 136095d67482SBill Paul int i; 136195d67482SBill Paul 13628cb1383cSDoug Ambrisko /* Set endianness before we access any non-PCI registers. */ 1363e907febfSPyun YongHyeon pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 136495d67482SBill Paul 136595d67482SBill Paul /* Clear the MAC control register */ 136695d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 136795d67482SBill Paul 136895d67482SBill Paul /* 136995d67482SBill Paul * Clear the MAC statistics block in the NIC's 137095d67482SBill Paul * internal memory. 137195d67482SBill Paul */ 137295d67482SBill Paul for (i = BGE_STATS_BLOCK; 13733f74909aSGleb Smirnoff i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 137495d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 137595d67482SBill Paul 137695d67482SBill Paul for (i = BGE_STATUS_BLOCK; 13773f74909aSGleb Smirnoff i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 137895d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 137995d67482SBill Paul 1380186f842bSJung-uk Kim /* 1381186f842bSJung-uk Kim * Set up the PCI DMA control register. 1382186f842bSJung-uk Kim */ 1383186f842bSJung-uk Kim dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | 1384186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); 1385652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 1386186f842bSJung-uk Kim /* Read watermark not used, 128 bytes for write. */ 1387186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1388652ae483SGleb Smirnoff } else if (sc->bge_flags & BGE_FLAG_PCIX) { 13894c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) { 1390186f842bSJung-uk Kim /* 256 bytes for read and write. */ 1391186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 1392186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 1393186f842bSJung-uk Kim dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? 1394186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : 1395186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1396186f842bSJung-uk Kim } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1397186f842bSJung-uk Kim /* 1536 bytes for read, 384 bytes for write. */ 1398186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1399186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1400186f842bSJung-uk Kim } else { 1401186f842bSJung-uk Kim /* 384 bytes for read and write. */ 1402186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 1403186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 14040c8aa4eaSJung-uk Kim 0x0F; 1405186f842bSJung-uk Kim } 1406e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1407e0ced696SPaul Saab sc->bge_asicrev == BGE_ASICREV_BCM5704) { 14083f74909aSGleb Smirnoff uint32_t tmp; 14095cba12d3SPaul Saab 1410186f842bSJung-uk Kim /* Set ONE_DMA_AT_ONCE for hardware workaround. 
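 * The low bits of BGE_PCI_CLKCTL read below appear to encode the PCI-X bus
 * mode; values 6 and 7 mark the configurations where the 5703/5704 are
 * limited to a single outstanding DMA transaction (ONEDMA_ATONCE), trading
 * some throughput for correctness.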
*/ 14110c8aa4eaSJung-uk Kim tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; 1412186f842bSJung-uk Kim if (tmp == 6 || tmp == 7) 1413186f842bSJung-uk Kim dma_rw_ctl |= 1414186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 14155cba12d3SPaul Saab 1416186f842bSJung-uk Kim /* Set PCI-X DMA write workaround. */ 1417186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 1418186f842bSJung-uk Kim } 1419186f842bSJung-uk Kim } else { 1420186f842bSJung-uk Kim /* Conventional PCI bus: 256 bytes for read and write. */ 1421186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1422186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 1423186f842bSJung-uk Kim 1424186f842bSJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1425186f842bSJung-uk Kim sc->bge_asicrev != BGE_ASICREV_BCM5750) 1426186f842bSJung-uk Kim dma_rw_ctl |= 0x0F; 1427186f842bSJung-uk Kim } 1428186f842bSJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 1429186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5701) 1430186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 1431186f842bSJung-uk Kim BGE_PCIDMARWCTL_ASRT_ALL_BE; 1432e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1433186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5704) 14345cba12d3SPaul Saab dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 14355cba12d3SPaul Saab pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 143695d67482SBill Paul 143795d67482SBill Paul /* 143895d67482SBill Paul * Set up general mode register. 143995d67482SBill Paul */ 1440e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 144195d67482SBill Paul BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 1442ee7ef91cSOleg Bulyzhin BGE_MODECTL_TX_NO_PHDR_CSUM); 144395d67482SBill Paul 144495d67482SBill Paul /* 144590447aadSMarius Strobl * BCM5701 B5 have a bug causing data corruption when using 144690447aadSMarius Strobl * 64-bit DMA reads, which can be terminated early and then 144790447aadSMarius Strobl * completed later as 32-bit accesses, in combination with 144890447aadSMarius Strobl * certain bridges. 144990447aadSMarius Strobl */ 145090447aadSMarius Strobl if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 145190447aadSMarius Strobl sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 145290447aadSMarius Strobl BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32); 145390447aadSMarius Strobl 145490447aadSMarius Strobl /* 14558cb1383cSDoug Ambrisko * Tell the firmware the driver is running 14568cb1383cSDoug Ambrisko */ 14578cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 14588cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 14598cb1383cSDoug Ambrisko 14608cb1383cSDoug Ambrisko /* 1461ea13bdd5SJohn Polstra * Disable memory write invalidate. Apparently it is not supported 1462c9ffd9f0SMarius Strobl * properly by these devices. Also ensure that INTx isn't disabled, 1463c9ffd9f0SMarius Strobl * as these chips need it even when using MSI. 146495d67482SBill Paul */ 1465c9ffd9f0SMarius Strobl PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1466c9ffd9f0SMarius Strobl PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4); 146795d67482SBill Paul 146895d67482SBill Paul /* Set the timer prescaler (always 66Mhz) */ 14690c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 147095d67482SBill Paul 147138cc658fSJohn Baldwin /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. 
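 * Taking the integrated PHY out of IDDQ (deep power-down) this early
 * presumably ensures it is already awake by the time the MII layer probes
 * it; the CSR read below only flushes the write before the delay.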
*/ 147238cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 147338cc658fSJohn Baldwin DELAY(40); /* XXX */ 147438cc658fSJohn Baldwin 147538cc658fSJohn Baldwin /* Put PHY into ready state */ 147638cc658fSJohn Baldwin BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 147738cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 147838cc658fSJohn Baldwin DELAY(40); 147938cc658fSJohn Baldwin } 148038cc658fSJohn Baldwin 148195d67482SBill Paul return (0); 148295d67482SBill Paul } 148395d67482SBill Paul 148495d67482SBill Paul static int 14853f74909aSGleb Smirnoff bge_blockinit(struct bge_softc *sc) 148695d67482SBill Paul { 148795d67482SBill Paul struct bge_rcb *rcb; 1488e907febfSPyun YongHyeon bus_size_t vrcb; 1489e907febfSPyun YongHyeon bge_hostaddr taddr; 14906f8718a3SScott Long uint32_t val; 149195d67482SBill Paul int i; 149295d67482SBill Paul 149395d67482SBill Paul /* 149495d67482SBill Paul * Initialize the memory window pointer register so that 149595d67482SBill Paul * we can access the first 32K of internal NIC RAM. This will 149695d67482SBill Paul * allow us to set up the TX send ring RCBs and the RX return 149795d67482SBill Paul * ring RCBs, plus other things which live in NIC memory. 149895d67482SBill Paul */ 149995d67482SBill Paul CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 150095d67482SBill Paul 1501822f63fcSBill Paul /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1502822f63fcSBill Paul 15037ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 150495d67482SBill Paul /* Configure mbuf memory pool */ 15050dae9719SJung-uk Kim CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1506822f63fcSBill Paul if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1507822f63fcSBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1508822f63fcSBill Paul else 150995d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 151095d67482SBill Paul 151195d67482SBill Paul /* Configure DMA resource pool */ 15120434d1b8SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 15130434d1b8SBill Paul BGE_DMA_DESCRIPTORS); 151495d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 15150434d1b8SBill Paul } 151695d67482SBill Paul 151795d67482SBill Paul /* Configure mbuf pool watermarks */ 151838cc658fSJohn Baldwin if (!BGE_IS_5705_PLUS(sc)) { 1519fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1520fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1521fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 152238cc658fSJohn Baldwin } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 152338cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 152438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 152538cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 152638cc658fSJohn Baldwin } else { 152738cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 152838cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 152938cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 153038cc658fSJohn Baldwin } 153195d67482SBill Paul 153295d67482SBill Paul /* Configure DMA resource watermarks */ 153395d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 153495d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 153595d67482SBill Paul 153695d67482SBill Paul /* Enable buffer manager */ 15377ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 153895d67482SBill Paul 
CSR_WRITE_4(sc, BGE_BMAN_MODE, 153995d67482SBill Paul BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 154095d67482SBill Paul 154195d67482SBill Paul /* Poll for buffer manager start indication */ 154295d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1543d5d23857SJung-uk Kim DELAY(10); 15440c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 154595d67482SBill Paul break; 154695d67482SBill Paul } 154795d67482SBill Paul 154895d67482SBill Paul if (i == BGE_TIMEOUT) { 1549fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1550fe806fdaSPyun YongHyeon "buffer manager failed to start\n"); 155195d67482SBill Paul return (ENXIO); 155295d67482SBill Paul } 15530434d1b8SBill Paul } 155495d67482SBill Paul 155595d67482SBill Paul /* Enable flow-through queues */ 15560c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 155795d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 155895d67482SBill Paul 155995d67482SBill Paul /* Wait until queue initialization is complete */ 156095d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1561d5d23857SJung-uk Kim DELAY(10); 156295d67482SBill Paul if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 156395d67482SBill Paul break; 156495d67482SBill Paul } 156595d67482SBill Paul 156695d67482SBill Paul if (i == BGE_TIMEOUT) { 1567fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "flow-through queue init failed\n"); 156895d67482SBill Paul return (ENXIO); 156995d67482SBill Paul } 157095d67482SBill Paul 157195d67482SBill Paul /* Initialize the standard RX ring control block */ 1572f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1573f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_lo = 1574f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1575f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_hi = 1576f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1577f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1578f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 15797ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 15800434d1b8SBill Paul rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 15810434d1b8SBill Paul else 15820434d1b8SBill Paul rcb->bge_maxlen_flags = 15830434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 158495d67482SBill Paul rcb->bge_nicaddr = BGE_STD_RX_RINGS; 15850c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 15860c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1587f41ac2beSBill Paul 158867111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 158967111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 159095d67482SBill Paul 159195d67482SBill Paul /* 159295d67482SBill Paul * Initialize the jumbo RX ring control block 159395d67482SBill Paul * We set the 'ring disabled' bit in the flags 159495d67482SBill Paul * field until we're actually ready to start 159595d67482SBill Paul * using this ring (i.e. once we set the MTU 159695d67482SBill Paul * high enough to require it). 
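 * bge_init_rx_ring_jumbo() later rewrites the MAXLEN_FLAGS word without
 * BGE_RCB_FLAG_RING_DISABLED once the jumbo ring has been populated, which
 * is what actually re-enables the ring.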
159795d67482SBill Paul 	 */
15984c0da0ffSGleb Smirnoff 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1599f41ac2beSBill Paul 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1600f41ac2beSBill Paul 
1601f41ac2beSBill Paul 		rcb->bge_hostaddr.bge_addr_lo =
1602f41ac2beSBill Paul 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1603f41ac2beSBill Paul 		rcb->bge_hostaddr.bge_addr_hi =
1604f41ac2beSBill Paul 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1605f41ac2beSBill Paul 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1606f41ac2beSBill Paul 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1607f41ac2beSBill Paul 		    BUS_DMASYNC_PREREAD);
16081be6acb7SGleb Smirnoff 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
16091be6acb7SGleb Smirnoff 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
161095d67482SBill Paul 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
161167111612SJohn Polstra 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
161267111612SJohn Polstra 		    rcb->bge_hostaddr.bge_addr_hi);
161367111612SJohn Polstra 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
161467111612SJohn Polstra 		    rcb->bge_hostaddr.bge_addr_lo);
1615f41ac2beSBill Paul 
16160434d1b8SBill Paul 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
16170434d1b8SBill Paul 		    rcb->bge_maxlen_flags);
161867111612SJohn Polstra 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
161995d67482SBill Paul 
162095d67482SBill Paul 		/* Set up dummy disabled mini ring RCB */
16212a141b94SPyun YongHyeon 		if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
1622f41ac2beSBill Paul 			rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
162367111612SJohn Polstra 			rcb->bge_maxlen_flags =
162467111612SJohn Polstra 			    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
16250434d1b8SBill Paul 			CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
16260434d1b8SBill Paul 			    rcb->bge_maxlen_flags);
16270434d1b8SBill Paul 		}
16282a141b94SPyun YongHyeon 	}
162995d67482SBill Paul 
163095d67482SBill Paul 	/*
163195d67482SBill Paul 	 * Set the BD ring replenish thresholds. The recommended
163295d67482SBill Paul 	 * values are 1/8th the number of descriptors allocated to
163395d67482SBill Paul 	 * each ring.
16349ba784dbSScott Long 	 * XXX The 5754 requires a lower threshold, so it might be a
16359ba784dbSScott Long 	 * requirement of all 575x family chips. The Linux driver sets
16369ba784dbSScott Long 	 * the lower threshold for all 5705 family chips as well, but there
16379ba784dbSScott Long 	 * are reports that it might not need to be so strict.
163838cc658fSJohn Baldwin 	 *
163938cc658fSJohn Baldwin 	 * XXX Linux does some extra fiddling here for the 5906 parts as
164038cc658fSJohn Baldwin 	 * well.
164195d67482SBill Paul 	 */
16425345bad0SScott Long 	if (BGE_IS_5705_PLUS(sc))
16436f8718a3SScott Long 		val = 8;
16446f8718a3SScott Long 	else
16456f8718a3SScott Long 		val = BGE_STD_RX_RING_CNT / 8;
16466f8718a3SScott Long 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
16472a141b94SPyun YongHyeon 	if (BGE_IS_JUMBO_CAPABLE(sc))
16482a141b94SPyun YongHyeon 		CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
16492a141b94SPyun YongHyeon 		    BGE_JUMBO_RX_RING_CNT/8);
165095d67482SBill Paul 
165195d67482SBill Paul 	/*
165295d67482SBill Paul 	 * Disable all unused send rings by setting the 'ring disabled'
165395d67482SBill Paul 	 * bit in the flags field of all the TX send ring control blocks.
165495d67482SBill Paul 	 * These are located in NIC memory.
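 * They are reached through the 32K memory window programmed above: vrcb
 * below is a window-relative offset starting at BGE_MEMWIN_START +
 * BGE_SEND_RING_RCB, advanced by sizeof(struct bge_rcb) per ring, with each
 * field poked through RCB_WRITE_4().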
165595d67482SBill Paul */ 1656e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 165795d67482SBill Paul for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1658e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1659e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1660e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1661e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 166295d67482SBill Paul } 166395d67482SBill Paul 166495d67482SBill Paul /* Configure TX RCB 0 (we use only the first ring) */ 1665e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1666e907febfSPyun YongHyeon BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1667e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1668e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1669e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1670e907febfSPyun YongHyeon BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 16717ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 1672e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1673e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 167495d67482SBill Paul 167595d67482SBill Paul /* Disable all unused RX return rings */ 1676e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 167795d67482SBill Paul for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1678e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1679e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1680e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 16810434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1682e907febfSPyun YongHyeon BGE_RCB_FLAG_RING_DISABLED)); 1683e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 168438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 16853f74909aSGleb Smirnoff (i * (sizeof(uint64_t))), 0); 1686e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 168795d67482SBill Paul } 168895d67482SBill Paul 168995d67482SBill Paul /* Initialize RX ring indexes */ 169038cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 16912a141b94SPyun YongHyeon if (BGE_IS_JUMBO_CAPABLE(sc)) 169238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 16932a141b94SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700) 169438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 169595d67482SBill Paul 169695d67482SBill Paul /* 169795d67482SBill Paul * Set up RX return ring 0 169895d67482SBill Paul * Note that the NIC address for RX return rings is 0x00000000. 169995d67482SBill Paul * The return rings live entirely within the host, so the 170095d67482SBill Paul * nicaddr field in the RCB isn't used. 
170195d67482SBill Paul 	 */
1702e907febfSPyun YongHyeon 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1703e907febfSPyun YongHyeon 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1704e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1705e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1706e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1707e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1708e907febfSPyun YongHyeon 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
170995d67482SBill Paul 
171095d67482SBill Paul 	/* Set random backoff seed for TX */
171195d67482SBill Paul 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
17124a0d6638SRuslan Ermilov 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
17134a0d6638SRuslan Ermilov 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
17144a0d6638SRuslan Ermilov 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
171595d67482SBill Paul 	    BGE_TX_BACKOFF_SEED_MASK);
171695d67482SBill Paul 
171795d67482SBill Paul 	/* Set inter-packet gap */
171895d67482SBill Paul 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
171995d67482SBill Paul 
172095d67482SBill Paul 	/*
172195d67482SBill Paul 	 * Specify which ring to use for packets that don't match
172295d67482SBill Paul 	 * any RX rules.
172395d67482SBill Paul 	 */
172495d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
172595d67482SBill Paul 
172695d67482SBill Paul 	/*
172795d67482SBill Paul 	 * Configure number of RX lists. One interrupt distribution
172895d67482SBill Paul 	 * list, sixteen active lists, one bad frames class.
172995d67482SBill Paul 	 */
173095d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
173195d67482SBill Paul 
173295d67482SBill Paul 	/* Initialize RX list placement stats mask. */
17330c8aa4eaSJung-uk Kim 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
173495d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
173595d67482SBill Paul 
173695d67482SBill Paul 	/* Disable host coalescing until we get it set up */
173795d67482SBill Paul 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
173895d67482SBill Paul 
173995d67482SBill Paul 	/* Poll to make sure it's shut down.
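 * The loop below waits up to BGE_TIMEOUT * 10us for BGE_HCCMODE_ENABLE to
 * clear, presumably because the coalescing parameters that follow should
 * only be changed while the engine is idle.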
*/ 174095d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1741d5d23857SJung-uk Kim DELAY(10); 174295d67482SBill Paul if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 174395d67482SBill Paul break; 174495d67482SBill Paul } 174595d67482SBill Paul 174695d67482SBill Paul if (i == BGE_TIMEOUT) { 1747fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1748fe806fdaSPyun YongHyeon "host coalescing engine failed to idle\n"); 174995d67482SBill Paul return (ENXIO); 175095d67482SBill Paul } 175195d67482SBill Paul 175295d67482SBill Paul /* Set up host coalescing defaults */ 175395d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 175495d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 175595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 175695d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 17577ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 175895d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 175995d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 17600434d1b8SBill Paul } 1761b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 1762b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 176395d67482SBill Paul 176495d67482SBill Paul /* Set up address of statistics block */ 17657ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 1766f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1767f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 176895d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1769f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 17700434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 177195d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 17720434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 17730434d1b8SBill Paul } 17740434d1b8SBill Paul 17750434d1b8SBill Paul /* Set up address of status block */ 1776f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1777f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 177895d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1779f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1780f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1781f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 178295d67482SBill Paul 178330f57f61SPyun YongHyeon /* Set up status block size. */ 178430f57f61SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 178530f57f61SPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5700_C0) 178630f57f61SPyun YongHyeon val = BGE_STATBLKSZ_FULL; 178730f57f61SPyun YongHyeon else 178830f57f61SPyun YongHyeon val = BGE_STATBLKSZ_32BYTE; 178930f57f61SPyun YongHyeon 179095d67482SBill Paul /* Turn on host coalescing state machine */ 179130f57f61SPyun YongHyeon CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 179295d67482SBill Paul 179395d67482SBill Paul /* Turn on RX BD completion state machine and enable attentions */ 179495d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDC_MODE, 179595d67482SBill Paul BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 179695d67482SBill Paul 179795d67482SBill Paul /* Turn on RX list placement state machine */ 179895d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 179995d67482SBill Paul 180095d67482SBill Paul /* Turn on RX list selector state machine. 
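 * The list selector (like the DMA completion and mbuf cluster free engines
 * further down) appears to exist only on the pre-5705 designs, hence the
 * !BGE_IS_5705_PLUS() guard.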
*/ 18017ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 180295d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 180395d67482SBill Paul 180495d67482SBill Paul /* Turn on DMA, clear stats */ 180595d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB | 180695d67482SBill Paul BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | 180795d67482SBill Paul BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | 180895d67482SBill Paul BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB | 1809652ae483SGleb Smirnoff ((sc->bge_flags & BGE_FLAG_TBI) ? 1810652ae483SGleb Smirnoff BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 181195d67482SBill Paul 181295d67482SBill Paul /* Set misc. local control, enable interrupts on attentions */ 181395d67482SBill Paul CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 181495d67482SBill Paul 181595d67482SBill Paul #ifdef notdef 181695d67482SBill Paul /* Assert GPIO pins for PHY reset */ 181795d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | 181895d67482SBill Paul BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); 181995d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | 182095d67482SBill Paul BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); 182195d67482SBill Paul #endif 182295d67482SBill Paul 182395d67482SBill Paul /* Turn on DMA completion state machine */ 18247ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 182595d67482SBill Paul CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 182695d67482SBill Paul 18276f8718a3SScott Long val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 18286f8718a3SScott Long 18296f8718a3SScott Long /* Enable host coalescing bug fix. */ 1830a5779553SStanislav Sedov if (BGE_IS_5755_PLUS(sc)) 18313889907fSStanislav Sedov val |= BGE_WDMAMODE_STATUS_TAG_FIX; 18326f8718a3SScott Long 183395d67482SBill Paul /* Turn on write DMA state machine */ 18346f8718a3SScott Long CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 18354f09c4c7SMarius Strobl DELAY(40); 183695d67482SBill Paul 183795d67482SBill Paul /* Turn on read DMA state machine */ 18384f09c4c7SMarius Strobl val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1839a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1840a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1841a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM57780) 1842a5779553SStanislav Sedov val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1843a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1844a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 18454f09c4c7SMarius Strobl if (sc->bge_flags & BGE_FLAG_PCIE) 18464f09c4c7SMarius Strobl val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1847ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) 1848ca3f1187SPyun YongHyeon val |= BGE_RDMAMODE_TSO4_ENABLE; 18494f09c4c7SMarius Strobl CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 18504f09c4c7SMarius Strobl DELAY(40); 185195d67482SBill Paul 185295d67482SBill Paul /* Turn on RX data completion state machine */ 185395d67482SBill Paul CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 185495d67482SBill Paul 185595d67482SBill Paul /* Turn on RX BD initiator state machine */ 185695d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 185795d67482SBill Paul 185895d67482SBill Paul /* Turn on RX data and RX BD initiator state machine */ 185995d67482SBill Paul CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 186095d67482SBill Paul 186195d67482SBill Paul /* Turn on Mbuf cluster free state machine */ 18627ee00338SJung-uk Kim if 
(!(BGE_IS_5705_PLUS(sc))) 186395d67482SBill Paul CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 186495d67482SBill Paul 186595d67482SBill Paul /* Turn on send BD completion state machine */ 186695d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 186795d67482SBill Paul 186895d67482SBill Paul /* Turn on send data completion state machine */ 1869a5779553SStanislav Sedov val = BGE_SDCMODE_ENABLE; 1870a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 1871a5779553SStanislav Sedov val |= BGE_SDCMODE_CDELAY; 1872a5779553SStanislav Sedov CSR_WRITE_4(sc, BGE_SDC_MODE, val); 187395d67482SBill Paul 187495d67482SBill Paul /* Turn on send data initiator state machine */ 1875ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) 1876ca3f1187SPyun YongHyeon CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1877ca3f1187SPyun YongHyeon else 187895d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 187995d67482SBill Paul 188095d67482SBill Paul /* Turn on send BD initiator state machine */ 188195d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 188295d67482SBill Paul 188395d67482SBill Paul /* Turn on send BD selector state machine */ 188495d67482SBill Paul CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 188595d67482SBill Paul 18860c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 188795d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 188895d67482SBill Paul BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 188995d67482SBill Paul 189095d67482SBill Paul /* ack/clear link change events */ 189195d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 18920434d1b8SBill Paul BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 18930434d1b8SBill Paul BGE_MACSTAT_LINK_CHANGED); 1894f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_MI_STS, 0); 189595d67482SBill Paul 189695d67482SBill Paul /* Enable PHY auto polling (for MII/GMII only) */ 1897652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 189895d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1899a1d52896SBill Paul } else { 19006098821cSJung-uk Kim BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 19011f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 19024c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 1903a1d52896SBill Paul CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1904a1d52896SBill Paul BGE_EVTENB_MI_INTERRUPT); 1905a1d52896SBill Paul } 190695d67482SBill Paul 19071f313773SOleg Bulyzhin /* 19081f313773SOleg Bulyzhin * Clear any pending link state attention. 19091f313773SOleg Bulyzhin * Otherwise some link state change events may be lost until attention 19101f313773SOleg Bulyzhin * is cleared by bge_intr() -> bge_link_upd() sequence. 19111f313773SOleg Bulyzhin * It's not necessary on newer BCM chips - perhaps enabling link 19121f313773SOleg Bulyzhin * state change attentions implies clearing pending attention. 19131f313773SOleg Bulyzhin */ 19141f313773SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 19151f313773SOleg Bulyzhin BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 19161f313773SOleg Bulyzhin BGE_MACSTAT_LINK_CHANGED); 19171f313773SOleg Bulyzhin 191895d67482SBill Paul /* Enable link state change attentions. 
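 * With this bit set, link transitions raise the MAC attention that
 * bge_intr() hands off to bge_link_upd(); the status clear just above keeps
 * a stale attention from being serviced as a fresh link change.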
*/ 191995d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 192095d67482SBill Paul 192195d67482SBill Paul return (0); 192295d67482SBill Paul } 192395d67482SBill Paul 19244c0da0ffSGleb Smirnoff const struct bge_revision * 19254c0da0ffSGleb Smirnoff bge_lookup_rev(uint32_t chipid) 19264c0da0ffSGleb Smirnoff { 19274c0da0ffSGleb Smirnoff const struct bge_revision *br; 19284c0da0ffSGleb Smirnoff 19294c0da0ffSGleb Smirnoff for (br = bge_revisions; br->br_name != NULL; br++) { 19304c0da0ffSGleb Smirnoff if (br->br_chipid == chipid) 19314c0da0ffSGleb Smirnoff return (br); 19324c0da0ffSGleb Smirnoff } 19334c0da0ffSGleb Smirnoff 19344c0da0ffSGleb Smirnoff for (br = bge_majorrevs; br->br_name != NULL; br++) { 19354c0da0ffSGleb Smirnoff if (br->br_chipid == BGE_ASICREV(chipid)) 19364c0da0ffSGleb Smirnoff return (br); 19374c0da0ffSGleb Smirnoff } 19384c0da0ffSGleb Smirnoff 19394c0da0ffSGleb Smirnoff return (NULL); 19404c0da0ffSGleb Smirnoff } 19414c0da0ffSGleb Smirnoff 19424c0da0ffSGleb Smirnoff const struct bge_vendor * 19434c0da0ffSGleb Smirnoff bge_lookup_vendor(uint16_t vid) 19444c0da0ffSGleb Smirnoff { 19454c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19464c0da0ffSGleb Smirnoff 19474c0da0ffSGleb Smirnoff for (v = bge_vendors; v->v_name != NULL; v++) 19484c0da0ffSGleb Smirnoff if (v->v_id == vid) 19494c0da0ffSGleb Smirnoff return (v); 19504c0da0ffSGleb Smirnoff 19514c0da0ffSGleb Smirnoff panic("%s: unknown vendor %d", __func__, vid); 19524c0da0ffSGleb Smirnoff return (NULL); 19534c0da0ffSGleb Smirnoff } 19544c0da0ffSGleb Smirnoff 195595d67482SBill Paul /* 195695d67482SBill Paul * Probe for a Broadcom chip. Check the PCI vendor and device IDs 19574c0da0ffSGleb Smirnoff * against our list and return its name if we find a match. 19584c0da0ffSGleb Smirnoff * 19594c0da0ffSGleb Smirnoff * Note that since the Broadcom controller contains VPD support, we 19607c929cf9SJung-uk Kim * try to get the device name string from the controller itself instead 19617c929cf9SJung-uk Kim * of the compiled-in string. It guarantees we'll always announce the 19627c929cf9SJung-uk Kim * right product name. We fall back to the compiled-in string when 19637c929cf9SJung-uk Kim * VPD is unavailable or corrupt. 
196495d67482SBill Paul */ 196595d67482SBill Paul static int 19663f74909aSGleb Smirnoff bge_probe(device_t dev) 196795d67482SBill Paul { 1968852c67f9SMarius Strobl const struct bge_type *t = bge_devs; 19694c0da0ffSGleb Smirnoff struct bge_softc *sc = device_get_softc(dev); 19707c929cf9SJung-uk Kim uint16_t vid, did; 197195d67482SBill Paul 197295d67482SBill Paul sc->bge_dev = dev; 19737c929cf9SJung-uk Kim vid = pci_get_vendor(dev); 19747c929cf9SJung-uk Kim did = pci_get_device(dev); 19754c0da0ffSGleb Smirnoff while(t->bge_vid != 0) { 19767c929cf9SJung-uk Kim if ((vid == t->bge_vid) && (did == t->bge_did)) { 19777c929cf9SJung-uk Kim char model[64], buf[96]; 19784c0da0ffSGleb Smirnoff const struct bge_revision *br; 19794c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19804c0da0ffSGleb Smirnoff uint32_t id; 19814c0da0ffSGleb Smirnoff 1982a5779553SStanislav Sedov id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1983a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 1984a5779553SStanislav Sedov if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) 1985a5779553SStanislav Sedov id = pci_read_config(dev, 1986a5779553SStanislav Sedov BGE_PCI_PRODID_ASICREV, 4); 19874c0da0ffSGleb Smirnoff br = bge_lookup_rev(id); 19887c929cf9SJung-uk Kim v = bge_lookup_vendor(vid); 19894e35d186SJung-uk Kim { 19904e35d186SJung-uk Kim #if __FreeBSD_version > 700024 19914e35d186SJung-uk Kim const char *pname; 19924e35d186SJung-uk Kim 1993852c67f9SMarius Strobl if (bge_has_eaddr(sc) && 1994852c67f9SMarius Strobl pci_get_vpd_ident(dev, &pname) == 0) 19954e35d186SJung-uk Kim snprintf(model, 64, "%s", pname); 19964e35d186SJung-uk Kim else 19974e35d186SJung-uk Kim #endif 19987c929cf9SJung-uk Kim snprintf(model, 64, "%s %s", 19997c929cf9SJung-uk Kim v->v_name, 20007c929cf9SJung-uk Kim br != NULL ? br->br_name : 20017c929cf9SJung-uk Kim "NetXtreme Ethernet Controller"); 20024e35d186SJung-uk Kim } 2003a5779553SStanislav Sedov snprintf(buf, 96, "%s, %sASIC rev. %#08x", model, 2004a5779553SStanislav Sedov br != NULL ? "" : "unknown ", id); 20054c0da0ffSGleb Smirnoff device_set_desc_copy(dev, buf); 20066d2a9bd6SDoug Ambrisko if (pci_get_subvendor(dev) == DELL_VENDORID) 20075ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_NO_3LED; 200808bf8bb7SJung-uk Kim if (did == BCOM_DEVICEID_BCM5755M) 200908bf8bb7SJung-uk Kim sc->bge_flags |= BGE_FLAG_ADJUST_TRIM; 201095d67482SBill Paul return (0); 201195d67482SBill Paul } 201295d67482SBill Paul t++; 201395d67482SBill Paul } 201495d67482SBill Paul 201595d67482SBill Paul return (ENXIO); 201695d67482SBill Paul } 201795d67482SBill Paul 2018f41ac2beSBill Paul static void 20193f74909aSGleb Smirnoff bge_dma_free(struct bge_softc *sc) 2020f41ac2beSBill Paul { 2021f41ac2beSBill Paul int i; 2022f41ac2beSBill Paul 20233f74909aSGleb Smirnoff /* Destroy DMA maps for RX buffers. */ 2024f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 2025f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_dmamap[i]) 20260ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2027f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 2028f41ac2beSBill Paul } 2029943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_sparemap) 2030943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2031943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap); 2032f41ac2beSBill Paul 20333f74909aSGleb Smirnoff /* Destroy DMA maps for jumbo RX buffers. 
*/ 2034f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2035f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 2036f41ac2beSBill Paul bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2037f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2038f41ac2beSBill Paul } 2039943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_sparemap) 2040943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2041943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap); 2042f41ac2beSBill Paul 20433f74909aSGleb Smirnoff /* Destroy DMA maps for TX buffers. */ 2044f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 2045f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_dmamap[i]) 20460ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 2047f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 2048f41ac2beSBill Paul } 2049f41ac2beSBill Paul 20500ac56796SPyun YongHyeon if (sc->bge_cdata.bge_rx_mtag) 20510ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 20520ac56796SPyun YongHyeon if (sc->bge_cdata.bge_tx_mtag) 20530ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 2054f41ac2beSBill Paul 2055f41ac2beSBill Paul 20563f74909aSGleb Smirnoff /* Destroy standard RX ring. */ 2057e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map) 2058e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 2059e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map); 2060e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 2061f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 2062f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring, 2063f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map); 2064f41ac2beSBill Paul 2065f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_ring_tag) 2066f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 2067f41ac2beSBill Paul 20683f74909aSGleb Smirnoff /* Destroy jumbo RX ring. */ 2069e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map) 2070e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2071e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map); 2072e65bed95SPyun YongHyeon 2073e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map && 2074e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_jumbo_ring) 2075f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2076f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, 2077f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map); 2078f41ac2beSBill Paul 2079f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 2080f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 2081f41ac2beSBill Paul 20823f74909aSGleb Smirnoff /* Destroy RX return ring. 
*/ 2083e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map) 2084e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 2085e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map); 2086e65bed95SPyun YongHyeon 2087e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map && 2088e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_return_ring) 2089f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 2090f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, 2091f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map); 2092f41ac2beSBill Paul 2093f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_return_ring_tag) 2094f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 2095f41ac2beSBill Paul 20963f74909aSGleb Smirnoff /* Destroy TX ring. */ 2097e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map) 2098e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 2099e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_ring_map); 2100e65bed95SPyun YongHyeon 2101e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 2102f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 2103f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring, 2104f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map); 2105f41ac2beSBill Paul 2106f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_ring_tag) 2107f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 2108f41ac2beSBill Paul 21093f74909aSGleb Smirnoff /* Destroy status block. */ 2110e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map) 2111e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 2112e65bed95SPyun YongHyeon sc->bge_cdata.bge_status_map); 2113e65bed95SPyun YongHyeon 2114e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 2115f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_status_tag, 2116f41ac2beSBill Paul sc->bge_ldata.bge_status_block, 2117f41ac2beSBill Paul sc->bge_cdata.bge_status_map); 2118f41ac2beSBill Paul 2119f41ac2beSBill Paul if (sc->bge_cdata.bge_status_tag) 2120f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 2121f41ac2beSBill Paul 21223f74909aSGleb Smirnoff /* Destroy statistics block. */ 2123e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map) 2124e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 2125e65bed95SPyun YongHyeon sc->bge_cdata.bge_stats_map); 2126e65bed95SPyun YongHyeon 2127e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 2128f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 2129f41ac2beSBill Paul sc->bge_ldata.bge_stats, 2130f41ac2beSBill Paul sc->bge_cdata.bge_stats_map); 2131f41ac2beSBill Paul 2132f41ac2beSBill Paul if (sc->bge_cdata.bge_stats_tag) 2133f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 2134f41ac2beSBill Paul 21353f74909aSGleb Smirnoff /* Destroy the parent tag. 
*/ 2136f41ac2beSBill Paul if (sc->bge_cdata.bge_parent_tag) 2137f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 2138f41ac2beSBill Paul } 2139f41ac2beSBill Paul 2140f41ac2beSBill Paul static int 21413f74909aSGleb Smirnoff bge_dma_alloc(device_t dev) 2142f41ac2beSBill Paul { 21433f74909aSGleb Smirnoff struct bge_dmamap_arg ctx; 2144f41ac2beSBill Paul struct bge_softc *sc; 2145f681b29aSPyun YongHyeon bus_addr_t lowaddr; 214630f57f61SPyun YongHyeon bus_size_t sbsz, txsegsz, txmaxsegsz; 21471be6acb7SGleb Smirnoff int i, error; 2148f41ac2beSBill Paul 2149f41ac2beSBill Paul sc = device_get_softc(dev); 2150f41ac2beSBill Paul 2151f681b29aSPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR; 2152f681b29aSPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) 2153f681b29aSPyun YongHyeon lowaddr = BGE_DMA_MAXADDR; 2154f681b29aSPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) 2155f681b29aSPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR_32BIT; 2156f41ac2beSBill Paul /* 2157f41ac2beSBill Paul * Allocate the parent bus DMA tag appropriate for PCI. 2158f41ac2beSBill Paul */ 21594eee14cbSMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 2160f681b29aSPyun YongHyeon 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, 21614eee14cbSMarius Strobl NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 21624eee14cbSMarius Strobl 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); 2163f41ac2beSBill Paul 2164e65bed95SPyun YongHyeon if (error != 0) { 2165fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2166fe806fdaSPyun YongHyeon "could not allocate parent dma tag\n"); 2167e65bed95SPyun YongHyeon return (ENOMEM); 2168e65bed95SPyun YongHyeon } 2169e65bed95SPyun YongHyeon 2170f41ac2beSBill Paul /* 21710ac56796SPyun YongHyeon * Create tag for Tx mbufs. 2172f41ac2beSBill Paul */ 2173ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) { 2174ca3f1187SPyun YongHyeon txsegsz = BGE_TSOSEG_SZ; 2175ca3f1187SPyun YongHyeon txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); 2176ca3f1187SPyun YongHyeon } else { 2177ca3f1187SPyun YongHyeon txsegsz = MCLBYTES; 2178ca3f1187SPyun YongHyeon txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; 2179ca3f1187SPyun YongHyeon } 21808a2e22deSScott Long error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 2181ca3f1187SPyun YongHyeon 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2182ca3f1187SPyun YongHyeon txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, 2183ca3f1187SPyun YongHyeon &sc->bge_cdata.bge_tx_mtag); 2184f41ac2beSBill Paul 2185f41ac2beSBill Paul if (error) { 21860ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); 21870ac56796SPyun YongHyeon return (ENOMEM); 21880ac56796SPyun YongHyeon } 21890ac56796SPyun YongHyeon 21900ac56796SPyun YongHyeon /* 21910ac56796SPyun YongHyeon * Create tag for Rx mbufs. 21920ac56796SPyun YongHyeon */ 21930ac56796SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 21940ac56796SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 2195ca3f1187SPyun YongHyeon MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); 21960ac56796SPyun YongHyeon 21970ac56796SPyun YongHyeon if (error) { 21980ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); 2199f41ac2beSBill Paul return (ENOMEM); 2200f41ac2beSBill Paul } 2201f41ac2beSBill Paul 22023f74909aSGleb Smirnoff /* Create DMA maps for RX buffers. 
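 * A spare map is created first; the intent is that bge_newbuf_std()
 * loads a replacement mbuf into the spare map and swaps it with the
 * ring slot's map only after the load succeeds, so an allocation
 * failure leaves the old mbuf in place (see bge_rxeof()).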
*/ 2203943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2204943787f3SPyun YongHyeon &sc->bge_cdata.bge_rx_std_sparemap); 2205943787f3SPyun YongHyeon if (error) { 2206943787f3SPyun YongHyeon device_printf(sc->bge_dev, 2207943787f3SPyun YongHyeon "can't create spare DMA map for RX\n"); 2208943787f3SPyun YongHyeon return (ENOMEM); 2209943787f3SPyun YongHyeon } 2210f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 22110ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2212f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_dmamap[i]); 2213f41ac2beSBill Paul if (error) { 2214fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2215fe806fdaSPyun YongHyeon "can't create DMA map for RX\n"); 2216f41ac2beSBill Paul return (ENOMEM); 2217f41ac2beSBill Paul } 2218f41ac2beSBill Paul } 2219f41ac2beSBill Paul 22203f74909aSGleb Smirnoff /* Create DMA maps for TX buffers. */ 2221f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 22220ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, 2223f41ac2beSBill Paul &sc->bge_cdata.bge_tx_dmamap[i]); 2224f41ac2beSBill Paul if (error) { 2225fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22260ac56796SPyun YongHyeon "can't create DMA map for TX\n"); 2227f41ac2beSBill Paul return (ENOMEM); 2228f41ac2beSBill Paul } 2229f41ac2beSBill Paul } 2230f41ac2beSBill Paul 22313f74909aSGleb Smirnoff /* Create tag for standard RX ring. */ 2232f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2233f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2234f41ac2beSBill Paul NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 2235f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 2236f41ac2beSBill Paul 2237f41ac2beSBill Paul if (error) { 2238fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2239f41ac2beSBill Paul return (ENOMEM); 2240f41ac2beSBill Paul } 2241f41ac2beSBill Paul 22423f74909aSGleb Smirnoff /* Allocate DMA'able memory for standard RX ring. */ 2243f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 2244f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 2245f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_ring_map); 2246f41ac2beSBill Paul if (error) 2247f41ac2beSBill Paul return (ENOMEM); 2248f41ac2beSBill Paul 2249f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 2250f41ac2beSBill Paul 22513f74909aSGleb Smirnoff /* Load the address of the standard RX ring. */ 2252f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2253f41ac2beSBill Paul ctx.sc = sc; 2254f41ac2beSBill Paul 2255f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 2256f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 2257f41ac2beSBill Paul BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2258f41ac2beSBill Paul 2259f41ac2beSBill Paul if (error) 2260f41ac2beSBill Paul return (ENOMEM); 2261f41ac2beSBill Paul 2262f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 2263f41ac2beSBill Paul 22643f74909aSGleb Smirnoff /* Create tags for jumbo mbufs. 
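 * Only needed on controllers with the jumbo RX ring
 * (BGE_IS_JUMBO_CAPABLE).  Jumbo buffers are 9k clusters (MJUM9BYTES)
 * that may be split into up to BGE_NSEG_JUMBO segments of at most
 * PAGE_SIZE each.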
*/ 22654c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) { 2266f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 22678a2e22deSScott Long 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 22681be6acb7SGleb Smirnoff NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 22691be6acb7SGleb Smirnoff 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 2270f41ac2beSBill Paul if (error) { 2271fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22723f74909aSGleb Smirnoff "could not allocate jumbo dma tag\n"); 2273f41ac2beSBill Paul return (ENOMEM); 2274f41ac2beSBill Paul } 2275f41ac2beSBill Paul 22763f74909aSGleb Smirnoff /* Create tag for jumbo RX ring. */ 2277f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2278f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2279f41ac2beSBill Paul NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 2280f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 2281f41ac2beSBill Paul 2282f41ac2beSBill Paul if (error) { 2283fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22843f74909aSGleb Smirnoff "could not allocate jumbo ring dma tag\n"); 2285f41ac2beSBill Paul return (ENOMEM); 2286f41ac2beSBill Paul } 2287f41ac2beSBill Paul 22883f74909aSGleb Smirnoff /* Allocate DMA'able memory for jumbo RX ring. */ 2289f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 22901be6acb7SGleb Smirnoff (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 22911be6acb7SGleb Smirnoff BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2292f41ac2beSBill Paul &sc->bge_cdata.bge_rx_jumbo_ring_map); 2293f41ac2beSBill Paul if (error) 2294f41ac2beSBill Paul return (ENOMEM); 2295f41ac2beSBill Paul 22963f74909aSGleb Smirnoff /* Load the address of the jumbo RX ring. */ 2297f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2298f41ac2beSBill Paul ctx.sc = sc; 2299f41ac2beSBill Paul 2300f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2301f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map, 2302f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 2303f41ac2beSBill Paul bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2304f41ac2beSBill Paul 2305f41ac2beSBill Paul if (error) 2306f41ac2beSBill Paul return (ENOMEM); 2307f41ac2beSBill Paul 2308f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 2309f41ac2beSBill Paul 23103f74909aSGleb Smirnoff /* Create DMA maps for jumbo RX buffers. */ 2311943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2312943787f3SPyun YongHyeon 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); 2313943787f3SPyun YongHyeon if (error) { 2314943787f3SPyun YongHyeon device_printf(sc->bge_dev, 23151b90d0bdSPyun YongHyeon "can't create spare DMA map for jumbo RX\n"); 2316943787f3SPyun YongHyeon return (ENOMEM); 2317943787f3SPyun YongHyeon } 2318f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2319f41ac2beSBill Paul error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2320f41ac2beSBill Paul 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2321f41ac2beSBill Paul if (error) { 2322fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 23233f74909aSGleb Smirnoff "can't create DMA map for jumbo RX\n"); 2324f41ac2beSBill Paul return (ENOMEM); 2325f41ac2beSBill Paul } 2326f41ac2beSBill Paul } 2327f41ac2beSBill Paul 2328f41ac2beSBill Paul } 2329f41ac2beSBill Paul 23303f74909aSGleb Smirnoff /* Create tag for RX return ring. 
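 * The return ring is where the chip posts descriptors for received
 * frames; BGE_RX_RTN_RING_SZ(sc) scales with bge_return_ring_cnt,
 * which bge_attach() limits to 512 entries on 5705-class chips.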
*/ 2331f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2332f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2333f41ac2beSBill Paul NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 2334f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 2335f41ac2beSBill Paul 2336f41ac2beSBill Paul if (error) { 2337fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2338f41ac2beSBill Paul return (ENOMEM); 2339f41ac2beSBill Paul } 2340f41ac2beSBill Paul 23413f74909aSGleb Smirnoff /* Allocate DMA'able memory for RX return ring. */ 2342f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 2343f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 2344f41ac2beSBill Paul &sc->bge_cdata.bge_rx_return_ring_map); 2345f41ac2beSBill Paul if (error) 2346f41ac2beSBill Paul return (ENOMEM); 2347f41ac2beSBill Paul 2348f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_rx_return_ring, 2349f41ac2beSBill Paul BGE_RX_RTN_RING_SZ(sc)); 2350f41ac2beSBill Paul 23513f74909aSGleb Smirnoff /* Load the address of the RX return ring. */ 2352f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2353f41ac2beSBill Paul ctx.sc = sc; 2354f41ac2beSBill Paul 2355f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 2356f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map, 2357f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 2358f41ac2beSBill Paul bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2359f41ac2beSBill Paul 2360f41ac2beSBill Paul if (error) 2361f41ac2beSBill Paul return (ENOMEM); 2362f41ac2beSBill Paul 2363f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2364f41ac2beSBill Paul 23653f74909aSGleb Smirnoff /* Create tag for TX ring. */ 2366f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2367f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2368f41ac2beSBill Paul NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2369f41ac2beSBill Paul &sc->bge_cdata.bge_tx_ring_tag); 2370f41ac2beSBill Paul 2371f41ac2beSBill Paul if (error) { 2372fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2373f41ac2beSBill Paul return (ENOMEM); 2374f41ac2beSBill Paul } 2375f41ac2beSBill Paul 23763f74909aSGleb Smirnoff /* Allocate DMA'able memory for TX ring. */ 2377f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2378f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2379f41ac2beSBill Paul &sc->bge_cdata.bge_tx_ring_map); 2380f41ac2beSBill Paul if (error) 2381f41ac2beSBill Paul return (ENOMEM); 2382f41ac2beSBill Paul 2383f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2384f41ac2beSBill Paul 23853f74909aSGleb Smirnoff /* Load the address of the TX ring. 
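 * As with the other rings: the load uses BUS_DMA_NOWAIT, so the
 * callback runs before bus_dmamap_load() returns.  Roughly (a sketch,
 * not the exact callback body):
 *
 *	ctx->bge_busaddr = segs[0].ds_addr;
 *
 * and the address is then saved as bge_tx_ring_paddr below.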
*/ 2386f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2387f41ac2beSBill Paul ctx.sc = sc; 2388f41ac2beSBill Paul 2389f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2390f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2391f41ac2beSBill Paul BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2392f41ac2beSBill Paul 2393f41ac2beSBill Paul if (error) 2394f41ac2beSBill Paul return (ENOMEM); 2395f41ac2beSBill Paul 2396f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2397f41ac2beSBill Paul 239830f57f61SPyun YongHyeon /* 239930f57f61SPyun YongHyeon * Create tag for status block. 240030f57f61SPyun YongHyeon * Because we only use single Tx/Rx/Rx return ring, use 240130f57f61SPyun YongHyeon * minimum status block size except BCM5700 AX/BX which 240230f57f61SPyun YongHyeon * seems to want to see full status block size regardless 240330f57f61SPyun YongHyeon * of configured number of ring. 240430f57f61SPyun YongHyeon */ 240530f57f61SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 240630f57f61SPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5700_C0) 240730f57f61SPyun YongHyeon sbsz = BGE_STATUS_BLK_SZ; 240830f57f61SPyun YongHyeon else 240930f57f61SPyun YongHyeon sbsz = 32; 2410f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2411f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 241230f57f61SPyun YongHyeon NULL, sbsz, 1, sbsz, 0, NULL, NULL, &sc->bge_cdata.bge_status_tag); 2413f41ac2beSBill Paul 2414f41ac2beSBill Paul if (error) { 241530f57f61SPyun YongHyeon device_printf(sc->bge_dev, 241630f57f61SPyun YongHyeon "could not allocate status dma tag\n"); 2417f41ac2beSBill Paul return (ENOMEM); 2418f41ac2beSBill Paul } 2419f41ac2beSBill Paul 24203f74909aSGleb Smirnoff /* Allocate DMA'able memory for status block. */ 2421f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 2422f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 2423f41ac2beSBill Paul &sc->bge_cdata.bge_status_map); 2424f41ac2beSBill Paul if (error) 2425f41ac2beSBill Paul return (ENOMEM); 2426f41ac2beSBill Paul 242730f57f61SPyun YongHyeon bzero((char *)sc->bge_ldata.bge_status_block, sbsz); 2428f41ac2beSBill Paul 24293f74909aSGleb Smirnoff /* Load the address of the status block. */ 2430f41ac2beSBill Paul ctx.sc = sc; 2431f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2432f41ac2beSBill Paul 2433f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2434f41ac2beSBill Paul sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 243530f57f61SPyun YongHyeon sbsz, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2436f41ac2beSBill Paul 2437f41ac2beSBill Paul if (error) 2438f41ac2beSBill Paul return (ENOMEM); 2439f41ac2beSBill Paul 2440f41ac2beSBill Paul sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2441f41ac2beSBill Paul 24423f74909aSGleb Smirnoff /* Create tag for statistics block. 
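 * Like the status block, this is a single chip-written structure in
 * host memory, so one BGE_STATS_SZ segment is sufficient.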
*/ 2443f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2444f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2445f41ac2beSBill Paul NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2446f41ac2beSBill Paul &sc->bge_cdata.bge_stats_tag); 2447f41ac2beSBill Paul 2448f41ac2beSBill Paul if (error) { 2449fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2450f41ac2beSBill Paul return (ENOMEM); 2451f41ac2beSBill Paul } 2452f41ac2beSBill Paul 24533f74909aSGleb Smirnoff /* Allocate DMA'able memory for statistics block. */ 2454f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2455f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2456f41ac2beSBill Paul &sc->bge_cdata.bge_stats_map); 2457f41ac2beSBill Paul if (error) 2458f41ac2beSBill Paul return (ENOMEM); 2459f41ac2beSBill Paul 2460f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2461f41ac2beSBill Paul 24623f74909aSGleb Smirnoff /* Load the address of the statstics block. */ 2463f41ac2beSBill Paul ctx.sc = sc; 2464f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2465f41ac2beSBill Paul 2466f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2467f41ac2beSBill Paul sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2468f41ac2beSBill Paul BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2469f41ac2beSBill Paul 2470f41ac2beSBill Paul if (error) 2471f41ac2beSBill Paul return (ENOMEM); 2472f41ac2beSBill Paul 2473f41ac2beSBill Paul sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2474f41ac2beSBill Paul 2475f41ac2beSBill Paul return (0); 2476f41ac2beSBill Paul } 2477f41ac2beSBill Paul 2478bf6ef57aSJohn Polstra /* 2479bf6ef57aSJohn Polstra * Return true if this device has more than one port. 2480bf6ef57aSJohn Polstra */ 2481bf6ef57aSJohn Polstra static int 2482bf6ef57aSJohn Polstra bge_has_multiple_ports(struct bge_softc *sc) 2483bf6ef57aSJohn Polstra { 2484bf6ef57aSJohn Polstra device_t dev = sc->bge_dev; 248555aaf894SMarius Strobl u_int b, d, f, fscan, s; 2486bf6ef57aSJohn Polstra 248755aaf894SMarius Strobl d = pci_get_domain(dev); 2488bf6ef57aSJohn Polstra b = pci_get_bus(dev); 2489bf6ef57aSJohn Polstra s = pci_get_slot(dev); 2490bf6ef57aSJohn Polstra f = pci_get_function(dev); 2491bf6ef57aSJohn Polstra for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) 249255aaf894SMarius Strobl if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) 2493bf6ef57aSJohn Polstra return (1); 2494bf6ef57aSJohn Polstra return (0); 2495bf6ef57aSJohn Polstra } 2496bf6ef57aSJohn Polstra 2497bf6ef57aSJohn Polstra /* 2498bf6ef57aSJohn Polstra * Return true if MSI can be used with this device. 2499bf6ef57aSJohn Polstra */ 2500bf6ef57aSJohn Polstra static int 2501bf6ef57aSJohn Polstra bge_can_use_msi(struct bge_softc *sc) 2502bf6ef57aSJohn Polstra { 2503bf6ef57aSJohn Polstra int can_use_msi = 0; 2504bf6ef57aSJohn Polstra 2505bf6ef57aSJohn Polstra switch (sc->bge_asicrev) { 2506a8376f70SMarius Strobl case BGE_ASICREV_BCM5714_A0: 2507bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5714: 2508bf6ef57aSJohn Polstra /* 2509a8376f70SMarius Strobl * Apparently, MSI doesn't work when these chips are 2510a8376f70SMarius Strobl * configured in single-port mode. 
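 * (The single- vs. multi-port distinction is made by probing the
 * other PCI functions of the same device in bge_has_multiple_ports()
 * above.)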
2511bf6ef57aSJohn Polstra */ 2512bf6ef57aSJohn Polstra if (bge_has_multiple_ports(sc)) 2513bf6ef57aSJohn Polstra can_use_msi = 1; 2514bf6ef57aSJohn Polstra break; 2515bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5750: 2516bf6ef57aSJohn Polstra if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && 2517bf6ef57aSJohn Polstra sc->bge_chiprev != BGE_CHIPREV_5750_BX) 2518bf6ef57aSJohn Polstra can_use_msi = 1; 2519bf6ef57aSJohn Polstra break; 2520a8376f70SMarius Strobl default: 2521a8376f70SMarius Strobl if (BGE_IS_575X_PLUS(sc)) 2522bf6ef57aSJohn Polstra can_use_msi = 1; 2523bf6ef57aSJohn Polstra } 2524bf6ef57aSJohn Polstra return (can_use_msi); 2525bf6ef57aSJohn Polstra } 2526bf6ef57aSJohn Polstra 252795d67482SBill Paul static int 25283f74909aSGleb Smirnoff bge_attach(device_t dev) 252995d67482SBill Paul { 253095d67482SBill Paul struct ifnet *ifp; 253195d67482SBill Paul struct bge_softc *sc; 25324f0794ffSBjoern A. Zeeb uint32_t hwcfg = 0, misccfg; 253308013fd3SMarius Strobl u_char eaddr[ETHER_ADDR_LEN]; 2534d648358bSPyun YongHyeon int error, msicount, reg, rid, trys; 253595d67482SBill Paul 253695d67482SBill Paul sc = device_get_softc(dev); 253795d67482SBill Paul sc->bge_dev = dev; 253895d67482SBill Paul 2539dfe0df9aSPyun YongHyeon TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); 2540dfe0df9aSPyun YongHyeon 254195d67482SBill Paul /* 254295d67482SBill Paul * Map control/status registers. 254395d67482SBill Paul */ 254495d67482SBill Paul pci_enable_busmaster(dev); 254595d67482SBill Paul 254695d67482SBill Paul rid = BGE_PCI_BAR0; 25475f96beb9SNate Lawson sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 254844f8f2fcSMarius Strobl RF_ACTIVE); 254995d67482SBill Paul 255095d67482SBill Paul if (sc->bge_res == NULL) { 2551fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, "couldn't map memory\n"); 255295d67482SBill Paul error = ENXIO; 255395d67482SBill Paul goto fail; 255495d67482SBill Paul } 255595d67482SBill Paul 25564f09c4c7SMarius Strobl /* Save various chip information. */ 2557e53d81eeSPaul Saab sc->bge_chipid = 2558a5779553SStanislav Sedov pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2559a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 2560a5779553SStanislav Sedov if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) 2561a5779553SStanislav Sedov sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 2562a5779553SStanislav Sedov 4); 2563e53d81eeSPaul Saab sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2564e53d81eeSPaul Saab sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2565e53d81eeSPaul Saab 256686543395SJung-uk Kim /* 256738cc658fSJohn Baldwin * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the 256886543395SJung-uk Kim * 5705 A0 and A1 chips. 256986543395SJung-uk Kim */ 257086543395SJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && 257138cc658fSJohn Baldwin sc->bge_asicrev != BGE_ASICREV_BCM5906 && 257286543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 257386543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A1) 257486543395SJung-uk Kim sc->bge_flags |= BGE_FLAG_WIRESPEED; 257586543395SJung-uk Kim 25765fea260fSMarius Strobl if (bge_has_eaddr(sc)) 25775fea260fSMarius Strobl sc->bge_flags |= BGE_FLAG_EADDR; 257808013fd3SMarius Strobl 25790dae9719SJung-uk Kim /* Save chipset family. 
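 * The rest of the driver tests these family flags (BGE_FLAG_5705_PLUS,
 * BGE_FLAG_575X_PLUS, BGE_FLAG_5755_PLUS, BGE_FLAG_JUMBO, ...) instead
 * of comparing individual ASIC revisions everywhere.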
*/ 25800dae9719SJung-uk Kim switch (sc->bge_asicrev) { 2581a5779553SStanislav Sedov case BGE_ASICREV_BCM5755: 2582a5779553SStanislav Sedov case BGE_ASICREV_BCM5761: 2583a5779553SStanislav Sedov case BGE_ASICREV_BCM5784: 2584a5779553SStanislav Sedov case BGE_ASICREV_BCM5785: 2585a5779553SStanislav Sedov case BGE_ASICREV_BCM5787: 2586a5779553SStanislav Sedov case BGE_ASICREV_BCM57780: 2587a5779553SStanislav Sedov sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | 2588a5779553SStanislav Sedov BGE_FLAG_5705_PLUS; 2589a5779553SStanislav Sedov break; 25900dae9719SJung-uk Kim case BGE_ASICREV_BCM5700: 25910dae9719SJung-uk Kim case BGE_ASICREV_BCM5701: 25920dae9719SJung-uk Kim case BGE_ASICREV_BCM5703: 25930dae9719SJung-uk Kim case BGE_ASICREV_BCM5704: 25947ee00338SJung-uk Kim sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; 25950dae9719SJung-uk Kim break; 25960dae9719SJung-uk Kim case BGE_ASICREV_BCM5714_A0: 25970dae9719SJung-uk Kim case BGE_ASICREV_BCM5780: 25980dae9719SJung-uk Kim case BGE_ASICREV_BCM5714: 25997ee00338SJung-uk Kim sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */; 26009fe569d8SXin LI /* FALLTHROUGH */ 26010dae9719SJung-uk Kim case BGE_ASICREV_BCM5750: 26020dae9719SJung-uk Kim case BGE_ASICREV_BCM5752: 260338cc658fSJohn Baldwin case BGE_ASICREV_BCM5906: 26040dae9719SJung-uk Kim sc->bge_flags |= BGE_FLAG_575X_PLUS; 26059fe569d8SXin LI /* FALLTHROUGH */ 26060dae9719SJung-uk Kim case BGE_ASICREV_BCM5705: 26070dae9719SJung-uk Kim sc->bge_flags |= BGE_FLAG_5705_PLUS; 26080dae9719SJung-uk Kim break; 26090dae9719SJung-uk Kim } 26100dae9719SJung-uk Kim 26115ee49a3aSJung-uk Kim /* Set various bug flags. */ 26121ec4c3a8SJung-uk Kim if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 26131ec4c3a8SJung-uk Kim sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 26141ec4c3a8SJung-uk Kim sc->bge_flags |= BGE_FLAG_CRC_BUG; 26155ee49a3aSJung-uk Kim if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || 26165ee49a3aSJung-uk Kim sc->bge_chiprev == BGE_CHIPREV_5704_AX) 26175ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_ADC_BUG; 26185ee49a3aSJung-uk Kim if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 26195ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_5704_A0_BUG; 262008bf8bb7SJung-uk Kim if (BGE_IS_5705_PLUS(sc) && 262108bf8bb7SJung-uk Kim !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) { 26225ee49a3aSJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2623a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5761 || 2624a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5784 || 26254fcf220bSJohn Baldwin sc->bge_asicrev == BGE_ASICREV_BCM5787) { 26264fcf220bSJohn Baldwin if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0) 26275ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_JITTER_BUG; 262838cc658fSJohn Baldwin } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 26295ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_BER_BUG; 26305ee49a3aSJung-uk Kim } 26315ee49a3aSJung-uk Kim 2632f681b29aSPyun YongHyeon /* 2633f681b29aSPyun YongHyeon * All controllers that are not 5755 or higher have 4GB 2634f681b29aSPyun YongHyeon * boundary DMA bug. 2635f681b29aSPyun YongHyeon * Whenever an address crosses a multiple of the 4GB boundary 2636f681b29aSPyun YongHyeon * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition 2637f681b29aSPyun YongHyeon * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA 2638f681b29aSPyun YongHyeon * state machine will lockup and cause the device to hang. 
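 * The workaround, applied in bge_dma_alloc(), is to clamp the parent
 * DMA tag's lowaddr to BUS_SPACE_MAXADDR_32BIT so that no DMA address
 * can reach the 4GB boundary in the first place.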
2639f681b29aSPyun YongHyeon */ 2640f681b29aSPyun YongHyeon if (BGE_IS_5755_PLUS(sc) == 0) 2641f681b29aSPyun YongHyeon sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG; 26424f0794ffSBjoern A. Zeeb 26434f0794ffSBjoern A. Zeeb /* 26444f0794ffSBjoern A. Zeeb * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe() 26454f0794ffSBjoern A. Zeeb * but I do not know the DEVICEID for the 5788M. 26464f0794ffSBjoern A. Zeeb */ 26474f0794ffSBjoern A. Zeeb misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID; 26484f0794ffSBjoern A. Zeeb if (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 26494f0794ffSBjoern A. Zeeb misccfg == BGE_MISCCFG_BOARD_ID_5788M) 26504f0794ffSBjoern A. Zeeb sc->bge_flags |= BGE_FLAG_5788; 26514f0794ffSBjoern A. Zeeb 2652e53d81eeSPaul Saab /* 2653ca3f1187SPyun YongHyeon * Some controllers seem to require a special firmware to use 2654ca3f1187SPyun YongHyeon * TSO. But the firmware is not available to FreeBSD and Linux 2655ca3f1187SPyun YongHyeon * claims that the TSO performed by the firmware is slower than 2656ca3f1187SPyun YongHyeon * hardware based TSO. Moreover the firmware based TSO has one 2657ca3f1187SPyun YongHyeon * known bug which can't handle TSO if ethernet header + IP/TCP 2658ca3f1187SPyun YongHyeon * header is greater than 80 bytes. The workaround for the TSO 2659ca3f1187SPyun YongHyeon * bug exist but it seems it's too expensive than not using 2660ca3f1187SPyun YongHyeon * TSO at all. Some hardwares also have the TSO bug so limit 2661ca3f1187SPyun YongHyeon * the TSO to the controllers that are not affected TSO issues 2662ca3f1187SPyun YongHyeon * (e.g. 5755 or higher). 2663ca3f1187SPyun YongHyeon */ 2664ca3f1187SPyun YongHyeon if (BGE_IS_5755_PLUS(sc)) 2665ca3f1187SPyun YongHyeon sc->bge_flags |= BGE_FLAG_TSO; 2666ca3f1187SPyun YongHyeon 2667ca3f1187SPyun YongHyeon /* 26686f8718a3SScott Long * Check if this is a PCI-X or PCI Express device. 2669e53d81eeSPaul Saab */ 26706f8718a3SScott Long if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 26714c0da0ffSGleb Smirnoff /* 26726f8718a3SScott Long * Found a PCI Express capabilities register, this 26736f8718a3SScott Long * must be a PCI Express device. 26746f8718a3SScott Long */ 26756f8718a3SScott Long sc->bge_flags |= BGE_FLAG_PCIE; 26760aaf1057SPyun YongHyeon sc->bge_expcap = reg; 26770aaf1057SPyun YongHyeon bge_set_max_readrq(sc); 26786f8718a3SScott Long } else { 26796f8718a3SScott Long /* 26806f8718a3SScott Long * Check if the device is in PCI-X Mode. 26816f8718a3SScott Long * (This bit is not valid on PCI Express controllers.) 26824c0da0ffSGleb Smirnoff */ 26830aaf1057SPyun YongHyeon if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) 26840aaf1057SPyun YongHyeon sc->bge_pcixcap = reg; 268590447aadSMarius Strobl if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & 26864c0da0ffSGleb Smirnoff BGE_PCISTATE_PCI_BUSMODE) == 0) 2687652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_PCIX; 26886f8718a3SScott Long } 26894c0da0ffSGleb Smirnoff 2690bf6ef57aSJohn Polstra /* 2691fd4d32feSPyun YongHyeon * The 40bit DMA bug applies to the 5714/5715 controllers and is 2692fd4d32feSPyun YongHyeon * not actually a MAC controller bug but an issue with the embedded 2693fd4d32feSPyun YongHyeon * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. 2694fd4d32feSPyun YongHyeon */ 2695fd4d32feSPyun YongHyeon if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX)) 2696fd4d32feSPyun YongHyeon sc->bge_flags |= BGE_FLAG_40BIT_BUG; 2697fd4d32feSPyun YongHyeon /* 2698bf6ef57aSJohn Polstra * Allocate the interrupt, using MSI if possible. 
These devices 2699bf6ef57aSJohn Polstra * support 8 MSI messages, but only the first one is used in 2700bf6ef57aSJohn Polstra * normal operation. 2701bf6ef57aSJohn Polstra */ 27020aaf1057SPyun YongHyeon rid = 0; 27036a15578dSPyun YongHyeon if (pci_find_extcap(sc->bge_dev, PCIY_MSI, ®) == 0) { 27040aaf1057SPyun YongHyeon sc->bge_msicap = reg; 2705bf6ef57aSJohn Polstra if (bge_can_use_msi(sc)) { 2706bf6ef57aSJohn Polstra msicount = pci_msi_count(dev); 2707bf6ef57aSJohn Polstra if (msicount > 1) 2708bf6ef57aSJohn Polstra msicount = 1; 2709bf6ef57aSJohn Polstra } else 2710bf6ef57aSJohn Polstra msicount = 0; 2711bf6ef57aSJohn Polstra if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) { 2712bf6ef57aSJohn Polstra rid = 1; 2713bf6ef57aSJohn Polstra sc->bge_flags |= BGE_FLAG_MSI; 27140aaf1057SPyun YongHyeon } 27150aaf1057SPyun YongHyeon } 2716bf6ef57aSJohn Polstra 2717bf6ef57aSJohn Polstra sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2718bf6ef57aSJohn Polstra RF_SHAREABLE | RF_ACTIVE); 2719bf6ef57aSJohn Polstra 2720bf6ef57aSJohn Polstra if (sc->bge_irq == NULL) { 2721bf6ef57aSJohn Polstra device_printf(sc->bge_dev, "couldn't map interrupt\n"); 2722bf6ef57aSJohn Polstra error = ENXIO; 2723bf6ef57aSJohn Polstra goto fail; 2724bf6ef57aSJohn Polstra } 2725bf6ef57aSJohn Polstra 27264f09c4c7SMarius Strobl if (bootverbose) 27274f09c4c7SMarius Strobl device_printf(dev, 27284f09c4c7SMarius Strobl "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n", 27294f09c4c7SMarius Strobl sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev, 27304f09c4c7SMarius Strobl (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" : 27314f09c4c7SMarius Strobl ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI")); 27324f09c4c7SMarius Strobl 2733bf6ef57aSJohn Polstra BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2734bf6ef57aSJohn Polstra 273595d67482SBill Paul /* Try to reset the chip. */ 27368cb1383cSDoug Ambrisko if (bge_reset(sc)) { 27378cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "chip reset failed\n"); 27388cb1383cSDoug Ambrisko error = ENXIO; 27398cb1383cSDoug Ambrisko goto fail; 27408cb1383cSDoug Ambrisko } 27418cb1383cSDoug Ambrisko 27428cb1383cSDoug Ambrisko sc->bge_asf_mode = 0; 2743f1a7e6d5SScott Long if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2744f1a7e6d5SScott Long == BGE_MAGIC_NUMBER)) { 27458cb1383cSDoug Ambrisko if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 27468cb1383cSDoug Ambrisko & BGE_HWCFG_ASF) { 27478cb1383cSDoug Ambrisko sc->bge_asf_mode |= ASF_ENABLE; 27488cb1383cSDoug Ambrisko sc->bge_asf_mode |= ASF_STACKUP; 27498cb1383cSDoug Ambrisko if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 27508cb1383cSDoug Ambrisko sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 27518cb1383cSDoug Ambrisko } 27528cb1383cSDoug Ambrisko } 27538cb1383cSDoug Ambrisko } 27548cb1383cSDoug Ambrisko 27558cb1383cSDoug Ambrisko /* Try to reset the chip again the nice way. 
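 * "Nice" means with the ASF handshake around it: stop the firmware,
 * signal the coming reset, reset, then signal completion so any
 * ASF/IPMI management firmware can survive the reset.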
*/ 27568cb1383cSDoug Ambrisko bge_stop_fw(sc); 27578cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_STOP); 27588cb1383cSDoug Ambrisko if (bge_reset(sc)) { 27598cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "chip reset failed\n"); 27608cb1383cSDoug Ambrisko error = ENXIO; 27618cb1383cSDoug Ambrisko goto fail; 27628cb1383cSDoug Ambrisko } 27638cb1383cSDoug Ambrisko 27648cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 27658cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 276695d67482SBill Paul 276795d67482SBill Paul if (bge_chipinit(sc)) { 2768fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "chip initialization failed\n"); 276995d67482SBill Paul error = ENXIO; 277095d67482SBill Paul goto fail; 277195d67482SBill Paul } 277295d67482SBill Paul 277338cc658fSJohn Baldwin error = bge_get_eaddr(sc, eaddr); 277438cc658fSJohn Baldwin if (error) { 277508013fd3SMarius Strobl device_printf(sc->bge_dev, 277608013fd3SMarius Strobl "failed to read station address\n"); 277795d67482SBill Paul error = ENXIO; 277895d67482SBill Paul goto fail; 277995d67482SBill Paul } 278095d67482SBill Paul 2781f41ac2beSBill Paul /* 5705 limits RX return ring to 512 entries. */ 27827ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 2783f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2784f41ac2beSBill Paul else 2785f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2786f41ac2beSBill Paul 2787f41ac2beSBill Paul if (bge_dma_alloc(dev)) { 2788fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2789fe806fdaSPyun YongHyeon "failed to allocate DMA resources\n"); 2790f41ac2beSBill Paul error = ENXIO; 2791f41ac2beSBill Paul goto fail; 2792f41ac2beSBill Paul } 2793f41ac2beSBill Paul 279495d67482SBill Paul /* Set default tuneable values. 
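 * The coalescing defaults mean the chip delays an interrupt until
 * either the *_coal_ticks timer expires or *_max_coal_bds descriptors
 * have accumulated, whichever comes first.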
*/ 279595d67482SBill Paul sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 279695d67482SBill Paul sc->bge_rx_coal_ticks = 150; 279795d67482SBill Paul sc->bge_tx_coal_ticks = 150; 27986f8718a3SScott Long sc->bge_rx_max_coal_bds = 10; 27996f8718a3SScott Long sc->bge_tx_max_coal_bds = 10; 280095d67482SBill Paul 280195d67482SBill Paul /* Set up ifnet structure */ 2802fc74a9f9SBrooks Davis ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2803fc74a9f9SBrooks Davis if (ifp == NULL) { 2804fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2805fc74a9f9SBrooks Davis error = ENXIO; 2806fc74a9f9SBrooks Davis goto fail; 2807fc74a9f9SBrooks Davis } 280895d67482SBill Paul ifp->if_softc = sc; 28099bf40edeSBrooks Davis if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 281095d67482SBill Paul ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 281195d67482SBill Paul ifp->if_ioctl = bge_ioctl; 281295d67482SBill Paul ifp->if_start = bge_start; 281395d67482SBill Paul ifp->if_init = bge_init; 28144d665c4dSDag-Erling Smørgrav ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 28154d665c4dSDag-Erling Smørgrav IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 28164d665c4dSDag-Erling Smørgrav IFQ_SET_READY(&ifp->if_snd); 281795d67482SBill Paul ifp->if_hwassist = BGE_CSUM_FEATURES; 2818d375e524SGleb Smirnoff ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 28194e35d186SJung-uk Kim IFCAP_VLAN_MTU; 2820ca3f1187SPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_TSO) != 0) { 2821ca3f1187SPyun YongHyeon ifp->if_hwassist |= CSUM_TSO; 2822ca3f1187SPyun YongHyeon ifp->if_capabilities |= IFCAP_TSO4; 2823ca3f1187SPyun YongHyeon } 28244e35d186SJung-uk Kim #ifdef IFCAP_VLAN_HWCSUM 28254e35d186SJung-uk Kim ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 28264e35d186SJung-uk Kim #endif 282795d67482SBill Paul ifp->if_capenable = ifp->if_capabilities; 282875719184SGleb Smirnoff #ifdef DEVICE_POLLING 282975719184SGleb Smirnoff ifp->if_capabilities |= IFCAP_POLLING; 283075719184SGleb Smirnoff #endif 283195d67482SBill Paul 2832a1d52896SBill Paul /* 2833d375e524SGleb Smirnoff * 5700 B0 chips do not support checksumming correctly due 2834d375e524SGleb Smirnoff * to hardware bugs. 2835d375e524SGleb Smirnoff */ 2836d375e524SGleb Smirnoff if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2837d375e524SGleb Smirnoff ifp->if_capabilities &= ~IFCAP_HWCSUM; 28384d3a629cSPyun YongHyeon ifp->if_capenable &= ~IFCAP_HWCSUM; 2839d375e524SGleb Smirnoff ifp->if_hwassist = 0; 2840d375e524SGleb Smirnoff } 2841d375e524SGleb Smirnoff 2842d375e524SGleb Smirnoff /* 2843a1d52896SBill Paul * Figure out what sort of media we have by checking the 284441abcc1bSPaul Saab * hardware config word in the first 32k of NIC internal memory, 284541abcc1bSPaul Saab * or fall back to examining the EEPROM if necessary. 284641abcc1bSPaul Saab * Note: on some BCM5700 cards, this value appears to be unset. 284741abcc1bSPaul Saab * If that's the case, we have to rely on identifying the NIC 284841abcc1bSPaul Saab * by its PCI subsystem ID, as we do below for the SysKonnect 284941abcc1bSPaul Saab * SK-9D41. 
2850a1d52896SBill Paul */ 285141abcc1bSPaul Saab if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 285241abcc1bSPaul Saab hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 28535fea260fSMarius Strobl else if ((sc->bge_flags & BGE_FLAG_EADDR) && 28545fea260fSMarius Strobl (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 2855f6789fbaSPyun YongHyeon if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2856f6789fbaSPyun YongHyeon sizeof(hwcfg))) { 2857fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to read EEPROM\n"); 2858f6789fbaSPyun YongHyeon error = ENXIO; 2859f6789fbaSPyun YongHyeon goto fail; 2860f6789fbaSPyun YongHyeon } 286141abcc1bSPaul Saab hwcfg = ntohl(hwcfg); 286241abcc1bSPaul Saab } 286341abcc1bSPaul Saab 286441abcc1bSPaul Saab if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2865652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_TBI; 2866a1d52896SBill Paul 286795d67482SBill Paul /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 28680c8aa4eaSJung-uk Kim if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 2869652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_TBI; 287095d67482SBill Paul 2871652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 28720c8aa4eaSJung-uk Kim ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 28730c8aa4eaSJung-uk Kim bge_ifmedia_sts); 28740c8aa4eaSJung-uk Kim ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); 28756098821cSJung-uk Kim ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 28766098821cSJung-uk Kim 0, NULL); 287795d67482SBill Paul ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 287895d67482SBill Paul ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 2879da3003f0SBill Paul sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 288095d67482SBill Paul } else { 288195d67482SBill Paul /* 28828cb1383cSDoug Ambrisko * Do transceiver setup and tell the firmware the 28838cb1383cSDoug Ambrisko * driver is down so we can try to get access the 28848cb1383cSDoug Ambrisko * probe if ASF is running. Retry a couple of times 28858cb1383cSDoug Ambrisko * if we get a conflict with the ASF firmware accessing 28868cb1383cSDoug Ambrisko * the PHY. 
288795d67482SBill Paul */ 28884012d104SMarius Strobl trys = 0; 28898cb1383cSDoug Ambrisko BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 28908cb1383cSDoug Ambrisko again: 28918cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 28928cb1383cSDoug Ambrisko 289395d67482SBill Paul if (mii_phy_probe(dev, &sc->bge_miibus, 289495d67482SBill Paul bge_ifmedia_upd, bge_ifmedia_sts)) { 28958cb1383cSDoug Ambrisko if (trys++ < 4) { 28968cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "Try again\n"); 28974e35d186SJung-uk Kim bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, 28984e35d186SJung-uk Kim BMCR_RESET); 28998cb1383cSDoug Ambrisko goto again; 29008cb1383cSDoug Ambrisko } 29018cb1383cSDoug Ambrisko 2902fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "MII without any PHY!\n"); 290395d67482SBill Paul error = ENXIO; 290495d67482SBill Paul goto fail; 290595d67482SBill Paul } 29068cb1383cSDoug Ambrisko 29078cb1383cSDoug Ambrisko /* 29088cb1383cSDoug Ambrisko * Now tell the firmware we are going up after probing the PHY 29098cb1383cSDoug Ambrisko */ 29108cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 29118cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 291295d67482SBill Paul } 291395d67482SBill Paul 291495d67482SBill Paul /* 2915e255b776SJohn Polstra * When using the BCM5701 in PCI-X mode, data corruption has 2916e255b776SJohn Polstra * been observed in the first few bytes of some received packets. 2917e255b776SJohn Polstra * Aligning the packet buffer in memory eliminates the corruption. 2918e255b776SJohn Polstra * Unfortunately, this misaligns the packet payloads. On platforms 2919e255b776SJohn Polstra * which do not support unaligned accesses, we will realign the 2920e255b776SJohn Polstra * payloads by copying the received packets. 2921e255b776SJohn Polstra */ 2922652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2923652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_PCIX) 2924652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2925e255b776SJohn Polstra 2926e255b776SJohn Polstra /* 292795d67482SBill Paul * Call MI attach routine. 292895d67482SBill Paul */ 2929fc74a9f9SBrooks Davis ether_ifattach(ifp, eaddr); 2930b74e67fbSGleb Smirnoff callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); 29310f9bd73bSSam Leffler 293261ccb9daSPyun YongHyeon /* Tell upper layer we support long frames. */ 293361ccb9daSPyun YongHyeon ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 293461ccb9daSPyun YongHyeon 29350f9bd73bSSam Leffler /* 29360f9bd73bSSam Leffler * Hookup IRQ last. 29370f9bd73bSSam Leffler */ 29384e35d186SJung-uk Kim #if __FreeBSD_version > 700030 2939dfe0df9aSPyun YongHyeon if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { 2940dfe0df9aSPyun YongHyeon /* Take advantage of single-shot MSI. 
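 * In this mode bge_msi_intr() is installed as a filter that only
 * enqueues bge_intr_task; the fast taskqueue created below then does
 * the actual interrupt processing.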
*/ 29417e6acdf1SPyun YongHyeon CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & 29427e6acdf1SPyun YongHyeon ~BGE_MSIMODE_ONE_SHOT_DISABLE); 2943dfe0df9aSPyun YongHyeon sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, 2944dfe0df9aSPyun YongHyeon taskqueue_thread_enqueue, &sc->bge_tq); 2945dfe0df9aSPyun YongHyeon if (sc->bge_tq == NULL) { 2946dfe0df9aSPyun YongHyeon device_printf(dev, "could not create taskqueue.\n"); 2947dfe0df9aSPyun YongHyeon ether_ifdetach(ifp); 2948dfe0df9aSPyun YongHyeon error = ENXIO; 2949dfe0df9aSPyun YongHyeon goto fail; 2950dfe0df9aSPyun YongHyeon } 2951dfe0df9aSPyun YongHyeon taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq", 2952dfe0df9aSPyun YongHyeon device_get_nameunit(sc->bge_dev)); 2953dfe0df9aSPyun YongHyeon error = bus_setup_intr(dev, sc->bge_irq, 2954dfe0df9aSPyun YongHyeon INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc, 2955dfe0df9aSPyun YongHyeon &sc->bge_intrhand); 2956dfe0df9aSPyun YongHyeon if (error) 2957dfe0df9aSPyun YongHyeon ether_ifdetach(ifp); 2958dfe0df9aSPyun YongHyeon } else 2959dfe0df9aSPyun YongHyeon error = bus_setup_intr(dev, sc->bge_irq, 2960dfe0df9aSPyun YongHyeon INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, 2961dfe0df9aSPyun YongHyeon &sc->bge_intrhand); 29624e35d186SJung-uk Kim #else 29634e35d186SJung-uk Kim error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 29644e35d186SJung-uk Kim bge_intr, sc, &sc->bge_intrhand); 29654e35d186SJung-uk Kim #endif 29660f9bd73bSSam Leffler 29670f9bd73bSSam Leffler if (error) { 2968fc74a9f9SBrooks Davis bge_detach(dev); 2969fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "couldn't set up irq\n"); 29700f9bd73bSSam Leffler } 297195d67482SBill Paul 29726f8718a3SScott Long bge_add_sysctls(sc); 29736f8718a3SScott Long 297408013fd3SMarius Strobl return (0); 297508013fd3SMarius Strobl 297695d67482SBill Paul fail: 297708013fd3SMarius Strobl bge_release_resources(sc); 297808013fd3SMarius Strobl 297995d67482SBill Paul return (error); 298095d67482SBill Paul } 298195d67482SBill Paul 298295d67482SBill Paul static int 29833f74909aSGleb Smirnoff bge_detach(device_t dev) 298495d67482SBill Paul { 298595d67482SBill Paul struct bge_softc *sc; 298695d67482SBill Paul struct ifnet *ifp; 298795d67482SBill Paul 298895d67482SBill Paul sc = device_get_softc(dev); 2989fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 299095d67482SBill Paul 299175719184SGleb Smirnoff #ifdef DEVICE_POLLING 299275719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) 299375719184SGleb Smirnoff ether_poll_deregister(ifp); 299475719184SGleb Smirnoff #endif 299575719184SGleb Smirnoff 29960f9bd73bSSam Leffler BGE_LOCK(sc); 299795d67482SBill Paul bge_stop(sc); 299895d67482SBill Paul bge_reset(sc); 29990f9bd73bSSam Leffler BGE_UNLOCK(sc); 30000f9bd73bSSam Leffler 30015dda8085SOleg Bulyzhin callout_drain(&sc->bge_stat_ch); 30025dda8085SOleg Bulyzhin 3003dfe0df9aSPyun YongHyeon if (sc->bge_tq) 3004dfe0df9aSPyun YongHyeon taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); 30050f9bd73bSSam Leffler ether_ifdetach(ifp); 300695d67482SBill Paul 3007652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 300895d67482SBill Paul ifmedia_removeall(&sc->bge_ifmedia); 300995d67482SBill Paul } else { 301095d67482SBill Paul bus_generic_detach(dev); 301195d67482SBill Paul device_delete_child(dev, sc->bge_miibus); 301295d67482SBill Paul } 301395d67482SBill Paul 301495d67482SBill Paul bge_release_resources(sc); 301595d67482SBill Paul 301695d67482SBill Paul return (0); 301795d67482SBill Paul } 301895d67482SBill Paul 
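/*
 * Release the resources acquired in bge_attach().  Every release is
 * guarded by a NULL or flag check, so it is also safe to call this
 * from the attach failure path with a partially initialized softc.
 */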
301995d67482SBill Paul static void 30203f74909aSGleb Smirnoff bge_release_resources(struct bge_softc *sc) 302195d67482SBill Paul { 302295d67482SBill Paul device_t dev; 302395d67482SBill Paul 302495d67482SBill Paul dev = sc->bge_dev; 302595d67482SBill Paul 3026dfe0df9aSPyun YongHyeon if (sc->bge_tq != NULL) 3027dfe0df9aSPyun YongHyeon taskqueue_free(sc->bge_tq); 3028dfe0df9aSPyun YongHyeon 302995d67482SBill Paul if (sc->bge_intrhand != NULL) 303095d67482SBill Paul bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 303195d67482SBill Paul 303295d67482SBill Paul if (sc->bge_irq != NULL) 3033724bd939SJohn Polstra bus_release_resource(dev, SYS_RES_IRQ, 3034724bd939SJohn Polstra sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq); 3035724bd939SJohn Polstra 3036724bd939SJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) 3037724bd939SJohn Polstra pci_release_msi(dev); 303895d67482SBill Paul 303995d67482SBill Paul if (sc->bge_res != NULL) 304095d67482SBill Paul bus_release_resource(dev, SYS_RES_MEMORY, 304195d67482SBill Paul BGE_PCI_BAR0, sc->bge_res); 304295d67482SBill Paul 3043ad61f896SRuslan Ermilov if (sc->bge_ifp != NULL) 3044ad61f896SRuslan Ermilov if_free(sc->bge_ifp); 3045ad61f896SRuslan Ermilov 3046f41ac2beSBill Paul bge_dma_free(sc); 304795d67482SBill Paul 30480f9bd73bSSam Leffler if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 30490f9bd73bSSam Leffler BGE_LOCK_DESTROY(sc); 305095d67482SBill Paul } 305195d67482SBill Paul 30528cb1383cSDoug Ambrisko static int 30533f74909aSGleb Smirnoff bge_reset(struct bge_softc *sc) 305495d67482SBill Paul { 305595d67482SBill Paul device_t dev; 30565fea260fSMarius Strobl uint32_t cachesize, command, pcistate, reset, val; 30576f8718a3SScott Long void (*write_op)(struct bge_softc *, int, int); 30580aaf1057SPyun YongHyeon uint16_t devctl; 30595fea260fSMarius Strobl int i; 306095d67482SBill Paul 306195d67482SBill Paul dev = sc->bge_dev; 306295d67482SBill Paul 306338cc658fSJohn Baldwin if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 306438cc658fSJohn Baldwin (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 30656f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) 30666f8718a3SScott Long write_op = bge_writemem_direct; 30676f8718a3SScott Long else 30686f8718a3SScott Long write_op = bge_writemem_ind; 30699ba784dbSScott Long } else 30706f8718a3SScott Long write_op = bge_writereg_ind; 30716f8718a3SScott Long 307295d67482SBill Paul /* Save some important PCI state. */ 307395d67482SBill Paul cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 307495d67482SBill Paul command = pci_read_config(dev, BGE_PCI_CMD, 4); 307595d67482SBill Paul pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 307695d67482SBill Paul 307795d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL, 307895d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3079e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 308095d67482SBill Paul 30816f8718a3SScott Long /* Disable fastboot on controllers that support it. */ 30826f8718a3SScott Long if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 3083a5779553SStanislav Sedov BGE_IS_5755_PLUS(sc)) { 30846f8718a3SScott Long if (bootverbose) 30859ba784dbSScott Long device_printf(sc->bge_dev, "Disabling fastboot\n"); 30866f8718a3SScott Long CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 30876f8718a3SScott Long } 30886f8718a3SScott Long 30896f8718a3SScott Long /* 30906f8718a3SScott Long * Write the magic number to SRAM at offset 0xB50. 
30916f8718a3SScott Long * When firmware finishes its initialization it will 30926f8718a3SScott Long * write ~BGE_MAGIC_NUMBER to the same location. 30936f8718a3SScott Long */ 30946f8718a3SScott Long bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 30956f8718a3SScott Long 30960c8aa4eaSJung-uk Kim reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 3097e53d81eeSPaul Saab 3098e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3099652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 31000c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ 31010c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, 0x7E2C, 0x20); 3102e53d81eeSPaul Saab if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3103e53d81eeSPaul Saab /* Prevent PCIE link training during global reset */ 31040c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 31050c8aa4eaSJung-uk Kim reset |= 1 << 29; 3106e53d81eeSPaul Saab } 3107e53d81eeSPaul Saab } 3108e53d81eeSPaul Saab 310921c9e407SDavid Christensen /* 31106f8718a3SScott Long * Set GPHY Power Down Override to leave GPHY 31116f8718a3SScott Long * powered up in D0 uninitialized. 31126f8718a3SScott Long */ 31135345bad0SScott Long if (BGE_IS_5705_PLUS(sc)) 31146f8718a3SScott Long reset |= 0x04000000; 31156f8718a3SScott Long 311695d67482SBill Paul /* Issue global reset */ 31176f8718a3SScott Long write_op(sc, BGE_MISC_CFG, reset); 311895d67482SBill Paul 311938cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 31205fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_STATUS); 312138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_STATUS, 31225fea260fSMarius Strobl val | BGE_VCPU_STATUS_DRV_RESET); 31235fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 312438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 31255fea260fSMarius Strobl val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 312638cc658fSJohn Baldwin } 312738cc658fSJohn Baldwin 312895d67482SBill Paul DELAY(1000); 312995d67482SBill Paul 3130e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3131652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 3132e53d81eeSPaul Saab if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3133e53d81eeSPaul Saab DELAY(500000); /* wait for link training to complete */ 31345fea260fSMarius Strobl val = pci_read_config(dev, 0xC4, 4); 31355fea260fSMarius Strobl pci_write_config(dev, 0xC4, val | (1 << 15), 4); 3136e53d81eeSPaul Saab } 31370aaf1057SPyun YongHyeon devctl = pci_read_config(dev, 31380aaf1057SPyun YongHyeon sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2); 31390aaf1057SPyun YongHyeon /* Clear enable no snoop and disable relaxed ordering. */ 31400aaf1057SPyun YongHyeon devctl &= ~(0x0010 | 0x0800); 31410aaf1057SPyun YongHyeon /* Set PCIE max payload size to 128. */ 31420aaf1057SPyun YongHyeon devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD; 31430aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 31440aaf1057SPyun YongHyeon devctl, 2); 31450aaf1057SPyun YongHyeon /* Clear error status. */ 31460aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA, 31470aaf1057SPyun YongHyeon 0, 2); 3148e53d81eeSPaul Saab } 3149e53d81eeSPaul Saab 31503f74909aSGleb Smirnoff /* Reset some of the PCI state that got zapped by reset. 
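 * The writes below restore BGE_PCI_MISC_CTL to the same settings used
 * before the reset and put back the saved cache line size and command
 * register.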
*/ 315195d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL, 315295d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3153e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 315495d67482SBill Paul pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 315595d67482SBill Paul pci_write_config(dev, BGE_PCI_CMD, command, 4); 31560c8aa4eaSJung-uk Kim write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 315795d67482SBill Paul 3158bf6ef57aSJohn Polstra /* Re-enable MSI, if neccesary, and enable the memory arbiter. */ 31594c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) { 3160bf6ef57aSJohn Polstra /* This chip disables MSI on reset. */ 3161bf6ef57aSJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) { 31620aaf1057SPyun YongHyeon val = pci_read_config(dev, 31630aaf1057SPyun YongHyeon sc->bge_msicap + PCIR_MSI_CTRL, 2); 31640aaf1057SPyun YongHyeon pci_write_config(dev, 31650aaf1057SPyun YongHyeon sc->bge_msicap + PCIR_MSI_CTRL, 3166bf6ef57aSJohn Polstra val | PCIM_MSICTRL_MSI_ENABLE, 2); 3167bf6ef57aSJohn Polstra val = CSR_READ_4(sc, BGE_MSI_MODE); 3168bf6ef57aSJohn Polstra CSR_WRITE_4(sc, BGE_MSI_MODE, 3169bf6ef57aSJohn Polstra val | BGE_MSIMODE_ENABLE); 3170bf6ef57aSJohn Polstra } 31714c0da0ffSGleb Smirnoff val = CSR_READ_4(sc, BGE_MARB_MODE); 31724c0da0ffSGleb Smirnoff CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 31734c0da0ffSGleb Smirnoff } else 3174a7b0c314SPaul Saab CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3175a7b0c314SPaul Saab 317638cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 317738cc658fSJohn Baldwin for (i = 0; i < BGE_TIMEOUT; i++) { 317838cc658fSJohn Baldwin val = CSR_READ_4(sc, BGE_VCPU_STATUS); 317938cc658fSJohn Baldwin if (val & BGE_VCPU_STATUS_INIT_DONE) 318038cc658fSJohn Baldwin break; 318138cc658fSJohn Baldwin DELAY(100); 318238cc658fSJohn Baldwin } 318338cc658fSJohn Baldwin if (i == BGE_TIMEOUT) { 318438cc658fSJohn Baldwin device_printf(sc->bge_dev, "reset timed out\n"); 318538cc658fSJohn Baldwin return (1); 318638cc658fSJohn Baldwin } 318738cc658fSJohn Baldwin } else { 318895d67482SBill Paul /* 31896f8718a3SScott Long * Poll until we see the 1's complement of the magic number. 319008013fd3SMarius Strobl * This indicates that the firmware initialization is complete. 31915fea260fSMarius Strobl * We expect this to fail if no chip containing the Ethernet 31925fea260fSMarius Strobl * address is fitted though. 319395d67482SBill Paul */ 319495d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 3195d5d23857SJung-uk Kim DELAY(10); 319695d67482SBill Paul val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 319795d67482SBill Paul if (val == ~BGE_MAGIC_NUMBER) 319895d67482SBill Paul break; 319995d67482SBill Paul } 320095d67482SBill Paul 32015fea260fSMarius Strobl if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) 32029ba784dbSScott Long device_printf(sc->bge_dev, "firmware handshake timed out, " 32039ba784dbSScott Long "found 0x%08x\n", val); 320438cc658fSJohn Baldwin } 320595d67482SBill Paul 320695d67482SBill Paul /* 320795d67482SBill Paul * XXX Wait for the value of the PCISTATE register to 320895d67482SBill Paul * return to its original pre-reset state. This is a 320995d67482SBill Paul * fairly good indicator of reset completion. If we don't 321095d67482SBill Paul * wait for the reset to fully complete, trying to read 321195d67482SBill Paul * from the device's non-PCI registers may yield garbage 321295d67482SBill Paul * results. 
321395d67482SBill Paul */ 321495d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 321595d67482SBill Paul if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 321695d67482SBill Paul break; 321795d67482SBill Paul DELAY(10); 321895d67482SBill Paul } 321995d67482SBill Paul 32206f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) { 32210c8aa4eaSJung-uk Kim reset = bge_readmem_ind(sc, 0x7C00); 32220c8aa4eaSJung-uk Kim bge_writemem_ind(sc, 0x7C00, reset | (1 << 25)); 32236f8718a3SScott Long } 32246f8718a3SScott Long 32253f74909aSGleb Smirnoff /* Fix up byte swapping. */ 3226e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 322795d67482SBill Paul BGE_MODECTL_BYTESWAP_DATA); 322895d67482SBill Paul 32298cb1383cSDoug Ambrisko /* Tell the ASF firmware we are up */ 32308cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 32318cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 32328cb1383cSDoug Ambrisko 323395d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 323495d67482SBill Paul 3235da3003f0SBill Paul /* 3236da3003f0SBill Paul * The 5704 in TBI mode apparently needs some special 3237da3003f0SBill Paul * adjustment to insure the SERDES drive level is set 3238da3003f0SBill Paul * to 1.2V. 3239da3003f0SBill Paul */ 3240652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 3241652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_TBI) { 32425fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_SERDES_CFG); 32435fea260fSMarius Strobl val = (val & ~0xFFF) | 0x880; 32445fea260fSMarius Strobl CSR_WRITE_4(sc, BGE_SERDES_CFG, val); 3245da3003f0SBill Paul } 3246da3003f0SBill Paul 3247e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3248652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE && 3249652ae483SGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 32505fea260fSMarius Strobl val = CSR_READ_4(sc, 0x7C00); 32515fea260fSMarius Strobl CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); 3252e53d81eeSPaul Saab } 325395d67482SBill Paul DELAY(10000); 32548cb1383cSDoug Ambrisko 32558cb1383cSDoug Ambrisko return(0); 325695d67482SBill Paul } 325795d67482SBill Paul 325895d67482SBill Paul /* 325995d67482SBill Paul * Frame reception handling. This is called if there's a frame 326095d67482SBill Paul * on the receive return list. 326195d67482SBill Paul * 326295d67482SBill Paul * Note: we have to be able to handle two possibilities here: 32631be6acb7SGleb Smirnoff * 1) the frame is from the jumbo receive ring 326495d67482SBill Paul * 2) the frame is from the standard receive ring 326595d67482SBill Paul */ 326695d67482SBill Paul 32671abcdbd1SAttilio Rao static int 3268dfe0df9aSPyun YongHyeon bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck) 326995d67482SBill Paul { 327095d67482SBill Paul struct ifnet *ifp; 32711abcdbd1SAttilio Rao int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; 3272b9c05fa5SPyun YongHyeon uint16_t rx_cons; 327395d67482SBill Paul 32747f21e273SStanislav Sedov rx_cons = sc->bge_rx_saved_considx; 32750f9bd73bSSam Leffler 32763f74909aSGleb Smirnoff /* Nothing to do. 
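 * The caller's producer index (taken from the status block) matches
 * our saved consumer index, so no new frames have been posted to the
 * return ring.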
*/ 32777f21e273SStanislav Sedov if (rx_cons == rx_prod) 32781abcdbd1SAttilio Rao return (rx_npkts); 3279cfcb5025SOleg Bulyzhin 3280fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 328195d67482SBill Paul 3282f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 3283e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 3284f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 328515eda801SStanislav Sedov sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); 3286c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 3287c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) 3288f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 328915eda801SStanislav Sedov sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); 3290f41ac2beSBill Paul 32917f21e273SStanislav Sedov while (rx_cons != rx_prod) { 329295d67482SBill Paul struct bge_rx_bd *cur_rx; 32933f74909aSGleb Smirnoff uint32_t rxidx; 329495d67482SBill Paul struct mbuf *m = NULL; 32953f74909aSGleb Smirnoff uint16_t vlan_tag = 0; 329695d67482SBill Paul int have_tag = 0; 329795d67482SBill Paul 329875719184SGleb Smirnoff #ifdef DEVICE_POLLING 329975719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 330075719184SGleb Smirnoff if (sc->rxcycles <= 0) 330175719184SGleb Smirnoff break; 330275719184SGleb Smirnoff sc->rxcycles--; 330375719184SGleb Smirnoff } 330475719184SGleb Smirnoff #endif 330575719184SGleb Smirnoff 33067f21e273SStanislav Sedov cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; 330795d67482SBill Paul 330895d67482SBill Paul rxidx = cur_rx->bge_idx; 33097f21e273SStanislav Sedov BGE_INC(rx_cons, sc->bge_return_ring_cnt); 331095d67482SBill Paul 3311cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING && 3312cb2eacc7SYaroslav Tykhiy cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 331395d67482SBill Paul have_tag = 1; 331495d67482SBill Paul vlan_tag = cur_rx->bge_vlan_tag; 331595d67482SBill Paul } 331695d67482SBill Paul 331795d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 331895d67482SBill Paul jumbocnt++; 3319943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 332095d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3321943787f3SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 332295d67482SBill Paul continue; 332395d67482SBill Paul } 3324943787f3SPyun YongHyeon if (bge_newbuf_jumbo(sc, rxidx) != 0) { 3325943787f3SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3326943787f3SPyun YongHyeon ifp->if_iqdrops++; 332795d67482SBill Paul continue; 332895d67482SBill Paul } 332903e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 333095d67482SBill Paul } else { 333195d67482SBill Paul stdcnt++; 333295d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3333943787f3SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 333495d67482SBill Paul continue; 333595d67482SBill Paul } 3336943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3337943787f3SPyun YongHyeon if (bge_newbuf_std(sc, rxidx) != 0) { 3338943787f3SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3339943787f3SPyun YongHyeon ifp->if_iqdrops++; 334095d67482SBill Paul continue; 334195d67482SBill Paul } 334203e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 334395d67482SBill Paul } 334495d67482SBill Paul 334595d67482SBill Paul ifp->if_ipackets++; 3346e65bed95SPyun YongHyeon #ifndef __NO_STRICT_ALIGNMENT 
3347e255b776SJohn Polstra /* 3348e65bed95SPyun YongHyeon * For architectures with strict alignment we must make sure 3349e65bed95SPyun YongHyeon * the payload is aligned. 3350e255b776SJohn Polstra */ 3351652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 3352e255b776SJohn Polstra bcopy(m->m_data, m->m_data + ETHER_ALIGN, 3353e255b776SJohn Polstra cur_rx->bge_len); 3354e255b776SJohn Polstra m->m_data += ETHER_ALIGN; 3355e255b776SJohn Polstra } 3356e255b776SJohn Polstra #endif 3357473851baSPaul Saab m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 335895d67482SBill Paul m->m_pkthdr.rcvif = ifp; 335995d67482SBill Paul 3360b874fdd4SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_RXCSUM) { 336178178cd1SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 336295d67482SBill Paul m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 33630c8aa4eaSJung-uk Kim if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) 33640c8aa4eaSJung-uk Kim m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 336578178cd1SGleb Smirnoff } 3366d375e524SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3367d375e524SGleb Smirnoff m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 336895d67482SBill Paul m->m_pkthdr.csum_data = 336995d67482SBill Paul cur_rx->bge_tcp_udp_csum; 3370ee7ef91cSOleg Bulyzhin m->m_pkthdr.csum_flags |= 3371ee7ef91cSOleg Bulyzhin CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 337295d67482SBill Paul } 337395d67482SBill Paul } 337495d67482SBill Paul 337595d67482SBill Paul /* 3376673d9191SSam Leffler * If we received a packet with a vlan tag, 3377673d9191SSam Leffler * attach that information to the packet. 337895d67482SBill Paul */ 3379d147662cSGleb Smirnoff if (have_tag) { 33804e35d186SJung-uk Kim #if __FreeBSD_version > 700022 338178ba57b9SAndre Oppermann m->m_pkthdr.ether_vtag = vlan_tag; 338278ba57b9SAndre Oppermann m->m_flags |= M_VLANTAG; 33834e35d186SJung-uk Kim #else 33844e35d186SJung-uk Kim VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); 33854e35d186SJung-uk Kim if (m == NULL) 33864e35d186SJung-uk Kim continue; 33874e35d186SJung-uk Kim #endif 3388d147662cSGleb Smirnoff } 338995d67482SBill Paul 3390dfe0df9aSPyun YongHyeon if (holdlck != 0) { 33910f9bd73bSSam Leffler BGE_UNLOCK(sc); 3392673d9191SSam Leffler (*ifp->if_input)(ifp, m); 33930f9bd73bSSam Leffler BGE_LOCK(sc); 3394dfe0df9aSPyun YongHyeon } else 3395dfe0df9aSPyun YongHyeon (*ifp->if_input)(ifp, m); 3396d4da719cSAttilio Rao rx_npkts++; 339725e13e68SXin LI 339825e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 33998cf7d13dSAttilio Rao return (rx_npkts); 340095d67482SBill Paul } 340195d67482SBill Paul 340215eda801SStanislav Sedov bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 340315eda801SStanislav Sedov sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); 3404e65bed95SPyun YongHyeon if (stdcnt > 0) 3405f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 3406e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 34074c0da0ffSGleb Smirnoff 3408c215fd77SPyun YongHyeon if (jumbocnt > 0) 3409f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 34104c0da0ffSGleb Smirnoff sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 3411f41ac2beSBill Paul 34127f21e273SStanislav Sedov sc->bge_rx_saved_considx = rx_cons; 341338cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 341495d67482SBill Paul if (stdcnt) 341538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 341695d67482SBill Paul if (jumbocnt) 341738cc658fSJohn 
Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3418f5a034f9SPyun YongHyeon #ifdef notyet 3419f5a034f9SPyun YongHyeon /* 3420f5a034f9SPyun YongHyeon * This register wraps very quickly under heavy packet drops. 3421f5a034f9SPyun YongHyeon * If you need correct statistics, you can enable this check. 3422f5a034f9SPyun YongHyeon */ 3423f5a034f9SPyun YongHyeon if (BGE_IS_5705_PLUS(sc)) 3424f5a034f9SPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3425f5a034f9SPyun YongHyeon #endif 34261abcdbd1SAttilio Rao return (rx_npkts); 342795d67482SBill Paul } 342895d67482SBill Paul 342995d67482SBill Paul static void 3430b9c05fa5SPyun YongHyeon bge_txeof(struct bge_softc *sc, uint16_t tx_cons) 343195d67482SBill Paul { 343295d67482SBill Paul struct bge_tx_bd *cur_tx = NULL; 343395d67482SBill Paul struct ifnet *ifp; 343495d67482SBill Paul 34350f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 34360f9bd73bSSam Leffler 34373f74909aSGleb Smirnoff /* Nothing to do. */ 3438b9c05fa5SPyun YongHyeon if (sc->bge_tx_saved_considx == tx_cons) 3439cfcb5025SOleg Bulyzhin return; 3440cfcb5025SOleg Bulyzhin 3441fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 344295d67482SBill Paul 3443e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 34445c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); 344595d67482SBill Paul /* 344695d67482SBill Paul * Go through our tx ring and free mbufs for those 344795d67482SBill Paul * frames that have been sent. 344895d67482SBill Paul */ 3449b9c05fa5SPyun YongHyeon while (sc->bge_tx_saved_considx != tx_cons) { 34503f74909aSGleb Smirnoff uint32_t idx = 0; 345195d67482SBill Paul 345295d67482SBill Paul idx = sc->bge_tx_saved_considx; 3453f41ac2beSBill Paul cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 345495d67482SBill Paul if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 345595d67482SBill Paul ifp->if_opackets++; 345695d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 34570ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 3458e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[idx], 3459e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 34600ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 3461f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[idx]); 3462e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[idx]); 3463e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[idx] = NULL; 346495d67482SBill Paul } 346595d67482SBill Paul sc->bge_txcnt--; 346695d67482SBill Paul BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 346795d67482SBill Paul } 346895d67482SBill Paul 346995d67482SBill Paul if (cur_tx != NULL) 347013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 34715b01e77cSBruce Evans if (sc->bge_txcnt == 0) 34725b01e77cSBruce Evans sc->bge_timer = 0; 347395d67482SBill Paul } 347495d67482SBill Paul 347575719184SGleb Smirnoff #ifdef DEVICE_POLLING 34761abcdbd1SAttilio Rao static int 347775719184SGleb Smirnoff bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 347875719184SGleb Smirnoff { 347975719184SGleb Smirnoff struct bge_softc *sc = ifp->if_softc; 3480b9c05fa5SPyun YongHyeon uint16_t rx_prod, tx_cons; 3481366454f2SOleg Bulyzhin uint32_t statusword; 34821abcdbd1SAttilio Rao int rx_npkts = 0; 348375719184SGleb Smirnoff 34843f74909aSGleb Smirnoff BGE_LOCK(sc); 34853f74909aSGleb Smirnoff if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 34863f74909aSGleb Smirnoff BGE_UNLOCK(sc); 34871abcdbd1SAttilio Rao return (rx_npkts); 34883f74909aSGleb Smirnoff } 348975719184SGleb 
Smirnoff 3490dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3491b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3492b9c05fa5SPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3493b9c05fa5SPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 3494b9c05fa5SPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 3495dab5cd05SOleg Bulyzhin 34963f74909aSGleb Smirnoff statusword = atomic_readandclear_32( 34973f74909aSGleb Smirnoff &sc->bge_ldata.bge_status_block->bge_status); 3498dab5cd05SOleg Bulyzhin 3499dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3500b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3501b9c05fa5SPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3502366454f2SOleg Bulyzhin 35030c8aa4eaSJung-uk Kim /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */ 3504366454f2SOleg Bulyzhin if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) 3505366454f2SOleg Bulyzhin sc->bge_link_evt++; 3506366454f2SOleg Bulyzhin 3507366454f2SOleg Bulyzhin if (cmd == POLL_AND_CHECK_STATUS) 3508366454f2SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 35094c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3510652ae483SGleb Smirnoff sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) 3511366454f2SOleg Bulyzhin bge_link_upd(sc); 3512366454f2SOleg Bulyzhin 3513366454f2SOleg Bulyzhin sc->rxcycles = count; 3514dfe0df9aSPyun YongHyeon rx_npkts = bge_rxeof(sc, rx_prod, 1); 351525e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 351625e13e68SXin LI BGE_UNLOCK(sc); 35178cf7d13dSAttilio Rao return (rx_npkts); 351825e13e68SXin LI } 3519b9c05fa5SPyun YongHyeon bge_txeof(sc, tx_cons); 3520366454f2SOleg Bulyzhin if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3521366454f2SOleg Bulyzhin bge_start_locked(ifp); 35223f74909aSGleb Smirnoff 35233f74909aSGleb Smirnoff BGE_UNLOCK(sc); 35241abcdbd1SAttilio Rao return (rx_npkts); 352575719184SGleb Smirnoff } 352675719184SGleb Smirnoff #endif /* DEVICE_POLLING */ 352775719184SGleb Smirnoff 3528dfe0df9aSPyun YongHyeon static int 3529dfe0df9aSPyun YongHyeon bge_msi_intr(void *arg) 3530dfe0df9aSPyun YongHyeon { 3531dfe0df9aSPyun YongHyeon struct bge_softc *sc; 3532dfe0df9aSPyun YongHyeon 3533dfe0df9aSPyun YongHyeon sc = (struct bge_softc *)arg; 3534dfe0df9aSPyun YongHyeon /* 3535dfe0df9aSPyun YongHyeon * This interrupt is not shared and controller already 3536dfe0df9aSPyun YongHyeon * disabled further interrupt. 3537dfe0df9aSPyun YongHyeon */ 3538dfe0df9aSPyun YongHyeon taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task); 3539dfe0df9aSPyun YongHyeon return (FILTER_HANDLED); 3540dfe0df9aSPyun YongHyeon } 3541dfe0df9aSPyun YongHyeon 3542dfe0df9aSPyun YongHyeon static void 3543dfe0df9aSPyun YongHyeon bge_intr_task(void *arg, int pending) 3544dfe0df9aSPyun YongHyeon { 3545dfe0df9aSPyun YongHyeon struct bge_softc *sc; 3546dfe0df9aSPyun YongHyeon struct ifnet *ifp; 3547dfe0df9aSPyun YongHyeon uint32_t status; 3548dfe0df9aSPyun YongHyeon uint16_t rx_prod, tx_cons; 3549dfe0df9aSPyun YongHyeon 3550dfe0df9aSPyun YongHyeon sc = (struct bge_softc *)arg; 3551dfe0df9aSPyun YongHyeon ifp = sc->bge_ifp; 3552dfe0df9aSPyun YongHyeon 3553dfe0df9aSPyun YongHyeon if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 3554dfe0df9aSPyun YongHyeon return; 3555dfe0df9aSPyun YongHyeon 3556dfe0df9aSPyun YongHyeon /* Get updated status block. 
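 * (bge_idx[0] of the status block carries the RX return ring producer
 * index and the TX consumer index that are copied out just below.)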
*/
3557dfe0df9aSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3558dfe0df9aSPyun YongHyeon sc->bge_cdata.bge_status_map,
3559dfe0df9aSPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3560dfe0df9aSPyun YongHyeon
3561dfe0df9aSPyun YongHyeon /* Save producer/consumer indexes. */
3562dfe0df9aSPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3563dfe0df9aSPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3564dfe0df9aSPyun YongHyeon status = sc->bge_ldata.bge_status_block->bge_status;
3565dfe0df9aSPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0;
3566dfe0df9aSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3567dfe0df9aSPyun YongHyeon sc->bge_cdata.bge_status_map,
3568dfe0df9aSPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3569dfe0df9aSPyun YongHyeon /* Let controller work. */
3570dfe0df9aSPyun YongHyeon bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3571dfe0df9aSPyun YongHyeon
3572dfe0df9aSPyun YongHyeon if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) {
3573dfe0df9aSPyun YongHyeon BGE_LOCK(sc);
3574dfe0df9aSPyun YongHyeon bge_link_upd(sc);
3575dfe0df9aSPyun YongHyeon BGE_UNLOCK(sc);
3576dfe0df9aSPyun YongHyeon }
3577dfe0df9aSPyun YongHyeon if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3578dfe0df9aSPyun YongHyeon /* Check RX return ring producer/consumer. */
3579dfe0df9aSPyun YongHyeon bge_rxeof(sc, rx_prod, 0);
3580dfe0df9aSPyun YongHyeon }
3581dfe0df9aSPyun YongHyeon if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3582dfe0df9aSPyun YongHyeon BGE_LOCK(sc);
3583dfe0df9aSPyun YongHyeon /* Check TX ring producer/consumer. */
3584dfe0df9aSPyun YongHyeon bge_txeof(sc, tx_cons);
3585dfe0df9aSPyun YongHyeon if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3586dfe0df9aSPyun YongHyeon bge_start_locked(ifp);
3587dfe0df9aSPyun YongHyeon BGE_UNLOCK(sc);
3588dfe0df9aSPyun YongHyeon }
3589dfe0df9aSPyun YongHyeon }
3590dfe0df9aSPyun YongHyeon
359195d67482SBill Paul static void
35923f74909aSGleb Smirnoff bge_intr(void *xsc)
359395d67482SBill Paul {
359495d67482SBill Paul struct bge_softc *sc;
359595d67482SBill Paul struct ifnet *ifp;
3596dab5cd05SOleg Bulyzhin uint32_t statusword;
3597b9c05fa5SPyun YongHyeon uint16_t rx_prod, tx_cons;
359895d67482SBill Paul
359995d67482SBill Paul sc = xsc;
3600f41ac2beSBill Paul
36010f9bd73bSSam Leffler BGE_LOCK(sc);
36020f9bd73bSSam Leffler
3603dab5cd05SOleg Bulyzhin ifp = sc->bge_ifp;
3604dab5cd05SOleg Bulyzhin
360575719184SGleb Smirnoff #ifdef DEVICE_POLLING
360675719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) {
360775719184SGleb Smirnoff BGE_UNLOCK(sc);
360875719184SGleb Smirnoff return;
360975719184SGleb Smirnoff }
361075719184SGleb Smirnoff #endif
361175719184SGleb Smirnoff
3612f30cbfc6SScott Long /*
3613b848e032SBruce Evans * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3614b848e032SBruce Evans * disable interrupts by writing nonzero like we used to, since with
3615b848e032SBruce Evans * our current organization this just gives complications and
3616b848e032SBruce Evans * pessimizations for re-enabling interrupts. We used to have races
3617b848e032SBruce Evans * instead of the necessary complications.
Disabling interrupts
3618b848e032SBruce Evans * would just reduce the chance of a status update while we are
3619b848e032SBruce Evans * running (by switching to the interrupt-mode coalescence
3620b848e032SBruce Evans * parameters), but this chance is already very low so it is more
3621b848e032SBruce Evans * efficient to get another interrupt than prevent it.
3622b848e032SBruce Evans *
3623b848e032SBruce Evans * We do the ack first to ensure another interrupt if there is a
3624b848e032SBruce Evans * status update after the ack. We don't check for the status
3625b848e032SBruce Evans * changing later because it is more efficient to get another
3626b848e032SBruce Evans * interrupt than prevent it, not quite as above (not checking is
3627b848e032SBruce Evans * a smaller optimization than not toggling the interrupt enable,
3628b848e032SBruce Evans * since checking doesn't involve PCI accesses and toggling requires
3629b848e032SBruce Evans * the status check). So toggling would probably be a pessimization
3630b848e032SBruce Evans * even with MSI. It would only be needed for using a task queue.
3631b848e032SBruce Evans */
363238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3633b848e032SBruce Evans
3634b848e032SBruce Evans /*
3635f30cbfc6SScott Long * Do the mandatory PCI flush as well as get the link status.
3636f30cbfc6SScott Long */
3637f30cbfc6SScott Long statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3638f41ac2beSBill Paul
3639f30cbfc6SScott Long /* Make sure the descriptor ring indexes are coherent. */
3640f30cbfc6SScott Long bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3641b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map,
3642b9c05fa5SPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3643b9c05fa5SPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
3644b9c05fa5SPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
3645b9c05fa5SPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0;
3646b9c05fa5SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3647b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map,
3648b9c05fa5SPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3649f30cbfc6SScott Long
36501f313773SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
36514c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3652f30cbfc6SScott Long statusword || sc->bge_link_evt)
3653dab5cd05SOleg Bulyzhin bge_link_upd(sc);
365495d67482SBill Paul
365513f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
36563f74909aSGleb Smirnoff /* Check RX return ring producer/consumer. */
3657dfe0df9aSPyun YongHyeon bge_rxeof(sc, rx_prod, 1);
365825e13e68SXin LI }
365995d67482SBill Paul
366025e13e68SXin LI if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
36613f74909aSGleb Smirnoff /* Check TX ring producer/consumer. */
3662b9c05fa5SPyun YongHyeon bge_txeof(sc, tx_cons);
366395d67482SBill Paul }
366495d67482SBill Paul
366513f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
366613f4c340SRobert Watson !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
36670f9bd73bSSam Leffler bge_start_locked(ifp);
36680f9bd73bSSam Leffler
36690f9bd73bSSam Leffler BGE_UNLOCK(sc);
367095d67482SBill Paul }
367195d67482SBill Paul
367295d67482SBill Paul static void
36738cb1383cSDoug Ambrisko bge_asf_driver_up(struct bge_softc *sc)
36748cb1383cSDoug Ambrisko {
36758cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) {
36768cb1383cSDoug Ambrisko /* Send ASF heartbeat approx.
every 2s */ 36778cb1383cSDoug Ambrisko if (sc->bge_asf_count) 36788cb1383cSDoug Ambrisko sc->bge_asf_count --; 36798cb1383cSDoug Ambrisko else { 36808cb1383cSDoug Ambrisko sc->bge_asf_count = 5; 36818cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 36828cb1383cSDoug Ambrisko BGE_FW_DRV_ALIVE); 36838cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 36848cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 36858cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 368639153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 36878cb1383cSDoug Ambrisko } 36888cb1383cSDoug Ambrisko } 36898cb1383cSDoug Ambrisko } 36908cb1383cSDoug Ambrisko 36918cb1383cSDoug Ambrisko static void 3692b74e67fbSGleb Smirnoff bge_tick(void *xsc) 36930f9bd73bSSam Leffler { 3694b74e67fbSGleb Smirnoff struct bge_softc *sc = xsc; 369595d67482SBill Paul struct mii_data *mii = NULL; 369695d67482SBill Paul 36970f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 369895d67482SBill Paul 36995dda8085SOleg Bulyzhin /* Synchronize with possible callout reset/stop. */ 37005dda8085SOleg Bulyzhin if (callout_pending(&sc->bge_stat_ch) || 37015dda8085SOleg Bulyzhin !callout_active(&sc->bge_stat_ch)) 37025dda8085SOleg Bulyzhin return; 37035dda8085SOleg Bulyzhin 37047ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 37050434d1b8SBill Paul bge_stats_update_regs(sc); 37060434d1b8SBill Paul else 370795d67482SBill Paul bge_stats_update(sc); 370895d67482SBill Paul 3709652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 371095d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 371182b67c01SOleg Bulyzhin /* 371282b67c01SOleg Bulyzhin * Do not touch PHY if we have link up. This could break 371382b67c01SOleg Bulyzhin * IPMI/ASF mode or produce extra input errors 371482b67c01SOleg Bulyzhin * (extra errors was reported for bcm5701 & bcm5704). 371582b67c01SOleg Bulyzhin */ 371682b67c01SOleg Bulyzhin if (!sc->bge_link) 371795d67482SBill Paul mii_tick(mii); 37187b97099dSOleg Bulyzhin } else { 37197b97099dSOleg Bulyzhin /* 37207b97099dSOleg Bulyzhin * Since in TBI mode auto-polling can't be used we should poll 37217b97099dSOleg Bulyzhin * link status manually. Here we register pending link event 37227b97099dSOleg Bulyzhin * and trigger interrupt. 37237b97099dSOleg Bulyzhin */ 37247b97099dSOleg Bulyzhin #ifdef DEVICE_POLLING 37253f74909aSGleb Smirnoff /* In polling mode we poll link state in bge_poll(). */ 37267b97099dSOleg Bulyzhin if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 37277b97099dSOleg Bulyzhin #endif 37287b97099dSOleg Bulyzhin { 37297b97099dSOleg Bulyzhin sc->bge_link_evt++; 37304f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 37314f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 37327b97099dSOleg Bulyzhin BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 37334f0794ffSBjoern A. Zeeb else 37344f0794ffSBjoern A. 
Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 37357b97099dSOleg Bulyzhin } 3736dab5cd05SOleg Bulyzhin } 373795d67482SBill Paul 37388cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 3739b74e67fbSGleb Smirnoff bge_watchdog(sc); 37408cb1383cSDoug Ambrisko 3741dab5cd05SOleg Bulyzhin callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 374295d67482SBill Paul } 374395d67482SBill Paul 374495d67482SBill Paul static void 37453f74909aSGleb Smirnoff bge_stats_update_regs(struct bge_softc *sc) 37460434d1b8SBill Paul { 37473f74909aSGleb Smirnoff struct ifnet *ifp; 37480434d1b8SBill Paul 3749fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 37500434d1b8SBill Paul 37516b037352SJung-uk Kim ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 37527e6e2507SJung-uk Kim offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 37537e6e2507SJung-uk Kim 3754e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 37556b037352SJung-uk Kim ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3756e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 37570434d1b8SBill Paul } 37580434d1b8SBill Paul 37590434d1b8SBill Paul static void 37603f74909aSGleb Smirnoff bge_stats_update(struct bge_softc *sc) 376195d67482SBill Paul { 376295d67482SBill Paul struct ifnet *ifp; 3763e907febfSPyun YongHyeon bus_size_t stats; 37647e6e2507SJung-uk Kim uint32_t cnt; /* current register value */ 376595d67482SBill Paul 3766fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 376795d67482SBill Paul 3768e907febfSPyun YongHyeon stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3769e907febfSPyun YongHyeon 3770e907febfSPyun YongHyeon #define READ_STAT(sc, stats, stat) \ 3771e907febfSPyun YongHyeon CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 377295d67482SBill Paul 37738634dfffSJung-uk Kim cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); 37746b037352SJung-uk Kim ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions); 37756fb34dd2SOleg Bulyzhin sc->bge_tx_collisions = cnt; 37766fb34dd2SOleg Bulyzhin 37776fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 37786b037352SJung-uk Kim ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards); 37796fb34dd2SOleg Bulyzhin sc->bge_rx_discards = cnt; 37806fb34dd2SOleg Bulyzhin 37816fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 37826b037352SJung-uk Kim ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards); 37836fb34dd2SOleg Bulyzhin sc->bge_tx_discards = cnt; 378495d67482SBill Paul 3785e907febfSPyun YongHyeon #undef READ_STAT 378695d67482SBill Paul } 378795d67482SBill Paul 378895d67482SBill Paul /* 3789d375e524SGleb Smirnoff * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3790d375e524SGleb Smirnoff * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3791d375e524SGleb Smirnoff * but when such padded frames employ the bge IP/TCP checksum offload, 3792d375e524SGleb Smirnoff * the hardware checksum assist gives incorrect results (possibly 3793d375e524SGleb Smirnoff * from incorporating its own padding into the UDP/TCP checksum; who knows). 3794d375e524SGleb Smirnoff * If we pad such runts with zeros, the onboard checksum comes out correct. 
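 * For illustration only (hypothetical sizes): a 42-byte runt would have
 * ETHER_MIN_NOPAD - 42 = 18 zero bytes appended, assuming the usual
 * ETHER_MIN_NOPAD of 60, so the checksum engine sees exactly the frame
 * that will go out on the wire.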
3795d375e524SGleb Smirnoff */ 3796d375e524SGleb Smirnoff static __inline int 3797d375e524SGleb Smirnoff bge_cksum_pad(struct mbuf *m) 3798d375e524SGleb Smirnoff { 3799d375e524SGleb Smirnoff int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 3800d375e524SGleb Smirnoff struct mbuf *last; 3801d375e524SGleb Smirnoff 3802d375e524SGleb Smirnoff /* If there's only the packet-header and we can pad there, use it. */ 3803d375e524SGleb Smirnoff if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 3804d375e524SGleb Smirnoff M_TRAILINGSPACE(m) >= padlen) { 3805d375e524SGleb Smirnoff last = m; 3806d375e524SGleb Smirnoff } else { 3807d375e524SGleb Smirnoff /* 3808d375e524SGleb Smirnoff * Walk packet chain to find last mbuf. We will either 3809d375e524SGleb Smirnoff * pad there, or append a new mbuf and pad it. 3810d375e524SGleb Smirnoff */ 3811d375e524SGleb Smirnoff for (last = m; last->m_next != NULL; last = last->m_next); 3812d375e524SGleb Smirnoff if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 3813d375e524SGleb Smirnoff /* Allocate new empty mbuf, pad it. Compact later. */ 3814d375e524SGleb Smirnoff struct mbuf *n; 3815d375e524SGleb Smirnoff 3816d375e524SGleb Smirnoff MGET(n, M_DONTWAIT, MT_DATA); 3817d375e524SGleb Smirnoff if (n == NULL) 3818d375e524SGleb Smirnoff return (ENOBUFS); 3819d375e524SGleb Smirnoff n->m_len = 0; 3820d375e524SGleb Smirnoff last->m_next = n; 3821d375e524SGleb Smirnoff last = n; 3822d375e524SGleb Smirnoff } 3823d375e524SGleb Smirnoff } 3824d375e524SGleb Smirnoff 3825d375e524SGleb Smirnoff /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 3826d375e524SGleb Smirnoff memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3827d375e524SGleb Smirnoff last->m_len += padlen; 3828d375e524SGleb Smirnoff m->m_pkthdr.len += padlen; 3829d375e524SGleb Smirnoff 3830d375e524SGleb Smirnoff return (0); 3831d375e524SGleb Smirnoff } 3832d375e524SGleb Smirnoff 3833ca3f1187SPyun YongHyeon static struct mbuf * 3834ca3f1187SPyun YongHyeon bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss) 3835ca3f1187SPyun YongHyeon { 3836ca3f1187SPyun YongHyeon struct ether_header *eh; 3837ca3f1187SPyun YongHyeon struct ip *ip; 3838ca3f1187SPyun YongHyeon struct tcphdr *tcp; 3839ca3f1187SPyun YongHyeon struct mbuf *n; 3840ca3f1187SPyun YongHyeon uint16_t hlen; 3841ca3f1187SPyun YongHyeon uint32_t ip_off, poff; 3842ca3f1187SPyun YongHyeon 3843ca3f1187SPyun YongHyeon if (M_WRITABLE(m) == 0) { 3844ca3f1187SPyun YongHyeon /* Get a writable copy. */ 3845ca3f1187SPyun YongHyeon n = m_dup(m, M_DONTWAIT); 3846ca3f1187SPyun YongHyeon m_freem(m); 3847ca3f1187SPyun YongHyeon if (n == NULL) 3848ca3f1187SPyun YongHyeon return (NULL); 3849ca3f1187SPyun YongHyeon m = n; 3850ca3f1187SPyun YongHyeon } 3851ca3f1187SPyun YongHyeon ip_off = sizeof(struct ether_header); 3852ca3f1187SPyun YongHyeon m = m_pullup(m, ip_off); 3853ca3f1187SPyun YongHyeon if (m == NULL) 3854ca3f1187SPyun YongHyeon return (NULL); 3855ca3f1187SPyun YongHyeon eh = mtod(m, struct ether_header *); 3856ca3f1187SPyun YongHyeon /* Check the existence of VLAN tag. 
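 * If one is present, ip_off is bumped to skip the 802.1Q encapsulation
 * before the IP header is located.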
*/
3857ca3f1187SPyun YongHyeon if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
3858ca3f1187SPyun YongHyeon ip_off = sizeof(struct ether_vlan_header);
3859ca3f1187SPyun YongHyeon m = m_pullup(m, ip_off);
3860ca3f1187SPyun YongHyeon if (m == NULL)
3861ca3f1187SPyun YongHyeon return (NULL);
3862ca3f1187SPyun YongHyeon }
3863ca3f1187SPyun YongHyeon m = m_pullup(m, ip_off + sizeof(struct ip));
3864ca3f1187SPyun YongHyeon if (m == NULL)
3865ca3f1187SPyun YongHyeon return (NULL);
3866ca3f1187SPyun YongHyeon ip = (struct ip *)(mtod(m, char *) + ip_off);
3867ca3f1187SPyun YongHyeon poff = ip_off + (ip->ip_hl << 2);
3868ca3f1187SPyun YongHyeon m = m_pullup(m, poff + sizeof(struct tcphdr));
3869ca3f1187SPyun YongHyeon if (m == NULL)
3870ca3f1187SPyun YongHyeon return (NULL);
3871ca3f1187SPyun YongHyeon tcp = (struct tcphdr *)(mtod(m, char *) + poff);
3872ca3f1187SPyun YongHyeon m = m_pullup(m, poff + sizeof(struct tcphdr) + tcp->th_off);
3873ca3f1187SPyun YongHyeon if (m == NULL)
3874ca3f1187SPyun YongHyeon return (NULL);
3875ca3f1187SPyun YongHyeon /*
3876ca3f1187SPyun YongHyeon * It seems the controller doesn't modify the IP length and TCP pseudo
3877ca3f1187SPyun YongHyeon * checksum. These checksums, computed by the upper stack, should be 0.
3878ca3f1187SPyun YongHyeon */
3879ca3f1187SPyun YongHyeon *mss = m->m_pkthdr.tso_segsz;
3880ca3f1187SPyun YongHyeon ip->ip_sum = 0;
3881ca3f1187SPyun YongHyeon ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
3882ca3f1187SPyun YongHyeon /* Clear the pseudo checksum computed by the TCP stack. */
3883ca3f1187SPyun YongHyeon tcp->th_sum = 0;
3884ca3f1187SPyun YongHyeon /*
3885ca3f1187SPyun YongHyeon * Broadcom controllers use different descriptor formats for
3886ca3f1187SPyun YongHyeon * TSO depending on the ASIC revision. Due to the TSO-capable
3887ca3f1187SPyun YongHyeon * firmware license issue and the lower performance of firmware-based
3888ca3f1187SPyun YongHyeon * TSO, we only support hardware-based TSO, which is applicable to
3889ca3f1187SPyun YongHyeon * BCM5755 or newer controllers. Hardware-based TSO uses the lower 11
3890ca3f1187SPyun YongHyeon * bits to store the MSS and the upper 5 bits to store the IP/TCP
3891ca3f1187SPyun YongHyeon * header length (including IP/TCP options). The header length
3892ca3f1187SPyun YongHyeon * is expressed in 32-bit units.
3893ca3f1187SPyun YongHyeon */
3894ca3f1187SPyun YongHyeon hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
3895ca3f1187SPyun YongHyeon *mss |= (hlen << 11);
3896ca3f1187SPyun YongHyeon return (m);
3897ca3f1187SPyun YongHyeon }
3898ca3f1187SPyun YongHyeon
3899d375e524SGleb Smirnoff /*
390095d67482SBill Paul * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
390195d67482SBill Paul * pointers to descriptors.
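 * When TSO is in use, the mss value placed in each descriptor was packed
 * by bge_setup_tso() above; as a worked example (illustrative numbers
 * only), a 20-byte IP header plus a 20-byte TCP header give
 * hlen = (20 + 20) >> 2 = 10, so the field holds the MSS with (10 << 11)
 * OR'd in.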
390295d67482SBill Paul */
390395d67482SBill Paul static int
3904676ad2c9SGleb Smirnoff bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
390595d67482SBill Paul {
39067e27542aSGleb Smirnoff bus_dma_segment_t segs[BGE_NSEG_NEW];
3907f41ac2beSBill Paul bus_dmamap_t map;
3908676ad2c9SGleb Smirnoff struct bge_tx_bd *d;
3909676ad2c9SGleb Smirnoff struct mbuf *m = *m_head;
39107e27542aSGleb Smirnoff uint32_t idx = *txidx;
3911ca3f1187SPyun YongHyeon uint16_t csum_flags, mss, vlan_tag;
39127e27542aSGleb Smirnoff int nsegs, i, error;
391395d67482SBill Paul
39146909dc43SGleb Smirnoff csum_flags = 0;
3915ca3f1187SPyun YongHyeon mss = 0;
3916ca3f1187SPyun YongHyeon vlan_tag = 0;
3917ca3f1187SPyun YongHyeon if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
3918ca3f1187SPyun YongHyeon *m_head = m = bge_setup_tso(sc, m, &mss);
3919ca3f1187SPyun YongHyeon if (*m_head == NULL)
3920ca3f1187SPyun YongHyeon return (ENOBUFS);
3921ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
3922ca3f1187SPyun YongHyeon BGE_TXBDFLAG_CPU_POST_DMA;
3923ca3f1187SPyun YongHyeon } else if ((m->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) != 0) {
39246909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & CSUM_IP)
39256909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_CSUM;
39266909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
39276909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
39286909dc43SGleb Smirnoff if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
39296909dc43SGleb Smirnoff (error = bge_cksum_pad(m)) != 0) {
39306909dc43SGleb Smirnoff m_freem(m);
39316909dc43SGleb Smirnoff *m_head = NULL;
39326909dc43SGleb Smirnoff return (error);
39336909dc43SGleb Smirnoff }
39346909dc43SGleb Smirnoff }
39356909dc43SGleb Smirnoff if (m->m_flags & M_LASTFRAG)
39366909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
39376909dc43SGleb Smirnoff else if (m->m_flags & M_FRAG)
39386909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG;
39396909dc43SGleb Smirnoff }
39406909dc43SGleb Smirnoff
3941d94f2b85SPyun YongHyeon if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0 &&
3942d94f2b85SPyun YongHyeon bge_forced_collapse > 0 && (sc->bge_flags & BGE_FLAG_PCIE) != 0 &&
3943d94f2b85SPyun YongHyeon m->m_next != NULL) {
3944d94f2b85SPyun YongHyeon /*
3945d94f2b85SPyun YongHyeon * Forcibly collapse mbuf chains to work around a hardware
3946d94f2b85SPyun YongHyeon * limitation: the device supports only a single outstanding
3947d94f2b85SPyun YongHyeon * DMA read operation.
3948d94f2b85SPyun YongHyeon */ 3949d94f2b85SPyun YongHyeon if (bge_forced_collapse == 1) 3950d94f2b85SPyun YongHyeon m = m_defrag(m, M_DONTWAIT); 3951d94f2b85SPyun YongHyeon else 3952d94f2b85SPyun YongHyeon m = m_collapse(m, M_DONTWAIT, bge_forced_collapse); 3953d94f2b85SPyun YongHyeon if (m == NULL) { 3954d94f2b85SPyun YongHyeon m_freem(*m_head); 3955d94f2b85SPyun YongHyeon *m_head = NULL; 3956d94f2b85SPyun YongHyeon return (ENOBUFS); 3957d94f2b85SPyun YongHyeon } 3958d94f2b85SPyun YongHyeon *m_head = m; 3959d94f2b85SPyun YongHyeon } 3960d94f2b85SPyun YongHyeon 39617e27542aSGleb Smirnoff map = sc->bge_cdata.bge_tx_dmamap[idx]; 39620ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, 3963676ad2c9SGleb Smirnoff &nsegs, BUS_DMA_NOWAIT); 39647e27542aSGleb Smirnoff if (error == EFBIG) { 39654eee14cbSMarius Strobl m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW); 3966676ad2c9SGleb Smirnoff if (m == NULL) { 3967676ad2c9SGleb Smirnoff m_freem(*m_head); 3968676ad2c9SGleb Smirnoff *m_head = NULL; 39697e27542aSGleb Smirnoff return (ENOBUFS); 39707e27542aSGleb Smirnoff } 3971676ad2c9SGleb Smirnoff *m_head = m; 39720ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, 39730ac56796SPyun YongHyeon m, segs, &nsegs, BUS_DMA_NOWAIT); 3974676ad2c9SGleb Smirnoff if (error) { 3975676ad2c9SGleb Smirnoff m_freem(m); 3976676ad2c9SGleb Smirnoff *m_head = NULL; 39777e27542aSGleb Smirnoff return (error); 39787e27542aSGleb Smirnoff } 3979676ad2c9SGleb Smirnoff } else if (error != 0) 3980676ad2c9SGleb Smirnoff return (error); 39817e27542aSGleb Smirnoff 3982167fdb62SPyun YongHyeon /* Check if we have enough free send BDs. */ 3983167fdb62SPyun YongHyeon if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { 39840ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); 398595d67482SBill Paul return (ENOBUFS); 39867e27542aSGleb Smirnoff } 39877e27542aSGleb Smirnoff 39880ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3989e65bed95SPyun YongHyeon 3990ca3f1187SPyun YongHyeon #if __FreeBSD_version > 700022 3991ca3f1187SPyun YongHyeon if (m->m_flags & M_VLANTAG) { 3992ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3993ca3f1187SPyun YongHyeon vlan_tag = m->m_pkthdr.ether_vtag; 3994ca3f1187SPyun YongHyeon } 3995ca3f1187SPyun YongHyeon #else 3996ca3f1187SPyun YongHyeon { 3997ca3f1187SPyun YongHyeon struct m_tag *mtag; 3998ca3f1187SPyun YongHyeon 3999ca3f1187SPyun YongHyeon if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) { 4000ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 4001ca3f1187SPyun YongHyeon vlan_tag = VLAN_TAG_VALUE(mtag); 4002ca3f1187SPyun YongHyeon } 4003ca3f1187SPyun YongHyeon } 4004ca3f1187SPyun YongHyeon #endif 40057e27542aSGleb Smirnoff for (i = 0; ; i++) { 40067e27542aSGleb Smirnoff d = &sc->bge_ldata.bge_tx_ring[idx]; 40077e27542aSGleb Smirnoff d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 40087e27542aSGleb Smirnoff d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 40097e27542aSGleb Smirnoff d->bge_len = segs[i].ds_len; 40107e27542aSGleb Smirnoff d->bge_flags = csum_flags; 4011ca3f1187SPyun YongHyeon d->bge_vlan_tag = vlan_tag; 4012ca3f1187SPyun YongHyeon d->bge_mss = mss; 40137e27542aSGleb Smirnoff if (i == nsegs - 1) 40147e27542aSGleb Smirnoff break; 40157e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 40167e27542aSGleb Smirnoff } 40177e27542aSGleb Smirnoff 40187e27542aSGleb Smirnoff /* Mark the last segment as end of packet... 
*/ 40197e27542aSGleb Smirnoff d->bge_flags |= BGE_TXBDFLAG_END; 4020676ad2c9SGleb Smirnoff 4021f41ac2beSBill Paul /* 4022f41ac2beSBill Paul * Insure that the map for this transmission 4023f41ac2beSBill Paul * is placed at the array index of the last descriptor 4024f41ac2beSBill Paul * in this chain. 4025f41ac2beSBill Paul */ 40267e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 40277e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[idx] = map; 4028676ad2c9SGleb Smirnoff sc->bge_cdata.bge_tx_chain[idx] = m; 40297e27542aSGleb Smirnoff sc->bge_txcnt += nsegs; 403095d67482SBill Paul 40317e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 40327e27542aSGleb Smirnoff *txidx = idx; 403395d67482SBill Paul 403495d67482SBill Paul return (0); 403595d67482SBill Paul } 403695d67482SBill Paul 403795d67482SBill Paul /* 403895d67482SBill Paul * Main transmit routine. To avoid having to do mbuf copies, we put pointers 403995d67482SBill Paul * to the mbuf data regions directly in the transmit descriptors. 404095d67482SBill Paul */ 404195d67482SBill Paul static void 40423f74909aSGleb Smirnoff bge_start_locked(struct ifnet *ifp) 404395d67482SBill Paul { 404495d67482SBill Paul struct bge_softc *sc; 4045167fdb62SPyun YongHyeon struct mbuf *m_head; 404614bbd30fSGleb Smirnoff uint32_t prodidx; 4047167fdb62SPyun YongHyeon int count; 404895d67482SBill Paul 404995d67482SBill Paul sc = ifp->if_softc; 4050167fdb62SPyun YongHyeon BGE_LOCK_ASSERT(sc); 405195d67482SBill Paul 4052167fdb62SPyun YongHyeon if (!sc->bge_link || 4053167fdb62SPyun YongHyeon (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 4054167fdb62SPyun YongHyeon IFF_DRV_RUNNING) 405595d67482SBill Paul return; 405695d67482SBill Paul 405714bbd30fSGleb Smirnoff prodidx = sc->bge_tx_prodidx; 405895d67482SBill Paul 4059167fdb62SPyun YongHyeon for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 4060167fdb62SPyun YongHyeon if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { 4061167fdb62SPyun YongHyeon ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4062167fdb62SPyun YongHyeon break; 4063167fdb62SPyun YongHyeon } 40644d665c4dSDag-Erling Smørgrav IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 406595d67482SBill Paul if (m_head == NULL) 406695d67482SBill Paul break; 406795d67482SBill Paul 406895d67482SBill Paul /* 406995d67482SBill Paul * XXX 4070b874fdd4SYaroslav Tykhiy * The code inside the if() block is never reached since we 4071b874fdd4SYaroslav Tykhiy * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 4072b874fdd4SYaroslav Tykhiy * requests to checksum TCP/UDP in a fragmented packet. 4073b874fdd4SYaroslav Tykhiy * 4074b874fdd4SYaroslav Tykhiy * XXX 407595d67482SBill Paul * safety overkill. If this is a fragmented packet chain 407695d67482SBill Paul * with delayed TCP/UDP checksums, then only encapsulate 407795d67482SBill Paul * it if we have enough descriptors to handle the entire 407895d67482SBill Paul * chain at once. 
407995d67482SBill Paul * (paranoia -- may not actually be needed) 408095d67482SBill Paul */ 408195d67482SBill Paul if (m_head->m_flags & M_FIRSTFRAG && 408295d67482SBill Paul m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 408395d67482SBill Paul if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 408495d67482SBill Paul m_head->m_pkthdr.csum_data + 16) { 40854d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 408613f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 408795d67482SBill Paul break; 408895d67482SBill Paul } 408995d67482SBill Paul } 409095d67482SBill Paul 409195d67482SBill Paul /* 409295d67482SBill Paul * Pack the data into the transmit ring. If we 409395d67482SBill Paul * don't have room, set the OACTIVE flag and wait 409495d67482SBill Paul * for the NIC to drain the ring. 409595d67482SBill Paul */ 4096676ad2c9SGleb Smirnoff if (bge_encap(sc, &m_head, &prodidx)) { 4097676ad2c9SGleb Smirnoff if (m_head == NULL) 4098676ad2c9SGleb Smirnoff break; 40994d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 410013f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 410195d67482SBill Paul break; 410295d67482SBill Paul } 4103303a718cSDag-Erling Smørgrav ++count; 410495d67482SBill Paul 410595d67482SBill Paul /* 410695d67482SBill Paul * If there's a BPF listener, bounce a copy of this frame 410795d67482SBill Paul * to him. 410895d67482SBill Paul */ 41094e35d186SJung-uk Kim #ifdef ETHER_BPF_MTAP 411045ee6ab3SJung-uk Kim ETHER_BPF_MTAP(ifp, m_head); 41114e35d186SJung-uk Kim #else 41124e35d186SJung-uk Kim BPF_MTAP(ifp, m_head); 41134e35d186SJung-uk Kim #endif 411495d67482SBill Paul } 411595d67482SBill Paul 4116167fdb62SPyun YongHyeon if (count > 0) { 4117aa94f333SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 41185c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 41193f74909aSGleb Smirnoff /* Transmit. */ 412038cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 41213927098fSPaul Saab /* 5700 b2 errata */ 4122e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 412338cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 412495d67482SBill Paul 412514bbd30fSGleb Smirnoff sc->bge_tx_prodidx = prodidx; 412614bbd30fSGleb Smirnoff 412795d67482SBill Paul /* 412895d67482SBill Paul * Set a timeout in case the chip goes out to lunch. 412995d67482SBill Paul */ 4130b74e67fbSGleb Smirnoff sc->bge_timer = 5; 413195d67482SBill Paul } 4132167fdb62SPyun YongHyeon } 413395d67482SBill Paul 41340f9bd73bSSam Leffler /* 41350f9bd73bSSam Leffler * Main transmit routine. To avoid having to do mbuf copies, we put pointers 41360f9bd73bSSam Leffler * to the mbuf data regions directly in the transmit descriptors. 
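 * This wrapper only takes the softc lock around bge_start_locked().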
41370f9bd73bSSam Leffler */ 413895d67482SBill Paul static void 41393f74909aSGleb Smirnoff bge_start(struct ifnet *ifp) 414095d67482SBill Paul { 41410f9bd73bSSam Leffler struct bge_softc *sc; 41420f9bd73bSSam Leffler 41430f9bd73bSSam Leffler sc = ifp->if_softc; 41440f9bd73bSSam Leffler BGE_LOCK(sc); 41450f9bd73bSSam Leffler bge_start_locked(ifp); 41460f9bd73bSSam Leffler BGE_UNLOCK(sc); 41470f9bd73bSSam Leffler } 41480f9bd73bSSam Leffler 41490f9bd73bSSam Leffler static void 41503f74909aSGleb Smirnoff bge_init_locked(struct bge_softc *sc) 41510f9bd73bSSam Leffler { 415295d67482SBill Paul struct ifnet *ifp; 41533f74909aSGleb Smirnoff uint16_t *m; 415495d67482SBill Paul 41550f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 415695d67482SBill Paul 4157fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 415895d67482SBill Paul 415913f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) 416095d67482SBill Paul return; 416195d67482SBill Paul 416295d67482SBill Paul /* Cancel pending I/O and flush buffers. */ 416395d67482SBill Paul bge_stop(sc); 41648cb1383cSDoug Ambrisko 41658cb1383cSDoug Ambrisko bge_stop_fw(sc); 41668cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_START); 416795d67482SBill Paul bge_reset(sc); 41688cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_START); 41698cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_START); 41708cb1383cSDoug Ambrisko 417195d67482SBill Paul bge_chipinit(sc); 417295d67482SBill Paul 417395d67482SBill Paul /* 417495d67482SBill Paul * Init the various state machines, ring 417595d67482SBill Paul * control blocks and firmware. 417695d67482SBill Paul */ 417795d67482SBill Paul if (bge_blockinit(sc)) { 4178fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "initialization failure\n"); 417995d67482SBill Paul return; 418095d67482SBill Paul } 418195d67482SBill Paul 4182fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 418395d67482SBill Paul 418495d67482SBill Paul /* Specify MTU. */ 418595d67482SBill Paul CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 4186cb2eacc7SYaroslav Tykhiy ETHER_HDR_LEN + ETHER_CRC_LEN + 4187cb2eacc7SYaroslav Tykhiy (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); 418895d67482SBill Paul 418995d67482SBill Paul /* Load our MAC address. */ 41903f74909aSGleb Smirnoff m = (uint16_t *)IF_LLADDR(sc->bge_ifp); 419195d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 419295d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 419395d67482SBill Paul 41943e9b1bcaSJung-uk Kim /* Program promiscuous mode. */ 41953e9b1bcaSJung-uk Kim bge_setpromisc(sc); 419695d67482SBill Paul 419795d67482SBill Paul /* Program multicast filter. */ 419895d67482SBill Paul bge_setmulti(sc); 419995d67482SBill Paul 4200cb2eacc7SYaroslav Tykhiy /* Program VLAN tag stripping. */ 4201cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4202cb2eacc7SYaroslav Tykhiy 420395d67482SBill Paul /* Init RX ring. */ 42043ee5d7daSPyun YongHyeon if (bge_init_rx_ring_std(sc) != 0) { 42053ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); 42063ee5d7daSPyun YongHyeon bge_stop(sc); 42073ee5d7daSPyun YongHyeon return; 42083ee5d7daSPyun YongHyeon } 420995d67482SBill Paul 42100434d1b8SBill Paul /* 42110434d1b8SBill Paul * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 42120434d1b8SBill Paul * memory to insure that the chip has in fact read the first 42130434d1b8SBill Paul * entry of the ring. 
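 * (The word polled at BGE_STD_RX_RINGS + 8 presumably reflects the length
 * field of the first standard ring descriptor; the loop below simply waits
 * for it to read back as MCLBYTES - ETHER_ALIGN.)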
42140434d1b8SBill Paul */
42150434d1b8SBill Paul if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
42163f74909aSGleb Smirnoff uint32_t v, i;
42170434d1b8SBill Paul for (i = 0; i < 10; i++) {
42180434d1b8SBill Paul DELAY(20);
42190434d1b8SBill Paul v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
42200434d1b8SBill Paul if (v == (MCLBYTES - ETHER_ALIGN))
42210434d1b8SBill Paul break;
42220434d1b8SBill Paul }
42230434d1b8SBill Paul if (i == 10)
4224fe806fdaSPyun YongHyeon device_printf (sc->bge_dev,
4225fe806fdaSPyun YongHyeon "5705 A0 chip failed to load RX ring\n");
42260434d1b8SBill Paul }
42270434d1b8SBill Paul
422895d67482SBill Paul /* Init jumbo RX ring. */
4229c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
4230c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) {
42313ee5d7daSPyun YongHyeon if (bge_init_rx_ring_jumbo(sc) != 0) {
42323ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n");
42333ee5d7daSPyun YongHyeon bge_stop(sc);
42343ee5d7daSPyun YongHyeon return;
42353ee5d7daSPyun YongHyeon }
42363ee5d7daSPyun YongHyeon }
423795d67482SBill Paul
42383f74909aSGleb Smirnoff /* Init our RX return ring index. */
423995d67482SBill Paul sc->bge_rx_saved_considx = 0;
424095d67482SBill Paul
42417e6e2507SJung-uk Kim /* Init our RX/TX stat counters. */
42427e6e2507SJung-uk Kim sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
42437e6e2507SJung-uk Kim
424495d67482SBill Paul /* Init TX ring. */
424595d67482SBill Paul bge_init_tx_ring(sc);
424695d67482SBill Paul
42473f74909aSGleb Smirnoff /* Turn on transmitter. */
424895d67482SBill Paul BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
424995d67482SBill Paul
42503f74909aSGleb Smirnoff /* Turn on receiver. */
425195d67482SBill Paul BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
425295d67482SBill Paul
425395d67482SBill Paul /* Tell firmware we're alive. */
425495d67482SBill Paul BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
425595d67482SBill Paul
425675719184SGleb Smirnoff #ifdef DEVICE_POLLING
425775719184SGleb Smirnoff /* Disable interrupts if we are polling. */
425875719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) {
425975719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
426075719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR);
426138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
426275719184SGleb Smirnoff } else
426375719184SGleb Smirnoff #endif
426475719184SGleb Smirnoff
426595d67482SBill Paul /* Enable host interrupts.
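 * (clear any stale INTA state, unmask the PCI interrupt and write 0 to
 * the IRQ0 mailbox)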
*/ 426675719184SGleb Smirnoff { 426795d67482SBill Paul BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 426895d67482SBill Paul BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 426938cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 427075719184SGleb Smirnoff } 427195d67482SBill Paul 427267d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(ifp); 427395d67482SBill Paul 427413f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 427513f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 427695d67482SBill Paul 42770f9bd73bSSam Leffler callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 42780f9bd73bSSam Leffler } 42790f9bd73bSSam Leffler 42800f9bd73bSSam Leffler static void 42813f74909aSGleb Smirnoff bge_init(void *xsc) 42820f9bd73bSSam Leffler { 42830f9bd73bSSam Leffler struct bge_softc *sc = xsc; 42840f9bd73bSSam Leffler 42850f9bd73bSSam Leffler BGE_LOCK(sc); 42860f9bd73bSSam Leffler bge_init_locked(sc); 42870f9bd73bSSam Leffler BGE_UNLOCK(sc); 428895d67482SBill Paul } 428995d67482SBill Paul 429095d67482SBill Paul /* 429195d67482SBill Paul * Set media options. 429295d67482SBill Paul */ 429395d67482SBill Paul static int 42943f74909aSGleb Smirnoff bge_ifmedia_upd(struct ifnet *ifp) 429595d67482SBill Paul { 429667d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 429767d5e043SOleg Bulyzhin int res; 429867d5e043SOleg Bulyzhin 429967d5e043SOleg Bulyzhin BGE_LOCK(sc); 430067d5e043SOleg Bulyzhin res = bge_ifmedia_upd_locked(ifp); 430167d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 430267d5e043SOleg Bulyzhin 430367d5e043SOleg Bulyzhin return (res); 430467d5e043SOleg Bulyzhin } 430567d5e043SOleg Bulyzhin 430667d5e043SOleg Bulyzhin static int 430767d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(struct ifnet *ifp) 430867d5e043SOleg Bulyzhin { 430967d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 431095d67482SBill Paul struct mii_data *mii; 43114f09c4c7SMarius Strobl struct mii_softc *miisc; 431295d67482SBill Paul struct ifmedia *ifm; 431395d67482SBill Paul 431467d5e043SOleg Bulyzhin BGE_LOCK_ASSERT(sc); 431567d5e043SOleg Bulyzhin 431695d67482SBill Paul ifm = &sc->bge_ifmedia; 431795d67482SBill Paul 431895d67482SBill Paul /* If this is a 1000baseX NIC, enable the TBI port. */ 4319652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 432095d67482SBill Paul if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 432195d67482SBill Paul return (EINVAL); 432295d67482SBill Paul switch(IFM_SUBTYPE(ifm->ifm_media)) { 432395d67482SBill Paul case IFM_AUTO: 4324ff50922bSDoug White /* 4325ff50922bSDoug White * The BCM5704 ASIC appears to have a special 4326ff50922bSDoug White * mechanism for programming the autoneg 4327ff50922bSDoug White * advertisement registers in TBI mode. 
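 * The sequence below waits for BGE_SGDIGSTS_DONE, clears the TBI autoneg
 * register, writes BGE_SGDIG_CFG with the autoneg/pause capability bits
 * plus BGE_SGDIGCFG_SEND, delays briefly, and then rewrites the value
 * without the SEND bit.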
4328ff50922bSDoug White */ 43290f89fde2SJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 4330ff50922bSDoug White uint32_t sgdig; 43310f89fde2SJung-uk Kim sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 43320f89fde2SJung-uk Kim if (sgdig & BGE_SGDIGSTS_DONE) { 4333ff50922bSDoug White CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4334ff50922bSDoug White sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4335ff50922bSDoug White sgdig |= BGE_SGDIGCFG_AUTO | 4336ff50922bSDoug White BGE_SGDIGCFG_PAUSE_CAP | 4337ff50922bSDoug White BGE_SGDIGCFG_ASYM_PAUSE; 4338ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4339ff50922bSDoug White sgdig | BGE_SGDIGCFG_SEND); 4340ff50922bSDoug White DELAY(5); 4341ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4342ff50922bSDoug White } 43430f89fde2SJung-uk Kim } 434495d67482SBill Paul break; 434595d67482SBill Paul case IFM_1000_SX: 434695d67482SBill Paul if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 434795d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, 434895d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 434995d67482SBill Paul } else { 435095d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, 435195d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 435295d67482SBill Paul } 435395d67482SBill Paul break; 435495d67482SBill Paul default: 435595d67482SBill Paul return (EINVAL); 435695d67482SBill Paul } 435795d67482SBill Paul return (0); 435895d67482SBill Paul } 435995d67482SBill Paul 43601493e883SOleg Bulyzhin sc->bge_link_evt++; 436195d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 43624f09c4c7SMarius Strobl if (mii->mii_instance) 43634f09c4c7SMarius Strobl LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 436495d67482SBill Paul mii_phy_reset(miisc); 436595d67482SBill Paul mii_mediachg(mii); 436695d67482SBill Paul 4367902827f6SBjoern A. Zeeb /* 4368902827f6SBjoern A. Zeeb * Force an interrupt so that we will call bge_link_upd 4369902827f6SBjoern A. Zeeb * if needed and clear any pending link state attention. 4370902827f6SBjoern A. Zeeb * Without this we are not getting any further interrupts 4371902827f6SBjoern A. Zeeb * for link state changes and thus will not UP the link and 4372902827f6SBjoern A. Zeeb * not be able to send in bge_start_locked. The only 4373902827f6SBjoern A. Zeeb * way to get things working was to receive a packet and 4374902827f6SBjoern A. Zeeb * get an RX intr. 4375902827f6SBjoern A. Zeeb * bge_tick should help for fiber cards and we might not 4376902827f6SBjoern A. Zeeb * need to do this here if BGE_FLAG_TBI is set but as 4377902827f6SBjoern A. Zeeb * we poll for fiber anyway it should not harm. 4378902827f6SBjoern A. Zeeb */ 43794f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 43804f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 4381902827f6SBjoern A. Zeeb BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 43824f0794ffSBjoern A. Zeeb else 438363ccfe30SBjoern A. Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4384902827f6SBjoern A. Zeeb 438595d67482SBill Paul return (0); 438695d67482SBill Paul } 438795d67482SBill Paul 438895d67482SBill Paul /* 438995d67482SBill Paul * Report current media status. 
439095d67482SBill Paul */ 439195d67482SBill Paul static void 43923f74909aSGleb Smirnoff bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 439395d67482SBill Paul { 439467d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 439595d67482SBill Paul struct mii_data *mii; 439695d67482SBill Paul 439767d5e043SOleg Bulyzhin BGE_LOCK(sc); 439895d67482SBill Paul 4399652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 440095d67482SBill Paul ifmr->ifm_status = IFM_AVALID; 440195d67482SBill Paul ifmr->ifm_active = IFM_ETHER; 440295d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_STS) & 440395d67482SBill Paul BGE_MACSTAT_TBI_PCS_SYNCHED) 440495d67482SBill Paul ifmr->ifm_status |= IFM_ACTIVE; 44054c0da0ffSGleb Smirnoff else { 44064c0da0ffSGleb Smirnoff ifmr->ifm_active |= IFM_NONE; 440767d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 44084c0da0ffSGleb Smirnoff return; 44094c0da0ffSGleb Smirnoff } 441095d67482SBill Paul ifmr->ifm_active |= IFM_1000_SX; 441195d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 441295d67482SBill Paul ifmr->ifm_active |= IFM_HDX; 441395d67482SBill Paul else 441495d67482SBill Paul ifmr->ifm_active |= IFM_FDX; 441567d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 441695d67482SBill Paul return; 441795d67482SBill Paul } 441895d67482SBill Paul 441995d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 442095d67482SBill Paul mii_pollstat(mii); 442195d67482SBill Paul ifmr->ifm_active = mii->mii_media_active; 442295d67482SBill Paul ifmr->ifm_status = mii->mii_media_status; 442367d5e043SOleg Bulyzhin 442467d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 442595d67482SBill Paul } 442695d67482SBill Paul 442795d67482SBill Paul static int 44283f74909aSGleb Smirnoff bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 442995d67482SBill Paul { 443095d67482SBill Paul struct bge_softc *sc = ifp->if_softc; 443195d67482SBill Paul struct ifreq *ifr = (struct ifreq *) data; 443295d67482SBill Paul struct mii_data *mii; 4433f9004b6dSJung-uk Kim int flags, mask, error = 0; 443495d67482SBill Paul 443595d67482SBill Paul switch (command) { 443695d67482SBill Paul case SIOCSIFMTU: 44374c0da0ffSGleb Smirnoff if (ifr->ifr_mtu < ETHERMIN || 44384c0da0ffSGleb Smirnoff ((BGE_IS_JUMBO_CAPABLE(sc)) && 44394c0da0ffSGleb Smirnoff ifr->ifr_mtu > BGE_JUMBO_MTU) || 44404c0da0ffSGleb Smirnoff ((!BGE_IS_JUMBO_CAPABLE(sc)) && 44414c0da0ffSGleb Smirnoff ifr->ifr_mtu > ETHERMTU)) 444295d67482SBill Paul error = EINVAL; 44434c0da0ffSGleb Smirnoff else if (ifp->if_mtu != ifr->ifr_mtu) { 444495d67482SBill Paul ifp->if_mtu = ifr->ifr_mtu; 444513f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 444695d67482SBill Paul bge_init(sc); 444795d67482SBill Paul } 444895d67482SBill Paul break; 444995d67482SBill Paul case SIOCSIFFLAGS: 44500f9bd73bSSam Leffler BGE_LOCK(sc); 445195d67482SBill Paul if (ifp->if_flags & IFF_UP) { 445295d67482SBill Paul /* 445395d67482SBill Paul * If only the state of the PROMISC flag changed, 445495d67482SBill Paul * then just use the 'set promisc mode' command 445595d67482SBill Paul * instead of reinitializing the entire NIC. Doing 445695d67482SBill Paul * a full re-init means reloading the firmware and 445795d67482SBill Paul * waiting for it to start up, which may take a 4458d183af7fSRuslan Ermilov * second or two. Similarly for ALLMULTI. 
static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.  Similarly for ALLMULTI.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = ifp->if_flags ^ sc->bge_if_flags;
				if (flags & IFF_PROMISC)
					bge_setpromisc(sc);
				if (flags & IFF_ALLMULTI)
					bge_setmulti(sc);
			} else
				bge_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				bge_stop(sc);
			}
		}
		sc->bge_if_flags = ifp->if_flags;
		BGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			BGE_LOCK(sc);
			bge_setmulti(sc);
			BGE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->bge_flags & BGE_FLAG_TBI) {
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->bge_ifmedia, command);
		} else {
			mii = device_get_softc(sc->bge_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(bge_poll, ifp);
				if (error)
					return (error);
				BGE_LOCK(sc);
				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
				ifp->if_capenable |= IFCAP_POLLING;
				BGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				BGE_LOCK(sc);
				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
				    BGE_PCIMISCCTL_MASK_PCI_INTR);
				bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
				ifp->if_capenable &= ~IFCAP_POLLING;
				BGE_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (IFCAP_HWCSUM & ifp->if_capenable &&
			    IFCAP_HWCSUM & ifp->if_capabilities)
				ifp->if_hwassist |= BGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~BGE_CSUM_FEATURES;
#ifdef VLAN_CAPABILITIES
			VLAN_CAPABILITIES(ifp);
#endif
		}

		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bge_init(sc);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			BGE_LOCK(sc);
			bge_setvlan(sc);
			BGE_UNLOCK(sc);
#ifdef VLAN_CAPABILITIES
			VLAN_CAPABILITIES(ifp);
#endif
		}

		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

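/*
 * Usage sketch (illustrative only, not driver code): the SIOCSIFCAP case
 * above is what "ifconfig bge0 -txcsum -rxcsum" ultimately exercises.  A
 * minimal userland equivalent, assuming an interface named "bge0", reads
 * the current capabilities and requests hardware checksumming off:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ifreq ifr;
 *		int s;
 *
 *		s = socket(AF_INET, SOCK_DGRAM, 0);
 *		memset(&ifr, 0, sizeof(ifr));
 *		strlcpy(ifr.ifr_name, "bge0", sizeof(ifr.ifr_name));
 *		if (ioctl(s, SIOCGIFCAP, &ifr) == -1)
 *			return (1);
 *		ifr.ifr_reqcap = ifr.ifr_curcap & ~IFCAP_HWCSUM;
 *		if (ioctl(s, SIOCSIFCAP, &ifr) == -1)
 *			return (1);
 *		close(s);
 *		return (0);
 *	}
 */
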
453395d67482SBill Paul } 4534cb2eacc7SYaroslav Tykhiy 4535ca3f1187SPyun YongHyeon if ((mask & IFCAP_TSO4) != 0 && 4536ca3f1187SPyun YongHyeon (ifp->if_capabilities & IFCAP_TSO4) != 0) { 4537ca3f1187SPyun YongHyeon ifp->if_capenable ^= IFCAP_TSO4; 4538ca3f1187SPyun YongHyeon if ((ifp->if_capenable & IFCAP_TSO4) != 0) 4539ca3f1187SPyun YongHyeon ifp->if_hwassist |= CSUM_TSO; 4540ca3f1187SPyun YongHyeon else 4541ca3f1187SPyun YongHyeon ifp->if_hwassist &= ~CSUM_TSO; 4542ca3f1187SPyun YongHyeon } 4543ca3f1187SPyun YongHyeon 4544cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_MTU) { 4545cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_MTU; 4546cb2eacc7SYaroslav Tykhiy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4547cb2eacc7SYaroslav Tykhiy bge_init(sc); 4548cb2eacc7SYaroslav Tykhiy } 4549cb2eacc7SYaroslav Tykhiy 4550cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_HWTAGGING) { 4551cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 4552cb2eacc7SYaroslav Tykhiy BGE_LOCK(sc); 4553cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4554cb2eacc7SYaroslav Tykhiy BGE_UNLOCK(sc); 4555cb2eacc7SYaroslav Tykhiy #ifdef VLAN_CAPABILITIES 4556cb2eacc7SYaroslav Tykhiy VLAN_CAPABILITIES(ifp); 4557cb2eacc7SYaroslav Tykhiy #endif 4558cb2eacc7SYaroslav Tykhiy } 4559cb2eacc7SYaroslav Tykhiy 456095d67482SBill Paul break; 456195d67482SBill Paul default: 4562673d9191SSam Leffler error = ether_ioctl(ifp, command, data); 456395d67482SBill Paul break; 456495d67482SBill Paul } 456595d67482SBill Paul 456695d67482SBill Paul return (error); 456795d67482SBill Paul } 456895d67482SBill Paul 456995d67482SBill Paul static void 4570b74e67fbSGleb Smirnoff bge_watchdog(struct bge_softc *sc) 457195d67482SBill Paul { 4572b74e67fbSGleb Smirnoff struct ifnet *ifp; 457395d67482SBill Paul 4574b74e67fbSGleb Smirnoff BGE_LOCK_ASSERT(sc); 4575b74e67fbSGleb Smirnoff 4576b74e67fbSGleb Smirnoff if (sc->bge_timer == 0 || --sc->bge_timer) 4577b74e67fbSGleb Smirnoff return; 4578b74e67fbSGleb Smirnoff 4579b74e67fbSGleb Smirnoff ifp = sc->bge_ifp; 458095d67482SBill Paul 4581fe806fdaSPyun YongHyeon if_printf(ifp, "watchdog timeout -- resetting\n"); 458295d67482SBill Paul 458313f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4584426742bfSGleb Smirnoff bge_init_locked(sc); 458595d67482SBill Paul 458695d67482SBill Paul ifp->if_oerrors++; 458795d67482SBill Paul } 458895d67482SBill Paul 458995d67482SBill Paul /* 459095d67482SBill Paul * Stop the adapter and free any mbufs allocated to the 459195d67482SBill Paul * RX and TX lists. 459295d67482SBill Paul */ 459395d67482SBill Paul static void 45943f74909aSGleb Smirnoff bge_stop(struct bge_softc *sc) 459595d67482SBill Paul { 459695d67482SBill Paul struct ifnet *ifp; 459795d67482SBill Paul 45980f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 45990f9bd73bSSam Leffler 4600fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 460195d67482SBill Paul 46020f9bd73bSSam Leffler callout_stop(&sc->bge_stat_ch); 460395d67482SBill Paul 460444b63691SBjoern A. Zeeb /* Disable host interrupts. */ 460544b63691SBjoern A. Zeeb BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 460644b63691SBjoern A. Zeeb bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 460744b63691SBjoern A. Zeeb 460844b63691SBjoern A. Zeeb /* 460944b63691SBjoern A. Zeeb * Tell firmware we're shutting down. 461044b63691SBjoern A. Zeeb */ 461144b63691SBjoern A. Zeeb bge_stop_fw(sc); 461244b63691SBjoern A. Zeeb bge_sig_pre_reset(sc, BGE_RESET_STOP); 461344b63691SBjoern A. 
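/*
 * Note on the watchdog above: it takes the softc and asserts the driver
 * lock, so it is presumably driven from the periodic bge_tick() callout
 * rather than from a legacy if_watchdog hook.  The transmit path is
 * expected to arm sc->bge_timer when frames are queued and the TX
 * completion path to clear it, so the reset only fires when the chip
 * stops completing transmissions.
 */
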
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bge_stop(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	callout_stop(&sc->bge_stat_ch);

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);

	/*
	 * Disable all of the receiver blocks.
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!(BGE_IS_5705_PLUS(sc)))
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_PLUS(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/* Clear MAC's link state (PHY may still have link UP). */
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
bge_shutdown(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);

	return (0);
}

static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}

static int
bge_resume(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	ifp = sc->bge_ifp;
	if (ifp->if_flags & IFF_UP) {
		bge_init_locked(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);

	return (0);
}

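/*
 * Link state handling below uses three different strategies, depending on
 * the hardware: BCM5700 chips other than revision B2 rely on MII interrupts
 * from the PHY, TBI (fiber) cards look at the PCS sync bit in the MAC status
 * register, and copper chips with MI auto-polling enabled read the PHY link
 * bit directly to work around unreliable status-block link flags.  See the
 * per-branch comments in bge_link_upd() for the details.
 */
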
static void
bge_link_upd(struct bge_softc *sc)
{
	struct mii_data *mii;
	uint32_t link, status;

	BGE_LOCK_ASSERT(sc);

	/* Clear 'pending link event' flag. */
	sc->bge_link_evt = 0;

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 *
	 * XXX: perhaps the link state detection procedure used for
	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
	 */

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}

			/* Clear the interrupt. */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FLAG_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!sc->bge_link) {
				sc->bge_link++;
				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
				if_link_state_change(sc->bge_ifp,
				    LINK_STATE_UP);
			}
		} else if (sc->bge_link) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
		}
	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		/*
		 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in the status word always set. Work around this bug by
		 * reading the PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	} else {
		/*
		 * Discard link events for MII/GMII controllers
		 * if MI auto-polling is disabled.
		 */
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
	    sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
	    desc)

static void
bge_add_sysctls(struct bge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *schildren;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
	    "Debug Information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
	    "Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
	    "Memory Read");

#endif

	if (BGE_IS_5705_PLUS(sc))
		return;

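	/*
	 * The nodes below expose the chip's statistics block under the
	 * device sysctl tree (dev.bge.<unit>.stats); each entry records an
	 * offset into struct bge_stats, and bge_sysctl_stats() translates
	 * that offset into a read from the NIC's statistics area whenever
	 * the sysctl is queried.
	 */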
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "BGE Statistics");
	schildren = children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
	    children, COSFramesDroppedDueToFilters,
	    "FramesDroppedDueToFilters");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
	    children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
	    children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
	    children, nicNoMoreRxBDs, "NoMoreRxBDs");
	BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
	    children, ifInDiscards, "InputDiscards");
	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
	    children, ifInErrors, "InputErrors");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
	    children, nicRecvThresholdHit, "RecvThresholdHit");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
	    children, nicDmaReadQueueFull, "DmaReadQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
	    children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
	    children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
	    children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
	    children, nicRingStatusUpdate, "RingStatusUpdate");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
	    children, nicInterrupts, "Interrupts");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
	    children, nicAvoidedInterrupts, "AvoidedInterrupts");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
	    children, nicSendThresholdHit, "SendThresholdHit");

	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "BGE RX Statistics");
	children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
	    children, rxstats.ifHCInOctets, "Octets");
	BGE_SYSCTL_STAT(sc, ctx, "Fragments",
	    children, rxstats.etherStatsFragments, "Fragments");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
	    children, rxstats.ifHCInUcastPkts, "UcastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
	    children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
	    children, rxstats.dot3StatsFCSErrors, "FCSErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
	    children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
	BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
	    children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
	    children, rxstats.xoffPauseFramesReceived,
	    "xoffPauseFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
	    children, rxstats.macControlFramesReceived,
	    "ControlFramesReceived");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
	    children, rxstats.xoffStateEntered, "xoffStateEntered");
	BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
	    children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
	BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
	    children, rxstats.etherStatsJabbers, "Jabbers");
	BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
	    children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
	BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
	    children, rxstats.inRangeLengthError, "inRangeLengthError");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
	    children, rxstats.outRangeLengthError, "outRangeLengthError");

	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "BGE TX Statistics");
	children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
	    children, txstats.ifHCOutOctets, "Octets");
	BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
	    children, txstats.etherStatsCollisions, "Collisions");
	BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
	    children, txstats.outXonSent, "XonSent");
	BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
	    children, txstats.outXoffSent, "XoffSent");
	BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
	    children, txstats.flowControlDone, "flowControlDone");
	BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
	    children, txstats.dot3StatsInternalMacTransmitErrors,
	    "InternalMacTransmitErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
	    children, txstats.dot3StatsSingleCollisionFrames,
	    "SingleCollisionFrames");
	BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
	    children, txstats.dot3StatsMultipleCollisionFrames,
	    "MultipleCollisionFrames");
	BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
	    children, txstats.dot3StatsDeferredTransmissions,
	    "DeferredTransmissions");
	BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
	    children, txstats.dot3StatsExcessiveCollisions,
	    "ExcessiveCollisions");
	BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
	    children, txstats.dot3StatsLateCollisions,
	    "LateCollisions");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
	    children, txstats.ifHCOutUcastPkts, "UcastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
	    children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
	    children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
	BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
	    children, txstats.dot3StatsCarrierSenseErrors,
	    "CarrierSenseErrors");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
	    children, txstats.ifOutDiscards, "Discards");
	BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
	    children, txstats.ifOutErrors, "Errors");
}

static int
bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	uint32_t result;
	int offset;

	sc = (struct bge_softc *)arg1;
	offset = arg2;
	result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
	    offsetof(bge_hostaddr, bge_addr_lo));
	return (sysctl_handle_int(oidp, &result, 0, req));
}

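/*
 * Usage sketch (illustrative only, not driver code): the statistics
 * registered above can be read from userland with sysctlbyname(3).  The
 * unit number and the exact OID name here are assumptions derived from
 * the node names created in bge_add_sysctls(), e.g. for unit 0:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned int octets;
 *		size_t len = sizeof(octets);
 *
 *		if (sysctlbyname("dev.bge.0.stats.rx.Octets", &octets,
 *		    &len, NULL, 0) == -1)
 *			return (1);
 *		printf("RX octets: %u\n", octets);
 *		return (0);
 *	}
 */
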
#ifdef BGE_REGISTER_DEBUG
static int
bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	uint16_t *sbdata;
	int error;
	int result;
	int i, j;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result == 1) {
		sc = (struct bge_softc *)arg1;

		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
		printf("Status Block:\n");
		for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
			printf("%06x:", i);
			for (j = 0; j < 8; j++) {
				printf(" %04x", sbdata[i]);
				i += 4;
			}
			printf("\n");
		}

		printf("Registers:\n");
		for (i = 0x800; i < 0xA00; ) {
			printf("%06x:", i);
			for (j = 0; j < 8; j++) {
				printf(" %08x", CSR_READ_4(sc, i));
				i += 4;
			}
			printf("\n");
		}

		printf("Hardware Flags:\n");
		if (BGE_IS_5755_PLUS(sc))
			printf(" - 5755 Plus\n");
		if (BGE_IS_575X_PLUS(sc))
			printf(" - 575X Plus\n");
		if (BGE_IS_5705_PLUS(sc))
			printf(" - 5705 Plus\n");
		if (BGE_IS_5714_FAMILY(sc))
			printf(" - 5714 Family\n");
		if (BGE_IS_5700_FAMILY(sc))
			printf(" - 5700 Family\n");
		if (sc->bge_flags & BGE_FLAG_JUMBO)
			printf(" - Supports Jumbo Frames\n");
		if (sc->bge_flags & BGE_FLAG_PCIX)
			printf(" - PCI-X Bus\n");
		if (sc->bge_flags & BGE_FLAG_PCIE)
			printf(" - PCI Express Bus\n");
		if (sc->bge_flags & BGE_FLAG_NO_3LED)
			printf(" - No 3 LEDs\n");
		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
			printf(" - RX Alignment Bug\n");
	}

	return (error);
}

static int
bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	uint16_t result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = CSR_READ_4(sc, result);
		printf("reg 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}

static int
bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	uint16_t result;
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = bge_readmem_ind(sc, result);
		printf("mem 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}
#endif

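/*
 * Usage sketch (illustrative only, not driver code): when the driver is
 * built with BGE_REGISTER_DEBUG, the handlers above act on values written
 * to their sysctl nodes; assuming unit 0, for example:
 *
 *	sysctl dev.bge.0.debug_info=1	(dump status block and registers)
 *	sysctl dev.bge.0.reg_read=2048	(read the register at offset 0x800)
 *	sysctl dev.bge.0.mem_read=256	(read the NIC memory word at 0x100)
 *
 * The dumps are emitted with printf(9), so they appear on the console and
 * in the kernel message buffer.
 */
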
static int
bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
{

	if (sc->bge_flags & BGE_FLAG_EADDR)
		return (1);

#ifdef __sparc64__
	OF_getetheraddr(sc->bge_dev, ether_addr);
	return (0);
#endif
	return (1);
}

static int
bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
{
	uint32_t mac_addr;

	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		ether_addr[0] = (uint8_t)(mac_addr >> 8);
		ether_addr[1] = (uint8_t)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		ether_addr[2] = (uint8_t)(mac_addr >> 24);
		ether_addr[3] = (uint8_t)(mac_addr >> 16);
		ether_addr[4] = (uint8_t)(mac_addr >> 8);
		ether_addr[5] = (uint8_t)mac_addr;
		return (0);
	}
	return (1);
}

static int
bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
{
	int mac_offset = BGE_EE_MAC_OFFSET;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		mac_offset = BGE_EE_MAC_OFFSET_5906;

	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
	    ETHER_ADDR_LEN));
}

static int
bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
{

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
		return (1);

	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN));
}

static int
bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
{
	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
		/* NOTE: Order is critical */
		bge_get_eaddr_fw,
		bge_get_eaddr_mem,
		bge_get_eaddr_nvram,
		bge_get_eaddr_eeprom,
		NULL
	};
	const bge_eaddr_fcn_t *func;

	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
		if ((*func)(sc, eaddr) == 0)
			break;
	}
	return (*func == NULL ? ENXIO : 0);
}
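
/*
 * Illustrative sketch (not driver code): bge_get_eaddr() above walks a
 * NULL-terminated table of address providers and stops at the first one
 * that returns 0, falling back to ENXIO when the table is exhausted.  The
 * same idiom, reduced to a standalone example with made-up provider
 * functions, looks like this:
 *
 *	#include <stdio.h>
 *	#include <stddef.h>
 *
 *	typedef int (*provider_t)(int *out);
 *
 *	static int
 *	provider_a(int *out)
 *	{
 *		(void)out;
 *		return (1);		// this provider always fails
 *	}
 *
 *	static int
 *	provider_b(int *out)
 *	{
 *		*out = 42;		// this one succeeds
 *		return (0);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		static const provider_t providers[] = {
 *			provider_a, provider_b, NULL
 *		};
 *		const provider_t *p;
 *		int value = 0;
 *
 *		for (p = providers; *p != NULL; ++p)
 *			if ((*p)(&value) == 0)
 *				break;
 *		if (*p != NULL)
 *			printf("got %d\n", value);
 *		else
 *			printf("no provider succeeded\n");
 *		return (0);
 *	}
 */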