/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips.  The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM.  The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates.  Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY.  Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can have at most 4 TX rings,
 * and the use of the mini RX ring is disabled.  This seems to imply
 * that these features are simply not available on the BCM5701.  As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#ifdef __sparc64__
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <machine/ver.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define	BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define	ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.  Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM.  Just to be safe, we cover all possibilities.
 */
static const struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
1704c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, 1714c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, 1724c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, 1734c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, 174effef978SRemko Lodder { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, 175a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, 1764c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, 1774c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, 1784c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, 1794c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, 1804c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, 1814c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, 1824c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, 1834c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, 1844c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, 1854c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, 1869e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, 1879e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, 1889e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, 1899e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, 190a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, 191a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, 192a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, 193a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, 194a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, 1954c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, 1964c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, 1974c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, 1984c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, 199a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, 200a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, 201a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, 2029e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, 2039e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, 204a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, 2059e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, 2064c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, 2074c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, 2084c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, 2094c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, 2104c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, 21138cc658fSJohn Baldwin { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, 21238cc658fSJohn Baldwin { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, 213a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, 214a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, 215a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, 216a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, 2174c0da0ffSGleb Smirnoff 2184c0da0ffSGleb Smirnoff { SK_VENDORID, SK_DEVICEID_ALTIMA }, 2194c0da0ffSGleb Smirnoff 2204c0da0ffSGleb Smirnoff { TC_VENDORID, TC_DEVICEID_3C996 }, 2214c0da0ffSGleb Smirnoff 222a5779553SStanislav Sedov { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, 223a5779553SStanislav Sedov { 
FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, 224a5779553SStanislav Sedov { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 }, 225a5779553SStanislav Sedov 2264c0da0ffSGleb Smirnoff { 0, 0 } 22795d67482SBill Paul }; 22895d67482SBill Paul 2294c0da0ffSGleb Smirnoff static const struct bge_vendor { 2304c0da0ffSGleb Smirnoff uint16_t v_id; 2314c0da0ffSGleb Smirnoff const char *v_name; 2324c0da0ffSGleb Smirnoff } bge_vendors[] = { 2334c0da0ffSGleb Smirnoff { ALTEON_VENDORID, "Alteon" }, 2344c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, "Altima" }, 2354c0da0ffSGleb Smirnoff { APPLE_VENDORID, "Apple" }, 2364c0da0ffSGleb Smirnoff { BCOM_VENDORID, "Broadcom" }, 2374c0da0ffSGleb Smirnoff { SK_VENDORID, "SysKonnect" }, 2384c0da0ffSGleb Smirnoff { TC_VENDORID, "3Com" }, 239a5779553SStanislav Sedov { FJTSU_VENDORID, "Fujitsu" }, 2404c0da0ffSGleb Smirnoff 2414c0da0ffSGleb Smirnoff { 0, NULL } 2424c0da0ffSGleb Smirnoff }; 2434c0da0ffSGleb Smirnoff 2444c0da0ffSGleb Smirnoff static const struct bge_revision { 2454c0da0ffSGleb Smirnoff uint32_t br_chipid; 2464c0da0ffSGleb Smirnoff const char *br_name; 2474c0da0ffSGleb Smirnoff } bge_revisions[] = { 2484c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 2494c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 2504c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 2514c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 2524c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 2534c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 2544c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 2554c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 2564c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 2574c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 2584c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 2594c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 2604c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, 2614c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, 2624c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, 2634c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, 2649e86676bSGleb Smirnoff { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, 2654c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 2664c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 2674c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 2684c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 2694c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 2704c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 2714c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 2724c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 2734c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 2744c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 2754c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 2764c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 2774c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 2784c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 2794c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 2804c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 28142787b76SGleb Smirnoff { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 2824c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 2834c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 
2844c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 2854c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 2864c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 2874c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 2884c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 2894c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 2900c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 2910c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 2920c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 2930c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 294bcc20328SJohn Baldwin { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, 295a5779553SStanislav Sedov { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 296a5779553SStanislav Sedov { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 297a5779553SStanislav Sedov { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 298a5779553SStanislav Sedov { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 29981179070SJung-uk Kim /* 5754 and 5787 share the same ASIC ID */ 3006f8718a3SScott Long { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 3016f8718a3SScott Long { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 3026f8718a3SScott Long { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 30338cc658fSJohn Baldwin { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 30438cc658fSJohn Baldwin { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 305a5779553SStanislav Sedov { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 306a5779553SStanislav Sedov { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 3074c0da0ffSGleb Smirnoff 3084c0da0ffSGleb Smirnoff { 0, NULL } 3094c0da0ffSGleb Smirnoff }; 3104c0da0ffSGleb Smirnoff 3114c0da0ffSGleb Smirnoff /* 3124c0da0ffSGleb Smirnoff * Some defaults for major revisions, so that newer steppings 3134c0da0ffSGleb Smirnoff * that we don't know about have a shot at working. 
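 * These entries are keyed on the BGE_ASICREV_* portion of the chip ID,
 * so a stepping without an exact match above still gets a usable entry.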
3144c0da0ffSGleb Smirnoff */ 3154c0da0ffSGleb Smirnoff static const struct bge_revision bge_majorrevs[] = { 3169e86676bSGleb Smirnoff { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 3179e86676bSGleb Smirnoff { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 3189e86676bSGleb Smirnoff { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 3199e86676bSGleb Smirnoff { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 3209e86676bSGleb Smirnoff { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 3219e86676bSGleb Smirnoff { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 3229e86676bSGleb Smirnoff { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 3239e86676bSGleb Smirnoff { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 3249e86676bSGleb Smirnoff { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 3259e86676bSGleb Smirnoff { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 3269e86676bSGleb Smirnoff { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 327a5779553SStanislav Sedov { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 328a5779553SStanislav Sedov { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 329a5779553SStanislav Sedov { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 33081179070SJung-uk Kim /* 5754 and 5787 share the same ASIC ID */ 3316f8718a3SScott Long { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 33238cc658fSJohn Baldwin { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 333a5779553SStanislav Sedov { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 3344c0da0ffSGleb Smirnoff 3354c0da0ffSGleb Smirnoff { 0, NULL } 3364c0da0ffSGleb Smirnoff }; 3374c0da0ffSGleb Smirnoff 3380c8aa4eaSJung-uk Kim #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 3390c8aa4eaSJung-uk Kim #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 3400c8aa4eaSJung-uk Kim #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 3410c8aa4eaSJung-uk Kim #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 3420c8aa4eaSJung-uk Kim #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 343a5779553SStanislav Sedov #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) 3444c0da0ffSGleb Smirnoff 3454c0da0ffSGleb Smirnoff const struct bge_revision * bge_lookup_rev(uint32_t); 3464c0da0ffSGleb Smirnoff const struct bge_vendor * bge_lookup_vendor(uint16_t); 34738cc658fSJohn Baldwin 34838cc658fSJohn Baldwin typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 34938cc658fSJohn Baldwin 350e51a25f8SAlfred Perlstein static int bge_probe(device_t); 351e51a25f8SAlfred Perlstein static int bge_attach(device_t); 352e51a25f8SAlfred Perlstein static int bge_detach(device_t); 35314afefa3SPawel Jakub Dawidek static int bge_suspend(device_t); 35414afefa3SPawel Jakub Dawidek static int bge_resume(device_t); 3553f74909aSGleb Smirnoff static void bge_release_resources(struct bge_softc *); 356f41ac2beSBill Paul static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 357f41ac2beSBill Paul static int bge_dma_alloc(device_t); 358f41ac2beSBill Paul static void bge_dma_free(struct bge_softc *); 359f41ac2beSBill Paul 3605fea260fSMarius Strobl static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); 36138cc658fSJohn Baldwin static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 36238cc658fSJohn Baldwin static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 36338cc658fSJohn Baldwin static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 36438cc658fSJohn Baldwin static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 36538cc658fSJohn Baldwin 366b9c05fa5SPyun YongHyeon static void bge_txeof(struct 
bge_softc *, uint16_t); 367dfe0df9aSPyun YongHyeon static int bge_rxeof(struct bge_softc *, uint16_t, int); 36895d67482SBill Paul 3698cb1383cSDoug Ambrisko static void bge_asf_driver_up (struct bge_softc *); 370e51a25f8SAlfred Perlstein static void bge_tick(void *); 371e51a25f8SAlfred Perlstein static void bge_stats_update(struct bge_softc *); 3723f74909aSGleb Smirnoff static void bge_stats_update_regs(struct bge_softc *); 3732e1d4df4SPyun YongHyeon static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *, 3742e1d4df4SPyun YongHyeon uint16_t *); 375676ad2c9SGleb Smirnoff static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 37695d67482SBill Paul 377e51a25f8SAlfred Perlstein static void bge_intr(void *); 378dfe0df9aSPyun YongHyeon static int bge_msi_intr(void *); 379dfe0df9aSPyun YongHyeon static void bge_intr_task(void *, int); 3800f9bd73bSSam Leffler static void bge_start_locked(struct ifnet *); 381e51a25f8SAlfred Perlstein static void bge_start(struct ifnet *); 382e51a25f8SAlfred Perlstein static int bge_ioctl(struct ifnet *, u_long, caddr_t); 3830f9bd73bSSam Leffler static void bge_init_locked(struct bge_softc *); 384e51a25f8SAlfred Perlstein static void bge_init(void *); 385e51a25f8SAlfred Perlstein static void bge_stop(struct bge_softc *); 386b74e67fbSGleb Smirnoff static void bge_watchdog(struct bge_softc *); 387b6c974e8SWarner Losh static int bge_shutdown(device_t); 38867d5e043SOleg Bulyzhin static int bge_ifmedia_upd_locked(struct ifnet *); 389e51a25f8SAlfred Perlstein static int bge_ifmedia_upd(struct ifnet *); 390e51a25f8SAlfred Perlstein static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 39195d67482SBill Paul 39238cc658fSJohn Baldwin static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 39338cc658fSJohn Baldwin static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 39438cc658fSJohn Baldwin 3953f74909aSGleb Smirnoff static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 396e51a25f8SAlfred Perlstein static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 39795d67482SBill Paul 3983e9b1bcaSJung-uk Kim static void bge_setpromisc(struct bge_softc *); 399e51a25f8SAlfred Perlstein static void bge_setmulti(struct bge_softc *); 400cb2eacc7SYaroslav Tykhiy static void bge_setvlan(struct bge_softc *); 40195d67482SBill Paul 402943787f3SPyun YongHyeon static int bge_newbuf_std(struct bge_softc *, int); 403943787f3SPyun YongHyeon static int bge_newbuf_jumbo(struct bge_softc *, int); 404e51a25f8SAlfred Perlstein static int bge_init_rx_ring_std(struct bge_softc *); 405e51a25f8SAlfred Perlstein static void bge_free_rx_ring_std(struct bge_softc *); 406e51a25f8SAlfred Perlstein static int bge_init_rx_ring_jumbo(struct bge_softc *); 407e51a25f8SAlfred Perlstein static void bge_free_rx_ring_jumbo(struct bge_softc *); 408e51a25f8SAlfred Perlstein static void bge_free_tx_ring(struct bge_softc *); 409e51a25f8SAlfred Perlstein static int bge_init_tx_ring(struct bge_softc *); 41095d67482SBill Paul 411e51a25f8SAlfred Perlstein static int bge_chipinit(struct bge_softc *); 412e51a25f8SAlfred Perlstein static int bge_blockinit(struct bge_softc *); 41395d67482SBill Paul 4145fea260fSMarius Strobl static int bge_has_eaddr(struct bge_softc *); 4153f74909aSGleb Smirnoff static uint32_t bge_readmem_ind(struct bge_softc *, int); 416e51a25f8SAlfred Perlstein static void bge_writemem_ind(struct bge_softc *, int, int); 41738cc658fSJohn Baldwin static void bge_writembx(struct bge_softc *, int, int); 41895d67482SBill Paul 
#ifdef notdef 4193f74909aSGleb Smirnoff static uint32_t bge_readreg_ind(struct bge_softc *, int); 42095d67482SBill Paul #endif 4219ba784dbSScott Long static void bge_writemem_direct(struct bge_softc *, int, int); 422e51a25f8SAlfred Perlstein static void bge_writereg_ind(struct bge_softc *, int, int); 4230aaf1057SPyun YongHyeon static void bge_set_max_readrq(struct bge_softc *); 42495d67482SBill Paul 425e51a25f8SAlfred Perlstein static int bge_miibus_readreg(device_t, int, int); 426e51a25f8SAlfred Perlstein static int bge_miibus_writereg(device_t, int, int, int); 427e51a25f8SAlfred Perlstein static void bge_miibus_statchg(device_t); 42875719184SGleb Smirnoff #ifdef DEVICE_POLLING 4291abcdbd1SAttilio Rao static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 43075719184SGleb Smirnoff #endif 43195d67482SBill Paul 4328cb1383cSDoug Ambrisko #define BGE_RESET_START 1 4338cb1383cSDoug Ambrisko #define BGE_RESET_STOP 2 4348cb1383cSDoug Ambrisko static void bge_sig_post_reset(struct bge_softc *, int); 4358cb1383cSDoug Ambrisko static void bge_sig_legacy(struct bge_softc *, int); 4368cb1383cSDoug Ambrisko static void bge_sig_pre_reset(struct bge_softc *, int); 4378cb1383cSDoug Ambrisko static int bge_reset(struct bge_softc *); 438dab5cd05SOleg Bulyzhin static void bge_link_upd(struct bge_softc *); 43995d67482SBill Paul 4406f8718a3SScott Long /* 4416f8718a3SScott Long * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may 4426f8718a3SScott Long * leak information to untrusted users. It is also known to cause alignment 4436f8718a3SScott Long * traps on certain architectures. 4446f8718a3SScott Long */ 4456f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 4466f8718a3SScott Long static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 4476f8718a3SScott Long static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); 4486f8718a3SScott Long static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); 4496f8718a3SScott Long #endif 4506f8718a3SScott Long static void bge_add_sysctls(struct bge_softc *); 451763757b2SScott Long static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); 4526f8718a3SScott Long 45395d67482SBill Paul static device_method_t bge_methods[] = { 45495d67482SBill Paul /* Device interface */ 45595d67482SBill Paul DEVMETHOD(device_probe, bge_probe), 45695d67482SBill Paul DEVMETHOD(device_attach, bge_attach), 45795d67482SBill Paul DEVMETHOD(device_detach, bge_detach), 45895d67482SBill Paul DEVMETHOD(device_shutdown, bge_shutdown), 45914afefa3SPawel Jakub Dawidek DEVMETHOD(device_suspend, bge_suspend), 46014afefa3SPawel Jakub Dawidek DEVMETHOD(device_resume, bge_resume), 46195d67482SBill Paul 46295d67482SBill Paul /* bus interface */ 46395d67482SBill Paul DEVMETHOD(bus_print_child, bus_generic_print_child), 46495d67482SBill Paul DEVMETHOD(bus_driver_added, bus_generic_driver_added), 46595d67482SBill Paul 46695d67482SBill Paul /* MII interface */ 46795d67482SBill Paul DEVMETHOD(miibus_readreg, bge_miibus_readreg), 46895d67482SBill Paul DEVMETHOD(miibus_writereg, bge_miibus_writereg), 46995d67482SBill Paul DEVMETHOD(miibus_statchg, bge_miibus_statchg), 47095d67482SBill Paul 47195d67482SBill Paul { 0, 0 } 47295d67482SBill Paul }; 47395d67482SBill Paul 47495d67482SBill Paul static driver_t bge_driver = { 47595d67482SBill Paul "bge", 47695d67482SBill Paul bge_methods, 47795d67482SBill Paul sizeof(struct bge_softc) 47895d67482SBill Paul }; 47995d67482SBill Paul 48095d67482SBill Paul static devclass_t bge_devclass; 48195d67482SBill Paul 482f246e4a1SMatthew N. 
Dodd DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 48395d67482SBill Paul DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 48495d67482SBill Paul 485f1a7e6d5SScott Long static int bge_allow_asf = 1; 486f1a7e6d5SScott Long 487f1a7e6d5SScott Long TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); 488f1a7e6d5SScott Long 489f1a7e6d5SScott Long SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); 490f1a7e6d5SScott Long SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0, 491f1a7e6d5SScott Long "Allow ASF mode if available"); 492c4529f41SMichael Reifenberger 49308013fd3SMarius Strobl #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" 49408013fd3SMarius Strobl #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" 49508013fd3SMarius Strobl #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" 49608013fd3SMarius Strobl #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" 49708013fd3SMarius Strobl #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" 49808013fd3SMarius Strobl 49908013fd3SMarius Strobl static int 5005fea260fSMarius Strobl bge_has_eaddr(struct bge_softc *sc) 50108013fd3SMarius Strobl { 50208013fd3SMarius Strobl #ifdef __sparc64__ 50308013fd3SMarius Strobl char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; 50408013fd3SMarius Strobl device_t dev; 50508013fd3SMarius Strobl uint32_t subvendor; 50608013fd3SMarius Strobl 50708013fd3SMarius Strobl dev = sc->bge_dev; 50808013fd3SMarius Strobl 50908013fd3SMarius Strobl /* 51008013fd3SMarius Strobl * The on-board BGEs found in sun4u machines aren't fitted with 51108013fd3SMarius Strobl * an EEPROM which means that we have to obtain the MAC address 51208013fd3SMarius Strobl * via OFW and that some tests will always fail. We distinguish 51308013fd3SMarius Strobl * such BGEs by the subvendor ID, which also has to be obtained 51408013fd3SMarius Strobl * from OFW instead of the PCI configuration space as the latter 51508013fd3SMarius Strobl * indicates Broadcom as the subvendor of the netboot interface. 51608013fd3SMarius Strobl * For early Blade 1500 and 2500 we even have to check the OFW 51708013fd3SMarius Strobl * device path as the subvendor ID always defaults to Broadcom 51808013fd3SMarius Strobl * there. 
51908013fd3SMarius Strobl */ 52008013fd3SMarius Strobl if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, 52108013fd3SMarius Strobl &subvendor, sizeof(subvendor)) == sizeof(subvendor) && 52208013fd3SMarius Strobl subvendor == SUN_VENDORID) 52308013fd3SMarius Strobl return (0); 52408013fd3SMarius Strobl memset(buf, 0, sizeof(buf)); 52508013fd3SMarius Strobl if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { 52608013fd3SMarius Strobl if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && 52708013fd3SMarius Strobl strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) 52808013fd3SMarius Strobl return (0); 52908013fd3SMarius Strobl if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && 53008013fd3SMarius Strobl strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) 53108013fd3SMarius Strobl return (0); 53208013fd3SMarius Strobl } 53308013fd3SMarius Strobl #endif 53408013fd3SMarius Strobl return (1); 53508013fd3SMarius Strobl } 53608013fd3SMarius Strobl 5373f74909aSGleb Smirnoff static uint32_t 5383f74909aSGleb Smirnoff bge_readmem_ind(struct bge_softc *sc, int off) 53995d67482SBill Paul { 54095d67482SBill Paul device_t dev; 5416f8718a3SScott Long uint32_t val; 54295d67482SBill Paul 54395d67482SBill Paul dev = sc->bge_dev; 54495d67482SBill Paul 54595d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 5466f8718a3SScott Long val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 5476f8718a3SScott Long pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 5486f8718a3SScott Long return (val); 54995d67482SBill Paul } 55095d67482SBill Paul 55195d67482SBill Paul static void 5523f74909aSGleb Smirnoff bge_writemem_ind(struct bge_softc *sc, int off, int val) 55395d67482SBill Paul { 55495d67482SBill Paul device_t dev; 55595d67482SBill Paul 55695d67482SBill Paul dev = sc->bge_dev; 55795d67482SBill Paul 55895d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 55995d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 5606f8718a3SScott Long pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 56195d67482SBill Paul } 56295d67482SBill Paul 5634f09c4c7SMarius Strobl /* 5644f09c4c7SMarius Strobl * PCI Express only 5654f09c4c7SMarius Strobl */ 5664f09c4c7SMarius Strobl static void 5670aaf1057SPyun YongHyeon bge_set_max_readrq(struct bge_softc *sc) 5684f09c4c7SMarius Strobl { 5694f09c4c7SMarius Strobl device_t dev; 5704f09c4c7SMarius Strobl uint16_t val; 5714f09c4c7SMarius Strobl 5724f09c4c7SMarius Strobl dev = sc->bge_dev; 5734f09c4c7SMarius Strobl 5740aaf1057SPyun YongHyeon val = pci_read_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2); 5750aaf1057SPyun YongHyeon if ((val & PCIM_EXP_CTL_MAX_READ_REQUEST) != 5764f09c4c7SMarius Strobl BGE_PCIE_DEVCTL_MAX_READRQ_4096) { 5774f09c4c7SMarius Strobl if (bootverbose) 5784f09c4c7SMarius Strobl device_printf(dev, "adjust device control 0x%04x ", 5794f09c4c7SMarius Strobl val); 5800aaf1057SPyun YongHyeon val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST; 5814f09c4c7SMarius Strobl val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; 5820aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 5830aaf1057SPyun YongHyeon val, 2); 5844f09c4c7SMarius Strobl if (bootverbose) 5854f09c4c7SMarius Strobl printf("-> 0x%04x\n", val); 5864f09c4c7SMarius Strobl } 5874f09c4c7SMarius Strobl } 5884f09c4c7SMarius Strobl 58995d67482SBill Paul #ifdef notdef 5903f74909aSGleb Smirnoff static uint32_t 5913f74909aSGleb Smirnoff bge_readreg_ind(struct bge_softc *sc, int off) 59295d67482SBill Paul { 
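	/*
	 * Indirect register access: latch the target offset in the
	 * BGE_PCI_REG_BASEADDR config-space window, then read the value
	 * back through BGE_PCI_REG_DATA.
	 */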
59395d67482SBill Paul device_t dev; 59495d67482SBill Paul 59595d67482SBill Paul dev = sc->bge_dev; 59695d67482SBill Paul 59795d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 59895d67482SBill Paul return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 59995d67482SBill Paul } 60095d67482SBill Paul #endif 60195d67482SBill Paul 60295d67482SBill Paul static void 6033f74909aSGleb Smirnoff bge_writereg_ind(struct bge_softc *sc, int off, int val) 60495d67482SBill Paul { 60595d67482SBill Paul device_t dev; 60695d67482SBill Paul 60795d67482SBill Paul dev = sc->bge_dev; 60895d67482SBill Paul 60995d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 61095d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 61195d67482SBill Paul } 61295d67482SBill Paul 6136f8718a3SScott Long static void 6146f8718a3SScott Long bge_writemem_direct(struct bge_softc *sc, int off, int val) 6156f8718a3SScott Long { 6166f8718a3SScott Long CSR_WRITE_4(sc, off, val); 6176f8718a3SScott Long } 6186f8718a3SScott Long 61938cc658fSJohn Baldwin static void 62038cc658fSJohn Baldwin bge_writembx(struct bge_softc *sc, int off, int val) 62138cc658fSJohn Baldwin { 62238cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 62338cc658fSJohn Baldwin off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 62438cc658fSJohn Baldwin 62538cc658fSJohn Baldwin CSR_WRITE_4(sc, off, val); 62638cc658fSJohn Baldwin } 62738cc658fSJohn Baldwin 628f41ac2beSBill Paul /* 629f41ac2beSBill Paul * Map a single buffer address. 630f41ac2beSBill Paul */ 631f41ac2beSBill Paul 632f41ac2beSBill Paul static void 6333f74909aSGleb Smirnoff bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 634f41ac2beSBill Paul { 635f41ac2beSBill Paul struct bge_dmamap_arg *ctx; 636f41ac2beSBill Paul 637f41ac2beSBill Paul if (error) 638f41ac2beSBill Paul return; 639f41ac2beSBill Paul 640f41ac2beSBill Paul ctx = arg; 641f41ac2beSBill Paul 642f41ac2beSBill Paul if (nseg > ctx->bge_maxsegs) { 643f41ac2beSBill Paul ctx->bge_maxsegs = 0; 644f41ac2beSBill Paul return; 645f41ac2beSBill Paul } 646f41ac2beSBill Paul 647f41ac2beSBill Paul ctx->bge_busaddr = segs->ds_addr; 648f41ac2beSBill Paul } 649f41ac2beSBill Paul 65038cc658fSJohn Baldwin static uint8_t 65138cc658fSJohn Baldwin bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 65238cc658fSJohn Baldwin { 65338cc658fSJohn Baldwin uint32_t access, byte = 0; 65438cc658fSJohn Baldwin int i; 65538cc658fSJohn Baldwin 65638cc658fSJohn Baldwin /* Lock. */ 65738cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 65838cc658fSJohn Baldwin for (i = 0; i < 8000; i++) { 65938cc658fSJohn Baldwin if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 66038cc658fSJohn Baldwin break; 66138cc658fSJohn Baldwin DELAY(20); 66238cc658fSJohn Baldwin } 66338cc658fSJohn Baldwin if (i == 8000) 66438cc658fSJohn Baldwin return (1); 66538cc658fSJohn Baldwin 66638cc658fSJohn Baldwin /* Enable access. 
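 * (set BGE_NVRAMACC_ENABLE so the controller will accept the read
 * command issued below)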
*/ 66738cc658fSJohn Baldwin access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 66838cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 66938cc658fSJohn Baldwin 67038cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 67138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 67238cc658fSJohn Baldwin for (i = 0; i < BGE_TIMEOUT * 10; i++) { 67338cc658fSJohn Baldwin DELAY(10); 67438cc658fSJohn Baldwin if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 67538cc658fSJohn Baldwin DELAY(10); 67638cc658fSJohn Baldwin break; 67738cc658fSJohn Baldwin } 67838cc658fSJohn Baldwin } 67938cc658fSJohn Baldwin 68038cc658fSJohn Baldwin if (i == BGE_TIMEOUT * 10) { 68138cc658fSJohn Baldwin if_printf(sc->bge_ifp, "nvram read timed out\n"); 68238cc658fSJohn Baldwin return (1); 68338cc658fSJohn Baldwin } 68438cc658fSJohn Baldwin 68538cc658fSJohn Baldwin /* Get result. */ 68638cc658fSJohn Baldwin byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 68738cc658fSJohn Baldwin 68838cc658fSJohn Baldwin *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 68938cc658fSJohn Baldwin 69038cc658fSJohn Baldwin /* Disable access. */ 69138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 69238cc658fSJohn Baldwin 69338cc658fSJohn Baldwin /* Unlock. */ 69438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 69538cc658fSJohn Baldwin CSR_READ_4(sc, BGE_NVRAM_SWARB); 69638cc658fSJohn Baldwin 69738cc658fSJohn Baldwin return (0); 69838cc658fSJohn Baldwin } 69938cc658fSJohn Baldwin 70038cc658fSJohn Baldwin /* 70138cc658fSJohn Baldwin * Read a sequence of bytes from NVRAM. 70238cc658fSJohn Baldwin */ 70338cc658fSJohn Baldwin static int 70438cc658fSJohn Baldwin bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) 70538cc658fSJohn Baldwin { 70638cc658fSJohn Baldwin int err = 0, i; 70738cc658fSJohn Baldwin uint8_t byte = 0; 70838cc658fSJohn Baldwin 70938cc658fSJohn Baldwin if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 71038cc658fSJohn Baldwin return (1); 71138cc658fSJohn Baldwin 71238cc658fSJohn Baldwin for (i = 0; i < cnt; i++) { 71338cc658fSJohn Baldwin err = bge_nvram_getbyte(sc, off + i, &byte); 71438cc658fSJohn Baldwin if (err) 71538cc658fSJohn Baldwin break; 71638cc658fSJohn Baldwin *(dest + i) = byte; 71738cc658fSJohn Baldwin } 71838cc658fSJohn Baldwin 71938cc658fSJohn Baldwin return (err ? 1 : 0); 72038cc658fSJohn Baldwin } 72138cc658fSJohn Baldwin 72295d67482SBill Paul /* 72395d67482SBill Paul * Read a byte of data stored in the EEPROM at address 'addr.' The 72495d67482SBill Paul * BCM570x supports both the traditional bitbang interface and an 72595d67482SBill Paul * auto access interface for reading the EEPROM. We use the auto 72695d67482SBill Paul * access method. 72795d67482SBill Paul */ 7283f74909aSGleb Smirnoff static uint8_t 7293f74909aSGleb Smirnoff bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 73095d67482SBill Paul { 73195d67482SBill Paul int i; 7323f74909aSGleb Smirnoff uint32_t byte = 0; 73395d67482SBill Paul 73495d67482SBill Paul /* 73595d67482SBill Paul * Enable use of auto EEPROM access so we can avoid 73695d67482SBill Paul * having to use the bitbang method. 73795d67482SBill Paul */ 73895d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 73995d67482SBill Paul 74095d67482SBill Paul /* Reset the EEPROM, load the clock period. 
 */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1.  On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time.  Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
8040434d1b8SBill Paul */ 805b1265c1aSJohn Polstra if (phy != 1) 80698b28ee5SBill Paul return (0); 80798b28ee5SBill Paul 80837ceeb4dSPaul Saab /* Reading with autopolling on may trigger PCI errors */ 80937ceeb4dSPaul Saab autopoll = CSR_READ_4(sc, BGE_MI_MODE); 81037ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 81137ceeb4dSPaul Saab BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 81237ceeb4dSPaul Saab DELAY(40); 81337ceeb4dSPaul Saab } 81437ceeb4dSPaul Saab 81595d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 81695d67482SBill Paul BGE_MIPHY(phy) | BGE_MIREG(reg)); 81795d67482SBill Paul 81895d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 819d5d23857SJung-uk Kim DELAY(10); 82095d67482SBill Paul val = CSR_READ_4(sc, BGE_MI_COMM); 82195d67482SBill Paul if (!(val & BGE_MICOMM_BUSY)) 82295d67482SBill Paul break; 82395d67482SBill Paul } 82495d67482SBill Paul 82595d67482SBill Paul if (i == BGE_TIMEOUT) { 8265fea260fSMarius Strobl device_printf(sc->bge_dev, 8275fea260fSMarius Strobl "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", 8285fea260fSMarius Strobl phy, reg, val); 82937ceeb4dSPaul Saab val = 0; 83037ceeb4dSPaul Saab goto done; 83195d67482SBill Paul } 83295d67482SBill Paul 83338cc658fSJohn Baldwin DELAY(5); 83495d67482SBill Paul val = CSR_READ_4(sc, BGE_MI_COMM); 83595d67482SBill Paul 83637ceeb4dSPaul Saab done: 83737ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 83837ceeb4dSPaul Saab BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 83937ceeb4dSPaul Saab DELAY(40); 84037ceeb4dSPaul Saab } 84137ceeb4dSPaul Saab 84295d67482SBill Paul if (val & BGE_MICOMM_READFAIL) 84395d67482SBill Paul return (0); 84495d67482SBill Paul 8450c8aa4eaSJung-uk Kim return (val & 0xFFFF); 84695d67482SBill Paul } 84795d67482SBill Paul 84895d67482SBill Paul static int 8493f74909aSGleb Smirnoff bge_miibus_writereg(device_t dev, int phy, int reg, int val) 85095d67482SBill Paul { 85195d67482SBill Paul struct bge_softc *sc; 8523f74909aSGleb Smirnoff uint32_t autopoll; 85395d67482SBill Paul int i; 85495d67482SBill Paul 85595d67482SBill Paul sc = device_get_softc(dev); 85695d67482SBill Paul 85738cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 85838cc658fSJohn Baldwin (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) 85938cc658fSJohn Baldwin return(0); 86038cc658fSJohn Baldwin 86137ceeb4dSPaul Saab /* Reading with autopolling on may trigger PCI errors */ 86237ceeb4dSPaul Saab autopoll = CSR_READ_4(sc, BGE_MI_MODE); 86337ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 86437ceeb4dSPaul Saab BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 86537ceeb4dSPaul Saab DELAY(40); 86637ceeb4dSPaul Saab } 86737ceeb4dSPaul Saab 86895d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 86995d67482SBill Paul BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 87095d67482SBill Paul 87195d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 872d5d23857SJung-uk Kim DELAY(10); 87338cc658fSJohn Baldwin if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 87438cc658fSJohn Baldwin DELAY(5); 87538cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 87695d67482SBill Paul break; 877d5d23857SJung-uk Kim } 87838cc658fSJohn Baldwin } 879d5d23857SJung-uk Kim 880d5d23857SJung-uk Kim if (i == BGE_TIMEOUT) { 88138cc658fSJohn Baldwin device_printf(sc->bge_dev, 88238cc658fSJohn Baldwin "PHY write timed out (phy %d, reg %d, val %d)\n", 88338cc658fSJohn Baldwin phy, reg, val); 884d5d23857SJung-uk Kim return (0); 88595d67482SBill Paul } 
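
	/* Restore automatic PHY polling if we had to disable it above. */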
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i)
{
	struct mbuf *m;
	struct bge_rx_bd *r;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}
	if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
		    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}
	map = sc->bge_cdata.bge_rx_std_dmamap[i];
	sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
	sc->bge_cdata.bge_rx_std_sparemap = map;
	sc->bge_cdata.bge_rx_std_chain[i] = m;
	r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = segs[0].ds_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor.  This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	bus_dmamap_t map;
	struct bge_extrx_bd *r;
	struct mbuf *m;
	int error, nsegs;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	m_cljget(m, M_DONTWAIT, MJUM9BYTES);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MJUM9BYTES;
	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
	if (error != 0) {
		m_freem(m);
		return (error);
	}

	if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
		bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
		    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}
	map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
	sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
	    sc->bge_cdata.bge_rx_jumbo_sparemap;
	sc->bge_cdata.bge_rx_jumbo_sparemap = map;
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
104795d67482SBill Paul */ 104895d67482SBill Paul static int 10493f74909aSGleb Smirnoff bge_init_rx_ring_std(struct bge_softc *sc) 105095d67482SBill Paul { 10513ee5d7daSPyun YongHyeon int error, i; 105295d67482SBill Paul 1053e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 105403e78bd0SPyun YongHyeon sc->bge_std = 0; 105595d67482SBill Paul for (i = 0; i < BGE_SSLOTS; i++) { 1056943787f3SPyun YongHyeon if ((error = bge_newbuf_std(sc, i)) != 0) 10573ee5d7daSPyun YongHyeon return (error); 105803e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 105995d67482SBill Paul }; 106095d67482SBill Paul 1061f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1062d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 1063f41ac2beSBill Paul 106495d67482SBill Paul sc->bge_std = i - 1; 106538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 106695d67482SBill Paul 106795d67482SBill Paul return (0); 106895d67482SBill Paul } 106995d67482SBill Paul 107095d67482SBill Paul static void 10713f74909aSGleb Smirnoff bge_free_rx_ring_std(struct bge_softc *sc) 107295d67482SBill Paul { 107395d67482SBill Paul int i; 107495d67482SBill Paul 107595d67482SBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 107695d67482SBill Paul if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 10770ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 1078e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], 1079e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 10800ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1081f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 1082e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1083e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_chain[i] = NULL; 108495d67482SBill Paul } 1085f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 108695d67482SBill Paul sizeof(struct bge_rx_bd)); 108795d67482SBill Paul } 108895d67482SBill Paul } 108995d67482SBill Paul 109095d67482SBill Paul static int 10913f74909aSGleb Smirnoff bge_init_rx_ring_jumbo(struct bge_softc *sc) 109295d67482SBill Paul { 109395d67482SBill Paul struct bge_rcb *rcb; 10943ee5d7daSPyun YongHyeon int error, i; 109595d67482SBill Paul 1096e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); 109703e78bd0SPyun YongHyeon sc->bge_jumbo = 0; 109895d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1099943787f3SPyun YongHyeon if ((error = bge_newbuf_jumbo(sc, i)) != 0) 11003ee5d7daSPyun YongHyeon return (error); 110103e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 110295d67482SBill Paul }; 110395d67482SBill Paul 1104f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1105d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 1106f41ac2beSBill Paul 110795d67482SBill Paul sc->bge_jumbo = i - 1; 110895d67482SBill Paul 1109f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 11101be6acb7SGleb Smirnoff rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 11111be6acb7SGleb Smirnoff BGE_RCB_FLAG_USE_EXT_RX_BD); 111267111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 111395d67482SBill Paul 111438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 111595d67482SBill Paul 111695d67482SBill Paul return (0); 111795d67482SBill Paul } 111895d67482SBill Paul 111995d67482SBill Paul static void 
11203f74909aSGleb Smirnoff bge_free_rx_ring_jumbo(struct bge_softc *sc) 112195d67482SBill Paul { 112295d67482SBill Paul int i; 112395d67482SBill Paul 112495d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 112595d67482SBill Paul if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1126e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 1127e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], 1128e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 1129f41ac2beSBill Paul bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 1130f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1131e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1132e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 113395d67482SBill Paul } 1134f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 11351be6acb7SGleb Smirnoff sizeof(struct bge_extrx_bd)); 113695d67482SBill Paul } 113795d67482SBill Paul } 113895d67482SBill Paul 113995d67482SBill Paul static void 11403f74909aSGleb Smirnoff bge_free_tx_ring(struct bge_softc *sc) 114195d67482SBill Paul { 114295d67482SBill Paul int i; 114395d67482SBill Paul 1144f41ac2beSBill Paul if (sc->bge_ldata.bge_tx_ring == NULL) 114595d67482SBill Paul return; 114695d67482SBill Paul 114795d67482SBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 114895d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 11490ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 1150e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[i], 1151e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 11520ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 1153f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 1154e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[i]); 1155e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[i] = NULL; 115695d67482SBill Paul } 1157f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 115895d67482SBill Paul sizeof(struct bge_tx_bd)); 115995d67482SBill Paul } 116095d67482SBill Paul } 116195d67482SBill Paul 116295d67482SBill Paul static int 11633f74909aSGleb Smirnoff bge_init_tx_ring(struct bge_softc *sc) 116495d67482SBill Paul { 116595d67482SBill Paul sc->bge_txcnt = 0; 116695d67482SBill Paul sc->bge_tx_saved_considx = 0; 11673927098fSPaul Saab 1168e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1169e6bf277eSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 11705c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 1171e6bf277eSPyun YongHyeon 117214bbd30fSGleb Smirnoff /* Initialize transmit producer index for host-memory send ring. */ 117314bbd30fSGleb Smirnoff sc->bge_tx_prodidx = 0; 117438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 117514bbd30fSGleb Smirnoff 11763927098fSPaul Saab /* 5700 b2 errata */ 1177e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 117838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 11793927098fSPaul Saab 118014bbd30fSGleb Smirnoff /* NIC-memory send ring not used; initialize to zero. 
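 * All transmit descriptors are queued through the host-memory send
 * ring, whose producer index was primed via BGE_MBX_TX_HOST_PROD0_LO
 * above, so the NIC-internal ring's producer index is simply parked
 * at zero.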
*/ 118138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 11823927098fSPaul Saab /* 5700 b2 errata */ 1183e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 118438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 118595d67482SBill Paul 118695d67482SBill Paul return (0); 118795d67482SBill Paul } 118895d67482SBill Paul 118995d67482SBill Paul static void 11903e9b1bcaSJung-uk Kim bge_setpromisc(struct bge_softc *sc) 11913e9b1bcaSJung-uk Kim { 11923e9b1bcaSJung-uk Kim struct ifnet *ifp; 11933e9b1bcaSJung-uk Kim 11943e9b1bcaSJung-uk Kim BGE_LOCK_ASSERT(sc); 11953e9b1bcaSJung-uk Kim 11963e9b1bcaSJung-uk Kim ifp = sc->bge_ifp; 11973e9b1bcaSJung-uk Kim 119845ee6ab3SJung-uk Kim /* Enable or disable promiscuous mode as needed. */ 11993e9b1bcaSJung-uk Kim if (ifp->if_flags & IFF_PROMISC) 120045ee6ab3SJung-uk Kim BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 12013e9b1bcaSJung-uk Kim else 120245ee6ab3SJung-uk Kim BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 12033e9b1bcaSJung-uk Kim } 12043e9b1bcaSJung-uk Kim 12053e9b1bcaSJung-uk Kim static void 12063f74909aSGleb Smirnoff bge_setmulti(struct bge_softc *sc) 120795d67482SBill Paul { 120895d67482SBill Paul struct ifnet *ifp; 120995d67482SBill Paul struct ifmultiaddr *ifma; 12103f74909aSGleb Smirnoff uint32_t hashes[4] = { 0, 0, 0, 0 }; 121195d67482SBill Paul int h, i; 121295d67482SBill Paul 12130f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 12140f9bd73bSSam Leffler 1215fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 121695d67482SBill Paul 121795d67482SBill Paul if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 121895d67482SBill Paul for (i = 0; i < 4; i++) 12190c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 122095d67482SBill Paul return; 122195d67482SBill Paul } 122295d67482SBill Paul 122395d67482SBill Paul /* First, zot all the existing filters. */ 122495d67482SBill Paul for (i = 0; i < 4; i++) 122595d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 122695d67482SBill Paul 122795d67482SBill Paul /* Now program new ones. */ 1228eb956cd0SRobert Watson if_maddr_rlock(ifp); 122995d67482SBill Paul TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 123095d67482SBill Paul if (ifma->ifma_addr->sa_family != AF_LINK) 123195d67482SBill Paul continue; 12320e939c0cSChristian Weisgerber h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 12330c8aa4eaSJung-uk Kim ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 12340c8aa4eaSJung-uk Kim hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 123595d67482SBill Paul } 1236eb956cd0SRobert Watson if_maddr_runlock(ifp); 123795d67482SBill Paul 123895d67482SBill Paul for (i = 0; i < 4; i++) 123995d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 124095d67482SBill Paul } 124195d67482SBill Paul 12428cb1383cSDoug Ambrisko static void 1243cb2eacc7SYaroslav Tykhiy bge_setvlan(struct bge_softc *sc) 1244cb2eacc7SYaroslav Tykhiy { 1245cb2eacc7SYaroslav Tykhiy struct ifnet *ifp; 1246cb2eacc7SYaroslav Tykhiy 1247cb2eacc7SYaroslav Tykhiy BGE_LOCK_ASSERT(sc); 1248cb2eacc7SYaroslav Tykhiy 1249cb2eacc7SYaroslav Tykhiy ifp = sc->bge_ifp; 1250cb2eacc7SYaroslav Tykhiy 1251cb2eacc7SYaroslav Tykhiy /* Enable or disable VLAN tag stripping as needed. 
*/ 1252cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1253cb2eacc7SYaroslav Tykhiy BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1254cb2eacc7SYaroslav Tykhiy else 1255cb2eacc7SYaroslav Tykhiy BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1256cb2eacc7SYaroslav Tykhiy } 1257cb2eacc7SYaroslav Tykhiy 1258cb2eacc7SYaroslav Tykhiy static void 12598cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, type) 12608cb1383cSDoug Ambrisko struct bge_softc *sc; 12618cb1383cSDoug Ambrisko int type; 12628cb1383cSDoug Ambrisko { 12638cb1383cSDoug Ambrisko /* 12648cb1383cSDoug Ambrisko * Some chips don't like this so only do this if ASF is enabled 12658cb1383cSDoug Ambrisko */ 12668cb1383cSDoug Ambrisko if (sc->bge_asf_mode) 12678cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 12688cb1383cSDoug Ambrisko 12698cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12708cb1383cSDoug Ambrisko switch (type) { 12718cb1383cSDoug Ambrisko case BGE_RESET_START: 12728cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 12738cb1383cSDoug Ambrisko break; 12748cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12758cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 12768cb1383cSDoug Ambrisko break; 12778cb1383cSDoug Ambrisko } 12788cb1383cSDoug Ambrisko } 12798cb1383cSDoug Ambrisko } 12808cb1383cSDoug Ambrisko 12818cb1383cSDoug Ambrisko static void 12828cb1383cSDoug Ambrisko bge_sig_post_reset(sc, type) 12838cb1383cSDoug Ambrisko struct bge_softc *sc; 12848cb1383cSDoug Ambrisko int type; 12858cb1383cSDoug Ambrisko { 12868cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12878cb1383cSDoug Ambrisko switch (type) { 12888cb1383cSDoug Ambrisko case BGE_RESET_START: 12898cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001); 12908cb1383cSDoug Ambrisko /* START DONE */ 12918cb1383cSDoug Ambrisko break; 12928cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12938cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); 12948cb1383cSDoug Ambrisko break; 12958cb1383cSDoug Ambrisko } 12968cb1383cSDoug Ambrisko } 12978cb1383cSDoug Ambrisko } 12988cb1383cSDoug Ambrisko 12998cb1383cSDoug Ambrisko static void 13008cb1383cSDoug Ambrisko bge_sig_legacy(sc, type) 13018cb1383cSDoug Ambrisko struct bge_softc *sc; 13028cb1383cSDoug Ambrisko int type; 13038cb1383cSDoug Ambrisko { 13048cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 13058cb1383cSDoug Ambrisko switch (type) { 13068cb1383cSDoug Ambrisko case BGE_RESET_START: 13078cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 13088cb1383cSDoug Ambrisko break; 13098cb1383cSDoug Ambrisko case BGE_RESET_STOP: 13108cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 13118cb1383cSDoug Ambrisko break; 13128cb1383cSDoug Ambrisko } 13138cb1383cSDoug Ambrisko } 13148cb1383cSDoug Ambrisko } 13158cb1383cSDoug Ambrisko 13168cb1383cSDoug Ambrisko void bge_stop_fw(struct bge_softc *); 13178cb1383cSDoug Ambrisko void 13188cb1383cSDoug Ambrisko bge_stop_fw(sc) 13198cb1383cSDoug Ambrisko struct bge_softc *sc; 13208cb1383cSDoug Ambrisko { 13218cb1383cSDoug Ambrisko int i; 13228cb1383cSDoug Ambrisko 13238cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 13248cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); 13258cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 132639153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 13278cb1383cSDoug Ambrisko 
13288cb1383cSDoug Ambrisko for (i = 0; i < 100; i++ ) { 13298cb1383cSDoug Ambrisko if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) 13308cb1383cSDoug Ambrisko break; 13318cb1383cSDoug Ambrisko DELAY(10); 13328cb1383cSDoug Ambrisko } 13338cb1383cSDoug Ambrisko } 13348cb1383cSDoug Ambrisko } 13358cb1383cSDoug Ambrisko 133695d67482SBill Paul /* 1337c9ffd9f0SMarius Strobl * Do endian, PCI and DMA initialization. 133895d67482SBill Paul */ 133995d67482SBill Paul static int 13403f74909aSGleb Smirnoff bge_chipinit(struct bge_softc *sc) 134195d67482SBill Paul { 13423f74909aSGleb Smirnoff uint32_t dma_rw_ctl; 134395d67482SBill Paul int i; 134495d67482SBill Paul 13458cb1383cSDoug Ambrisko /* Set endianness before we access any non-PCI registers. */ 1346e907febfSPyun YongHyeon pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 134795d67482SBill Paul 134895d67482SBill Paul /* Clear the MAC control register */ 134995d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 135095d67482SBill Paul 135195d67482SBill Paul /* 135295d67482SBill Paul * Clear the MAC statistics block in the NIC's 135395d67482SBill Paul * internal memory. 135495d67482SBill Paul */ 135595d67482SBill Paul for (i = BGE_STATS_BLOCK; 13563f74909aSGleb Smirnoff i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 135795d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 135895d67482SBill Paul 135995d67482SBill Paul for (i = BGE_STATUS_BLOCK; 13603f74909aSGleb Smirnoff i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 136195d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 136295d67482SBill Paul 1363186f842bSJung-uk Kim /* 1364186f842bSJung-uk Kim * Set up the PCI DMA control register. 1365186f842bSJung-uk Kim */ 1366186f842bSJung-uk Kim dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | 1367186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); 1368652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 1369186f842bSJung-uk Kim /* Read watermark not used, 128 bytes for write. */ 1370186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1371652ae483SGleb Smirnoff } else if (sc->bge_flags & BGE_FLAG_PCIX) { 13724c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) { 1373186f842bSJung-uk Kim /* 256 bytes for read and write. */ 1374186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 1375186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 1376186f842bSJung-uk Kim dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? 1377186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : 1378186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1379186f842bSJung-uk Kim } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1380186f842bSJung-uk Kim /* 1536 bytes for read, 384 bytes for write. */ 1381186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1382186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1383186f842bSJung-uk Kim } else { 1384186f842bSJung-uk Kim /* 384 bytes for read and write. */ 1385186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 1386186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 13870c8aa4eaSJung-uk Kim 0x0F; 1388186f842bSJung-uk Kim } 1389e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1390e0ced696SPaul Saab sc->bge_asicrev == BGE_ASICREV_BCM5704) { 13913f74909aSGleb Smirnoff uint32_t tmp; 13925cba12d3SPaul Saab 1393186f842bSJung-uk Kim /* Set ONE_DMA_AT_ONCE for hardware workaround. 
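 * The low bits of BGE_PCI_CLKCTL are sampled below; a reading of 6 or 7
 * apparently identifies the bus mode on which the 5703/5704 need DMA
 * transactions forced to complete one at a time.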
*/
13940c8aa4eaSJung-uk Kim tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1395186f842bSJung-uk Kim if (tmp == 6 || tmp == 7)
1396186f842bSJung-uk Kim dma_rw_ctl |=
1397186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
13985cba12d3SPaul Saab
1399186f842bSJung-uk Kim /* Set PCI-X DMA write workaround. */
1400186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1401186f842bSJung-uk Kim }
1402186f842bSJung-uk Kim } else {
1403186f842bSJung-uk Kim /* Conventional PCI bus: 256 bytes for read and write. */
1404186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1405186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1406186f842bSJung-uk Kim
1407186f842bSJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1408186f842bSJung-uk Kim sc->bge_asicrev != BGE_ASICREV_BCM5750)
1409186f842bSJung-uk Kim dma_rw_ctl |= 0x0F;
1410186f842bSJung-uk Kim }
1411186f842bSJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1412186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5701)
1413186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1414186f842bSJung-uk Kim BGE_PCIDMARWCTL_ASRT_ALL_BE;
1415e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1416186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5704)
14175cba12d3SPaul Saab dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
14185cba12d3SPaul Saab pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
141995d67482SBill Paul
142095d67482SBill Paul /*
142195d67482SBill Paul * Set up general mode register.
142295d67482SBill Paul */
1423e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
142495d67482SBill Paul BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1425ee7ef91cSOleg Bulyzhin BGE_MODECTL_TX_NO_PHDR_CSUM);
142695d67482SBill Paul
142795d67482SBill Paul /*
142890447aadSMarius Strobl * BCM5701 B5 has a bug causing data corruption when using
142990447aadSMarius Strobl * 64-bit DMA reads, which can be terminated early and then
143090447aadSMarius Strobl * completed later as 32-bit accesses, in combination with
143190447aadSMarius Strobl * certain bridges.
143290447aadSMarius Strobl */
143390447aadSMarius Strobl if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
143490447aadSMarius Strobl sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
143590447aadSMarius Strobl BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32);
143690447aadSMarius Strobl
143790447aadSMarius Strobl /*
14388cb1383cSDoug Ambrisko * Tell the firmware the driver is running.
14398cb1383cSDoug Ambrisko */
14408cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP)
14418cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
14428cb1383cSDoug Ambrisko
14438cb1383cSDoug Ambrisko /*
1444ea13bdd5SJohn Polstra * Disable memory write invalidate. Apparently it is not supported
1445c9ffd9f0SMarius Strobl * properly by these devices. Also ensure that INTx isn't disabled,
1446c9ffd9f0SMarius Strobl * as these chips need it even when using MSI.
144795d67482SBill Paul */
1448c9ffd9f0SMarius Strobl PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD,
1449c9ffd9f0SMarius Strobl PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4);
145095d67482SBill Paul
145195d67482SBill Paul /* Set the timer prescaler (always 66Mhz) */
14520c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
145395d67482SBill Paul
145438cc658fSJohn Baldwin /* XXX: The Linux tg3 driver does this at the start of brgphy_reset.
*/ 145538cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 145638cc658fSJohn Baldwin DELAY(40); /* XXX */ 145738cc658fSJohn Baldwin 145838cc658fSJohn Baldwin /* Put PHY into ready state */ 145938cc658fSJohn Baldwin BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 146038cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 146138cc658fSJohn Baldwin DELAY(40); 146238cc658fSJohn Baldwin } 146338cc658fSJohn Baldwin 146495d67482SBill Paul return (0); 146595d67482SBill Paul } 146695d67482SBill Paul 146795d67482SBill Paul static int 14683f74909aSGleb Smirnoff bge_blockinit(struct bge_softc *sc) 146995d67482SBill Paul { 147095d67482SBill Paul struct bge_rcb *rcb; 1471e907febfSPyun YongHyeon bus_size_t vrcb; 1472e907febfSPyun YongHyeon bge_hostaddr taddr; 14736f8718a3SScott Long uint32_t val; 147495d67482SBill Paul int i; 147595d67482SBill Paul 147695d67482SBill Paul /* 147795d67482SBill Paul * Initialize the memory window pointer register so that 147895d67482SBill Paul * we can access the first 32K of internal NIC RAM. This will 147995d67482SBill Paul * allow us to set up the TX send ring RCBs and the RX return 148095d67482SBill Paul * ring RCBs, plus other things which live in NIC memory. 148195d67482SBill Paul */ 148295d67482SBill Paul CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 148395d67482SBill Paul 1484822f63fcSBill Paul /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1485822f63fcSBill Paul 14867ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 148795d67482SBill Paul /* Configure mbuf memory pool */ 14880dae9719SJung-uk Kim CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1489822f63fcSBill Paul if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1490822f63fcSBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1491822f63fcSBill Paul else 149295d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 149395d67482SBill Paul 149495d67482SBill Paul /* Configure DMA resource pool */ 14950434d1b8SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 14960434d1b8SBill Paul BGE_DMA_DESCRIPTORS); 149795d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 14980434d1b8SBill Paul } 149995d67482SBill Paul 150095d67482SBill Paul /* Configure mbuf pool watermarks */ 150138cc658fSJohn Baldwin if (!BGE_IS_5705_PLUS(sc)) { 1502fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1503fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1504fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 150538cc658fSJohn Baldwin } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 150638cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 150738cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 150838cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 150938cc658fSJohn Baldwin } else { 151038cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 151138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 151238cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 151338cc658fSJohn Baldwin } 151495d67482SBill Paul 151595d67482SBill Paul /* Configure DMA resource watermarks */ 151695d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 151795d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 151895d67482SBill Paul 151995d67482SBill Paul /* Enable buffer manager */ 15207ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 152195d67482SBill Paul 
CSR_WRITE_4(sc, BGE_BMAN_MODE, 152295d67482SBill Paul BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 152395d67482SBill Paul 152495d67482SBill Paul /* Poll for buffer manager start indication */ 152595d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1526d5d23857SJung-uk Kim DELAY(10); 15270c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 152895d67482SBill Paul break; 152995d67482SBill Paul } 153095d67482SBill Paul 153195d67482SBill Paul if (i == BGE_TIMEOUT) { 1532fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1533fe806fdaSPyun YongHyeon "buffer manager failed to start\n"); 153495d67482SBill Paul return (ENXIO); 153595d67482SBill Paul } 15360434d1b8SBill Paul } 153795d67482SBill Paul 153895d67482SBill Paul /* Enable flow-through queues */ 15390c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 154095d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 154195d67482SBill Paul 154295d67482SBill Paul /* Wait until queue initialization is complete */ 154395d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1544d5d23857SJung-uk Kim DELAY(10); 154595d67482SBill Paul if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 154695d67482SBill Paul break; 154795d67482SBill Paul } 154895d67482SBill Paul 154995d67482SBill Paul if (i == BGE_TIMEOUT) { 1550fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "flow-through queue init failed\n"); 155195d67482SBill Paul return (ENXIO); 155295d67482SBill Paul } 155395d67482SBill Paul 155495d67482SBill Paul /* Initialize the standard RX ring control block */ 1555f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1556f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_lo = 1557f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1558f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_hi = 1559f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1560f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1561f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 15627ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 15630434d1b8SBill Paul rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 15640434d1b8SBill Paul else 15650434d1b8SBill Paul rcb->bge_maxlen_flags = 15660434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 156795d67482SBill Paul rcb->bge_nicaddr = BGE_STD_RX_RINGS; 15680c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 15690c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1570f41ac2beSBill Paul 157167111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 157267111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 157395d67482SBill Paul 157495d67482SBill Paul /* 157595d67482SBill Paul * Initialize the jumbo RX ring control block 157695d67482SBill Paul * We set the 'ring disabled' bit in the flags 157795d67482SBill Paul * field until we're actually ready to start 157895d67482SBill Paul * using this ring (i.e. once we set the MTU 157995d67482SBill Paul * high enough to require it). 
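 * The ring is also flagged BGE_RCB_FLAG_USE_EXT_RX_BD, matching the
 * extended multi-segment descriptors that bge_newbuf_jumbo() fills in.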
158095d67482SBill Paul */
15814c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) {
1582f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1583f41ac2beSBill Paul
1584f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_lo =
1585f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1586f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_hi =
1587f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1588f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1589f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map,
1590f41ac2beSBill Paul BUS_DMASYNC_PREREAD);
15911be6acb7SGleb Smirnoff rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
15921be6acb7SGleb Smirnoff BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
159395d67482SBill Paul rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
159467111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
159567111612SJohn Polstra rcb->bge_hostaddr.bge_addr_hi);
159667111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
159767111612SJohn Polstra rcb->bge_hostaddr.bge_addr_lo);
1598f41ac2beSBill Paul
15990434d1b8SBill Paul CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
16000434d1b8SBill Paul rcb->bge_maxlen_flags);
160167111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
160295d67482SBill Paul
160395d67482SBill Paul /* Set up dummy disabled mini ring RCB */
1604f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
160567111612SJohn Polstra rcb->bge_maxlen_flags =
160667111612SJohn Polstra BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
16070434d1b8SBill Paul CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
16080434d1b8SBill Paul rcb->bge_maxlen_flags);
16090434d1b8SBill Paul }
161095d67482SBill Paul
161195d67482SBill Paul /*
161295d67482SBill Paul * Set the BD ring replenish thresholds. The recommended
161395d67482SBill Paul * values are 1/8th the number of descriptors allocated to
161495d67482SBill Paul * each ring.
16159ba784dbSScott Long * XXX The 5754 requires a lower threshold, so it might be a
16169ba784dbSScott Long * requirement of all 575x family chips. The Linux driver sets
16179ba784dbSScott Long * the lower threshold for all 5705 family chips as well, but there
16189ba784dbSScott Long * are reports that it might not need to be so strict.
161938cc658fSJohn Baldwin *
162038cc658fSJohn Baldwin * XXX Linux does some extra fiddling here for the 5906 parts as
162138cc658fSJohn Baldwin * well.
162295d67482SBill Paul */
16235345bad0SScott Long if (BGE_IS_5705_PLUS(sc))
16246f8718a3SScott Long val = 8;
16256f8718a3SScott Long else
16266f8718a3SScott Long val = BGE_STD_RX_RING_CNT / 8;
16276f8718a3SScott Long CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
162895d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
162995d67482SBill Paul
163095d67482SBill Paul /*
163195d67482SBill Paul * Disable all unused send rings by setting the 'ring disabled'
163295d67482SBill Paul * bit in the flags field of all the TX send ring control blocks.
163395d67482SBill Paul * These are located in NIC memory.
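 * Each RCB occupies sizeof(struct bge_rcb) bytes, so send ring N sits
 * at BGE_MEMWIN_START + BGE_SEND_RING_RCB + N * sizeof(struct bge_rcb);
 * the loop below just steps 'vrcb' across all of them through the
 * memory window.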
163495d67482SBill Paul */ 1635e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 163695d67482SBill Paul for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1637e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1638e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1639e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1640e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 164195d67482SBill Paul } 164295d67482SBill Paul 164395d67482SBill Paul /* Configure TX RCB 0 (we use only the first ring) */ 1644e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1645e907febfSPyun YongHyeon BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1646e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1647e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1648e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1649e907febfSPyun YongHyeon BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 16507ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 1651e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1652e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 165395d67482SBill Paul 165495d67482SBill Paul /* Disable all unused RX return rings */ 1655e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 165695d67482SBill Paul for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1657e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1658e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1659e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 16600434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1661e907febfSPyun YongHyeon BGE_RCB_FLAG_RING_DISABLED)); 1662e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 166338cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 16643f74909aSGleb Smirnoff (i * (sizeof(uint64_t))), 0); 1665e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 166695d67482SBill Paul } 166795d67482SBill Paul 166895d67482SBill Paul /* Initialize RX ring indexes */ 166938cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 167038cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 167138cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 167295d67482SBill Paul 167395d67482SBill Paul /* 167495d67482SBill Paul * Set up RX return ring 0 167595d67482SBill Paul * Note that the NIC address for RX return rings is 0x00000000. 167695d67482SBill Paul * The return rings live entirely within the host, so the 167795d67482SBill Paul * nicaddr field in the RCB isn't used. 
167895d67482SBill Paul */
1679e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1680e907febfSPyun YongHyeon BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1681e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1682e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1683e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1684e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1685e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
168695d67482SBill Paul
168795d67482SBill Paul /* Set random backoff seed for TX */
168895d67482SBill Paul CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
16894a0d6638SRuslan Ermilov IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
16904a0d6638SRuslan Ermilov IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
16914a0d6638SRuslan Ermilov IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
169295d67482SBill Paul BGE_TX_BACKOFF_SEED_MASK);
169395d67482SBill Paul
169495d67482SBill Paul /* Set inter-packet gap */
169595d67482SBill Paul CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
169695d67482SBill Paul
169795d67482SBill Paul /*
169895d67482SBill Paul * Specify which ring to use for packets that don't match
169995d67482SBill Paul * any RX rules.
170095d67482SBill Paul */
170195d67482SBill Paul CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
170295d67482SBill Paul
170395d67482SBill Paul /*
170495d67482SBill Paul * Configure number of RX lists. One interrupt distribution
170595d67482SBill Paul * list, sixteen active lists, one bad frames class.
170695d67482SBill Paul */
170795d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
170895d67482SBill Paul
170995d67482SBill Paul /* Initialize RX list placement stats mask. */
17100c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
171195d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
171295d67482SBill Paul
171395d67482SBill Paul /* Disable host coalescing until we get it set up */
171495d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
171595d67482SBill Paul
171695d67482SBill Paul /* Poll to make sure it's shut down.
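 * The coalescing engine must be idle before its tick and BD-count
 * thresholds are reprogrammed below; allow it up to BGE_TIMEOUT polls
 * of 10us each.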
*/ 171795d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1718d5d23857SJung-uk Kim DELAY(10); 171995d67482SBill Paul if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 172095d67482SBill Paul break; 172195d67482SBill Paul } 172295d67482SBill Paul 172395d67482SBill Paul if (i == BGE_TIMEOUT) { 1724fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1725fe806fdaSPyun YongHyeon "host coalescing engine failed to idle\n"); 172695d67482SBill Paul return (ENXIO); 172795d67482SBill Paul } 172895d67482SBill Paul 172995d67482SBill Paul /* Set up host coalescing defaults */ 173095d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 173195d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 173295d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 173395d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 17347ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 173595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 173695d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 17370434d1b8SBill Paul } 1738b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 1739b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 174095d67482SBill Paul 174195d67482SBill Paul /* Set up address of statistics block */ 17427ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 1743f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1744f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 174595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1746f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 17470434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 174895d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 17490434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 17500434d1b8SBill Paul } 17510434d1b8SBill Paul 17520434d1b8SBill Paul /* Set up address of status block */ 1753f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1754f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 175595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1756f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1757f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1758f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 175995d67482SBill Paul 176030f57f61SPyun YongHyeon /* Set up status block size. */ 176130f57f61SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 176230f57f61SPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5700_C0) 176330f57f61SPyun YongHyeon val = BGE_STATBLKSZ_FULL; 176430f57f61SPyun YongHyeon else 176530f57f61SPyun YongHyeon val = BGE_STATBLKSZ_32BYTE; 176630f57f61SPyun YongHyeon 176795d67482SBill Paul /* Turn on host coalescing state machine */ 176830f57f61SPyun YongHyeon CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); 176995d67482SBill Paul 177095d67482SBill Paul /* Turn on RX BD completion state machine and enable attentions */ 177195d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDC_MODE, 177295d67482SBill Paul BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 177395d67482SBill Paul 177495d67482SBill Paul /* Turn on RX list placement state machine */ 177595d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 177695d67482SBill Paul 177795d67482SBill Paul /* Turn on RX list selector state machine. 
*/ 17787ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 177995d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 178095d67482SBill Paul 178195d67482SBill Paul /* Turn on DMA, clear stats */ 178295d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB | 178395d67482SBill Paul BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | 178495d67482SBill Paul BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | 178595d67482SBill Paul BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB | 1786652ae483SGleb Smirnoff ((sc->bge_flags & BGE_FLAG_TBI) ? 1787652ae483SGleb Smirnoff BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 178895d67482SBill Paul 178995d67482SBill Paul /* Set misc. local control, enable interrupts on attentions */ 179095d67482SBill Paul CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 179195d67482SBill Paul 179295d67482SBill Paul #ifdef notdef 179395d67482SBill Paul /* Assert GPIO pins for PHY reset */ 179495d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | 179595d67482SBill Paul BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); 179695d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | 179795d67482SBill Paul BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); 179895d67482SBill Paul #endif 179995d67482SBill Paul 180095d67482SBill Paul /* Turn on DMA completion state machine */ 18017ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 180295d67482SBill Paul CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 180395d67482SBill Paul 18046f8718a3SScott Long val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 18056f8718a3SScott Long 18066f8718a3SScott Long /* Enable host coalescing bug fix. */ 1807a5779553SStanislav Sedov if (BGE_IS_5755_PLUS(sc)) 18083889907fSStanislav Sedov val |= BGE_WDMAMODE_STATUS_TAG_FIX; 18096f8718a3SScott Long 181095d67482SBill Paul /* Turn on write DMA state machine */ 18116f8718a3SScott Long CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 18124f09c4c7SMarius Strobl DELAY(40); 181395d67482SBill Paul 181495d67482SBill Paul /* Turn on read DMA state machine */ 18154f09c4c7SMarius Strobl val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1816a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1817a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1818a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM57780) 1819a5779553SStanislav Sedov val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1820a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1821a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 18224f09c4c7SMarius Strobl if (sc->bge_flags & BGE_FLAG_PCIE) 18234f09c4c7SMarius Strobl val |= BGE_RDMAMODE_FIFO_LONG_BURST; 1824ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) 1825ca3f1187SPyun YongHyeon val |= BGE_RDMAMODE_TSO4_ENABLE; 18264f09c4c7SMarius Strobl CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 18274f09c4c7SMarius Strobl DELAY(40); 182895d67482SBill Paul 182995d67482SBill Paul /* Turn on RX data completion state machine */ 183095d67482SBill Paul CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 183195d67482SBill Paul 183295d67482SBill Paul /* Turn on RX BD initiator state machine */ 183395d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 183495d67482SBill Paul 183595d67482SBill Paul /* Turn on RX data and RX BD initiator state machine */ 183695d67482SBill Paul CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 183795d67482SBill Paul 183895d67482SBill Paul /* Turn on Mbuf cluster free state machine */ 18397ee00338SJung-uk Kim if 
(!(BGE_IS_5705_PLUS(sc))) 184095d67482SBill Paul CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 184195d67482SBill Paul 184295d67482SBill Paul /* Turn on send BD completion state machine */ 184395d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 184495d67482SBill Paul 184595d67482SBill Paul /* Turn on send data completion state machine */ 1846a5779553SStanislav Sedov val = BGE_SDCMODE_ENABLE; 1847a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 1848a5779553SStanislav Sedov val |= BGE_SDCMODE_CDELAY; 1849a5779553SStanislav Sedov CSR_WRITE_4(sc, BGE_SDC_MODE, val); 185095d67482SBill Paul 185195d67482SBill Paul /* Turn on send data initiator state machine */ 1852ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) 1853ca3f1187SPyun YongHyeon CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08); 1854ca3f1187SPyun YongHyeon else 185595d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 185695d67482SBill Paul 185795d67482SBill Paul /* Turn on send BD initiator state machine */ 185895d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 185995d67482SBill Paul 186095d67482SBill Paul /* Turn on send BD selector state machine */ 186195d67482SBill Paul CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 186295d67482SBill Paul 18630c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 186495d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 186595d67482SBill Paul BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 186695d67482SBill Paul 186795d67482SBill Paul /* ack/clear link change events */ 186895d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 18690434d1b8SBill Paul BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 18700434d1b8SBill Paul BGE_MACSTAT_LINK_CHANGED); 1871f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_MI_STS, 0); 187295d67482SBill Paul 187395d67482SBill Paul /* Enable PHY auto polling (for MII/GMII only) */ 1874652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 187595d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1876a1d52896SBill Paul } else { 18776098821cSJung-uk Kim BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 18781f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 18794c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 1880a1d52896SBill Paul CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1881a1d52896SBill Paul BGE_EVTENB_MI_INTERRUPT); 1882a1d52896SBill Paul } 188395d67482SBill Paul 18841f313773SOleg Bulyzhin /* 18851f313773SOleg Bulyzhin * Clear any pending link state attention. 18861f313773SOleg Bulyzhin * Otherwise some link state change events may be lost until attention 18871f313773SOleg Bulyzhin * is cleared by bge_intr() -> bge_link_upd() sequence. 18881f313773SOleg Bulyzhin * It's not necessary on newer BCM chips - perhaps enabling link 18891f313773SOleg Bulyzhin * state change attentions implies clearing pending attention. 18901f313773SOleg Bulyzhin */ 18911f313773SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 18921f313773SOleg Bulyzhin BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 18931f313773SOleg Bulyzhin BGE_MACSTAT_LINK_CHANGED); 18941f313773SOleg Bulyzhin 189595d67482SBill Paul /* Enable link state change attentions. 
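 * With any stale attention cleared above, the bge_intr() ->
 * bge_link_upd() path will now be told about future link transitions.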
*/ 189695d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 189795d67482SBill Paul 189895d67482SBill Paul return (0); 189995d67482SBill Paul } 190095d67482SBill Paul 19014c0da0ffSGleb Smirnoff const struct bge_revision * 19024c0da0ffSGleb Smirnoff bge_lookup_rev(uint32_t chipid) 19034c0da0ffSGleb Smirnoff { 19044c0da0ffSGleb Smirnoff const struct bge_revision *br; 19054c0da0ffSGleb Smirnoff 19064c0da0ffSGleb Smirnoff for (br = bge_revisions; br->br_name != NULL; br++) { 19074c0da0ffSGleb Smirnoff if (br->br_chipid == chipid) 19084c0da0ffSGleb Smirnoff return (br); 19094c0da0ffSGleb Smirnoff } 19104c0da0ffSGleb Smirnoff 19114c0da0ffSGleb Smirnoff for (br = bge_majorrevs; br->br_name != NULL; br++) { 19124c0da0ffSGleb Smirnoff if (br->br_chipid == BGE_ASICREV(chipid)) 19134c0da0ffSGleb Smirnoff return (br); 19144c0da0ffSGleb Smirnoff } 19154c0da0ffSGleb Smirnoff 19164c0da0ffSGleb Smirnoff return (NULL); 19174c0da0ffSGleb Smirnoff } 19184c0da0ffSGleb Smirnoff 19194c0da0ffSGleb Smirnoff const struct bge_vendor * 19204c0da0ffSGleb Smirnoff bge_lookup_vendor(uint16_t vid) 19214c0da0ffSGleb Smirnoff { 19224c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19234c0da0ffSGleb Smirnoff 19244c0da0ffSGleb Smirnoff for (v = bge_vendors; v->v_name != NULL; v++) 19254c0da0ffSGleb Smirnoff if (v->v_id == vid) 19264c0da0ffSGleb Smirnoff return (v); 19274c0da0ffSGleb Smirnoff 19284c0da0ffSGleb Smirnoff panic("%s: unknown vendor %d", __func__, vid); 19294c0da0ffSGleb Smirnoff return (NULL); 19304c0da0ffSGleb Smirnoff } 19314c0da0ffSGleb Smirnoff 193295d67482SBill Paul /* 193395d67482SBill Paul * Probe for a Broadcom chip. Check the PCI vendor and device IDs 19344c0da0ffSGleb Smirnoff * against our list and return its name if we find a match. 19354c0da0ffSGleb Smirnoff * 19364c0da0ffSGleb Smirnoff * Note that since the Broadcom controller contains VPD support, we 19377c929cf9SJung-uk Kim * try to get the device name string from the controller itself instead 19387c929cf9SJung-uk Kim * of the compiled-in string. It guarantees we'll always announce the 19397c929cf9SJung-uk Kim * right product name. We fall back to the compiled-in string when 19407c929cf9SJung-uk Kim * VPD is unavailable or corrupt. 
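 * The match itself is a plain linear walk of the bge_devs table of
 * vendor/device ID pairs; the ASIC revision is read here only so the
 * probe message can name the exact silicon revision.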
194195d67482SBill Paul */ 194295d67482SBill Paul static int 19433f74909aSGleb Smirnoff bge_probe(device_t dev) 194495d67482SBill Paul { 1945852c67f9SMarius Strobl const struct bge_type *t = bge_devs; 19464c0da0ffSGleb Smirnoff struct bge_softc *sc = device_get_softc(dev); 19477c929cf9SJung-uk Kim uint16_t vid, did; 194895d67482SBill Paul 194995d67482SBill Paul sc->bge_dev = dev; 19507c929cf9SJung-uk Kim vid = pci_get_vendor(dev); 19517c929cf9SJung-uk Kim did = pci_get_device(dev); 19524c0da0ffSGleb Smirnoff while(t->bge_vid != 0) { 19537c929cf9SJung-uk Kim if ((vid == t->bge_vid) && (did == t->bge_did)) { 19547c929cf9SJung-uk Kim char model[64], buf[96]; 19554c0da0ffSGleb Smirnoff const struct bge_revision *br; 19564c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19574c0da0ffSGleb Smirnoff uint32_t id; 19584c0da0ffSGleb Smirnoff 1959a5779553SStanislav Sedov id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1960a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 1961a5779553SStanislav Sedov if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) 1962a5779553SStanislav Sedov id = pci_read_config(dev, 1963a5779553SStanislav Sedov BGE_PCI_PRODID_ASICREV, 4); 19644c0da0ffSGleb Smirnoff br = bge_lookup_rev(id); 19657c929cf9SJung-uk Kim v = bge_lookup_vendor(vid); 19664e35d186SJung-uk Kim { 19674e35d186SJung-uk Kim #if __FreeBSD_version > 700024 19684e35d186SJung-uk Kim const char *pname; 19694e35d186SJung-uk Kim 1970852c67f9SMarius Strobl if (bge_has_eaddr(sc) && 1971852c67f9SMarius Strobl pci_get_vpd_ident(dev, &pname) == 0) 19724e35d186SJung-uk Kim snprintf(model, 64, "%s", pname); 19734e35d186SJung-uk Kim else 19744e35d186SJung-uk Kim #endif 19757c929cf9SJung-uk Kim snprintf(model, 64, "%s %s", 19767c929cf9SJung-uk Kim v->v_name, 19777c929cf9SJung-uk Kim br != NULL ? br->br_name : 19787c929cf9SJung-uk Kim "NetXtreme Ethernet Controller"); 19794e35d186SJung-uk Kim } 1980a5779553SStanislav Sedov snprintf(buf, 96, "%s, %sASIC rev. %#08x", model, 1981a5779553SStanislav Sedov br != NULL ? "" : "unknown ", id); 19824c0da0ffSGleb Smirnoff device_set_desc_copy(dev, buf); 19836d2a9bd6SDoug Ambrisko if (pci_get_subvendor(dev) == DELL_VENDORID) 19845ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_NO_3LED; 198508bf8bb7SJung-uk Kim if (did == BCOM_DEVICEID_BCM5755M) 198608bf8bb7SJung-uk Kim sc->bge_flags |= BGE_FLAG_ADJUST_TRIM; 198795d67482SBill Paul return (0); 198895d67482SBill Paul } 198995d67482SBill Paul t++; 199095d67482SBill Paul } 199195d67482SBill Paul 199295d67482SBill Paul return (ENXIO); 199395d67482SBill Paul } 199495d67482SBill Paul 1995f41ac2beSBill Paul static void 19963f74909aSGleb Smirnoff bge_dma_free(struct bge_softc *sc) 1997f41ac2beSBill Paul { 1998f41ac2beSBill Paul int i; 1999f41ac2beSBill Paul 20003f74909aSGleb Smirnoff /* Destroy DMA maps for RX buffers. */ 2001f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 2002f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_dmamap[i]) 20030ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2004f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 2005f41ac2beSBill Paul } 2006943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_sparemap) 2007943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 2008943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap); 2009f41ac2beSBill Paul 20103f74909aSGleb Smirnoff /* Destroy DMA maps for jumbo RX buffers. 
*/ 2011f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2012f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 2013f41ac2beSBill Paul bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2014f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2015f41ac2beSBill Paul } 2016943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_sparemap) 2017943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2018943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap); 2019f41ac2beSBill Paul 20203f74909aSGleb Smirnoff /* Destroy DMA maps for TX buffers. */ 2021f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 2022f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_dmamap[i]) 20230ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 2024f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 2025f41ac2beSBill Paul } 2026f41ac2beSBill Paul 20270ac56796SPyun YongHyeon if (sc->bge_cdata.bge_rx_mtag) 20280ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 20290ac56796SPyun YongHyeon if (sc->bge_cdata.bge_tx_mtag) 20300ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 2031f41ac2beSBill Paul 2032f41ac2beSBill Paul 20333f74909aSGleb Smirnoff /* Destroy standard RX ring. */ 2034e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map) 2035e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 2036e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map); 2037e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 2038f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 2039f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring, 2040f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map); 2041f41ac2beSBill Paul 2042f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_ring_tag) 2043f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 2044f41ac2beSBill Paul 20453f74909aSGleb Smirnoff /* Destroy jumbo RX ring. */ 2046e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map) 2047e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2048e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map); 2049e65bed95SPyun YongHyeon 2050e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map && 2051e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_jumbo_ring) 2052f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2053f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, 2054f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map); 2055f41ac2beSBill Paul 2056f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 2057f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 2058f41ac2beSBill Paul 20593f74909aSGleb Smirnoff /* Destroy RX return ring. 
*/ 2060e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map) 2061e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 2062e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map); 2063e65bed95SPyun YongHyeon 2064e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map && 2065e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_return_ring) 2066f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 2067f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, 2068f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map); 2069f41ac2beSBill Paul 2070f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_return_ring_tag) 2071f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 2072f41ac2beSBill Paul 20733f74909aSGleb Smirnoff /* Destroy TX ring. */ 2074e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map) 2075e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 2076e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_ring_map); 2077e65bed95SPyun YongHyeon 2078e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 2079f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 2080f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring, 2081f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map); 2082f41ac2beSBill Paul 2083f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_ring_tag) 2084f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 2085f41ac2beSBill Paul 20863f74909aSGleb Smirnoff /* Destroy status block. */ 2087e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map) 2088e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 2089e65bed95SPyun YongHyeon sc->bge_cdata.bge_status_map); 2090e65bed95SPyun YongHyeon 2091e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 2092f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_status_tag, 2093f41ac2beSBill Paul sc->bge_ldata.bge_status_block, 2094f41ac2beSBill Paul sc->bge_cdata.bge_status_map); 2095f41ac2beSBill Paul 2096f41ac2beSBill Paul if (sc->bge_cdata.bge_status_tag) 2097f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 2098f41ac2beSBill Paul 20993f74909aSGleb Smirnoff /* Destroy statistics block. */ 2100e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map) 2101e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 2102e65bed95SPyun YongHyeon sc->bge_cdata.bge_stats_map); 2103e65bed95SPyun YongHyeon 2104e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 2105f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 2106f41ac2beSBill Paul sc->bge_ldata.bge_stats, 2107f41ac2beSBill Paul sc->bge_cdata.bge_stats_map); 2108f41ac2beSBill Paul 2109f41ac2beSBill Paul if (sc->bge_cdata.bge_stats_tag) 2110f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 2111f41ac2beSBill Paul 21123f74909aSGleb Smirnoff /* Destroy the parent tag. 
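 * This has to happen last, since every other tag above was created as
 * a child of it.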
*/ 2113f41ac2beSBill Paul if (sc->bge_cdata.bge_parent_tag) 2114f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 2115f41ac2beSBill Paul } 2116f41ac2beSBill Paul 2117f41ac2beSBill Paul static int 21183f74909aSGleb Smirnoff bge_dma_alloc(device_t dev) 2119f41ac2beSBill Paul { 21203f74909aSGleb Smirnoff struct bge_dmamap_arg ctx; 2121f41ac2beSBill Paul struct bge_softc *sc; 2122f681b29aSPyun YongHyeon bus_addr_t lowaddr; 212330f57f61SPyun YongHyeon bus_size_t sbsz, txsegsz, txmaxsegsz; 21241be6acb7SGleb Smirnoff int i, error; 2125f41ac2beSBill Paul 2126f41ac2beSBill Paul sc = device_get_softc(dev); 2127f41ac2beSBill Paul 2128f681b29aSPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR; 2129f681b29aSPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) 2130f681b29aSPyun YongHyeon lowaddr = BGE_DMA_MAXADDR; 2131f681b29aSPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) 2132f681b29aSPyun YongHyeon lowaddr = BUS_SPACE_MAXADDR_32BIT; 2133f41ac2beSBill Paul /* 2134f41ac2beSBill Paul * Allocate the parent bus DMA tag appropriate for PCI. 2135f41ac2beSBill Paul */ 21364eee14cbSMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 2137f681b29aSPyun YongHyeon 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, 21384eee14cbSMarius Strobl NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 21394eee14cbSMarius Strobl 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); 2140f41ac2beSBill Paul 2141e65bed95SPyun YongHyeon if (error != 0) { 2142fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2143fe806fdaSPyun YongHyeon "could not allocate parent dma tag\n"); 2144e65bed95SPyun YongHyeon return (ENOMEM); 2145e65bed95SPyun YongHyeon } 2146e65bed95SPyun YongHyeon 2147f41ac2beSBill Paul /* 21480ac56796SPyun YongHyeon * Create tag for Tx mbufs. 2149f41ac2beSBill Paul */ 2150ca3f1187SPyun YongHyeon if (sc->bge_flags & BGE_FLAG_TSO) { 2151ca3f1187SPyun YongHyeon txsegsz = BGE_TSOSEG_SZ; 2152ca3f1187SPyun YongHyeon txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); 2153ca3f1187SPyun YongHyeon } else { 2154ca3f1187SPyun YongHyeon txsegsz = MCLBYTES; 2155ca3f1187SPyun YongHyeon txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; 2156ca3f1187SPyun YongHyeon } 21578a2e22deSScott Long error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 2158ca3f1187SPyun YongHyeon 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 2159ca3f1187SPyun YongHyeon txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, 2160ca3f1187SPyun YongHyeon &sc->bge_cdata.bge_tx_mtag); 2161f41ac2beSBill Paul 2162f41ac2beSBill Paul if (error) { 21630ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); 21640ac56796SPyun YongHyeon return (ENOMEM); 21650ac56796SPyun YongHyeon } 21660ac56796SPyun YongHyeon 21670ac56796SPyun YongHyeon /* 21680ac56796SPyun YongHyeon * Create tag for Rx mbufs. 21690ac56796SPyun YongHyeon */ 21700ac56796SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 21710ac56796SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 2172ca3f1187SPyun YongHyeon MCLBYTES, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); 21730ac56796SPyun YongHyeon 21740ac56796SPyun YongHyeon if (error) { 21750ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); 2176f41ac2beSBill Paul return (ENOMEM); 2177f41ac2beSBill Paul } 2178f41ac2beSBill Paul 21793f74909aSGleb Smirnoff /* Create DMA maps for RX buffers. 
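 * One map per standard ring slot, plus a spare map, presumably so a
 * replacement mbuf can be mapped before the map still holding the
 * in-use buffer is unloaded and recycled.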
*/ 2180943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2181943787f3SPyun YongHyeon &sc->bge_cdata.bge_rx_std_sparemap); 2182943787f3SPyun YongHyeon if (error) { 2183943787f3SPyun YongHyeon device_printf(sc->bge_dev, 2184943787f3SPyun YongHyeon "can't create spare DMA map for RX\n"); 2185943787f3SPyun YongHyeon return (ENOMEM); 2186943787f3SPyun YongHyeon } 2187f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 21880ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2189f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_dmamap[i]); 2190f41ac2beSBill Paul if (error) { 2191fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2192fe806fdaSPyun YongHyeon "can't create DMA map for RX\n"); 2193f41ac2beSBill Paul return (ENOMEM); 2194f41ac2beSBill Paul } 2195f41ac2beSBill Paul } 2196f41ac2beSBill Paul 21973f74909aSGleb Smirnoff /* Create DMA maps for TX buffers. */ 2198f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 21990ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, 2200f41ac2beSBill Paul &sc->bge_cdata.bge_tx_dmamap[i]); 2201f41ac2beSBill Paul if (error) { 2202fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22030ac56796SPyun YongHyeon "can't create DMA map for TX\n"); 2204f41ac2beSBill Paul return (ENOMEM); 2205f41ac2beSBill Paul } 2206f41ac2beSBill Paul } 2207f41ac2beSBill Paul 22083f74909aSGleb Smirnoff /* Create tag for standard RX ring. */ 2209f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2210f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2211f41ac2beSBill Paul NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 2212f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 2213f41ac2beSBill Paul 2214f41ac2beSBill Paul if (error) { 2215fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2216f41ac2beSBill Paul return (ENOMEM); 2217f41ac2beSBill Paul } 2218f41ac2beSBill Paul 22193f74909aSGleb Smirnoff /* Allocate DMA'able memory for standard RX ring. */ 2220f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 2221f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 2222f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_ring_map); 2223f41ac2beSBill Paul if (error) 2224f41ac2beSBill Paul return (ENOMEM); 2225f41ac2beSBill Paul 2226f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 2227f41ac2beSBill Paul 22283f74909aSGleb Smirnoff /* Load the address of the standard RX ring. */ 2229f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2230f41ac2beSBill Paul ctx.sc = sc; 2231f41ac2beSBill Paul 2232f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 2233f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 2234f41ac2beSBill Paul BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2235f41ac2beSBill Paul 2236f41ac2beSBill Paul if (error) 2237f41ac2beSBill Paul return (ENOMEM); 2238f41ac2beSBill Paul 2239f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 2240f41ac2beSBill Paul 22413f74909aSGleb Smirnoff /* Create tags for jumbo mbufs. 
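 * Jumbo receive buffers are 9K (MJUM9BYTES) clusters that may end up
 * split across as many as BGE_NSEG_JUMBO segments, which is why the
 * jumbo ring uses the extended RX BD format set up in
 * bge_newbuf_jumbo().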
*/ 22424c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) { 2243f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 22448a2e22deSScott Long 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 22451be6acb7SGleb Smirnoff NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 22461be6acb7SGleb Smirnoff 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 2247f41ac2beSBill Paul if (error) { 2248fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22493f74909aSGleb Smirnoff "could not allocate jumbo dma tag\n"); 2250f41ac2beSBill Paul return (ENOMEM); 2251f41ac2beSBill Paul } 2252f41ac2beSBill Paul 22533f74909aSGleb Smirnoff /* Create tag for jumbo RX ring. */ 2254f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2255f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2256f41ac2beSBill Paul NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 2257f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 2258f41ac2beSBill Paul 2259f41ac2beSBill Paul if (error) { 2260fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22613f74909aSGleb Smirnoff "could not allocate jumbo ring dma tag\n"); 2262f41ac2beSBill Paul return (ENOMEM); 2263f41ac2beSBill Paul } 2264f41ac2beSBill Paul 22653f74909aSGleb Smirnoff /* Allocate DMA'able memory for jumbo RX ring. */ 2266f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 22671be6acb7SGleb Smirnoff (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 22681be6acb7SGleb Smirnoff BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2269f41ac2beSBill Paul &sc->bge_cdata.bge_rx_jumbo_ring_map); 2270f41ac2beSBill Paul if (error) 2271f41ac2beSBill Paul return (ENOMEM); 2272f41ac2beSBill Paul 22733f74909aSGleb Smirnoff /* Load the address of the jumbo RX ring. */ 2274f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2275f41ac2beSBill Paul ctx.sc = sc; 2276f41ac2beSBill Paul 2277f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2278f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map, 2279f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 2280f41ac2beSBill Paul bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2281f41ac2beSBill Paul 2282f41ac2beSBill Paul if (error) 2283f41ac2beSBill Paul return (ENOMEM); 2284f41ac2beSBill Paul 2285f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 2286f41ac2beSBill Paul 22873f74909aSGleb Smirnoff /* Create DMA maps for jumbo RX buffers. */ 2288943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2289943787f3SPyun YongHyeon 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); 2290943787f3SPyun YongHyeon if (error) { 2291943787f3SPyun YongHyeon device_printf(sc->bge_dev, 22921b90d0bdSPyun YongHyeon "can't create spare DMA map for jumbo RX\n"); 2293943787f3SPyun YongHyeon return (ENOMEM); 2294943787f3SPyun YongHyeon } 2295f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2296f41ac2beSBill Paul error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2297f41ac2beSBill Paul 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2298f41ac2beSBill Paul if (error) { 2299fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 23003f74909aSGleb Smirnoff "can't create DMA map for jumbo RX\n"); 2301f41ac2beSBill Paul return (ENOMEM); 2302f41ac2beSBill Paul } 2303f41ac2beSBill Paul } 2304f41ac2beSBill Paul 2305f41ac2beSBill Paul } 2306f41ac2beSBill Paul 23073f74909aSGleb Smirnoff /* Create tag for RX return ring. 
*/ 2308f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2309f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2310f41ac2beSBill Paul NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 2311f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 2312f41ac2beSBill Paul 2313f41ac2beSBill Paul if (error) { 2314fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2315f41ac2beSBill Paul return (ENOMEM); 2316f41ac2beSBill Paul } 2317f41ac2beSBill Paul 23183f74909aSGleb Smirnoff /* Allocate DMA'able memory for RX return ring. */ 2319f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 2320f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 2321f41ac2beSBill Paul &sc->bge_cdata.bge_rx_return_ring_map); 2322f41ac2beSBill Paul if (error) 2323f41ac2beSBill Paul return (ENOMEM); 2324f41ac2beSBill Paul 2325f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_rx_return_ring, 2326f41ac2beSBill Paul BGE_RX_RTN_RING_SZ(sc)); 2327f41ac2beSBill Paul 23283f74909aSGleb Smirnoff /* Load the address of the RX return ring. */ 2329f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2330f41ac2beSBill Paul ctx.sc = sc; 2331f41ac2beSBill Paul 2332f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 2333f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map, 2334f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 2335f41ac2beSBill Paul bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2336f41ac2beSBill Paul 2337f41ac2beSBill Paul if (error) 2338f41ac2beSBill Paul return (ENOMEM); 2339f41ac2beSBill Paul 2340f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2341f41ac2beSBill Paul 23423f74909aSGleb Smirnoff /* Create tag for TX ring. */ 2343f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2344f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2345f41ac2beSBill Paul NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2346f41ac2beSBill Paul &sc->bge_cdata.bge_tx_ring_tag); 2347f41ac2beSBill Paul 2348f41ac2beSBill Paul if (error) { 2349fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2350f41ac2beSBill Paul return (ENOMEM); 2351f41ac2beSBill Paul } 2352f41ac2beSBill Paul 23533f74909aSGleb Smirnoff /* Allocate DMA'able memory for TX ring. */ 2354f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2355f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2356f41ac2beSBill Paul &sc->bge_cdata.bge_tx_ring_map); 2357f41ac2beSBill Paul if (error) 2358f41ac2beSBill Paul return (ENOMEM); 2359f41ac2beSBill Paul 2360f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2361f41ac2beSBill Paul 23623f74909aSGleb Smirnoff /* Load the address of the TX ring. 
*/ 2363f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2364f41ac2beSBill Paul ctx.sc = sc; 2365f41ac2beSBill Paul 2366f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2367f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2368f41ac2beSBill Paul BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2369f41ac2beSBill Paul 2370f41ac2beSBill Paul if (error) 2371f41ac2beSBill Paul return (ENOMEM); 2372f41ac2beSBill Paul 2373f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2374f41ac2beSBill Paul 237530f57f61SPyun YongHyeon /* 237630f57f61SPyun YongHyeon * Create tag for status block. 237730f57f61SPyun YongHyeon * Because we only use single Tx/Rx/Rx return ring, use 237830f57f61SPyun YongHyeon * minimum status block size except BCM5700 AX/BX which 237930f57f61SPyun YongHyeon * seems to want to see full status block size regardless 238030f57f61SPyun YongHyeon * of configured number of ring. 238130f57f61SPyun YongHyeon */ 238230f57f61SPyun YongHyeon if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 238330f57f61SPyun YongHyeon sc->bge_chipid != BGE_CHIPID_BCM5700_C0) 238430f57f61SPyun YongHyeon sbsz = BGE_STATUS_BLK_SZ; 238530f57f61SPyun YongHyeon else 238630f57f61SPyun YongHyeon sbsz = 32; 2387f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2388f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 238930f57f61SPyun YongHyeon NULL, sbsz, 1, sbsz, 0, NULL, NULL, &sc->bge_cdata.bge_status_tag); 2390f41ac2beSBill Paul 2391f41ac2beSBill Paul if (error) { 239230f57f61SPyun YongHyeon device_printf(sc->bge_dev, 239330f57f61SPyun YongHyeon "could not allocate status dma tag\n"); 2394f41ac2beSBill Paul return (ENOMEM); 2395f41ac2beSBill Paul } 2396f41ac2beSBill Paul 23973f74909aSGleb Smirnoff /* Allocate DMA'able memory for status block. */ 2398f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 2399f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 2400f41ac2beSBill Paul &sc->bge_cdata.bge_status_map); 2401f41ac2beSBill Paul if (error) 2402f41ac2beSBill Paul return (ENOMEM); 2403f41ac2beSBill Paul 240430f57f61SPyun YongHyeon bzero((char *)sc->bge_ldata.bge_status_block, sbsz); 2405f41ac2beSBill Paul 24063f74909aSGleb Smirnoff /* Load the address of the status block. */ 2407f41ac2beSBill Paul ctx.sc = sc; 2408f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2409f41ac2beSBill Paul 2410f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2411f41ac2beSBill Paul sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 241230f57f61SPyun YongHyeon sbsz, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2413f41ac2beSBill Paul 2414f41ac2beSBill Paul if (error) 2415f41ac2beSBill Paul return (ENOMEM); 2416f41ac2beSBill Paul 2417f41ac2beSBill Paul sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2418f41ac2beSBill Paul 24193f74909aSGleb Smirnoff /* Create tag for statistics block. 
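     * The statistics block is a separate BGE_STATS_SZ region, allocated
     * and loaded the same way as the status block above.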
 */
2420f41ac2beSBill Paul     error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2421f41ac2beSBill Paul         PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2422f41ac2beSBill Paul         NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2423f41ac2beSBill Paul         &sc->bge_cdata.bge_stats_tag);
2424f41ac2beSBill Paul 
2425f41ac2beSBill Paul     if (error) {
2426fe806fdaSPyun YongHyeon         device_printf(sc->bge_dev, "could not allocate dma tag\n");
2427f41ac2beSBill Paul         return (ENOMEM);
2428f41ac2beSBill Paul     }
2429f41ac2beSBill Paul 
24303f74909aSGleb Smirnoff     /* Allocate DMA'able memory for statistics block. */
2431f41ac2beSBill Paul     error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2432f41ac2beSBill Paul         (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2433f41ac2beSBill Paul         &sc->bge_cdata.bge_stats_map);
2434f41ac2beSBill Paul     if (error)
2435f41ac2beSBill Paul         return (ENOMEM);
2436f41ac2beSBill Paul 
2437f41ac2beSBill Paul     bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2438f41ac2beSBill Paul 
24393f74909aSGleb Smirnoff     /* Load the address of the statistics block. */
2440f41ac2beSBill Paul     ctx.sc = sc;
2441f41ac2beSBill Paul     ctx.bge_maxsegs = 1;
2442f41ac2beSBill Paul 
2443f41ac2beSBill Paul     error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2444f41ac2beSBill Paul         sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2445f41ac2beSBill Paul         BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2446f41ac2beSBill Paul 
2447f41ac2beSBill Paul     if (error)
2448f41ac2beSBill Paul         return (ENOMEM);
2449f41ac2beSBill Paul 
2450f41ac2beSBill Paul     sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2451f41ac2beSBill Paul 
2452f41ac2beSBill Paul     return (0);
2453f41ac2beSBill Paul }
2454f41ac2beSBill Paul 
2455bf6ef57aSJohn Polstra /*
2456bf6ef57aSJohn Polstra  * Return true if this device has more than one port.
2457bf6ef57aSJohn Polstra  */
2458bf6ef57aSJohn Polstra static int
2459bf6ef57aSJohn Polstra bge_has_multiple_ports(struct bge_softc *sc)
2460bf6ef57aSJohn Polstra {
2461bf6ef57aSJohn Polstra     device_t dev = sc->bge_dev;
246255aaf894SMarius Strobl     u_int b, d, f, fscan, s;
2463bf6ef57aSJohn Polstra 
246455aaf894SMarius Strobl     d = pci_get_domain(dev);
2465bf6ef57aSJohn Polstra     b = pci_get_bus(dev);
2466bf6ef57aSJohn Polstra     s = pci_get_slot(dev);
2467bf6ef57aSJohn Polstra     f = pci_get_function(dev);
2468bf6ef57aSJohn Polstra     for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
246955aaf894SMarius Strobl         if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2470bf6ef57aSJohn Polstra             return (1);
2471bf6ef57aSJohn Polstra     return (0);
2472bf6ef57aSJohn Polstra }
2473bf6ef57aSJohn Polstra 
2474bf6ef57aSJohn Polstra /*
2475bf6ef57aSJohn Polstra  * Return true if MSI can be used with this device.
2476bf6ef57aSJohn Polstra  */
2477bf6ef57aSJohn Polstra static int
2478bf6ef57aSJohn Polstra bge_can_use_msi(struct bge_softc *sc)
2479bf6ef57aSJohn Polstra {
2480bf6ef57aSJohn Polstra     int can_use_msi = 0;
2481bf6ef57aSJohn Polstra 
2482bf6ef57aSJohn Polstra     switch (sc->bge_asicrev) {
2483a8376f70SMarius Strobl     case BGE_ASICREV_BCM5714_A0:
2484bf6ef57aSJohn Polstra     case BGE_ASICREV_BCM5714:
2485bf6ef57aSJohn Polstra         /*
2486a8376f70SMarius Strobl          * Apparently, MSI doesn't work when these chips are
2487a8376f70SMarius Strobl          * configured in single-port mode.
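         * Because of this, MSI is only allowed on these chips when
         * more than one PCI function is present (see
         * bge_has_multiple_ports() above).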
2488bf6ef57aSJohn Polstra */ 2489bf6ef57aSJohn Polstra if (bge_has_multiple_ports(sc)) 2490bf6ef57aSJohn Polstra can_use_msi = 1; 2491bf6ef57aSJohn Polstra break; 2492bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5750: 2493bf6ef57aSJohn Polstra if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && 2494bf6ef57aSJohn Polstra sc->bge_chiprev != BGE_CHIPREV_5750_BX) 2495bf6ef57aSJohn Polstra can_use_msi = 1; 2496bf6ef57aSJohn Polstra break; 2497a8376f70SMarius Strobl default: 2498a8376f70SMarius Strobl if (BGE_IS_575X_PLUS(sc)) 2499bf6ef57aSJohn Polstra can_use_msi = 1; 2500bf6ef57aSJohn Polstra } 2501bf6ef57aSJohn Polstra return (can_use_msi); 2502bf6ef57aSJohn Polstra } 2503bf6ef57aSJohn Polstra 250495d67482SBill Paul static int 25053f74909aSGleb Smirnoff bge_attach(device_t dev) 250695d67482SBill Paul { 250795d67482SBill Paul struct ifnet *ifp; 250895d67482SBill Paul struct bge_softc *sc; 25094f0794ffSBjoern A. Zeeb uint32_t hwcfg = 0, misccfg; 251008013fd3SMarius Strobl u_char eaddr[ETHER_ADDR_LEN]; 2511d648358bSPyun YongHyeon int error, msicount, reg, rid, trys; 251295d67482SBill Paul 251395d67482SBill Paul sc = device_get_softc(dev); 251495d67482SBill Paul sc->bge_dev = dev; 251595d67482SBill Paul 2516dfe0df9aSPyun YongHyeon TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); 2517dfe0df9aSPyun YongHyeon 251895d67482SBill Paul /* 251995d67482SBill Paul * Map control/status registers. 252095d67482SBill Paul */ 252195d67482SBill Paul pci_enable_busmaster(dev); 252295d67482SBill Paul 252395d67482SBill Paul rid = BGE_PCI_BAR0; 25245f96beb9SNate Lawson sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 252544f8f2fcSMarius Strobl RF_ACTIVE); 252695d67482SBill Paul 252795d67482SBill Paul if (sc->bge_res == NULL) { 2528fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, "couldn't map memory\n"); 252995d67482SBill Paul error = ENXIO; 253095d67482SBill Paul goto fail; 253195d67482SBill Paul } 253295d67482SBill Paul 25334f09c4c7SMarius Strobl /* Save various chip information. */ 2534e53d81eeSPaul Saab sc->bge_chipid = 2535a5779553SStanislav Sedov pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2536a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 2537a5779553SStanislav Sedov if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) 2538a5779553SStanislav Sedov sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 2539a5779553SStanislav Sedov 4); 2540e53d81eeSPaul Saab sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2541e53d81eeSPaul Saab sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2542e53d81eeSPaul Saab 254386543395SJung-uk Kim /* 254438cc658fSJohn Baldwin * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the 254586543395SJung-uk Kim * 5705 A0 and A1 chips. 254686543395SJung-uk Kim */ 254786543395SJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && 254838cc658fSJohn Baldwin sc->bge_asicrev != BGE_ASICREV_BCM5906 && 254986543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 255086543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A1) 255186543395SJung-uk Kim sc->bge_flags |= BGE_FLAG_WIRESPEED; 255286543395SJung-uk Kim 25535fea260fSMarius Strobl if (bge_has_eaddr(sc)) 25545fea260fSMarius Strobl sc->bge_flags |= BGE_FLAG_EADDR; 255508013fd3SMarius Strobl 25560dae9719SJung-uk Kim /* Save chipset family. 
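     * The family flags set here (5700, 5714, 575X and 5705/5755 plus)
     * drive most of the later feature and bug-workaround checks; note
     * that several cases deliberately fall through.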
 */
25570dae9719SJung-uk Kim     switch (sc->bge_asicrev) {
2558a5779553SStanislav Sedov     case BGE_ASICREV_BCM5755:
2559a5779553SStanislav Sedov     case BGE_ASICREV_BCM5761:
2560a5779553SStanislav Sedov     case BGE_ASICREV_BCM5784:
2561a5779553SStanislav Sedov     case BGE_ASICREV_BCM5785:
2562a5779553SStanislav Sedov     case BGE_ASICREV_BCM5787:
2563a5779553SStanislav Sedov     case BGE_ASICREV_BCM57780:
2564a5779553SStanislav Sedov         sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
2565a5779553SStanislav Sedov             BGE_FLAG_5705_PLUS;
2566a5779553SStanislav Sedov         break;
25670dae9719SJung-uk Kim     case BGE_ASICREV_BCM5700:
25680dae9719SJung-uk Kim     case BGE_ASICREV_BCM5701:
25690dae9719SJung-uk Kim     case BGE_ASICREV_BCM5703:
25700dae9719SJung-uk Kim     case BGE_ASICREV_BCM5704:
25717ee00338SJung-uk Kim         sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
25720dae9719SJung-uk Kim         break;
25730dae9719SJung-uk Kim     case BGE_ASICREV_BCM5714_A0:
25740dae9719SJung-uk Kim     case BGE_ASICREV_BCM5780:
25750dae9719SJung-uk Kim     case BGE_ASICREV_BCM5714:
25767ee00338SJung-uk Kim         sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
25779fe569d8SXin LI         /* FALLTHROUGH */
25780dae9719SJung-uk Kim     case BGE_ASICREV_BCM5750:
25790dae9719SJung-uk Kim     case BGE_ASICREV_BCM5752:
258038cc658fSJohn Baldwin     case BGE_ASICREV_BCM5906:
25810dae9719SJung-uk Kim         sc->bge_flags |= BGE_FLAG_575X_PLUS;
25829fe569d8SXin LI         /* FALLTHROUGH */
25830dae9719SJung-uk Kim     case BGE_ASICREV_BCM5705:
25840dae9719SJung-uk Kim         sc->bge_flags |= BGE_FLAG_5705_PLUS;
25850dae9719SJung-uk Kim         break;
25860dae9719SJung-uk Kim     }
25870dae9719SJung-uk Kim 
25885ee49a3aSJung-uk Kim     /* Set various bug flags. */
25891ec4c3a8SJung-uk Kim     if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
25901ec4c3a8SJung-uk Kim         sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
25911ec4c3a8SJung-uk Kim         sc->bge_flags |= BGE_FLAG_CRC_BUG;
25925ee49a3aSJung-uk Kim     if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
25935ee49a3aSJung-uk Kim         sc->bge_chiprev == BGE_CHIPREV_5704_AX)
25945ee49a3aSJung-uk Kim         sc->bge_flags |= BGE_FLAG_ADC_BUG;
25955ee49a3aSJung-uk Kim     if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
25965ee49a3aSJung-uk Kim         sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
259708bf8bb7SJung-uk Kim     if (BGE_IS_5705_PLUS(sc) &&
259808bf8bb7SJung-uk Kim         !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
25995ee49a3aSJung-uk Kim         if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2600a5779553SStanislav Sedov             sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2601a5779553SStanislav Sedov             sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
26024fcf220bSJohn Baldwin             sc->bge_asicrev == BGE_ASICREV_BCM5787) {
26034fcf220bSJohn Baldwin             if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
26045ee49a3aSJung-uk Kim                 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
260538cc658fSJohn Baldwin         } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
26065ee49a3aSJung-uk Kim             sc->bge_flags |= BGE_FLAG_BER_BUG;
26075ee49a3aSJung-uk Kim     }
26085ee49a3aSJung-uk Kim 
2609f681b29aSPyun YongHyeon     /*
2610f681b29aSPyun YongHyeon      * All controllers that are not 5755 or higher have the 4GB
2611f681b29aSPyun YongHyeon      * boundary DMA bug.
2612f681b29aSPyun YongHyeon      * Whenever an address crosses a multiple of the 4GB boundary
2613f681b29aSPyun YongHyeon      * (including 4GB, 8GB, 12GB, etc.) and makes the transition
2614f681b29aSPyun YongHyeon      * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
2615f681b29aSPyun YongHyeon      * state machine will lock up and cause the device to hang.
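     * The workaround is to restrict the parent DMA tag to 32-bit
     * addresses (see the lowaddr handling in bge_dma_alloc()).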
2616f681b29aSPyun YongHyeon      */
2617f681b29aSPyun YongHyeon     if (BGE_IS_5755_PLUS(sc) == 0)
2618f681b29aSPyun YongHyeon         sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
26194f0794ffSBjoern A. Zeeb 
26204f0794ffSBjoern A. Zeeb     /*
26214f0794ffSBjoern A. Zeeb      * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe()
26224f0794ffSBjoern A. Zeeb      * but I do not know the DEVICEID for the 5788M.
26234f0794ffSBjoern A. Zeeb      */
26244f0794ffSBjoern A. Zeeb     misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID;
26254f0794ffSBjoern A. Zeeb     if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
26264f0794ffSBjoern A. Zeeb         misccfg == BGE_MISCCFG_BOARD_ID_5788M)
26274f0794ffSBjoern A. Zeeb         sc->bge_flags |= BGE_FLAG_5788;
26284f0794ffSBjoern A. Zeeb 
2629e53d81eeSPaul Saab     /*
2630ca3f1187SPyun YongHyeon      * Some controllers seem to require special firmware to use
2631ca3f1187SPyun YongHyeon      * TSO. But the firmware is not available to FreeBSD and Linux
2632ca3f1187SPyun YongHyeon      * claims that the TSO performed by the firmware is slower than
2633ca3f1187SPyun YongHyeon      * hardware based TSO. Moreover the firmware based TSO has a
2634ca3f1187SPyun YongHyeon      * known bug which can't handle TSO if the ethernet header plus
2635ca3f1187SPyun YongHyeon      * IP/TCP header is greater than 80 bytes. A workaround for the
2636ca3f1187SPyun YongHyeon      * TSO bug exists, but it seems to be more expensive than not
2637ca3f1187SPyun YongHyeon      * using TSO at all. Some hardware also has the TSO bug, so limit
2638ca3f1187SPyun YongHyeon      * TSO to the controllers that are not affected by TSO issues
2639ca3f1187SPyun YongHyeon      * (e.g. 5755 or higher).
2640ca3f1187SPyun YongHyeon      */
2641ca3f1187SPyun YongHyeon     if (BGE_IS_5755_PLUS(sc))
2642ca3f1187SPyun YongHyeon         sc->bge_flags |= BGE_FLAG_TSO;
2643ca3f1187SPyun YongHyeon 
2644ca3f1187SPyun YongHyeon     /*
26456f8718a3SScott Long      * Check if this is a PCI-X or PCI Express device.
2646e53d81eeSPaul Saab      */
26476f8718a3SScott Long     if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
26484c0da0ffSGleb Smirnoff         /*
26496f8718a3SScott Long          * Found a PCI Express capabilities register, this
26506f8718a3SScott Long          * must be a PCI Express device.
26516f8718a3SScott Long          */
26526f8718a3SScott Long         sc->bge_flags |= BGE_FLAG_PCIE;
26530aaf1057SPyun YongHyeon         sc->bge_expcap = reg;
26540aaf1057SPyun YongHyeon         bge_set_max_readrq(sc);
26556f8718a3SScott Long     } else {
26566f8718a3SScott Long         /*
26576f8718a3SScott Long          * Check if the device is in PCI-X Mode.
26586f8718a3SScott Long          * (This bit is not valid on PCI Express controllers.)
26594c0da0ffSGleb Smirnoff          */
26600aaf1057SPyun YongHyeon         if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
26610aaf1057SPyun YongHyeon             sc->bge_pcixcap = reg;
266290447aadSMarius Strobl         if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
26634c0da0ffSGleb Smirnoff             BGE_PCISTATE_PCI_BUSMODE) == 0)
2664652ae483SGleb Smirnoff             sc->bge_flags |= BGE_FLAG_PCIX;
26656f8718a3SScott Long     }
26664c0da0ffSGleb Smirnoff 
2667bf6ef57aSJohn Polstra     /*
2668fd4d32feSPyun YongHyeon      * The 40bit DMA bug applies to the 5714/5715 controllers and is
2669fd4d32feSPyun YongHyeon      * not actually a MAC controller bug but an issue with the embedded
2670fd4d32feSPyun YongHyeon      * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
2671fd4d32feSPyun YongHyeon      */
2672fd4d32feSPyun YongHyeon     if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
2673fd4d32feSPyun YongHyeon         sc->bge_flags |= BGE_FLAG_40BIT_BUG;
2674fd4d32feSPyun YongHyeon     /*
2675bf6ef57aSJohn Polstra      * Allocate the interrupt, using MSI if possible.  These devices
2676bf6ef57aSJohn Polstra      * support 8 MSI messages, but only the first one is used in
2677bf6ef57aSJohn Polstra      * normal operation.
2678bf6ef57aSJohn Polstra      */
26790aaf1057SPyun YongHyeon     rid = 0;
26800aaf1057SPyun YongHyeon     if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
26810aaf1057SPyun YongHyeon         sc->bge_msicap = reg;
2682bf6ef57aSJohn Polstra         if (bge_can_use_msi(sc)) {
2683bf6ef57aSJohn Polstra             msicount = pci_msi_count(dev);
2684bf6ef57aSJohn Polstra             if (msicount > 1)
2685bf6ef57aSJohn Polstra                 msicount = 1;
2686bf6ef57aSJohn Polstra         } else
2687bf6ef57aSJohn Polstra             msicount = 0;
2688bf6ef57aSJohn Polstra         if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2689bf6ef57aSJohn Polstra             rid = 1;
2690bf6ef57aSJohn Polstra             sc->bge_flags |= BGE_FLAG_MSI;
26910aaf1057SPyun YongHyeon         }
26920aaf1057SPyun YongHyeon     }
2693bf6ef57aSJohn Polstra 
2694bf6ef57aSJohn Polstra     sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2695bf6ef57aSJohn Polstra         RF_SHAREABLE | RF_ACTIVE);
2696bf6ef57aSJohn Polstra 
2697bf6ef57aSJohn Polstra     if (sc->bge_irq == NULL) {
2698bf6ef57aSJohn Polstra         device_printf(sc->bge_dev, "couldn't map interrupt\n");
2699bf6ef57aSJohn Polstra         error = ENXIO;
2700bf6ef57aSJohn Polstra         goto fail;
2701bf6ef57aSJohn Polstra     }
2702bf6ef57aSJohn Polstra 
27034f09c4c7SMarius Strobl     if (bootverbose)
27044f09c4c7SMarius Strobl         device_printf(dev,
27054f09c4c7SMarius Strobl             "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
27064f09c4c7SMarius Strobl             sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
27074f09c4c7SMarius Strobl             (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
27084f09c4c7SMarius Strobl             ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
27094f09c4c7SMarius Strobl 
2710bf6ef57aSJohn Polstra     BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2711bf6ef57aSJohn Polstra 
271295d67482SBill Paul     /* Try to reset the chip. */
27138cb1383cSDoug Ambrisko     if (bge_reset(sc)) {
27148cb1383cSDoug Ambrisko         device_printf(sc->bge_dev, "chip reset failed\n");
27158cb1383cSDoug Ambrisko         error = ENXIO;
27168cb1383cSDoug Ambrisko         goto fail;
27178cb1383cSDoug Ambrisko     }
27188cb1383cSDoug Ambrisko 
27198cb1383cSDoug Ambrisko     sc->bge_asf_mode = 0;
2720f1a7e6d5SScott Long     if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG)
2721f1a7e6d5SScott Long         == BGE_MAGIC_NUMBER)) {
27228cb1383cSDoug Ambrisko         if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG)
27238cb1383cSDoug Ambrisko             & BGE_HWCFG_ASF) {
27248cb1383cSDoug Ambrisko             sc->bge_asf_mode |= ASF_ENABLE;
27258cb1383cSDoug Ambrisko             sc->bge_asf_mode |= ASF_STACKUP;
27268cb1383cSDoug Ambrisko             if (sc->bge_asicrev == BGE_ASICREV_BCM5750) {
27278cb1383cSDoug Ambrisko                 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
27288cb1383cSDoug Ambrisko             }
27298cb1383cSDoug Ambrisko         }
27308cb1383cSDoug Ambrisko     }
27318cb1383cSDoug Ambrisko 
27328cb1383cSDoug Ambrisko     /* Try to reset the chip again the nice way.
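     * That is, after telling any ASF firmware to stop (bge_stop_fw())
     * and signalling the pending reset (bge_sig_pre_reset()), now that
     * the ASF configuration has been probed.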
*/ 27338cb1383cSDoug Ambrisko bge_stop_fw(sc); 27348cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_STOP); 27358cb1383cSDoug Ambrisko if (bge_reset(sc)) { 27368cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "chip reset failed\n"); 27378cb1383cSDoug Ambrisko error = ENXIO; 27388cb1383cSDoug Ambrisko goto fail; 27398cb1383cSDoug Ambrisko } 27408cb1383cSDoug Ambrisko 27418cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 27428cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 274395d67482SBill Paul 274495d67482SBill Paul if (bge_chipinit(sc)) { 2745fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "chip initialization failed\n"); 274695d67482SBill Paul error = ENXIO; 274795d67482SBill Paul goto fail; 274895d67482SBill Paul } 274995d67482SBill Paul 275038cc658fSJohn Baldwin error = bge_get_eaddr(sc, eaddr); 275138cc658fSJohn Baldwin if (error) { 275208013fd3SMarius Strobl device_printf(sc->bge_dev, 275308013fd3SMarius Strobl "failed to read station address\n"); 275495d67482SBill Paul error = ENXIO; 275595d67482SBill Paul goto fail; 275695d67482SBill Paul } 275795d67482SBill Paul 2758f41ac2beSBill Paul /* 5705 limits RX return ring to 512 entries. */ 27597ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 2760f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2761f41ac2beSBill Paul else 2762f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2763f41ac2beSBill Paul 2764f41ac2beSBill Paul if (bge_dma_alloc(dev)) { 2765fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2766fe806fdaSPyun YongHyeon "failed to allocate DMA resources\n"); 2767f41ac2beSBill Paul error = ENXIO; 2768f41ac2beSBill Paul goto fail; 2769f41ac2beSBill Paul } 2770f41ac2beSBill Paul 277195d67482SBill Paul /* Set default tuneable values. 
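     * Coalescing defaults are 150 ticks and 10 buffer descriptors per
     * interrupt for both RX and TX; bge_stat_ticks sets how often the
     * chip updates the statistics block.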
*/ 277295d67482SBill Paul sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 277395d67482SBill Paul sc->bge_rx_coal_ticks = 150; 277495d67482SBill Paul sc->bge_tx_coal_ticks = 150; 27756f8718a3SScott Long sc->bge_rx_max_coal_bds = 10; 27766f8718a3SScott Long sc->bge_tx_max_coal_bds = 10; 277795d67482SBill Paul 277895d67482SBill Paul /* Set up ifnet structure */ 2779fc74a9f9SBrooks Davis ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2780fc74a9f9SBrooks Davis if (ifp == NULL) { 2781fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2782fc74a9f9SBrooks Davis error = ENXIO; 2783fc74a9f9SBrooks Davis goto fail; 2784fc74a9f9SBrooks Davis } 278595d67482SBill Paul ifp->if_softc = sc; 27869bf40edeSBrooks Davis if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 278795d67482SBill Paul ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 278895d67482SBill Paul ifp->if_ioctl = bge_ioctl; 278995d67482SBill Paul ifp->if_start = bge_start; 279095d67482SBill Paul ifp->if_init = bge_init; 27914d665c4dSDag-Erling Smørgrav ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 27924d665c4dSDag-Erling Smørgrav IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 27934d665c4dSDag-Erling Smørgrav IFQ_SET_READY(&ifp->if_snd); 279495d67482SBill Paul ifp->if_hwassist = BGE_CSUM_FEATURES; 2795d375e524SGleb Smirnoff ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 27964e35d186SJung-uk Kim IFCAP_VLAN_MTU; 2797ca3f1187SPyun YongHyeon if ((sc->bge_flags & BGE_FLAG_TSO) != 0) { 2798ca3f1187SPyun YongHyeon ifp->if_hwassist |= CSUM_TSO; 2799ca3f1187SPyun YongHyeon ifp->if_capabilities |= IFCAP_TSO4; 2800ca3f1187SPyun YongHyeon } 28014e35d186SJung-uk Kim #ifdef IFCAP_VLAN_HWCSUM 28024e35d186SJung-uk Kim ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 28034e35d186SJung-uk Kim #endif 280495d67482SBill Paul ifp->if_capenable = ifp->if_capabilities; 280575719184SGleb Smirnoff #ifdef DEVICE_POLLING 280675719184SGleb Smirnoff ifp->if_capabilities |= IFCAP_POLLING; 280775719184SGleb Smirnoff #endif 280895d67482SBill Paul 2809a1d52896SBill Paul /* 2810d375e524SGleb Smirnoff * 5700 B0 chips do not support checksumming correctly due 2811d375e524SGleb Smirnoff * to hardware bugs. 2812d375e524SGleb Smirnoff */ 2813d375e524SGleb Smirnoff if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2814d375e524SGleb Smirnoff ifp->if_capabilities &= ~IFCAP_HWCSUM; 28154d3a629cSPyun YongHyeon ifp->if_capenable &= ~IFCAP_HWCSUM; 2816d375e524SGleb Smirnoff ifp->if_hwassist = 0; 2817d375e524SGleb Smirnoff } 2818d375e524SGleb Smirnoff 2819d375e524SGleb Smirnoff /* 2820a1d52896SBill Paul * Figure out what sort of media we have by checking the 282141abcc1bSPaul Saab * hardware config word in the first 32k of NIC internal memory, 282241abcc1bSPaul Saab * or fall back to examining the EEPROM if necessary. 282341abcc1bSPaul Saab * Note: on some BCM5700 cards, this value appears to be unset. 282441abcc1bSPaul Saab * If that's the case, we have to rely on identifying the NIC 282541abcc1bSPaul Saab * by its PCI subsystem ID, as we do below for the SysKonnect 282641abcc1bSPaul Saab * SK-9D41. 
2827a1d52896SBill Paul      */
282841abcc1bSPaul Saab     if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
282941abcc1bSPaul Saab         hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
28305fea260fSMarius Strobl     else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
28315fea260fSMarius Strobl         (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
2832f6789fbaSPyun YongHyeon         if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2833f6789fbaSPyun YongHyeon             sizeof(hwcfg))) {
2834fe806fdaSPyun YongHyeon             device_printf(sc->bge_dev, "failed to read EEPROM\n");
2835f6789fbaSPyun YongHyeon             error = ENXIO;
2836f6789fbaSPyun YongHyeon             goto fail;
2837f6789fbaSPyun YongHyeon         }
283841abcc1bSPaul Saab         hwcfg = ntohl(hwcfg);
283941abcc1bSPaul Saab     }
284041abcc1bSPaul Saab 
284141abcc1bSPaul Saab     if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2842652ae483SGleb Smirnoff         sc->bge_flags |= BGE_FLAG_TBI;
2843a1d52896SBill Paul 
284495d67482SBill Paul     /* The SysKonnect SK-9D41 is a 1000baseSX card. */
28450c8aa4eaSJung-uk Kim     if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2846652ae483SGleb Smirnoff         sc->bge_flags |= BGE_FLAG_TBI;
284795d67482SBill Paul 
2848652ae483SGleb Smirnoff     if (sc->bge_flags & BGE_FLAG_TBI) {
28490c8aa4eaSJung-uk Kim         ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
28500c8aa4eaSJung-uk Kim             bge_ifmedia_sts);
28510c8aa4eaSJung-uk Kim         ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
28526098821cSJung-uk Kim         ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
28536098821cSJung-uk Kim             0, NULL);
285495d67482SBill Paul         ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
285595d67482SBill Paul         ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2856da3003f0SBill Paul         sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
285795d67482SBill Paul     } else {
285895d67482SBill Paul         /*
28598cb1383cSDoug Ambrisko          * Do transceiver setup and tell the firmware the
28608cb1383cSDoug Ambrisko          * driver is down so we can try to get access to the
28618cb1383cSDoug Ambrisko          * PHY during the probe if ASF is running. Retry a couple
28628cb1383cSDoug Ambrisko          * of times if we get a conflict with the ASF firmware
28638cb1383cSDoug Ambrisko          * accessing the PHY.
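         * (The probe is retried up to four times, resetting the PHY
         * through BMCR between attempts.)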
286495d67482SBill Paul */ 28654012d104SMarius Strobl trys = 0; 28668cb1383cSDoug Ambrisko BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 28678cb1383cSDoug Ambrisko again: 28688cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 28698cb1383cSDoug Ambrisko 287095d67482SBill Paul if (mii_phy_probe(dev, &sc->bge_miibus, 287195d67482SBill Paul bge_ifmedia_upd, bge_ifmedia_sts)) { 28728cb1383cSDoug Ambrisko if (trys++ < 4) { 28738cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "Try again\n"); 28744e35d186SJung-uk Kim bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, 28754e35d186SJung-uk Kim BMCR_RESET); 28768cb1383cSDoug Ambrisko goto again; 28778cb1383cSDoug Ambrisko } 28788cb1383cSDoug Ambrisko 2879fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "MII without any PHY!\n"); 288095d67482SBill Paul error = ENXIO; 288195d67482SBill Paul goto fail; 288295d67482SBill Paul } 28838cb1383cSDoug Ambrisko 28848cb1383cSDoug Ambrisko /* 28858cb1383cSDoug Ambrisko * Now tell the firmware we are going up after probing the PHY 28868cb1383cSDoug Ambrisko */ 28878cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 28888cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 288995d67482SBill Paul } 289095d67482SBill Paul 289195d67482SBill Paul /* 2892e255b776SJohn Polstra * When using the BCM5701 in PCI-X mode, data corruption has 2893e255b776SJohn Polstra * been observed in the first few bytes of some received packets. 2894e255b776SJohn Polstra * Aligning the packet buffer in memory eliminates the corruption. 2895e255b776SJohn Polstra * Unfortunately, this misaligns the packet payloads. On platforms 2896e255b776SJohn Polstra * which do not support unaligned accesses, we will realign the 2897e255b776SJohn Polstra * payloads by copying the received packets. 2898e255b776SJohn Polstra */ 2899652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2900652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_PCIX) 2901652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2902e255b776SJohn Polstra 2903e255b776SJohn Polstra /* 290495d67482SBill Paul * Call MI attach routine. 290595d67482SBill Paul */ 2906fc74a9f9SBrooks Davis ether_ifattach(ifp, eaddr); 2907b74e67fbSGleb Smirnoff callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); 29080f9bd73bSSam Leffler 290961ccb9daSPyun YongHyeon /* Tell upper layer we support long frames. */ 291061ccb9daSPyun YongHyeon ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 291161ccb9daSPyun YongHyeon 29120f9bd73bSSam Leffler /* 29130f9bd73bSSam Leffler * Hookup IRQ last. 29140f9bd73bSSam Leffler */ 29154e35d186SJung-uk Kim #if __FreeBSD_version > 700030 2916dfe0df9aSPyun YongHyeon if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { 2917dfe0df9aSPyun YongHyeon /* Take advantage of single-shot MSI. 
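         * bge_msi_intr() is an interrupt filter that only queues
         * bge_intr_task on a private fast taskqueue; the actual RX/TX
         * processing runs in task context.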
*/ 2918dfe0df9aSPyun YongHyeon sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, 2919dfe0df9aSPyun YongHyeon taskqueue_thread_enqueue, &sc->bge_tq); 2920dfe0df9aSPyun YongHyeon if (sc->bge_tq == NULL) { 2921dfe0df9aSPyun YongHyeon device_printf(dev, "could not create taskqueue.\n"); 2922dfe0df9aSPyun YongHyeon ether_ifdetach(ifp); 2923dfe0df9aSPyun YongHyeon error = ENXIO; 2924dfe0df9aSPyun YongHyeon goto fail; 2925dfe0df9aSPyun YongHyeon } 2926dfe0df9aSPyun YongHyeon taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq", 2927dfe0df9aSPyun YongHyeon device_get_nameunit(sc->bge_dev)); 2928dfe0df9aSPyun YongHyeon error = bus_setup_intr(dev, sc->bge_irq, 2929dfe0df9aSPyun YongHyeon INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc, 2930dfe0df9aSPyun YongHyeon &sc->bge_intrhand); 2931dfe0df9aSPyun YongHyeon if (error) 2932dfe0df9aSPyun YongHyeon ether_ifdetach(ifp); 2933dfe0df9aSPyun YongHyeon } else 2934dfe0df9aSPyun YongHyeon error = bus_setup_intr(dev, sc->bge_irq, 2935dfe0df9aSPyun YongHyeon INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, 2936dfe0df9aSPyun YongHyeon &sc->bge_intrhand); 29374e35d186SJung-uk Kim #else 29384e35d186SJung-uk Kim error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 29394e35d186SJung-uk Kim bge_intr, sc, &sc->bge_intrhand); 29404e35d186SJung-uk Kim #endif 29410f9bd73bSSam Leffler 29420f9bd73bSSam Leffler if (error) { 2943fc74a9f9SBrooks Davis bge_detach(dev); 2944fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "couldn't set up irq\n"); 29450f9bd73bSSam Leffler } 294695d67482SBill Paul 29476f8718a3SScott Long bge_add_sysctls(sc); 29486f8718a3SScott Long 294908013fd3SMarius Strobl return (0); 295008013fd3SMarius Strobl 295195d67482SBill Paul fail: 295208013fd3SMarius Strobl bge_release_resources(sc); 295308013fd3SMarius Strobl 295495d67482SBill Paul return (error); 295595d67482SBill Paul } 295695d67482SBill Paul 295795d67482SBill Paul static int 29583f74909aSGleb Smirnoff bge_detach(device_t dev) 295995d67482SBill Paul { 296095d67482SBill Paul struct bge_softc *sc; 296195d67482SBill Paul struct ifnet *ifp; 296295d67482SBill Paul 296395d67482SBill Paul sc = device_get_softc(dev); 2964fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 296595d67482SBill Paul 296675719184SGleb Smirnoff #ifdef DEVICE_POLLING 296775719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) 296875719184SGleb Smirnoff ether_poll_deregister(ifp); 296975719184SGleb Smirnoff #endif 297075719184SGleb Smirnoff 29710f9bd73bSSam Leffler BGE_LOCK(sc); 297295d67482SBill Paul bge_stop(sc); 297395d67482SBill Paul bge_reset(sc); 29740f9bd73bSSam Leffler BGE_UNLOCK(sc); 29750f9bd73bSSam Leffler 29765dda8085SOleg Bulyzhin callout_drain(&sc->bge_stat_ch); 29775dda8085SOleg Bulyzhin 2978dfe0df9aSPyun YongHyeon if (sc->bge_tq) 2979dfe0df9aSPyun YongHyeon taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); 29800f9bd73bSSam Leffler ether_ifdetach(ifp); 298195d67482SBill Paul 2982652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 298395d67482SBill Paul ifmedia_removeall(&sc->bge_ifmedia); 298495d67482SBill Paul } else { 298595d67482SBill Paul bus_generic_detach(dev); 298695d67482SBill Paul device_delete_child(dev, sc->bge_miibus); 298795d67482SBill Paul } 298895d67482SBill Paul 298995d67482SBill Paul bge_release_resources(sc); 299095d67482SBill Paul 299195d67482SBill Paul return (0); 299295d67482SBill Paul } 299395d67482SBill Paul 299495d67482SBill Paul static void 29953f74909aSGleb Smirnoff bge_release_resources(struct bge_softc *sc) 299695d67482SBill Paul { 299795d67482SBill 
Paul device_t dev; 299895d67482SBill Paul 299995d67482SBill Paul dev = sc->bge_dev; 300095d67482SBill Paul 3001dfe0df9aSPyun YongHyeon if (sc->bge_tq != NULL) 3002dfe0df9aSPyun YongHyeon taskqueue_free(sc->bge_tq); 3003dfe0df9aSPyun YongHyeon 300495d67482SBill Paul if (sc->bge_intrhand != NULL) 300595d67482SBill Paul bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 300695d67482SBill Paul 300795d67482SBill Paul if (sc->bge_irq != NULL) 3008724bd939SJohn Polstra bus_release_resource(dev, SYS_RES_IRQ, 3009724bd939SJohn Polstra sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq); 3010724bd939SJohn Polstra 3011724bd939SJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) 3012724bd939SJohn Polstra pci_release_msi(dev); 301395d67482SBill Paul 301495d67482SBill Paul if (sc->bge_res != NULL) 301595d67482SBill Paul bus_release_resource(dev, SYS_RES_MEMORY, 301695d67482SBill Paul BGE_PCI_BAR0, sc->bge_res); 301795d67482SBill Paul 3018ad61f896SRuslan Ermilov if (sc->bge_ifp != NULL) 3019ad61f896SRuslan Ermilov if_free(sc->bge_ifp); 3020ad61f896SRuslan Ermilov 3021f41ac2beSBill Paul bge_dma_free(sc); 302295d67482SBill Paul 30230f9bd73bSSam Leffler if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 30240f9bd73bSSam Leffler BGE_LOCK_DESTROY(sc); 302595d67482SBill Paul } 302695d67482SBill Paul 30278cb1383cSDoug Ambrisko static int 30283f74909aSGleb Smirnoff bge_reset(struct bge_softc *sc) 302995d67482SBill Paul { 303095d67482SBill Paul device_t dev; 30315fea260fSMarius Strobl uint32_t cachesize, command, pcistate, reset, val; 30326f8718a3SScott Long void (*write_op)(struct bge_softc *, int, int); 30330aaf1057SPyun YongHyeon uint16_t devctl; 30345fea260fSMarius Strobl int i; 303595d67482SBill Paul 303695d67482SBill Paul dev = sc->bge_dev; 303795d67482SBill Paul 303838cc658fSJohn Baldwin if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 303938cc658fSJohn Baldwin (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 30406f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) 30416f8718a3SScott Long write_op = bge_writemem_direct; 30426f8718a3SScott Long else 30436f8718a3SScott Long write_op = bge_writemem_ind; 30449ba784dbSScott Long } else 30456f8718a3SScott Long write_op = bge_writereg_ind; 30466f8718a3SScott Long 304795d67482SBill Paul /* Save some important PCI state. */ 304895d67482SBill Paul cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 304995d67482SBill Paul command = pci_read_config(dev, BGE_PCI_CMD, 4); 305095d67482SBill Paul pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 305195d67482SBill Paul 305295d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL, 305395d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 3054e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 305595d67482SBill Paul 30566f8718a3SScott Long /* Disable fastboot on controllers that support it. */ 30576f8718a3SScott Long if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 3058a5779553SStanislav Sedov BGE_IS_5755_PLUS(sc)) { 30596f8718a3SScott Long if (bootverbose) 30609ba784dbSScott Long device_printf(sc->bge_dev, "Disabling fastboot\n"); 30616f8718a3SScott Long CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 30626f8718a3SScott Long } 30636f8718a3SScott Long 30646f8718a3SScott Long /* 30656f8718a3SScott Long * Write the magic number to SRAM at offset 0xB50. 30666f8718a3SScott Long * When firmware finishes its initialization it will 30676f8718a3SScott Long * write ~BGE_MAGIC_NUMBER to the same location. 
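     * The handshake loop further down polls for that value; the BCM5906
     * reports completion through BGE_VCPU_STATUS instead.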
30686f8718a3SScott Long */ 30696f8718a3SScott Long bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 30706f8718a3SScott Long 30710c8aa4eaSJung-uk Kim reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 3072e53d81eeSPaul Saab 3073e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3074652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 30750c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ 30760c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, 0x7E2C, 0x20); 3077e53d81eeSPaul Saab if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 3078e53d81eeSPaul Saab /* Prevent PCIE link training during global reset */ 30790c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 30800c8aa4eaSJung-uk Kim reset |= 1 << 29; 3081e53d81eeSPaul Saab } 3082e53d81eeSPaul Saab } 3083e53d81eeSPaul Saab 308421c9e407SDavid Christensen /* 30856f8718a3SScott Long * Set GPHY Power Down Override to leave GPHY 30866f8718a3SScott Long * powered up in D0 uninitialized. 30876f8718a3SScott Long */ 30885345bad0SScott Long if (BGE_IS_5705_PLUS(sc)) 30896f8718a3SScott Long reset |= 0x04000000; 30906f8718a3SScott Long 309195d67482SBill Paul /* Issue global reset */ 30926f8718a3SScott Long write_op(sc, BGE_MISC_CFG, reset); 309395d67482SBill Paul 309438cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 30955fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_STATUS); 309638cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_STATUS, 30975fea260fSMarius Strobl val | BGE_VCPU_STATUS_DRV_RESET); 30985fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); 309938cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, 31005fea260fSMarius Strobl val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); 310138cc658fSJohn Baldwin } 310238cc658fSJohn Baldwin 310395d67482SBill Paul DELAY(1000); 310495d67482SBill Paul 3105e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3106652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 3107e53d81eeSPaul Saab if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 3108e53d81eeSPaul Saab DELAY(500000); /* wait for link training to complete */ 31095fea260fSMarius Strobl val = pci_read_config(dev, 0xC4, 4); 31105fea260fSMarius Strobl pci_write_config(dev, 0xC4, val | (1 << 15), 4); 3111e53d81eeSPaul Saab } 31120aaf1057SPyun YongHyeon devctl = pci_read_config(dev, 31130aaf1057SPyun YongHyeon sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2); 31140aaf1057SPyun YongHyeon /* Clear enable no snoop and disable relaxed ordering. */ 31150aaf1057SPyun YongHyeon devctl &= ~(0x0010 | 0x0800); 31160aaf1057SPyun YongHyeon /* Set PCIE max payload size to 128. */ 31170aaf1057SPyun YongHyeon devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD; 31180aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 31190aaf1057SPyun YongHyeon devctl, 2); 31200aaf1057SPyun YongHyeon /* Clear error status. */ 31210aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA, 31220aaf1057SPyun YongHyeon 0, 2); 3123e53d81eeSPaul Saab } 3124e53d81eeSPaul Saab 31253f74909aSGleb Smirnoff /* Reset some of the PCI state that got zapped by reset. 
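     * The cache line size and command registers saved before the reset
     * are restored, and the indirect-access bits in BGE_PCI_MISC_CTL are
     * set up again.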
 */
312695d67482SBill Paul     pci_write_config(dev, BGE_PCI_MISC_CTL,
312795d67482SBill Paul         BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3128e907febfSPyun YongHyeon         BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
312995d67482SBill Paul     pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
313095d67482SBill Paul     pci_write_config(dev, BGE_PCI_CMD, command, 4);
31310c8aa4eaSJung-uk Kim     write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
313295d67482SBill Paul 
3133bf6ef57aSJohn Polstra     /* Re-enable MSI, if necessary, and enable the memory arbiter. */
31344c0da0ffSGleb Smirnoff     if (BGE_IS_5714_FAMILY(sc)) {
3135bf6ef57aSJohn Polstra         /* This chip disables MSI on reset. */
3136bf6ef57aSJohn Polstra         if (sc->bge_flags & BGE_FLAG_MSI) {
31370aaf1057SPyun YongHyeon             val = pci_read_config(dev,
31380aaf1057SPyun YongHyeon                 sc->bge_msicap + PCIR_MSI_CTRL, 2);
31390aaf1057SPyun YongHyeon             pci_write_config(dev,
31400aaf1057SPyun YongHyeon                 sc->bge_msicap + PCIR_MSI_CTRL,
3141bf6ef57aSJohn Polstra                 val | PCIM_MSICTRL_MSI_ENABLE, 2);
3142bf6ef57aSJohn Polstra             val = CSR_READ_4(sc, BGE_MSI_MODE);
3143bf6ef57aSJohn Polstra             CSR_WRITE_4(sc, BGE_MSI_MODE,
3144bf6ef57aSJohn Polstra                 val | BGE_MSIMODE_ENABLE);
3145bf6ef57aSJohn Polstra         }
31464c0da0ffSGleb Smirnoff         val = CSR_READ_4(sc, BGE_MARB_MODE);
31474c0da0ffSGleb Smirnoff         CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
31484c0da0ffSGleb Smirnoff     } else
3149a7b0c314SPaul Saab         CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3150a7b0c314SPaul Saab 
315138cc658fSJohn Baldwin     if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
315238cc658fSJohn Baldwin         for (i = 0; i < BGE_TIMEOUT; i++) {
315338cc658fSJohn Baldwin             val = CSR_READ_4(sc, BGE_VCPU_STATUS);
315438cc658fSJohn Baldwin             if (val & BGE_VCPU_STATUS_INIT_DONE)
315538cc658fSJohn Baldwin                 break;
315638cc658fSJohn Baldwin             DELAY(100);
315738cc658fSJohn Baldwin         }
315838cc658fSJohn Baldwin         if (i == BGE_TIMEOUT) {
315938cc658fSJohn Baldwin             device_printf(sc->bge_dev, "reset timed out\n");
316038cc658fSJohn Baldwin             return (1);
316138cc658fSJohn Baldwin         }
316238cc658fSJohn Baldwin     } else {
316395d67482SBill Paul         /*
31646f8718a3SScott Long          * Poll until we see the 1's complement of the magic number.
316508013fd3SMarius Strobl          * This indicates that the firmware initialization is complete.
31665fea260fSMarius Strobl          * We expect this to fail if no chip containing the Ethernet
31675fea260fSMarius Strobl          * address is fitted though.
316895d67482SBill Paul          */
316995d67482SBill Paul         for (i = 0; i < BGE_TIMEOUT; i++) {
3170d5d23857SJung-uk Kim             DELAY(10);
317195d67482SBill Paul             val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
317295d67482SBill Paul             if (val == ~BGE_MAGIC_NUMBER)
317395d67482SBill Paul                 break;
317495d67482SBill Paul         }
317595d67482SBill Paul 
31765fea260fSMarius Strobl         if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
31779ba784dbSScott Long             device_printf(sc->bge_dev, "firmware handshake timed out, "
31789ba784dbSScott Long                 "found 0x%08x\n", val);
317938cc658fSJohn Baldwin     }
318095d67482SBill Paul 
318195d67482SBill Paul     /*
318295d67482SBill Paul      * XXX Wait for the value of the PCISTATE register to
318395d67482SBill Paul      * return to its original pre-reset state. This is a
318495d67482SBill Paul      * fairly good indicator of reset completion. If we don't
318595d67482SBill Paul      * wait for the reset to fully complete, trying to read
318695d67482SBill Paul      * from the device's non-PCI registers may yield garbage
318795d67482SBill Paul      * results.
318895d67482SBill Paul */ 318995d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 319095d67482SBill Paul if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 319195d67482SBill Paul break; 319295d67482SBill Paul DELAY(10); 319395d67482SBill Paul } 319495d67482SBill Paul 31956f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) { 31960c8aa4eaSJung-uk Kim reset = bge_readmem_ind(sc, 0x7C00); 31970c8aa4eaSJung-uk Kim bge_writemem_ind(sc, 0x7C00, reset | (1 << 25)); 31986f8718a3SScott Long } 31996f8718a3SScott Long 32003f74909aSGleb Smirnoff /* Fix up byte swapping. */ 3201e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 320295d67482SBill Paul BGE_MODECTL_BYTESWAP_DATA); 320395d67482SBill Paul 32048cb1383cSDoug Ambrisko /* Tell the ASF firmware we are up */ 32058cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 32068cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 32078cb1383cSDoug Ambrisko 320895d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 320995d67482SBill Paul 3210da3003f0SBill Paul /* 3211da3003f0SBill Paul * The 5704 in TBI mode apparently needs some special 3212da3003f0SBill Paul * adjustment to insure the SERDES drive level is set 3213da3003f0SBill Paul * to 1.2V. 3214da3003f0SBill Paul */ 3215652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 3216652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_TBI) { 32175fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_SERDES_CFG); 32185fea260fSMarius Strobl val = (val & ~0xFFF) | 0x880; 32195fea260fSMarius Strobl CSR_WRITE_4(sc, BGE_SERDES_CFG, val); 3220da3003f0SBill Paul } 3221da3003f0SBill Paul 3222e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3223652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE && 3224652ae483SGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 32255fea260fSMarius Strobl val = CSR_READ_4(sc, 0x7C00); 32265fea260fSMarius Strobl CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); 3227e53d81eeSPaul Saab } 322895d67482SBill Paul DELAY(10000); 32298cb1383cSDoug Ambrisko 32308cb1383cSDoug Ambrisko return(0); 323195d67482SBill Paul } 323295d67482SBill Paul 323395d67482SBill Paul /* 323495d67482SBill Paul * Frame reception handling. This is called if there's a frame 323595d67482SBill Paul * on the receive return list. 323695d67482SBill Paul * 323795d67482SBill Paul * Note: we have to be able to handle two possibilities here: 32381be6acb7SGleb Smirnoff * 1) the frame is from the jumbo receive ring 323995d67482SBill Paul * 2) the frame is from the standard receive ring 324095d67482SBill Paul */ 324195d67482SBill Paul 32421abcdbd1SAttilio Rao static int 3243dfe0df9aSPyun YongHyeon bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck) 324495d67482SBill Paul { 324595d67482SBill Paul struct ifnet *ifp; 32461abcdbd1SAttilio Rao int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; 3247b9c05fa5SPyun YongHyeon uint16_t rx_cons; 324895d67482SBill Paul 32497f21e273SStanislav Sedov rx_cons = sc->bge_rx_saved_considx; 32500f9bd73bSSam Leffler 32513f74909aSGleb Smirnoff /* Nothing to do. 
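     * The producer index reported by the hardware matches our saved
     * consumer index, so the RX return ring is empty.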
*/ 32527f21e273SStanislav Sedov if (rx_cons == rx_prod) 32531abcdbd1SAttilio Rao return (rx_npkts); 3254cfcb5025SOleg Bulyzhin 3255fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 325695d67482SBill Paul 3257f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 3258e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 3259f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 326015eda801SStanislav Sedov sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); 3261c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 3262c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) 3263f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 326415eda801SStanislav Sedov sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); 3265f41ac2beSBill Paul 32667f21e273SStanislav Sedov while (rx_cons != rx_prod) { 326795d67482SBill Paul struct bge_rx_bd *cur_rx; 32683f74909aSGleb Smirnoff uint32_t rxidx; 326995d67482SBill Paul struct mbuf *m = NULL; 32703f74909aSGleb Smirnoff uint16_t vlan_tag = 0; 327195d67482SBill Paul int have_tag = 0; 327295d67482SBill Paul 327375719184SGleb Smirnoff #ifdef DEVICE_POLLING 327475719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 327575719184SGleb Smirnoff if (sc->rxcycles <= 0) 327675719184SGleb Smirnoff break; 327775719184SGleb Smirnoff sc->rxcycles--; 327875719184SGleb Smirnoff } 327975719184SGleb Smirnoff #endif 328075719184SGleb Smirnoff 32817f21e273SStanislav Sedov cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; 328295d67482SBill Paul 328395d67482SBill Paul rxidx = cur_rx->bge_idx; 32847f21e273SStanislav Sedov BGE_INC(rx_cons, sc->bge_return_ring_cnt); 328595d67482SBill Paul 3286cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING && 3287cb2eacc7SYaroslav Tykhiy cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 328895d67482SBill Paul have_tag = 1; 328995d67482SBill Paul vlan_tag = cur_rx->bge_vlan_tag; 329095d67482SBill Paul } 329195d67482SBill Paul 329295d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 329395d67482SBill Paul jumbocnt++; 3294943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 329595d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3296943787f3SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 329795d67482SBill Paul continue; 329895d67482SBill Paul } 3299943787f3SPyun YongHyeon if (bge_newbuf_jumbo(sc, rxidx) != 0) { 3300943787f3SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3301943787f3SPyun YongHyeon ifp->if_iqdrops++; 330295d67482SBill Paul continue; 330395d67482SBill Paul } 330403e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 330595d67482SBill Paul } else { 330695d67482SBill Paul stdcnt++; 330795d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3308943787f3SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 330995d67482SBill Paul continue; 331095d67482SBill Paul } 3311943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3312943787f3SPyun YongHyeon if (bge_newbuf_std(sc, rxidx) != 0) { 3313943787f3SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3314943787f3SPyun YongHyeon ifp->if_iqdrops++; 331595d67482SBill Paul continue; 331695d67482SBill Paul } 331703e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 331895d67482SBill Paul } 331995d67482SBill Paul 332095d67482SBill Paul ifp->if_ipackets++; 3321e65bed95SPyun YongHyeon #ifndef __NO_STRICT_ALIGNMENT 
3322e255b776SJohn Polstra /* 3323e65bed95SPyun YongHyeon * For architectures with strict alignment we must make sure 3324e65bed95SPyun YongHyeon * the payload is aligned. 3325e255b776SJohn Polstra */ 3326652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 3327e255b776SJohn Polstra bcopy(m->m_data, m->m_data + ETHER_ALIGN, 3328e255b776SJohn Polstra cur_rx->bge_len); 3329e255b776SJohn Polstra m->m_data += ETHER_ALIGN; 3330e255b776SJohn Polstra } 3331e255b776SJohn Polstra #endif 3332473851baSPaul Saab m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 333395d67482SBill Paul m->m_pkthdr.rcvif = ifp; 333495d67482SBill Paul 3335b874fdd4SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_RXCSUM) { 333678178cd1SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 333795d67482SBill Paul m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 33380c8aa4eaSJung-uk Kim if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) 33390c8aa4eaSJung-uk Kim m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 334078178cd1SGleb Smirnoff } 3341d375e524SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3342d375e524SGleb Smirnoff m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 334395d67482SBill Paul m->m_pkthdr.csum_data = 334495d67482SBill Paul cur_rx->bge_tcp_udp_csum; 3345ee7ef91cSOleg Bulyzhin m->m_pkthdr.csum_flags |= 3346ee7ef91cSOleg Bulyzhin CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 334795d67482SBill Paul } 334895d67482SBill Paul } 334995d67482SBill Paul 335095d67482SBill Paul /* 3351673d9191SSam Leffler * If we received a packet with a vlan tag, 3352673d9191SSam Leffler * attach that information to the packet. 335395d67482SBill Paul */ 3354d147662cSGleb Smirnoff if (have_tag) { 33554e35d186SJung-uk Kim #if __FreeBSD_version > 700022 335678ba57b9SAndre Oppermann m->m_pkthdr.ether_vtag = vlan_tag; 335778ba57b9SAndre Oppermann m->m_flags |= M_VLANTAG; 33584e35d186SJung-uk Kim #else 33594e35d186SJung-uk Kim VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); 33604e35d186SJung-uk Kim if (m == NULL) 33614e35d186SJung-uk Kim continue; 33624e35d186SJung-uk Kim #endif 3363d147662cSGleb Smirnoff } 336495d67482SBill Paul 3365dfe0df9aSPyun YongHyeon if (holdlck != 0) { 33660f9bd73bSSam Leffler BGE_UNLOCK(sc); 3367673d9191SSam Leffler (*ifp->if_input)(ifp, m); 33680f9bd73bSSam Leffler BGE_LOCK(sc); 3369dfe0df9aSPyun YongHyeon } else 3370dfe0df9aSPyun YongHyeon (*ifp->if_input)(ifp, m); 3371d4da719cSAttilio Rao rx_npkts++; 337225e13e68SXin LI 337325e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 33748cf7d13dSAttilio Rao return (rx_npkts); 337595d67482SBill Paul } 337695d67482SBill Paul 337715eda801SStanislav Sedov bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 337815eda801SStanislav Sedov sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); 3379e65bed95SPyun YongHyeon if (stdcnt > 0) 3380f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 3381e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 33824c0da0ffSGleb Smirnoff 3383c215fd77SPyun YongHyeon if (jumbocnt > 0) 3384f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 33854c0da0ffSGleb Smirnoff sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 3386f41ac2beSBill Paul 33877f21e273SStanislav Sedov sc->bge_rx_saved_considx = rx_cons; 338838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 338995d67482SBill Paul if (stdcnt) 339038cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 339195d67482SBill Paul if (jumbocnt) 339238cc658fSJohn 
Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3393f5a034f9SPyun YongHyeon #ifdef notyet 3394f5a034f9SPyun YongHyeon /* 3395f5a034f9SPyun YongHyeon * This register wraps very quickly under heavy packet drops. 3396f5a034f9SPyun YongHyeon * If you need correct statistics, you can enable this check. 3397f5a034f9SPyun YongHyeon */ 3398f5a034f9SPyun YongHyeon if (BGE_IS_5705_PLUS(sc)) 3399f5a034f9SPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3400f5a034f9SPyun YongHyeon #endif 34011abcdbd1SAttilio Rao return (rx_npkts); 340295d67482SBill Paul } 340395d67482SBill Paul 340495d67482SBill Paul static void 3405b9c05fa5SPyun YongHyeon bge_txeof(struct bge_softc *sc, uint16_t tx_cons) 340695d67482SBill Paul { 340795d67482SBill Paul struct bge_tx_bd *cur_tx = NULL; 340895d67482SBill Paul struct ifnet *ifp; 340995d67482SBill Paul 34100f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 34110f9bd73bSSam Leffler 34123f74909aSGleb Smirnoff /* Nothing to do. */ 3413b9c05fa5SPyun YongHyeon if (sc->bge_tx_saved_considx == tx_cons) 3414cfcb5025SOleg Bulyzhin return; 3415cfcb5025SOleg Bulyzhin 3416fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 341795d67482SBill Paul 3418e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 34195c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); 342095d67482SBill Paul /* 342195d67482SBill Paul * Go through our tx ring and free mbufs for those 342295d67482SBill Paul * frames that have been sent. 342395d67482SBill Paul */ 3424b9c05fa5SPyun YongHyeon while (sc->bge_tx_saved_considx != tx_cons) { 34253f74909aSGleb Smirnoff uint32_t idx = 0; 342695d67482SBill Paul 342795d67482SBill Paul idx = sc->bge_tx_saved_considx; 3428f41ac2beSBill Paul cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 342995d67482SBill Paul if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 343095d67482SBill Paul ifp->if_opackets++; 343195d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 34320ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 3433e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[idx], 3434e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 34350ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 3436f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[idx]); 3437e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[idx]); 3438e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[idx] = NULL; 343995d67482SBill Paul } 344095d67482SBill Paul sc->bge_txcnt--; 344195d67482SBill Paul BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 344295d67482SBill Paul } 344395d67482SBill Paul 344495d67482SBill Paul if (cur_tx != NULL) 344513f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 34465b01e77cSBruce Evans if (sc->bge_txcnt == 0) 34475b01e77cSBruce Evans sc->bge_timer = 0; 344895d67482SBill Paul } 344995d67482SBill Paul 345075719184SGleb Smirnoff #ifdef DEVICE_POLLING 34511abcdbd1SAttilio Rao static int 345275719184SGleb Smirnoff bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 345375719184SGleb Smirnoff { 345475719184SGleb Smirnoff struct bge_softc *sc = ifp->if_softc; 3455b9c05fa5SPyun YongHyeon uint16_t rx_prod, tx_cons; 3456366454f2SOleg Bulyzhin uint32_t statusword; 34571abcdbd1SAttilio Rao int rx_npkts = 0; 345875719184SGleb Smirnoff 34593f74909aSGleb Smirnoff BGE_LOCK(sc); 34603f74909aSGleb Smirnoff if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 34613f74909aSGleb Smirnoff BGE_UNLOCK(sc); 34621abcdbd1SAttilio Rao return (rx_npkts); 34633f74909aSGleb Smirnoff } 346475719184SGleb 
Smirnoff 3465dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3466b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3467b9c05fa5SPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3468b9c05fa5SPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 3469b9c05fa5SPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 3470dab5cd05SOleg Bulyzhin 34713f74909aSGleb Smirnoff statusword = atomic_readandclear_32( 34723f74909aSGleb Smirnoff &sc->bge_ldata.bge_status_block->bge_status); 3473dab5cd05SOleg Bulyzhin 3474dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3475b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3476b9c05fa5SPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3477366454f2SOleg Bulyzhin 34780c8aa4eaSJung-uk Kim /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */ 3479366454f2SOleg Bulyzhin if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) 3480366454f2SOleg Bulyzhin sc->bge_link_evt++; 3481366454f2SOleg Bulyzhin 3482366454f2SOleg Bulyzhin if (cmd == POLL_AND_CHECK_STATUS) 3483366454f2SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 34844c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3485652ae483SGleb Smirnoff sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) 3486366454f2SOleg Bulyzhin bge_link_upd(sc); 3487366454f2SOleg Bulyzhin 3488366454f2SOleg Bulyzhin sc->rxcycles = count; 3489dfe0df9aSPyun YongHyeon rx_npkts = bge_rxeof(sc, rx_prod, 1); 349025e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 349125e13e68SXin LI BGE_UNLOCK(sc); 34928cf7d13dSAttilio Rao return (rx_npkts); 349325e13e68SXin LI } 3494b9c05fa5SPyun YongHyeon bge_txeof(sc, tx_cons); 3495366454f2SOleg Bulyzhin if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3496366454f2SOleg Bulyzhin bge_start_locked(ifp); 34973f74909aSGleb Smirnoff 34983f74909aSGleb Smirnoff BGE_UNLOCK(sc); 34991abcdbd1SAttilio Rao return (rx_npkts); 350075719184SGleb Smirnoff } 350175719184SGleb Smirnoff #endif /* DEVICE_POLLING */ 350275719184SGleb Smirnoff 3503dfe0df9aSPyun YongHyeon static int 3504dfe0df9aSPyun YongHyeon bge_msi_intr(void *arg) 3505dfe0df9aSPyun YongHyeon { 3506dfe0df9aSPyun YongHyeon struct bge_softc *sc; 3507dfe0df9aSPyun YongHyeon 3508dfe0df9aSPyun YongHyeon sc = (struct bge_softc *)arg; 3509dfe0df9aSPyun YongHyeon /* 3510dfe0df9aSPyun YongHyeon * This interrupt is not shared and controller already 3511dfe0df9aSPyun YongHyeon * disabled further interrupt. 3512dfe0df9aSPyun YongHyeon */ 3513dfe0df9aSPyun YongHyeon taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task); 3514dfe0df9aSPyun YongHyeon return (FILTER_HANDLED); 3515dfe0df9aSPyun YongHyeon } 3516dfe0df9aSPyun YongHyeon 3517dfe0df9aSPyun YongHyeon static void 3518dfe0df9aSPyun YongHyeon bge_intr_task(void *arg, int pending) 3519dfe0df9aSPyun YongHyeon { 3520dfe0df9aSPyun YongHyeon struct bge_softc *sc; 3521dfe0df9aSPyun YongHyeon struct ifnet *ifp; 3522dfe0df9aSPyun YongHyeon uint32_t status; 3523dfe0df9aSPyun YongHyeon uint16_t rx_prod, tx_cons; 3524dfe0df9aSPyun YongHyeon 3525dfe0df9aSPyun YongHyeon sc = (struct bge_softc *)arg; 3526dfe0df9aSPyun YongHyeon ifp = sc->bge_ifp; 3527dfe0df9aSPyun YongHyeon 3528dfe0df9aSPyun YongHyeon if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 3529dfe0df9aSPyun YongHyeon return; 3530dfe0df9aSPyun YongHyeon 3531dfe0df9aSPyun YongHyeon /* Get updated status block. 
*/ 3532dfe0df9aSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3533dfe0df9aSPyun YongHyeon sc->bge_cdata.bge_status_map, 3534dfe0df9aSPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3535dfe0df9aSPyun YongHyeon 3536dfe0df9aSPyun YongHyeon /* Save producer/consumer indexess. */ 3537dfe0df9aSPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 3538dfe0df9aSPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 3539dfe0df9aSPyun YongHyeon status = sc->bge_ldata.bge_status_block->bge_status; 3540dfe0df9aSPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0; 3541dfe0df9aSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3542dfe0df9aSPyun YongHyeon sc->bge_cdata.bge_status_map, 3543dfe0df9aSPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3544dfe0df9aSPyun YongHyeon /* Let controller work. */ 3545dfe0df9aSPyun YongHyeon bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3546dfe0df9aSPyun YongHyeon 3547dfe0df9aSPyun YongHyeon if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) { 3548dfe0df9aSPyun YongHyeon BGE_LOCK(sc); 3549dfe0df9aSPyun YongHyeon bge_link_upd(sc); 3550dfe0df9aSPyun YongHyeon BGE_UNLOCK(sc); 3551dfe0df9aSPyun YongHyeon } 3552dfe0df9aSPyun YongHyeon if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3553dfe0df9aSPyun YongHyeon /* Check RX return ring producer/consumer. */ 3554dfe0df9aSPyun YongHyeon bge_rxeof(sc, rx_prod, 0); 3555dfe0df9aSPyun YongHyeon } 3556dfe0df9aSPyun YongHyeon if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3557dfe0df9aSPyun YongHyeon BGE_LOCK(sc); 3558dfe0df9aSPyun YongHyeon /* Check TX ring producer/consumer. */ 3559dfe0df9aSPyun YongHyeon bge_txeof(sc, tx_cons); 3560dfe0df9aSPyun YongHyeon if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3561dfe0df9aSPyun YongHyeon bge_start_locked(ifp); 3562dfe0df9aSPyun YongHyeon BGE_UNLOCK(sc); 3563dfe0df9aSPyun YongHyeon } 3564dfe0df9aSPyun YongHyeon } 3565dfe0df9aSPyun YongHyeon 356695d67482SBill Paul static void 35673f74909aSGleb Smirnoff bge_intr(void *xsc) 356895d67482SBill Paul { 356995d67482SBill Paul struct bge_softc *sc; 357095d67482SBill Paul struct ifnet *ifp; 3571dab5cd05SOleg Bulyzhin uint32_t statusword; 3572b9c05fa5SPyun YongHyeon uint16_t rx_prod, tx_cons; 357395d67482SBill Paul 357495d67482SBill Paul sc = xsc; 3575f41ac2beSBill Paul 35760f9bd73bSSam Leffler BGE_LOCK(sc); 35770f9bd73bSSam Leffler 3578dab5cd05SOleg Bulyzhin ifp = sc->bge_ifp; 3579dab5cd05SOleg Bulyzhin 358075719184SGleb Smirnoff #ifdef DEVICE_POLLING 358175719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 358275719184SGleb Smirnoff BGE_UNLOCK(sc); 358375719184SGleb Smirnoff return; 358475719184SGleb Smirnoff } 358575719184SGleb Smirnoff #endif 358675719184SGleb Smirnoff 3587f30cbfc6SScott Long /* 3588b848e032SBruce Evans * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't 3589b848e032SBruce Evans * disable interrupts by writing nonzero like we used to, since with 3590b848e032SBruce Evans * our current organization this just gives complications and 3591b848e032SBruce Evans * pessimizations for re-enabling interrupts. We used to have races 3592b848e032SBruce Evans * instead of the necessary complications. 
Disabling interrupts 3593b848e032SBruce Evans * would just reduce the chance of a status update while we are 3594b848e032SBruce Evans * running (by switching to the interrupt-mode coalescence 3595b848e032SBruce Evans * parameters), but this chance is already very low so it is more 3596b848e032SBruce Evans * efficient to get another interrupt than prevent it. 3597b848e032SBruce Evans * 3598b848e032SBruce Evans * We do the ack first to ensure another interrupt if there is a 3599b848e032SBruce Evans * status update after the ack. We don't check for the status 3600b848e032SBruce Evans * changing later because it is more efficient to get another 3601b848e032SBruce Evans * interrupt than prevent it, not quite as above (not checking is 3602b848e032SBruce Evans * a smaller optimization than not toggling the interrupt enable, 3603b848e032SBruce Evans * since checking doesn't involve PCI accesses and toggling require 3604b848e032SBruce Evans * the status check). So toggling would probably be a pessimization 3605b848e032SBruce Evans * even with MSI. It would only be needed for using a task queue. 3606b848e032SBruce Evans */ 360738cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3608b848e032SBruce Evans 3609b848e032SBruce Evans /* 3610f30cbfc6SScott Long * Do the mandatory PCI flush as well as get the link status. 3611f30cbfc6SScott Long */ 3612f30cbfc6SScott Long statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; 3613f41ac2beSBill Paul 3614f30cbfc6SScott Long /* Make sure the descriptor ring indexes are coherent. */ 3615f30cbfc6SScott Long bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3616b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3617b9c05fa5SPyun YongHyeon BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3618b9c05fa5SPyun YongHyeon rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 3619b9c05fa5SPyun YongHyeon tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; 3620b9c05fa5SPyun YongHyeon sc->bge_ldata.bge_status_block->bge_status = 0; 3621b9c05fa5SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3622b9c05fa5SPyun YongHyeon sc->bge_cdata.bge_status_map, 3623b9c05fa5SPyun YongHyeon BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3624f30cbfc6SScott Long 36251f313773SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 36264c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3627f30cbfc6SScott Long statusword || sc->bge_link_evt) 3628dab5cd05SOleg Bulyzhin bge_link_upd(sc); 362995d67482SBill Paul 363013f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 36313f74909aSGleb Smirnoff /* Check RX return ring producer/consumer. */ 3632dfe0df9aSPyun YongHyeon bge_rxeof(sc, rx_prod, 1); 363325e13e68SXin LI } 363495d67482SBill Paul 363525e13e68SXin LI if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 36363f74909aSGleb Smirnoff /* Check TX ring producer/consumer. */ 3637b9c05fa5SPyun YongHyeon bge_txeof(sc, tx_cons); 363895d67482SBill Paul } 363995d67482SBill Paul 364013f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING && 364113f4c340SRobert Watson !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 36420f9bd73bSSam Leffler bge_start_locked(ifp); 36430f9bd73bSSam Leffler 36440f9bd73bSSam Leffler BGE_UNLOCK(sc); 364595d67482SBill Paul } 364695d67482SBill Paul 364795d67482SBill Paul static void 36488cb1383cSDoug Ambrisko bge_asf_driver_up(struct bge_softc *sc) 36498cb1383cSDoug Ambrisko { 36508cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) { 36518cb1383cSDoug Ambrisko /* Send ASF heartbeat aprox. 
every 2s */ 36528cb1383cSDoug Ambrisko if (sc->bge_asf_count) 36538cb1383cSDoug Ambrisko sc->bge_asf_count --; 36548cb1383cSDoug Ambrisko else { 36558cb1383cSDoug Ambrisko sc->bge_asf_count = 5; 36568cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 36578cb1383cSDoug Ambrisko BGE_FW_DRV_ALIVE); 36588cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 36598cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 36608cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 366139153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 36628cb1383cSDoug Ambrisko } 36638cb1383cSDoug Ambrisko } 36648cb1383cSDoug Ambrisko } 36658cb1383cSDoug Ambrisko 36668cb1383cSDoug Ambrisko static void 3667b74e67fbSGleb Smirnoff bge_tick(void *xsc) 36680f9bd73bSSam Leffler { 3669b74e67fbSGleb Smirnoff struct bge_softc *sc = xsc; 367095d67482SBill Paul struct mii_data *mii = NULL; 367195d67482SBill Paul 36720f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 367395d67482SBill Paul 36745dda8085SOleg Bulyzhin /* Synchronize with possible callout reset/stop. */ 36755dda8085SOleg Bulyzhin if (callout_pending(&sc->bge_stat_ch) || 36765dda8085SOleg Bulyzhin !callout_active(&sc->bge_stat_ch)) 36775dda8085SOleg Bulyzhin return; 36785dda8085SOleg Bulyzhin 36797ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 36800434d1b8SBill Paul bge_stats_update_regs(sc); 36810434d1b8SBill Paul else 368295d67482SBill Paul bge_stats_update(sc); 368395d67482SBill Paul 3684652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 368595d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 368682b67c01SOleg Bulyzhin /* 368782b67c01SOleg Bulyzhin * Do not touch PHY if we have link up. This could break 368882b67c01SOleg Bulyzhin * IPMI/ASF mode or produce extra input errors 368982b67c01SOleg Bulyzhin * (extra errors was reported for bcm5701 & bcm5704). 369082b67c01SOleg Bulyzhin */ 369182b67c01SOleg Bulyzhin if (!sc->bge_link) 369295d67482SBill Paul mii_tick(mii); 36937b97099dSOleg Bulyzhin } else { 36947b97099dSOleg Bulyzhin /* 36957b97099dSOleg Bulyzhin * Since in TBI mode auto-polling can't be used we should poll 36967b97099dSOleg Bulyzhin * link status manually. Here we register pending link event 36977b97099dSOleg Bulyzhin * and trigger interrupt. 36987b97099dSOleg Bulyzhin */ 36997b97099dSOleg Bulyzhin #ifdef DEVICE_POLLING 37003f74909aSGleb Smirnoff /* In polling mode we poll link state in bge_poll(). */ 37017b97099dSOleg Bulyzhin if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 37027b97099dSOleg Bulyzhin #endif 37037b97099dSOleg Bulyzhin { 37047b97099dSOleg Bulyzhin sc->bge_link_evt++; 37054f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 37064f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 37077b97099dSOleg Bulyzhin BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 37084f0794ffSBjoern A. Zeeb else 37094f0794ffSBjoern A. 
Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 37107b97099dSOleg Bulyzhin } 3711dab5cd05SOleg Bulyzhin } 371295d67482SBill Paul 37138cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 3714b74e67fbSGleb Smirnoff bge_watchdog(sc); 37158cb1383cSDoug Ambrisko 3716dab5cd05SOleg Bulyzhin callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 371795d67482SBill Paul } 371895d67482SBill Paul 371995d67482SBill Paul static void 37203f74909aSGleb Smirnoff bge_stats_update_regs(struct bge_softc *sc) 37210434d1b8SBill Paul { 37223f74909aSGleb Smirnoff struct ifnet *ifp; 37230434d1b8SBill Paul 3724fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 37250434d1b8SBill Paul 37266b037352SJung-uk Kim ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 37277e6e2507SJung-uk Kim offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 37287e6e2507SJung-uk Kim 3729e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 37306b037352SJung-uk Kim ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3731e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 37320434d1b8SBill Paul } 37330434d1b8SBill Paul 37340434d1b8SBill Paul static void 37353f74909aSGleb Smirnoff bge_stats_update(struct bge_softc *sc) 373695d67482SBill Paul { 373795d67482SBill Paul struct ifnet *ifp; 3738e907febfSPyun YongHyeon bus_size_t stats; 37397e6e2507SJung-uk Kim uint32_t cnt; /* current register value */ 374095d67482SBill Paul 3741fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 374295d67482SBill Paul 3743e907febfSPyun YongHyeon stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3744e907febfSPyun YongHyeon 3745e907febfSPyun YongHyeon #define READ_STAT(sc, stats, stat) \ 3746e907febfSPyun YongHyeon CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 374795d67482SBill Paul 37488634dfffSJung-uk Kim cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); 37496b037352SJung-uk Kim ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions); 37506fb34dd2SOleg Bulyzhin sc->bge_tx_collisions = cnt; 37516fb34dd2SOleg Bulyzhin 37526fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 37536b037352SJung-uk Kim ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards); 37546fb34dd2SOleg Bulyzhin sc->bge_rx_discards = cnt; 37556fb34dd2SOleg Bulyzhin 37566fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 37576b037352SJung-uk Kim ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards); 37586fb34dd2SOleg Bulyzhin sc->bge_tx_discards = cnt; 375995d67482SBill Paul 3760e907febfSPyun YongHyeon #undef READ_STAT 376195d67482SBill Paul } 376295d67482SBill Paul 376395d67482SBill Paul /* 3764d375e524SGleb Smirnoff * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3765d375e524SGleb Smirnoff * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3766d375e524SGleb Smirnoff * but when such padded frames employ the bge IP/TCP checksum offload, 3767d375e524SGleb Smirnoff * the hardware checksum assist gives incorrect results (possibly 3768d375e524SGleb Smirnoff * from incorporating its own padding into the UDP/TCP checksum; who knows). 3769d375e524SGleb Smirnoff * If we pad such runts with zeros, the onboard checksum comes out correct. 
3770d375e524SGleb Smirnoff */ 3771d375e524SGleb Smirnoff static __inline int 3772d375e524SGleb Smirnoff bge_cksum_pad(struct mbuf *m) 3773d375e524SGleb Smirnoff { 3774d375e524SGleb Smirnoff int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 3775d375e524SGleb Smirnoff struct mbuf *last; 3776d375e524SGleb Smirnoff 3777d375e524SGleb Smirnoff /* If there's only the packet-header and we can pad there, use it. */ 3778d375e524SGleb Smirnoff if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 3779d375e524SGleb Smirnoff M_TRAILINGSPACE(m) >= padlen) { 3780d375e524SGleb Smirnoff last = m; 3781d375e524SGleb Smirnoff } else { 3782d375e524SGleb Smirnoff /* 3783d375e524SGleb Smirnoff * Walk packet chain to find last mbuf. We will either 3784d375e524SGleb Smirnoff * pad there, or append a new mbuf and pad it. 3785d375e524SGleb Smirnoff */ 3786d375e524SGleb Smirnoff for (last = m; last->m_next != NULL; last = last->m_next); 3787d375e524SGleb Smirnoff if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 3788d375e524SGleb Smirnoff /* Allocate new empty mbuf, pad it. Compact later. */ 3789d375e524SGleb Smirnoff struct mbuf *n; 3790d375e524SGleb Smirnoff 3791d375e524SGleb Smirnoff MGET(n, M_DONTWAIT, MT_DATA); 3792d375e524SGleb Smirnoff if (n == NULL) 3793d375e524SGleb Smirnoff return (ENOBUFS); 3794d375e524SGleb Smirnoff n->m_len = 0; 3795d375e524SGleb Smirnoff last->m_next = n; 3796d375e524SGleb Smirnoff last = n; 3797d375e524SGleb Smirnoff } 3798d375e524SGleb Smirnoff } 3799d375e524SGleb Smirnoff 3800d375e524SGleb Smirnoff /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 3801d375e524SGleb Smirnoff memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3802d375e524SGleb Smirnoff last->m_len += padlen; 3803d375e524SGleb Smirnoff m->m_pkthdr.len += padlen; 3804d375e524SGleb Smirnoff 3805d375e524SGleb Smirnoff return (0); 3806d375e524SGleb Smirnoff } 3807d375e524SGleb Smirnoff 3808ca3f1187SPyun YongHyeon static struct mbuf * 3809ca3f1187SPyun YongHyeon bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss) 3810ca3f1187SPyun YongHyeon { 3811ca3f1187SPyun YongHyeon struct ether_header *eh; 3812ca3f1187SPyun YongHyeon struct ip *ip; 3813ca3f1187SPyun YongHyeon struct tcphdr *tcp; 3814ca3f1187SPyun YongHyeon struct mbuf *n; 3815ca3f1187SPyun YongHyeon uint16_t hlen; 3816ca3f1187SPyun YongHyeon uint32_t ip_off, poff; 3817ca3f1187SPyun YongHyeon 3818ca3f1187SPyun YongHyeon if (M_WRITABLE(m) == 0) { 3819ca3f1187SPyun YongHyeon /* Get a writable copy. */ 3820ca3f1187SPyun YongHyeon n = m_dup(m, M_DONTWAIT); 3821ca3f1187SPyun YongHyeon m_freem(m); 3822ca3f1187SPyun YongHyeon if (n == NULL) 3823ca3f1187SPyun YongHyeon return (NULL); 3824ca3f1187SPyun YongHyeon m = n; 3825ca3f1187SPyun YongHyeon } 3826ca3f1187SPyun YongHyeon ip_off = sizeof(struct ether_header); 3827ca3f1187SPyun YongHyeon m = m_pullup(m, ip_off); 3828ca3f1187SPyun YongHyeon if (m == NULL) 3829ca3f1187SPyun YongHyeon return (NULL); 3830ca3f1187SPyun YongHyeon eh = mtod(m, struct ether_header *); 3831ca3f1187SPyun YongHyeon /* Check the existence of VLAN tag. 
*/ 3832ca3f1187SPyun YongHyeon if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 3833ca3f1187SPyun YongHyeon ip_off = sizeof(struct ether_vlan_header); 3834ca3f1187SPyun YongHyeon m = m_pullup(m, ip_off); 3835ca3f1187SPyun YongHyeon if (m == NULL) 3836ca3f1187SPyun YongHyeon return (NULL); 3837ca3f1187SPyun YongHyeon } 3838ca3f1187SPyun YongHyeon m = m_pullup(m, ip_off + sizeof(struct ip)); 3839ca3f1187SPyun YongHyeon if (m == NULL) 3840ca3f1187SPyun YongHyeon return (NULL); 3841ca3f1187SPyun YongHyeon ip = (struct ip *)(mtod(m, char *) + ip_off); 3842ca3f1187SPyun YongHyeon poff = ip_off + (ip->ip_hl << 2); 3843ca3f1187SPyun YongHyeon m = m_pullup(m, poff + sizeof(struct tcphdr)); 3844ca3f1187SPyun YongHyeon if (m == NULL) 3845ca3f1187SPyun YongHyeon return (NULL); 3846ca3f1187SPyun YongHyeon tcp = (struct tcphdr *)(mtod(m, char *) + poff); 3847ca3f1187SPyun YongHyeon m = m_pullup(m, poff + sizeof(struct tcphdr) + tcp->th_off); 3848ca3f1187SPyun YongHyeon if (m == NULL) 3849ca3f1187SPyun YongHyeon return (NULL); 3850ca3f1187SPyun YongHyeon /* 3851ca3f1187SPyun YongHyeon * It seems controller doesn't modify IP length and TCP pseudo 3852ca3f1187SPyun YongHyeon * checksum. These checksum computed by upper stack should be 0. 3853ca3f1187SPyun YongHyeon */ 3854ca3f1187SPyun YongHyeon *mss = m->m_pkthdr.tso_segsz; 3855ca3f1187SPyun YongHyeon ip->ip_sum = 0; 3856ca3f1187SPyun YongHyeon ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2)); 3857ca3f1187SPyun YongHyeon /* Clear pseudo checksum computed by TCP stack. */ 3858ca3f1187SPyun YongHyeon tcp->th_sum = 0; 3859ca3f1187SPyun YongHyeon /* 3860ca3f1187SPyun YongHyeon * Broadcom controllers uses different descriptor format for 3861ca3f1187SPyun YongHyeon * TSO depending on ASIC revision. Due to TSO-capable firmware 3862ca3f1187SPyun YongHyeon * license issue and lower performance of firmware based TSO 3863ca3f1187SPyun YongHyeon * we only support hardware based TSO which is applicable for 3864ca3f1187SPyun YongHyeon * BCM5755 or newer controllers. Hardware based TSO uses 11 3865ca3f1187SPyun YongHyeon * bits to store MSS and upper 5 bits are used to store IP/TCP 3866ca3f1187SPyun YongHyeon * header length(including IP/TCP options). The header length 3867ca3f1187SPyun YongHyeon * is expressed as 32 bits unit. 3868ca3f1187SPyun YongHyeon */ 3869ca3f1187SPyun YongHyeon hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2; 3870ca3f1187SPyun YongHyeon *mss |= (hlen << 11); 3871ca3f1187SPyun YongHyeon return (m); 3872ca3f1187SPyun YongHyeon } 3873ca3f1187SPyun YongHyeon 3874d375e524SGleb Smirnoff /* 387595d67482SBill Paul * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 387695d67482SBill Paul * pointers to descriptors. 
387795d67482SBill Paul */ 387895d67482SBill Paul static int 3879676ad2c9SGleb Smirnoff bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) 388095d67482SBill Paul { 38817e27542aSGleb Smirnoff bus_dma_segment_t segs[BGE_NSEG_NEW]; 3882f41ac2beSBill Paul bus_dmamap_t map; 3883676ad2c9SGleb Smirnoff struct bge_tx_bd *d; 3884676ad2c9SGleb Smirnoff struct mbuf *m = *m_head; 38857e27542aSGleb Smirnoff uint32_t idx = *txidx; 3886ca3f1187SPyun YongHyeon uint16_t csum_flags, mss, vlan_tag; 38877e27542aSGleb Smirnoff int nsegs, i, error; 388895d67482SBill Paul 38896909dc43SGleb Smirnoff csum_flags = 0; 3890ca3f1187SPyun YongHyeon mss = 0; 3891ca3f1187SPyun YongHyeon vlan_tag = 0; 3892ca3f1187SPyun YongHyeon if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 3893ca3f1187SPyun YongHyeon *m_head = m = bge_setup_tso(sc, m, &mss); 3894ca3f1187SPyun YongHyeon if (*m_head == NULL) 3895ca3f1187SPyun YongHyeon return (ENOBUFS); 3896ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA | 3897ca3f1187SPyun YongHyeon BGE_TXBDFLAG_CPU_POST_DMA; 3898ca3f1187SPyun YongHyeon } else if ((m->m_pkthdr.csum_flags & BGE_CSUM_FEATURES) != 0) { 38996909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & CSUM_IP) 39006909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_CSUM; 39016909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { 39026909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 39036909dc43SGleb Smirnoff if (m->m_pkthdr.len < ETHER_MIN_NOPAD && 39046909dc43SGleb Smirnoff (error = bge_cksum_pad(m)) != 0) { 39056909dc43SGleb Smirnoff m_freem(m); 39066909dc43SGleb Smirnoff *m_head = NULL; 39076909dc43SGleb Smirnoff return (error); 39086909dc43SGleb Smirnoff } 39096909dc43SGleb Smirnoff } 39106909dc43SGleb Smirnoff if (m->m_flags & M_LASTFRAG) 39116909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 39126909dc43SGleb Smirnoff else if (m->m_flags & M_FRAG) 39136909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG; 39146909dc43SGleb Smirnoff } 39156909dc43SGleb Smirnoff 39167e27542aSGleb Smirnoff map = sc->bge_cdata.bge_tx_dmamap[idx]; 39170ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, 3918676ad2c9SGleb Smirnoff &nsegs, BUS_DMA_NOWAIT); 39197e27542aSGleb Smirnoff if (error == EFBIG) { 39204eee14cbSMarius Strobl m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW); 3921676ad2c9SGleb Smirnoff if (m == NULL) { 3922676ad2c9SGleb Smirnoff m_freem(*m_head); 3923676ad2c9SGleb Smirnoff *m_head = NULL; 39247e27542aSGleb Smirnoff return (ENOBUFS); 39257e27542aSGleb Smirnoff } 3926676ad2c9SGleb Smirnoff *m_head = m; 39270ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, 39280ac56796SPyun YongHyeon m, segs, &nsegs, BUS_DMA_NOWAIT); 3929676ad2c9SGleb Smirnoff if (error) { 3930676ad2c9SGleb Smirnoff m_freem(m); 3931676ad2c9SGleb Smirnoff *m_head = NULL; 39327e27542aSGleb Smirnoff return (error); 39337e27542aSGleb Smirnoff } 3934676ad2c9SGleb Smirnoff } else if (error != 0) 3935676ad2c9SGleb Smirnoff return (error); 39367e27542aSGleb Smirnoff 3937167fdb62SPyun YongHyeon /* Check if we have enough free send BDs. 
*/ 3938167fdb62SPyun YongHyeon if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { 39390ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); 394095d67482SBill Paul return (ENOBUFS); 39417e27542aSGleb Smirnoff } 39427e27542aSGleb Smirnoff 39430ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3944e65bed95SPyun YongHyeon 3945ca3f1187SPyun YongHyeon #if __FreeBSD_version > 700022 3946ca3f1187SPyun YongHyeon if (m->m_flags & M_VLANTAG) { 3947ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3948ca3f1187SPyun YongHyeon vlan_tag = m->m_pkthdr.ether_vtag; 3949ca3f1187SPyun YongHyeon } 3950ca3f1187SPyun YongHyeon #else 3951ca3f1187SPyun YongHyeon { 3952ca3f1187SPyun YongHyeon struct m_tag *mtag; 3953ca3f1187SPyun YongHyeon 3954ca3f1187SPyun YongHyeon if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) { 3955ca3f1187SPyun YongHyeon csum_flags |= BGE_TXBDFLAG_VLAN_TAG; 3956ca3f1187SPyun YongHyeon vlan_tag = VLAN_TAG_VALUE(mtag); 3957ca3f1187SPyun YongHyeon } 3958ca3f1187SPyun YongHyeon } 3959ca3f1187SPyun YongHyeon #endif 39607e27542aSGleb Smirnoff for (i = 0; ; i++) { 39617e27542aSGleb Smirnoff d = &sc->bge_ldata.bge_tx_ring[idx]; 39627e27542aSGleb Smirnoff d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 39637e27542aSGleb Smirnoff d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 39647e27542aSGleb Smirnoff d->bge_len = segs[i].ds_len; 39657e27542aSGleb Smirnoff d->bge_flags = csum_flags; 3966ca3f1187SPyun YongHyeon d->bge_vlan_tag = vlan_tag; 3967ca3f1187SPyun YongHyeon d->bge_mss = mss; 39687e27542aSGleb Smirnoff if (i == nsegs - 1) 39697e27542aSGleb Smirnoff break; 39707e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 39717e27542aSGleb Smirnoff } 39727e27542aSGleb Smirnoff 39737e27542aSGleb Smirnoff /* Mark the last segment as end of packet... */ 39747e27542aSGleb Smirnoff d->bge_flags |= BGE_TXBDFLAG_END; 3975676ad2c9SGleb Smirnoff 3976f41ac2beSBill Paul /* 3977f41ac2beSBill Paul * Insure that the map for this transmission 3978f41ac2beSBill Paul * is placed at the array index of the last descriptor 3979f41ac2beSBill Paul * in this chain. 3980f41ac2beSBill Paul */ 39817e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 39827e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[idx] = map; 3983676ad2c9SGleb Smirnoff sc->bge_cdata.bge_tx_chain[idx] = m; 39847e27542aSGleb Smirnoff sc->bge_txcnt += nsegs; 398595d67482SBill Paul 39867e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 39877e27542aSGleb Smirnoff *txidx = idx; 398895d67482SBill Paul 398995d67482SBill Paul return (0); 399095d67482SBill Paul } 399195d67482SBill Paul 399295d67482SBill Paul /* 399395d67482SBill Paul * Main transmit routine. To avoid having to do mbuf copies, we put pointers 399495d67482SBill Paul * to the mbuf data regions directly in the transmit descriptors. 
399595d67482SBill Paul */ 399695d67482SBill Paul static void 39973f74909aSGleb Smirnoff bge_start_locked(struct ifnet *ifp) 399895d67482SBill Paul { 399995d67482SBill Paul struct bge_softc *sc; 4000167fdb62SPyun YongHyeon struct mbuf *m_head; 400114bbd30fSGleb Smirnoff uint32_t prodidx; 4002167fdb62SPyun YongHyeon int count; 400395d67482SBill Paul 400495d67482SBill Paul sc = ifp->if_softc; 4005167fdb62SPyun YongHyeon BGE_LOCK_ASSERT(sc); 400695d67482SBill Paul 4007167fdb62SPyun YongHyeon if (!sc->bge_link || 4008167fdb62SPyun YongHyeon (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 4009167fdb62SPyun YongHyeon IFF_DRV_RUNNING) 401095d67482SBill Paul return; 401195d67482SBill Paul 401214bbd30fSGleb Smirnoff prodidx = sc->bge_tx_prodidx; 401395d67482SBill Paul 4014167fdb62SPyun YongHyeon for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 4015167fdb62SPyun YongHyeon if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { 4016167fdb62SPyun YongHyeon ifp->if_drv_flags |= IFF_DRV_OACTIVE; 4017167fdb62SPyun YongHyeon break; 4018167fdb62SPyun YongHyeon } 40194d665c4dSDag-Erling Smørgrav IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 402095d67482SBill Paul if (m_head == NULL) 402195d67482SBill Paul break; 402295d67482SBill Paul 402395d67482SBill Paul /* 402495d67482SBill Paul * XXX 4025b874fdd4SYaroslav Tykhiy * The code inside the if() block is never reached since we 4026b874fdd4SYaroslav Tykhiy * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 4027b874fdd4SYaroslav Tykhiy * requests to checksum TCP/UDP in a fragmented packet. 4028b874fdd4SYaroslav Tykhiy * 4029b874fdd4SYaroslav Tykhiy * XXX 403095d67482SBill Paul * safety overkill. If this is a fragmented packet chain 403195d67482SBill Paul * with delayed TCP/UDP checksums, then only encapsulate 403295d67482SBill Paul * it if we have enough descriptors to handle the entire 403395d67482SBill Paul * chain at once. 403495d67482SBill Paul * (paranoia -- may not actually be needed) 403595d67482SBill Paul */ 403695d67482SBill Paul if (m_head->m_flags & M_FIRSTFRAG && 403795d67482SBill Paul m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 403895d67482SBill Paul if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 403995d67482SBill Paul m_head->m_pkthdr.csum_data + 16) { 40404d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 404113f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 404295d67482SBill Paul break; 404395d67482SBill Paul } 404495d67482SBill Paul } 404595d67482SBill Paul 404695d67482SBill Paul /* 404795d67482SBill Paul * Pack the data into the transmit ring. If we 404895d67482SBill Paul * don't have room, set the OACTIVE flag and wait 404995d67482SBill Paul * for the NIC to drain the ring. 405095d67482SBill Paul */ 4051676ad2c9SGleb Smirnoff if (bge_encap(sc, &m_head, &prodidx)) { 4052676ad2c9SGleb Smirnoff if (m_head == NULL) 4053676ad2c9SGleb Smirnoff break; 40544d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 405513f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 405695d67482SBill Paul break; 405795d67482SBill Paul } 4058303a718cSDag-Erling Smørgrav ++count; 405995d67482SBill Paul 406095d67482SBill Paul /* 406195d67482SBill Paul * If there's a BPF listener, bounce a copy of this frame 406295d67482SBill Paul * to him. 
406395d67482SBill Paul */ 40644e35d186SJung-uk Kim #ifdef ETHER_BPF_MTAP 406545ee6ab3SJung-uk Kim ETHER_BPF_MTAP(ifp, m_head); 40664e35d186SJung-uk Kim #else 40674e35d186SJung-uk Kim BPF_MTAP(ifp, m_head); 40684e35d186SJung-uk Kim #endif 406995d67482SBill Paul } 407095d67482SBill Paul 4071167fdb62SPyun YongHyeon if (count > 0) { 4072aa94f333SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 40735c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 40743f74909aSGleb Smirnoff /* Transmit. */ 407538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 40763927098fSPaul Saab /* 5700 b2 errata */ 4077e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 407838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 407995d67482SBill Paul 408014bbd30fSGleb Smirnoff sc->bge_tx_prodidx = prodidx; 408114bbd30fSGleb Smirnoff 408295d67482SBill Paul /* 408395d67482SBill Paul * Set a timeout in case the chip goes out to lunch. 408495d67482SBill Paul */ 4085b74e67fbSGleb Smirnoff sc->bge_timer = 5; 408695d67482SBill Paul } 4087167fdb62SPyun YongHyeon } 408895d67482SBill Paul 40890f9bd73bSSam Leffler /* 40900f9bd73bSSam Leffler * Main transmit routine. To avoid having to do mbuf copies, we put pointers 40910f9bd73bSSam Leffler * to the mbuf data regions directly in the transmit descriptors. 40920f9bd73bSSam Leffler */ 409395d67482SBill Paul static void 40943f74909aSGleb Smirnoff bge_start(struct ifnet *ifp) 409595d67482SBill Paul { 40960f9bd73bSSam Leffler struct bge_softc *sc; 40970f9bd73bSSam Leffler 40980f9bd73bSSam Leffler sc = ifp->if_softc; 40990f9bd73bSSam Leffler BGE_LOCK(sc); 41000f9bd73bSSam Leffler bge_start_locked(ifp); 41010f9bd73bSSam Leffler BGE_UNLOCK(sc); 41020f9bd73bSSam Leffler } 41030f9bd73bSSam Leffler 41040f9bd73bSSam Leffler static void 41053f74909aSGleb Smirnoff bge_init_locked(struct bge_softc *sc) 41060f9bd73bSSam Leffler { 410795d67482SBill Paul struct ifnet *ifp; 41083f74909aSGleb Smirnoff uint16_t *m; 410995d67482SBill Paul 41100f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 411195d67482SBill Paul 4112fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 411395d67482SBill Paul 411413f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) 411595d67482SBill Paul return; 411695d67482SBill Paul 411795d67482SBill Paul /* Cancel pending I/O and flush buffers. */ 411895d67482SBill Paul bge_stop(sc); 41198cb1383cSDoug Ambrisko 41208cb1383cSDoug Ambrisko bge_stop_fw(sc); 41218cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_START); 412295d67482SBill Paul bge_reset(sc); 41238cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_START); 41248cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_START); 41258cb1383cSDoug Ambrisko 412695d67482SBill Paul bge_chipinit(sc); 412795d67482SBill Paul 412895d67482SBill Paul /* 412995d67482SBill Paul * Init the various state machines, ring 413095d67482SBill Paul * control blocks and firmware. 413195d67482SBill Paul */ 413295d67482SBill Paul if (bge_blockinit(sc)) { 4133fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "initialization failure\n"); 413495d67482SBill Paul return; 413595d67482SBill Paul } 413695d67482SBill Paul 4137fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 413895d67482SBill Paul 413995d67482SBill Paul /* Specify MTU. */ 414095d67482SBill Paul CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 4141cb2eacc7SYaroslav Tykhiy ETHER_HDR_LEN + ETHER_CRC_LEN + 4142cb2eacc7SYaroslav Tykhiy (ifp->if_capenable & IFCAP_VLAN_MTU ? 
ETHER_VLAN_ENCAP_LEN : 0)); 414395d67482SBill Paul 414495d67482SBill Paul /* Load our MAC address. */ 41453f74909aSGleb Smirnoff m = (uint16_t *)IF_LLADDR(sc->bge_ifp); 414695d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 414795d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 414895d67482SBill Paul 41493e9b1bcaSJung-uk Kim /* Program promiscuous mode. */ 41503e9b1bcaSJung-uk Kim bge_setpromisc(sc); 415195d67482SBill Paul 415295d67482SBill Paul /* Program multicast filter. */ 415395d67482SBill Paul bge_setmulti(sc); 415495d67482SBill Paul 4155cb2eacc7SYaroslav Tykhiy /* Program VLAN tag stripping. */ 4156cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4157cb2eacc7SYaroslav Tykhiy 415895d67482SBill Paul /* Init RX ring. */ 41593ee5d7daSPyun YongHyeon if (bge_init_rx_ring_std(sc) != 0) { 41603ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); 41613ee5d7daSPyun YongHyeon bge_stop(sc); 41623ee5d7daSPyun YongHyeon return; 41633ee5d7daSPyun YongHyeon } 416495d67482SBill Paul 41650434d1b8SBill Paul /* 41660434d1b8SBill Paul * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 41670434d1b8SBill Paul * memory to insure that the chip has in fact read the first 41680434d1b8SBill Paul * entry of the ring. 41690434d1b8SBill Paul */ 41700434d1b8SBill Paul if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 41713f74909aSGleb Smirnoff uint32_t v, i; 41720434d1b8SBill Paul for (i = 0; i < 10; i++) { 41730434d1b8SBill Paul DELAY(20); 41740434d1b8SBill Paul v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 41750434d1b8SBill Paul if (v == (MCLBYTES - ETHER_ALIGN)) 41760434d1b8SBill Paul break; 41770434d1b8SBill Paul } 41780434d1b8SBill Paul if (i == 10) 4179fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, 4180fe806fdaSPyun YongHyeon "5705 A0 chip failed to load RX ring\n"); 41810434d1b8SBill Paul } 41820434d1b8SBill Paul 418395d67482SBill Paul /* Init jumbo RX ring. */ 4184c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 4185c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) { 41863ee5d7daSPyun YongHyeon if (bge_init_rx_ring_jumbo(sc) != 0) { 41873ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); 41883ee5d7daSPyun YongHyeon bge_stop(sc); 41893ee5d7daSPyun YongHyeon return; 41903ee5d7daSPyun YongHyeon } 41913ee5d7daSPyun YongHyeon } 419295d67482SBill Paul 41933f74909aSGleb Smirnoff /* Init our RX return ring index. */ 419495d67482SBill Paul sc->bge_rx_saved_considx = 0; 419595d67482SBill Paul 41967e6e2507SJung-uk Kim /* Init our RX/TX stat counters. */ 41977e6e2507SJung-uk Kim sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; 41987e6e2507SJung-uk Kim 419995d67482SBill Paul /* Init TX ring. */ 420095d67482SBill Paul bge_init_tx_ring(sc); 420195d67482SBill Paul 42023f74909aSGleb Smirnoff /* Turn on transmitter. */ 420395d67482SBill Paul BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 420495d67482SBill Paul 42053f74909aSGleb Smirnoff /* Turn on receiver. */ 420695d67482SBill Paul BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 420795d67482SBill Paul 420895d67482SBill Paul /* Tell firmware we're alive. */ 420995d67482SBill Paul BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 421095d67482SBill Paul 421175719184SGleb Smirnoff #ifdef DEVICE_POLLING 421275719184SGleb Smirnoff /* Disable interrupts if we are polling. 
*/ 421375719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 421475719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 421575719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 421638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 421775719184SGleb Smirnoff } else 421875719184SGleb Smirnoff #endif 421975719184SGleb Smirnoff 422095d67482SBill Paul /* Enable host interrupts. */ 422175719184SGleb Smirnoff { 422295d67482SBill Paul BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 422395d67482SBill Paul BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 422438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 422575719184SGleb Smirnoff } 422695d67482SBill Paul 422767d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(ifp); 422895d67482SBill Paul 422913f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 423013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 423195d67482SBill Paul 42320f9bd73bSSam Leffler callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 42330f9bd73bSSam Leffler } 42340f9bd73bSSam Leffler 42350f9bd73bSSam Leffler static void 42363f74909aSGleb Smirnoff bge_init(void *xsc) 42370f9bd73bSSam Leffler { 42380f9bd73bSSam Leffler struct bge_softc *sc = xsc; 42390f9bd73bSSam Leffler 42400f9bd73bSSam Leffler BGE_LOCK(sc); 42410f9bd73bSSam Leffler bge_init_locked(sc); 42420f9bd73bSSam Leffler BGE_UNLOCK(sc); 424395d67482SBill Paul } 424495d67482SBill Paul 424595d67482SBill Paul /* 424695d67482SBill Paul * Set media options. 424795d67482SBill Paul */ 424895d67482SBill Paul static int 42493f74909aSGleb Smirnoff bge_ifmedia_upd(struct ifnet *ifp) 425095d67482SBill Paul { 425167d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 425267d5e043SOleg Bulyzhin int res; 425367d5e043SOleg Bulyzhin 425467d5e043SOleg Bulyzhin BGE_LOCK(sc); 425567d5e043SOleg Bulyzhin res = bge_ifmedia_upd_locked(ifp); 425667d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 425767d5e043SOleg Bulyzhin 425867d5e043SOleg Bulyzhin return (res); 425967d5e043SOleg Bulyzhin } 426067d5e043SOleg Bulyzhin 426167d5e043SOleg Bulyzhin static int 426267d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(struct ifnet *ifp) 426367d5e043SOleg Bulyzhin { 426467d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 426595d67482SBill Paul struct mii_data *mii; 42664f09c4c7SMarius Strobl struct mii_softc *miisc; 426795d67482SBill Paul struct ifmedia *ifm; 426895d67482SBill Paul 426967d5e043SOleg Bulyzhin BGE_LOCK_ASSERT(sc); 427067d5e043SOleg Bulyzhin 427195d67482SBill Paul ifm = &sc->bge_ifmedia; 427295d67482SBill Paul 427395d67482SBill Paul /* If this is a 1000baseX NIC, enable the TBI port. */ 4274652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 427595d67482SBill Paul if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 427695d67482SBill Paul return (EINVAL); 427795d67482SBill Paul switch(IFM_SUBTYPE(ifm->ifm_media)) { 427895d67482SBill Paul case IFM_AUTO: 4279ff50922bSDoug White /* 4280ff50922bSDoug White * The BCM5704 ASIC appears to have a special 4281ff50922bSDoug White * mechanism for programming the autoneg 4282ff50922bSDoug White * advertisement registers in TBI mode. 
4283ff50922bSDoug White */ 42840f89fde2SJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 4285ff50922bSDoug White uint32_t sgdig; 42860f89fde2SJung-uk Kim sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 42870f89fde2SJung-uk Kim if (sgdig & BGE_SGDIGSTS_DONE) { 4288ff50922bSDoug White CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4289ff50922bSDoug White sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4290ff50922bSDoug White sgdig |= BGE_SGDIGCFG_AUTO | 4291ff50922bSDoug White BGE_SGDIGCFG_PAUSE_CAP | 4292ff50922bSDoug White BGE_SGDIGCFG_ASYM_PAUSE; 4293ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4294ff50922bSDoug White sgdig | BGE_SGDIGCFG_SEND); 4295ff50922bSDoug White DELAY(5); 4296ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4297ff50922bSDoug White } 42980f89fde2SJung-uk Kim } 429995d67482SBill Paul break; 430095d67482SBill Paul case IFM_1000_SX: 430195d67482SBill Paul if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 430295d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, 430395d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 430495d67482SBill Paul } else { 430595d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, 430695d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 430795d67482SBill Paul } 430895d67482SBill Paul break; 430995d67482SBill Paul default: 431095d67482SBill Paul return (EINVAL); 431195d67482SBill Paul } 431295d67482SBill Paul return (0); 431395d67482SBill Paul } 431495d67482SBill Paul 43151493e883SOleg Bulyzhin sc->bge_link_evt++; 431695d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 43174f09c4c7SMarius Strobl if (mii->mii_instance) 43184f09c4c7SMarius Strobl LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 431995d67482SBill Paul mii_phy_reset(miisc); 432095d67482SBill Paul mii_mediachg(mii); 432195d67482SBill Paul 4322902827f6SBjoern A. Zeeb /* 4323902827f6SBjoern A. Zeeb * Force an interrupt so that we will call bge_link_upd 4324902827f6SBjoern A. Zeeb * if needed and clear any pending link state attention. 4325902827f6SBjoern A. Zeeb * Without this we are not getting any further interrupts 4326902827f6SBjoern A. Zeeb * for link state changes and thus will not UP the link and 4327902827f6SBjoern A. Zeeb * not be able to send in bge_start_locked. The only 4328902827f6SBjoern A. Zeeb * way to get things working was to receive a packet and 4329902827f6SBjoern A. Zeeb * get an RX intr. 4330902827f6SBjoern A. Zeeb * bge_tick should help for fiber cards and we might not 4331902827f6SBjoern A. Zeeb * need to do this here if BGE_FLAG_TBI is set but as 4332902827f6SBjoern A. Zeeb * we poll for fiber anyway it should not harm. 4333902827f6SBjoern A. Zeeb */ 43344f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 43354f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 4336902827f6SBjoern A. Zeeb BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 43374f0794ffSBjoern A. Zeeb else 433863ccfe30SBjoern A. Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4339902827f6SBjoern A. Zeeb 434095d67482SBill Paul return (0); 434195d67482SBill Paul } 434295d67482SBill Paul 434395d67482SBill Paul /* 434495d67482SBill Paul * Report current media status. 
434595d67482SBill Paul */ 434695d67482SBill Paul static void 43473f74909aSGleb Smirnoff bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 434895d67482SBill Paul { 434967d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 435095d67482SBill Paul struct mii_data *mii; 435195d67482SBill Paul 435267d5e043SOleg Bulyzhin BGE_LOCK(sc); 435395d67482SBill Paul 4354652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 435595d67482SBill Paul ifmr->ifm_status = IFM_AVALID; 435695d67482SBill Paul ifmr->ifm_active = IFM_ETHER; 435795d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_STS) & 435895d67482SBill Paul BGE_MACSTAT_TBI_PCS_SYNCHED) 435995d67482SBill Paul ifmr->ifm_status |= IFM_ACTIVE; 43604c0da0ffSGleb Smirnoff else { 43614c0da0ffSGleb Smirnoff ifmr->ifm_active |= IFM_NONE; 436267d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 43634c0da0ffSGleb Smirnoff return; 43644c0da0ffSGleb Smirnoff } 436595d67482SBill Paul ifmr->ifm_active |= IFM_1000_SX; 436695d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 436795d67482SBill Paul ifmr->ifm_active |= IFM_HDX; 436895d67482SBill Paul else 436995d67482SBill Paul ifmr->ifm_active |= IFM_FDX; 437067d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 437195d67482SBill Paul return; 437295d67482SBill Paul } 437395d67482SBill Paul 437495d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 437595d67482SBill Paul mii_pollstat(mii); 437695d67482SBill Paul ifmr->ifm_active = mii->mii_media_active; 437795d67482SBill Paul ifmr->ifm_status = mii->mii_media_status; 437867d5e043SOleg Bulyzhin 437967d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 438095d67482SBill Paul } 438195d67482SBill Paul 438295d67482SBill Paul static int 43833f74909aSGleb Smirnoff bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 438495d67482SBill Paul { 438595d67482SBill Paul struct bge_softc *sc = ifp->if_softc; 438695d67482SBill Paul struct ifreq *ifr = (struct ifreq *) data; 438795d67482SBill Paul struct mii_data *mii; 4388f9004b6dSJung-uk Kim int flags, mask, error = 0; 438995d67482SBill Paul 439095d67482SBill Paul switch (command) { 439195d67482SBill Paul case SIOCSIFMTU: 43924c0da0ffSGleb Smirnoff if (ifr->ifr_mtu < ETHERMIN || 43934c0da0ffSGleb Smirnoff ((BGE_IS_JUMBO_CAPABLE(sc)) && 43944c0da0ffSGleb Smirnoff ifr->ifr_mtu > BGE_JUMBO_MTU) || 43954c0da0ffSGleb Smirnoff ((!BGE_IS_JUMBO_CAPABLE(sc)) && 43964c0da0ffSGleb Smirnoff ifr->ifr_mtu > ETHERMTU)) 439795d67482SBill Paul error = EINVAL; 43984c0da0ffSGleb Smirnoff else if (ifp->if_mtu != ifr->ifr_mtu) { 439995d67482SBill Paul ifp->if_mtu = ifr->ifr_mtu; 440013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 440195d67482SBill Paul bge_init(sc); 440295d67482SBill Paul } 440395d67482SBill Paul break; 440495d67482SBill Paul case SIOCSIFFLAGS: 44050f9bd73bSSam Leffler BGE_LOCK(sc); 440695d67482SBill Paul if (ifp->if_flags & IFF_UP) { 440795d67482SBill Paul /* 440895d67482SBill Paul * If only the state of the PROMISC flag changed, 440995d67482SBill Paul * then just use the 'set promisc mode' command 441095d67482SBill Paul * instead of reinitializing the entire NIC. Doing 441195d67482SBill Paul * a full re-init means reloading the firmware and 441295d67482SBill Paul * waiting for it to start up, which may take a 4413d183af7fSRuslan Ermilov * second or two. Similarly for ALLMULTI. 
441495d67482SBill Paul */ 4415f9004b6dSJung-uk Kim if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4416f9004b6dSJung-uk Kim flags = ifp->if_flags ^ sc->bge_if_flags; 44173e9b1bcaSJung-uk Kim if (flags & IFF_PROMISC) 44183e9b1bcaSJung-uk Kim bge_setpromisc(sc); 4419f9004b6dSJung-uk Kim if (flags & IFF_ALLMULTI) 4420d183af7fSRuslan Ermilov bge_setmulti(sc); 442195d67482SBill Paul } else 44220f9bd73bSSam Leffler bge_init_locked(sc); 442395d67482SBill Paul } else { 442413f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 442595d67482SBill Paul bge_stop(sc); 442695d67482SBill Paul } 442795d67482SBill Paul } 442895d67482SBill Paul sc->bge_if_flags = ifp->if_flags; 44290f9bd73bSSam Leffler BGE_UNLOCK(sc); 443095d67482SBill Paul error = 0; 443195d67482SBill Paul break; 443295d67482SBill Paul case SIOCADDMULTI: 443395d67482SBill Paul case SIOCDELMULTI: 443413f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 44350f9bd73bSSam Leffler BGE_LOCK(sc); 443695d67482SBill Paul bge_setmulti(sc); 44370f9bd73bSSam Leffler BGE_UNLOCK(sc); 443895d67482SBill Paul error = 0; 443995d67482SBill Paul } 444095d67482SBill Paul break; 444195d67482SBill Paul case SIOCSIFMEDIA: 444295d67482SBill Paul case SIOCGIFMEDIA: 4443652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 444495d67482SBill Paul error = ifmedia_ioctl(ifp, ifr, 444595d67482SBill Paul &sc->bge_ifmedia, command); 444695d67482SBill Paul } else { 444795d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 444895d67482SBill Paul error = ifmedia_ioctl(ifp, ifr, 444995d67482SBill Paul &mii->mii_media, command); 445095d67482SBill Paul } 445195d67482SBill Paul break; 445295d67482SBill Paul case SIOCSIFCAP: 445395d67482SBill Paul mask = ifr->ifr_reqcap ^ ifp->if_capenable; 445475719184SGleb Smirnoff #ifdef DEVICE_POLLING 445575719184SGleb Smirnoff if (mask & IFCAP_POLLING) { 445675719184SGleb Smirnoff if (ifr->ifr_reqcap & IFCAP_POLLING) { 445775719184SGleb Smirnoff error = ether_poll_register(bge_poll, ifp); 445875719184SGleb Smirnoff if (error) 445975719184SGleb Smirnoff return (error); 446075719184SGleb Smirnoff BGE_LOCK(sc); 446175719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 446275719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 446338cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 446475719184SGleb Smirnoff ifp->if_capenable |= IFCAP_POLLING; 446575719184SGleb Smirnoff BGE_UNLOCK(sc); 446675719184SGleb Smirnoff } else { 446775719184SGleb Smirnoff error = ether_poll_deregister(ifp); 446875719184SGleb Smirnoff /* Enable interrupt even in error case */ 446975719184SGleb Smirnoff BGE_LOCK(sc); 447075719184SGleb Smirnoff BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, 447175719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 447238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 447375719184SGleb Smirnoff ifp->if_capenable &= ~IFCAP_POLLING; 447475719184SGleb Smirnoff BGE_UNLOCK(sc); 447575719184SGleb Smirnoff } 447675719184SGleb Smirnoff } 447775719184SGleb Smirnoff #endif 4478d375e524SGleb Smirnoff if (mask & IFCAP_HWCSUM) { 4479d375e524SGleb Smirnoff ifp->if_capenable ^= IFCAP_HWCSUM; 4480d375e524SGleb Smirnoff if (IFCAP_HWCSUM & ifp->if_capenable && 4481d375e524SGleb Smirnoff IFCAP_HWCSUM & ifp->if_capabilities) 4482ca3f1187SPyun YongHyeon ifp->if_hwassist |= BGE_CSUM_FEATURES; 448395d67482SBill Paul else 4484ca3f1187SPyun YongHyeon ifp->if_hwassist &= ~BGE_CSUM_FEATURES; 44854e35d186SJung-uk Kim #ifdef VLAN_CAPABILITIES 4486479b23b7SGleb Smirnoff VLAN_CAPABILITIES(ifp); 44874e35d186SJung-uk Kim #endif 
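			/*
			 * VLAN_CAPABILITIES() pushes the updated checksum
			 * capabilities down to any vlan(4) interfaces stacked
			 * on this one, so they track the parent's offload
			 * settings.
			 */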
448895d67482SBill Paul } 4489cb2eacc7SYaroslav Tykhiy 4490ca3f1187SPyun YongHyeon if ((mask & IFCAP_TSO4) != 0 && 4491ca3f1187SPyun YongHyeon (ifp->if_capabilities & IFCAP_TSO4) != 0) { 4492ca3f1187SPyun YongHyeon ifp->if_capenable ^= IFCAP_TSO4; 4493ca3f1187SPyun YongHyeon if ((ifp->if_capenable & IFCAP_TSO4) != 0) 4494ca3f1187SPyun YongHyeon ifp->if_hwassist |= CSUM_TSO; 4495ca3f1187SPyun YongHyeon else 4496ca3f1187SPyun YongHyeon ifp->if_hwassist &= ~CSUM_TSO; 4497ca3f1187SPyun YongHyeon } 4498ca3f1187SPyun YongHyeon 4499cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_MTU) { 4500cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_MTU; 4501cb2eacc7SYaroslav Tykhiy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4502cb2eacc7SYaroslav Tykhiy bge_init(sc); 4503cb2eacc7SYaroslav Tykhiy } 4504cb2eacc7SYaroslav Tykhiy 4505cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_HWTAGGING) { 4506cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 4507cb2eacc7SYaroslav Tykhiy BGE_LOCK(sc); 4508cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4509cb2eacc7SYaroslav Tykhiy BGE_UNLOCK(sc); 4510cb2eacc7SYaroslav Tykhiy #ifdef VLAN_CAPABILITIES 4511cb2eacc7SYaroslav Tykhiy VLAN_CAPABILITIES(ifp); 4512cb2eacc7SYaroslav Tykhiy #endif 4513cb2eacc7SYaroslav Tykhiy } 4514cb2eacc7SYaroslav Tykhiy 451595d67482SBill Paul break; 451695d67482SBill Paul default: 4517673d9191SSam Leffler error = ether_ioctl(ifp, command, data); 451895d67482SBill Paul break; 451995d67482SBill Paul } 452095d67482SBill Paul 452195d67482SBill Paul return (error); 452295d67482SBill Paul } 452395d67482SBill Paul 452495d67482SBill Paul static void 4525b74e67fbSGleb Smirnoff bge_watchdog(struct bge_softc *sc) 452695d67482SBill Paul { 4527b74e67fbSGleb Smirnoff struct ifnet *ifp; 452895d67482SBill Paul 4529b74e67fbSGleb Smirnoff BGE_LOCK_ASSERT(sc); 4530b74e67fbSGleb Smirnoff 4531b74e67fbSGleb Smirnoff if (sc->bge_timer == 0 || --sc->bge_timer) 4532b74e67fbSGleb Smirnoff return; 4533b74e67fbSGleb Smirnoff 4534b74e67fbSGleb Smirnoff ifp = sc->bge_ifp; 453595d67482SBill Paul 4536fe806fdaSPyun YongHyeon if_printf(ifp, "watchdog timeout -- resetting\n"); 453795d67482SBill Paul 453813f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4539426742bfSGleb Smirnoff bge_init_locked(sc); 454095d67482SBill Paul 454195d67482SBill Paul ifp->if_oerrors++; 454295d67482SBill Paul } 454395d67482SBill Paul 454495d67482SBill Paul /* 454595d67482SBill Paul * Stop the adapter and free any mbufs allocated to the 454695d67482SBill Paul * RX and TX lists. 454795d67482SBill Paul */ 454895d67482SBill Paul static void 45493f74909aSGleb Smirnoff bge_stop(struct bge_softc *sc) 455095d67482SBill Paul { 455195d67482SBill Paul struct ifnet *ifp; 455295d67482SBill Paul struct ifmedia_entry *ifm; 455395d67482SBill Paul struct mii_data *mii = NULL; 455495d67482SBill Paul int mtmp, itmp; 455595d67482SBill Paul 45560f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 45570f9bd73bSSam Leffler 4558fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 455995d67482SBill Paul 4560652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) 456195d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 456295d67482SBill Paul 45630f9bd73bSSam Leffler callout_stop(&sc->bge_stat_ch); 456495d67482SBill Paul 456544b63691SBjoern A. Zeeb /* Disable host interrupts. */ 456644b63691SBjoern A. Zeeb BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 456744b63691SBjoern A. Zeeb bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 456844b63691SBjoern A. Zeeb 456944b63691SBjoern A. Zeeb /* 457044b63691SBjoern A. 
Zeeb * Tell firmware we're shutting down. 457144b63691SBjoern A. Zeeb */ 457244b63691SBjoern A. Zeeb bge_stop_fw(sc); 457344b63691SBjoern A. Zeeb bge_sig_pre_reset(sc, BGE_RESET_STOP); 457444b63691SBjoern A. Zeeb 457595d67482SBill Paul /* 45763f74909aSGleb Smirnoff * Disable all of the receiver blocks. 457795d67482SBill Paul */ 457895d67482SBill Paul BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 457995d67482SBill Paul BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 458095d67482SBill Paul BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 45817ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 458295d67482SBill Paul BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 458395d67482SBill Paul BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 458495d67482SBill Paul BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 458595d67482SBill Paul BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 458695d67482SBill Paul 458795d67482SBill Paul /* 45883f74909aSGleb Smirnoff * Disable all of the transmit blocks. 458995d67482SBill Paul */ 459095d67482SBill Paul BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 459195d67482SBill Paul BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 459295d67482SBill Paul BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 459395d67482SBill Paul BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 459495d67482SBill Paul BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 45957ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 459695d67482SBill Paul BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 459795d67482SBill Paul BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 459895d67482SBill Paul 459995d67482SBill Paul /* 460095d67482SBill Paul * Shut down all of the memory managers and related 460195d67482SBill Paul * state machines. 460295d67482SBill Paul */ 460395d67482SBill Paul BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 460495d67482SBill Paul BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 46057ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 460695d67482SBill Paul BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 46070c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 460895d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 46097ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 461095d67482SBill Paul BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 461195d67482SBill Paul BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 46120434d1b8SBill Paul } 461395d67482SBill Paul 46148cb1383cSDoug Ambrisko bge_reset(sc); 46158cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 46168cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 46178cb1383cSDoug Ambrisko 46188cb1383cSDoug Ambrisko /* 46198cb1383cSDoug Ambrisko * Keep the ASF firmware running if up. 46208cb1383cSDoug Ambrisko */ 46218cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 46228cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 46238cb1383cSDoug Ambrisko else 462495d67482SBill Paul BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 462595d67482SBill Paul 462695d67482SBill Paul /* Free the RX lists. */ 462795d67482SBill Paul bge_free_rx_ring_std(sc); 462895d67482SBill Paul 462995d67482SBill Paul /* Free jumbo RX list. */ 46304c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) 463195d67482SBill Paul bge_free_rx_ring_jumbo(sc); 463295d67482SBill Paul 463395d67482SBill Paul /* Free TX buffers. 
*/ 463495d67482SBill Paul bge_free_tx_ring(sc); 463595d67482SBill Paul 463695d67482SBill Paul /* 463795d67482SBill Paul * Isolate/power down the PHY, but leave the media selection 463895d67482SBill Paul * unchanged so that things will be put back to normal when 463995d67482SBill Paul * we bring the interface back up. 464095d67482SBill Paul */ 4641652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 464295d67482SBill Paul itmp = ifp->if_flags; 464395d67482SBill Paul ifp->if_flags |= IFF_UP; 4644dcc34049SPawel Jakub Dawidek /* 4645dcc34049SPawel Jakub Dawidek * If we are called from bge_detach(), mii is already NULL. 4646dcc34049SPawel Jakub Dawidek */ 4647dcc34049SPawel Jakub Dawidek if (mii != NULL) { 464895d67482SBill Paul ifm = mii->mii_media.ifm_cur; 464995d67482SBill Paul mtmp = ifm->ifm_media; 465095d67482SBill Paul ifm->ifm_media = IFM_ETHER | IFM_NONE; 465195d67482SBill Paul mii_mediachg(mii); 465295d67482SBill Paul ifm->ifm_media = mtmp; 4653dcc34049SPawel Jakub Dawidek } 465495d67482SBill Paul ifp->if_flags = itmp; 465595d67482SBill Paul } 465695d67482SBill Paul 465795d67482SBill Paul sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 465895d67482SBill Paul 46595dda8085SOleg Bulyzhin /* Clear MAC's link state (PHY may still have link UP). */ 46601493e883SOleg Bulyzhin if (bootverbose && sc->bge_link) 46611493e883SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 46621493e883SOleg Bulyzhin sc->bge_link = 0; 466395d67482SBill Paul 46641493e883SOleg Bulyzhin ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 466595d67482SBill Paul } 466695d67482SBill Paul 466795d67482SBill Paul /* 466895d67482SBill Paul * Stop all chip I/O so that the kernel's probe routines don't 466995d67482SBill Paul * get confused by errant DMAs when rebooting. 
467095d67482SBill Paul */
4671b6c974e8SWarner Losh static int
46723f74909aSGleb Smirnoff bge_shutdown(device_t dev)
467395d67482SBill Paul {
467495d67482SBill Paul struct bge_softc *sc;
467595d67482SBill Paul
467695d67482SBill Paul sc = device_get_softc(dev);
46770f9bd73bSSam Leffler BGE_LOCK(sc);
467895d67482SBill Paul bge_stop(sc);
467995d67482SBill Paul bge_reset(sc);
46800f9bd73bSSam Leffler BGE_UNLOCK(sc);
4681b6c974e8SWarner Losh
4682b6c974e8SWarner Losh return (0);
468395d67482SBill Paul }
468414afefa3SPawel Jakub Dawidek
468514afefa3SPawel Jakub Dawidek static int
468614afefa3SPawel Jakub Dawidek bge_suspend(device_t dev)
468714afefa3SPawel Jakub Dawidek {
468814afefa3SPawel Jakub Dawidek struct bge_softc *sc;
468914afefa3SPawel Jakub Dawidek
469014afefa3SPawel Jakub Dawidek sc = device_get_softc(dev);
469114afefa3SPawel Jakub Dawidek BGE_LOCK(sc);
469214afefa3SPawel Jakub Dawidek bge_stop(sc);
469314afefa3SPawel Jakub Dawidek BGE_UNLOCK(sc);
469414afefa3SPawel Jakub Dawidek
469514afefa3SPawel Jakub Dawidek return (0);
469614afefa3SPawel Jakub Dawidek }
469714afefa3SPawel Jakub Dawidek
469814afefa3SPawel Jakub Dawidek static int
469914afefa3SPawel Jakub Dawidek bge_resume(device_t dev)
470014afefa3SPawel Jakub Dawidek {
470114afefa3SPawel Jakub Dawidek struct bge_softc *sc;
470214afefa3SPawel Jakub Dawidek struct ifnet *ifp;
470314afefa3SPawel Jakub Dawidek
470414afefa3SPawel Jakub Dawidek sc = device_get_softc(dev);
470514afefa3SPawel Jakub Dawidek BGE_LOCK(sc);
470614afefa3SPawel Jakub Dawidek ifp = sc->bge_ifp;
470714afefa3SPawel Jakub Dawidek if (ifp->if_flags & IFF_UP) {
470814afefa3SPawel Jakub Dawidek bge_init_locked(sc);
470914afefa3SPawel Jakub Dawidek if (ifp->if_drv_flags & IFF_DRV_RUNNING)
471014afefa3SPawel Jakub Dawidek bge_start_locked(ifp);
471114afefa3SPawel Jakub Dawidek }
471214afefa3SPawel Jakub Dawidek BGE_UNLOCK(sc);
471314afefa3SPawel Jakub Dawidek
471414afefa3SPawel Jakub Dawidek return (0);
471514afefa3SPawel Jakub Dawidek }
4716dab5cd05SOleg Bulyzhin
4717dab5cd05SOleg Bulyzhin static void
47183f74909aSGleb Smirnoff bge_link_upd(struct bge_softc *sc)
4719dab5cd05SOleg Bulyzhin {
47201f313773SOleg Bulyzhin struct mii_data *mii;
47211f313773SOleg Bulyzhin uint32_t link, status;
4722dab5cd05SOleg Bulyzhin
4723dab5cd05SOleg Bulyzhin BGE_LOCK_ASSERT(sc);
47241f313773SOleg Bulyzhin
47253f74909aSGleb Smirnoff /* Clear 'pending link event' flag. */
47267b97099dSOleg Bulyzhin sc->bge_link_evt = 0;
47277b97099dSOleg Bulyzhin
4728dab5cd05SOleg Bulyzhin /*
4729dab5cd05SOleg Bulyzhin * Process link state changes.
4730dab5cd05SOleg Bulyzhin * Grrr. The link status word in the status block does
4731dab5cd05SOleg Bulyzhin * not work correctly on the BCM5700 rev AX and BX chips,
4732dab5cd05SOleg Bulyzhin * according to all available information. Hence, we have
4733dab5cd05SOleg Bulyzhin * to enable MII interrupts in order to properly obtain
4734dab5cd05SOleg Bulyzhin * async link changes. Unfortunately, this also means that
4735dab5cd05SOleg Bulyzhin * we have to read the MAC status register to detect link
4736dab5cd05SOleg Bulyzhin * changes, thereby adding an additional register access to
4737dab5cd05SOleg Bulyzhin * the interrupt handler.
47381f313773SOleg Bulyzhin *
47391f313773SOleg Bulyzhin * XXX: perhaps the link state detection procedure used for
47404c0da0ffSGleb Smirnoff * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
4741dab5cd05SOleg Bulyzhin */
4742dab5cd05SOleg Bulyzhin
47431f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
47444c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
4745dab5cd05SOleg Bulyzhin status = CSR_READ_4(sc, BGE_MAC_STS);
4746dab5cd05SOleg Bulyzhin if (status & BGE_MACSTAT_MI_INTERRUPT) {
47471f313773SOleg Bulyzhin mii = device_get_softc(sc->bge_miibus);
47485dda8085SOleg Bulyzhin mii_pollstat(mii);
47491f313773SOleg Bulyzhin if (!sc->bge_link &&
47501f313773SOleg Bulyzhin mii->mii_media_status & IFM_ACTIVE &&
47511f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
47521f313773SOleg Bulyzhin sc->bge_link++;
47531f313773SOleg Bulyzhin if (bootverbose)
47541f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n");
47551f313773SOleg Bulyzhin } else if (sc->bge_link &&
47561f313773SOleg Bulyzhin (!(mii->mii_media_status & IFM_ACTIVE) ||
47571f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
47581f313773SOleg Bulyzhin sc->bge_link = 0;
47591f313773SOleg Bulyzhin if (bootverbose)
47601f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n");
47611f313773SOleg Bulyzhin }
47621f313773SOleg Bulyzhin
47633f74909aSGleb Smirnoff /* Clear the interrupt. */
4764dab5cd05SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
4765dab5cd05SOleg Bulyzhin BGE_EVTENB_MI_INTERRUPT);
4766dab5cd05SOleg Bulyzhin bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
4767dab5cd05SOleg Bulyzhin bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
4768dab5cd05SOleg Bulyzhin BRGPHY_INTRS);
4769dab5cd05SOleg Bulyzhin }
4770dab5cd05SOleg Bulyzhin return;
4771dab5cd05SOleg Bulyzhin }
4772dab5cd05SOleg Bulyzhin
4773652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) {
47741f313773SOleg Bulyzhin status = CSR_READ_4(sc, BGE_MAC_STS);
47757b97099dSOleg Bulyzhin if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
47767b97099dSOleg Bulyzhin if (!sc->bge_link) {
47771f313773SOleg Bulyzhin sc->bge_link++;
47781f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
47791f313773SOleg Bulyzhin BGE_CLRBIT(sc, BGE_MAC_MODE,
47801f313773SOleg Bulyzhin BGE_MACMODE_TBI_SEND_CFGS);
47810c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
47821f313773SOleg Bulyzhin if (bootverbose)
47831f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n");
47843f74909aSGleb Smirnoff if_link_state_change(sc->bge_ifp,
47853f74909aSGleb Smirnoff LINK_STATE_UP);
47867b97099dSOleg Bulyzhin }
47871f313773SOleg Bulyzhin } else if (sc->bge_link) {
4788dab5cd05SOleg Bulyzhin sc->bge_link = 0;
47891f313773SOleg Bulyzhin if (bootverbose)
47901f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n");
47917b97099dSOleg Bulyzhin if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
47921f313773SOleg Bulyzhin }
47931493e883SOleg Bulyzhin } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
47941f313773SOleg Bulyzhin /*
47950c8aa4eaSJung-uk Kim * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
47960c8aa4eaSJung-uk Kim * in the status word always set. Work around this bug by reading
47970c8aa4eaSJung-uk Kim * PHY link status directly.
47981f313773SOleg Bulyzhin */
47991f313773SOleg Bulyzhin link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ?
1 : 0; 48001f313773SOleg Bulyzhin 48011f313773SOleg Bulyzhin if (link != sc->bge_link || 48021f313773SOleg Bulyzhin sc->bge_asicrev == BGE_ASICREV_BCM5700) { 48031f313773SOleg Bulyzhin mii = device_get_softc(sc->bge_miibus); 48045dda8085SOleg Bulyzhin mii_pollstat(mii); 48051f313773SOleg Bulyzhin if (!sc->bge_link && 48061f313773SOleg Bulyzhin mii->mii_media_status & IFM_ACTIVE && 48071f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 48081f313773SOleg Bulyzhin sc->bge_link++; 48091f313773SOleg Bulyzhin if (bootverbose) 48101f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 48111f313773SOleg Bulyzhin } else if (sc->bge_link && 48121f313773SOleg Bulyzhin (!(mii->mii_media_status & IFM_ACTIVE) || 48131f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 48141f313773SOleg Bulyzhin sc->bge_link = 0; 48151f313773SOleg Bulyzhin if (bootverbose) 48161f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 48171f313773SOleg Bulyzhin } 48181f313773SOleg Bulyzhin } 48190c8aa4eaSJung-uk Kim } else { 48200c8aa4eaSJung-uk Kim /* 48210c8aa4eaSJung-uk Kim * Discard link events for MII/GMII controllers 48220c8aa4eaSJung-uk Kim * if MI auto-polling is disabled. 48230c8aa4eaSJung-uk Kim */ 4824dab5cd05SOleg Bulyzhin } 4825dab5cd05SOleg Bulyzhin 48263f74909aSGleb Smirnoff /* Clear the attention. */ 4827dab5cd05SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 4828dab5cd05SOleg Bulyzhin BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 4829dab5cd05SOleg Bulyzhin BGE_MACSTAT_LINK_CHANGED); 4830dab5cd05SOleg Bulyzhin } 48316f8718a3SScott Long 4832763757b2SScott Long #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ 483306e83c7eSScott Long SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \ 4834763757b2SScott Long sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \ 4835763757b2SScott Long desc) 4836763757b2SScott Long 48376f8718a3SScott Long static void 48386f8718a3SScott Long bge_add_sysctls(struct bge_softc *sc) 48396f8718a3SScott Long { 48406f8718a3SScott Long struct sysctl_ctx_list *ctx; 4841763757b2SScott Long struct sysctl_oid_list *children, *schildren; 4842763757b2SScott Long struct sysctl_oid *tree; 48436f8718a3SScott Long 48446f8718a3SScott Long ctx = device_get_sysctl_ctx(sc->bge_dev); 48456f8718a3SScott Long children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); 48466f8718a3SScott Long 48476f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 48486f8718a3SScott Long SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", 48496f8718a3SScott Long CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I", 48506f8718a3SScott Long "Debug Information"); 48516f8718a3SScott Long 48526f8718a3SScott Long SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", 48536f8718a3SScott Long CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I", 48546f8718a3SScott Long "Register Read"); 48556f8718a3SScott Long 48566f8718a3SScott Long SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", 48576f8718a3SScott Long CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I", 48586f8718a3SScott Long "Memory Read"); 48596f8718a3SScott Long 48606f8718a3SScott Long #endif 4861763757b2SScott Long 4862d949071dSJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 4863d949071dSJung-uk Kim return; 4864d949071dSJung-uk Kim 4865763757b2SScott Long tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4866763757b2SScott Long NULL, "BGE Statistics"); 4867763757b2SScott Long schildren = children = 
SYSCTL_CHILDREN(tree); 4868763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", 4869763757b2SScott Long children, COSFramesDroppedDueToFilters, 4870763757b2SScott Long "FramesDroppedDueToFilters"); 4871763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", 4872763757b2SScott Long children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); 4873763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", 4874763757b2SScott Long children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); 4875763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", 4876763757b2SScott Long children, nicNoMoreRxBDs, "NoMoreRxBDs"); 487706e83c7eSScott Long BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", 487806e83c7eSScott Long children, ifInDiscards, "InputDiscards"); 487906e83c7eSScott Long BGE_SYSCTL_STAT(sc, ctx, "Input Errors", 488006e83c7eSScott Long children, ifInErrors, "InputErrors"); 4881763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", 4882763757b2SScott Long children, nicRecvThresholdHit, "RecvThresholdHit"); 4883763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", 4884763757b2SScott Long children, nicDmaReadQueueFull, "DmaReadQueueFull"); 4885763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", 4886763757b2SScott Long children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); 4887763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", 4888763757b2SScott Long children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); 4889763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", 4890763757b2SScott Long children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); 4891763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", 4892763757b2SScott Long children, nicRingStatusUpdate, "RingStatusUpdate"); 4893763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", 4894763757b2SScott Long children, nicInterrupts, "Interrupts"); 4895763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", 4896763757b2SScott Long children, nicAvoidedInterrupts, "AvoidedInterrupts"); 4897763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", 4898763757b2SScott Long children, nicSendThresholdHit, "SendThresholdHit"); 4899763757b2SScott Long 4900763757b2SScott Long tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, 4901763757b2SScott Long NULL, "BGE RX Statistics"); 4902763757b2SScott Long children = SYSCTL_CHILDREN(tree); 4903763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", 4904763757b2SScott Long children, rxstats.ifHCInOctets, "Octets"); 4905763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Fragments", 4906763757b2SScott Long children, rxstats.etherStatsFragments, "Fragments"); 4907763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", 4908763757b2SScott Long children, rxstats.ifHCInUcastPkts, "UcastPkts"); 4909763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", 4910763757b2SScott Long children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); 4911763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", 4912763757b2SScott Long children, rxstats.dot3StatsFCSErrors, "FCSErrors"); 4913763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", 4914763757b2SScott Long children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); 4915763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XON Pause 
Frames Received", 4916763757b2SScott Long children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); 4917763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", 4918763757b2SScott Long children, rxstats.xoffPauseFramesReceived, 4919763757b2SScott Long "xoffPauseFramesReceived"); 4920763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", 4921763757b2SScott Long children, rxstats.macControlFramesReceived, 4922763757b2SScott Long "ControlFramesReceived"); 4923763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", 4924763757b2SScott Long children, rxstats.xoffStateEntered, "xoffStateEntered"); 4925763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", 4926763757b2SScott Long children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); 4927763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Jabbers", 4928763757b2SScott Long children, rxstats.etherStatsJabbers, "Jabbers"); 4929763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", 4930763757b2SScott Long children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); 4931763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", 493206e83c7eSScott Long children, rxstats.inRangeLengthError, "inRangeLengthError"); 4933763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", 493406e83c7eSScott Long children, rxstats.outRangeLengthError, "outRangeLengthError"); 4935763757b2SScott Long 4936763757b2SScott Long tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, 4937763757b2SScott Long NULL, "BGE TX Statistics"); 4938763757b2SScott Long children = SYSCTL_CHILDREN(tree); 4939763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", 4940763757b2SScott Long children, txstats.ifHCOutOctets, "Octets"); 4941763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", 4942763757b2SScott Long children, txstats.etherStatsCollisions, "Collisions"); 4943763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XON Sent", 4944763757b2SScott Long children, txstats.outXonSent, "XonSent"); 4945763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", 4946763757b2SScott Long children, txstats.outXoffSent, "XoffSent"); 4947763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", 4948763757b2SScott Long children, txstats.flowControlDone, "flowControlDone"); 4949763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", 4950763757b2SScott Long children, txstats.dot3StatsInternalMacTransmitErrors, 4951763757b2SScott Long "InternalMacTransmitErrors"); 4952763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", 4953763757b2SScott Long children, txstats.dot3StatsSingleCollisionFrames, 4954763757b2SScott Long "SingleCollisionFrames"); 4955763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", 4956763757b2SScott Long children, txstats.dot3StatsMultipleCollisionFrames, 4957763757b2SScott Long "MultipleCollisionFrames"); 4958763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", 4959763757b2SScott Long children, txstats.dot3StatsDeferredTransmissions, 4960763757b2SScott Long "DeferredTransmissions"); 4961763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", 4962763757b2SScott Long children, txstats.dot3StatsExcessiveCollisions, 4963763757b2SScott Long "ExcessiveCollisions"); 4964763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", 496506e83c7eSScott Long children, txstats.dot3StatsLateCollisions, 496606e83c7eSScott Long "LateCollisions"); 4967763757b2SScott Long 
BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", 4968763757b2SScott Long children, txstats.ifHCOutUcastPkts, "UcastPkts"); 4969763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", 4970763757b2SScott Long children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); 4971763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", 4972763757b2SScott Long children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); 4973763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", 4974763757b2SScott Long children, txstats.dot3StatsCarrierSenseErrors, 4975763757b2SScott Long "CarrierSenseErrors"); 4976763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", 4977763757b2SScott Long children, txstats.ifOutDiscards, "Discards"); 4978763757b2SScott Long BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", 4979763757b2SScott Long children, txstats.ifOutErrors, "Errors"); 4980763757b2SScott Long } 4981763757b2SScott Long 4982763757b2SScott Long static int 4983763757b2SScott Long bge_sysctl_stats(SYSCTL_HANDLER_ARGS) 4984763757b2SScott Long { 4985763757b2SScott Long struct bge_softc *sc; 498606e83c7eSScott Long uint32_t result; 4987d949071dSJung-uk Kim int offset; 4988763757b2SScott Long 4989763757b2SScott Long sc = (struct bge_softc *)arg1; 4990763757b2SScott Long offset = arg2; 4991d949071dSJung-uk Kim result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + 4992d949071dSJung-uk Kim offsetof(bge_hostaddr, bge_addr_lo)); 4993041b706bSDavid Malone return (sysctl_handle_int(oidp, &result, 0, req)); 49946f8718a3SScott Long } 49956f8718a3SScott Long 49966f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 49976f8718a3SScott Long static int 49986f8718a3SScott Long bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 49996f8718a3SScott Long { 50006f8718a3SScott Long struct bge_softc *sc; 50016f8718a3SScott Long uint16_t *sbdata; 50026f8718a3SScott Long int error; 50036f8718a3SScott Long int result; 50046f8718a3SScott Long int i, j; 50056f8718a3SScott Long 50066f8718a3SScott Long result = -1; 50076f8718a3SScott Long error = sysctl_handle_int(oidp, &result, 0, req); 50086f8718a3SScott Long if (error || (req->newptr == NULL)) 50096f8718a3SScott Long return (error); 50106f8718a3SScott Long 50116f8718a3SScott Long if (result == 1) { 50126f8718a3SScott Long sc = (struct bge_softc *)arg1; 50136f8718a3SScott Long 50146f8718a3SScott Long sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; 50156f8718a3SScott Long printf("Status Block:\n"); 50166f8718a3SScott Long for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) { 50176f8718a3SScott Long printf("%06x:", i); 50186f8718a3SScott Long for (j = 0; j < 8; j++) { 50196f8718a3SScott Long printf(" %04x", sbdata[i]); 50206f8718a3SScott Long i += 4; 50216f8718a3SScott Long } 50226f8718a3SScott Long printf("\n"); 50236f8718a3SScott Long } 50246f8718a3SScott Long 50256f8718a3SScott Long printf("Registers:\n"); 50260c8aa4eaSJung-uk Kim for (i = 0x800; i < 0xA00; ) { 50276f8718a3SScott Long printf("%06x:", i); 50286f8718a3SScott Long for (j = 0; j < 8; j++) { 50296f8718a3SScott Long printf(" %08x", CSR_READ_4(sc, i)); 50306f8718a3SScott Long i += 4; 50316f8718a3SScott Long } 50326f8718a3SScott Long printf("\n"); 50336f8718a3SScott Long } 50346f8718a3SScott Long 50356f8718a3SScott Long printf("Hardware Flags:\n"); 5036a5779553SStanislav Sedov if (BGE_IS_5755_PLUS(sc)) 5037a5779553SStanislav Sedov printf(" - 5755 Plus\n"); 50385345bad0SScott Long if (BGE_IS_575X_PLUS(sc)) 50396f8718a3SScott Long printf(" - 575X Plus\n"); 50405345bad0SScott Long if 
(BGE_IS_5705_PLUS(sc)) 50416f8718a3SScott Long printf(" - 5705 Plus\n"); 50425345bad0SScott Long if (BGE_IS_5714_FAMILY(sc)) 50435345bad0SScott Long printf(" - 5714 Family\n"); 50445345bad0SScott Long if (BGE_IS_5700_FAMILY(sc)) 50455345bad0SScott Long printf(" - 5700 Family\n"); 50466f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_JUMBO) 50476f8718a3SScott Long printf(" - Supports Jumbo Frames\n"); 50486f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIX) 50496f8718a3SScott Long printf(" - PCI-X Bus\n"); 50506f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) 50516f8718a3SScott Long printf(" - PCI Express Bus\n"); 50525ee49a3aSJung-uk Kim if (sc->bge_flags & BGE_FLAG_NO_3LED) 50536f8718a3SScott Long printf(" - No 3 LEDs\n"); 50546f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) 50556f8718a3SScott Long printf(" - RX Alignment Bug\n"); 50566f8718a3SScott Long } 50576f8718a3SScott Long 50586f8718a3SScott Long return (error); 50596f8718a3SScott Long } 50606f8718a3SScott Long 50616f8718a3SScott Long static int 50626f8718a3SScott Long bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 50636f8718a3SScott Long { 50646f8718a3SScott Long struct bge_softc *sc; 50656f8718a3SScott Long int error; 50666f8718a3SScott Long uint16_t result; 50676f8718a3SScott Long uint32_t val; 50686f8718a3SScott Long 50696f8718a3SScott Long result = -1; 50706f8718a3SScott Long error = sysctl_handle_int(oidp, &result, 0, req); 50716f8718a3SScott Long if (error || (req->newptr == NULL)) 50726f8718a3SScott Long return (error); 50736f8718a3SScott Long 50746f8718a3SScott Long if (result < 0x8000) { 50756f8718a3SScott Long sc = (struct bge_softc *)arg1; 50766f8718a3SScott Long val = CSR_READ_4(sc, result); 50776f8718a3SScott Long printf("reg 0x%06X = 0x%08X\n", result, val); 50786f8718a3SScott Long } 50796f8718a3SScott Long 50806f8718a3SScott Long return (error); 50816f8718a3SScott Long } 50826f8718a3SScott Long 50836f8718a3SScott Long static int 50846f8718a3SScott Long bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) 50856f8718a3SScott Long { 50866f8718a3SScott Long struct bge_softc *sc; 50876f8718a3SScott Long int error; 50886f8718a3SScott Long uint16_t result; 50896f8718a3SScott Long uint32_t val; 50906f8718a3SScott Long 50916f8718a3SScott Long result = -1; 50926f8718a3SScott Long error = sysctl_handle_int(oidp, &result, 0, req); 50936f8718a3SScott Long if (error || (req->newptr == NULL)) 50946f8718a3SScott Long return (error); 50956f8718a3SScott Long 50966f8718a3SScott Long if (result < 0x8000) { 50976f8718a3SScott Long sc = (struct bge_softc *)arg1; 50986f8718a3SScott Long val = bge_readmem_ind(sc, result); 50996f8718a3SScott Long printf("mem 0x%06X = 0x%08X\n", result, val); 51006f8718a3SScott Long } 51016f8718a3SScott Long 51026f8718a3SScott Long return (error); 51036f8718a3SScott Long } 51046f8718a3SScott Long #endif 510538cc658fSJohn Baldwin 510638cc658fSJohn Baldwin static int 51075fea260fSMarius Strobl bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) 51085fea260fSMarius Strobl { 51095fea260fSMarius Strobl 51105fea260fSMarius Strobl if (sc->bge_flags & BGE_FLAG_EADDR) 51115fea260fSMarius Strobl return (1); 51125fea260fSMarius Strobl 51135fea260fSMarius Strobl #ifdef __sparc64__ 51145fea260fSMarius Strobl OF_getetheraddr(sc->bge_dev, ether_addr); 51155fea260fSMarius Strobl return (0); 51165fea260fSMarius Strobl #endif 51175fea260fSMarius Strobl return (1); 51185fea260fSMarius Strobl } 51195fea260fSMarius Strobl 51205fea260fSMarius Strobl static int 512138cc658fSJohn Baldwin bge_get_eaddr_mem(struct bge_softc 
*sc, uint8_t ether_addr[]) 512238cc658fSJohn Baldwin { 512338cc658fSJohn Baldwin uint32_t mac_addr; 512438cc658fSJohn Baldwin 512538cc658fSJohn Baldwin mac_addr = bge_readmem_ind(sc, 0x0c14); 512638cc658fSJohn Baldwin if ((mac_addr >> 16) == 0x484b) { 512738cc658fSJohn Baldwin ether_addr[0] = (uint8_t)(mac_addr >> 8); 512838cc658fSJohn Baldwin ether_addr[1] = (uint8_t)mac_addr; 512938cc658fSJohn Baldwin mac_addr = bge_readmem_ind(sc, 0x0c18); 513038cc658fSJohn Baldwin ether_addr[2] = (uint8_t)(mac_addr >> 24); 513138cc658fSJohn Baldwin ether_addr[3] = (uint8_t)(mac_addr >> 16); 513238cc658fSJohn Baldwin ether_addr[4] = (uint8_t)(mac_addr >> 8); 513338cc658fSJohn Baldwin ether_addr[5] = (uint8_t)mac_addr; 51345fea260fSMarius Strobl return (0); 513538cc658fSJohn Baldwin } 51365fea260fSMarius Strobl return (1); 513738cc658fSJohn Baldwin } 513838cc658fSJohn Baldwin 513938cc658fSJohn Baldwin static int 514038cc658fSJohn Baldwin bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) 514138cc658fSJohn Baldwin { 514238cc658fSJohn Baldwin int mac_offset = BGE_EE_MAC_OFFSET; 514338cc658fSJohn Baldwin 514438cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 514538cc658fSJohn Baldwin mac_offset = BGE_EE_MAC_OFFSET_5906; 514638cc658fSJohn Baldwin 51475fea260fSMarius Strobl return (bge_read_nvram(sc, ether_addr, mac_offset + 2, 51485fea260fSMarius Strobl ETHER_ADDR_LEN)); 514938cc658fSJohn Baldwin } 515038cc658fSJohn Baldwin 515138cc658fSJohn Baldwin static int 515238cc658fSJohn Baldwin bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) 515338cc658fSJohn Baldwin { 515438cc658fSJohn Baldwin 51555fea260fSMarius Strobl if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 51565fea260fSMarius Strobl return (1); 51575fea260fSMarius Strobl 51585fea260fSMarius Strobl return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, 51595fea260fSMarius Strobl ETHER_ADDR_LEN)); 516038cc658fSJohn Baldwin } 516138cc658fSJohn Baldwin 516238cc658fSJohn Baldwin static int 516338cc658fSJohn Baldwin bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) 516438cc658fSJohn Baldwin { 516538cc658fSJohn Baldwin static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { 516638cc658fSJohn Baldwin /* NOTE: Order is critical */ 51675fea260fSMarius Strobl bge_get_eaddr_fw, 516838cc658fSJohn Baldwin bge_get_eaddr_mem, 516938cc658fSJohn Baldwin bge_get_eaddr_nvram, 517038cc658fSJohn Baldwin bge_get_eaddr_eeprom, 517138cc658fSJohn Baldwin NULL 517238cc658fSJohn Baldwin }; 517338cc658fSJohn Baldwin const bge_eaddr_fcn_t *func; 517438cc658fSJohn Baldwin 517538cc658fSJohn Baldwin for (func = bge_eaddr_funcs; *func != NULL; ++func) { 517638cc658fSJohn Baldwin if ((*func)(sc, eaddr) == 0) 517738cc658fSJohn Baldwin break; 517838cc658fSJohn Baldwin } 517938cc658fSJohn Baldwin return (*func == NULL ? ENXIO : 0); 518038cc658fSJohn Baldwin } 5181
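/*
 * Illustrative sketch only, not part of bge(4): the address-lookup code above
 * combines two ideas worth spelling out.  bge_get_eaddr_mem() unpacks the
 * station address from two 32-bit words of NIC memory (the upper 16 bits of
 * the first word must hold the 0x484b signature, its lower 16 bits carry MAC
 * bytes 0-1, and the second word carries bytes 2-5), and bge_get_eaddr() then
 * walks a NULL-terminated table of such lookup routines in priority order,
 * stopping at the first one that returns 0.  The standalone fragment below
 * replays both idioms with invented names (unpack_eaddr, eaddr_src_t,
 * eaddr_from_mem, eaddr_from_nvram, lookup_eaddr) and made-up sample data; it
 * is wrapped in #if 0 so it is never built as part of the driver.
 */
#if 0	/* standalone example; never compiled into the driver */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int (*eaddr_src_t)(uint8_t eaddr[6]);

/* Unpack a MAC address stored as <0x484b | bytes 0-1> <bytes 2-5>. */
static int
unpack_eaddr(uint32_t w0, uint32_t w1, uint8_t eaddr[6])
{

	if ((w0 >> 16) != 0x484b)	/* signature absent: nothing stored */
		return (1);
	eaddr[0] = (uint8_t)(w0 >> 8);
	eaddr[1] = (uint8_t)w0;
	eaddr[2] = (uint8_t)(w1 >> 24);
	eaddr[3] = (uint8_t)(w1 >> 16);
	eaddr[4] = (uint8_t)(w1 >> 8);
	eaddr[5] = (uint8_t)w1;
	return (0);
}

static int
eaddr_from_mem(uint8_t eaddr[6])
{

	/* Sample words with no signature, so this source fails. */
	return (unpack_eaddr(0x00000000, 0x00000000, eaddr));
}

static int
eaddr_from_nvram(uint8_t eaddr[6])
{
	/* Pretend NVRAM stores a locally administered address. */
	static const uint8_t stored[6] = { 0x02, 0x10, 0x18, 0x00, 0x00, 0x01 };

	memcpy(eaddr, stored, sizeof(stored));
	return (0);
}

static int
lookup_eaddr(uint8_t eaddr[6])
{
	/* NOTE: table order is the lookup priority; it must end with NULL. */
	static const eaddr_src_t srcs[] = {
		eaddr_from_mem,
		eaddr_from_nvram,
		NULL
	};
	const eaddr_src_t *src;

	for (src = srcs; *src != NULL; ++src)
		if ((*src)(eaddr) == 0)
			return (0);
	return (ENXIO);		/* no source could supply an address */
}

int
main(void)
{
	uint8_t ea[6];

	/* The memory source fails, so the NVRAM fallback supplies the address. */
	if (lookup_eaddr(ea) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]);
	return (0);
}
#endif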