1098ca2bdSWarner Losh /*-
295d67482SBill Paul * Copyright (c) 2001 Wind River Systems
395d67482SBill Paul * Copyright (c) 1997, 1998, 1999, 2001
495d67482SBill Paul * Bill Paul <wpaul@windriver.com>. All rights reserved.
595d67482SBill Paul *
695d67482SBill Paul * Redistribution and use in source and binary forms, with or without
795d67482SBill Paul * modification, are permitted provided that the following conditions
895d67482SBill Paul * are met:
995d67482SBill Paul * 1. Redistributions of source code must retain the above copyright
1095d67482SBill Paul * notice, this list of conditions and the following disclaimer.
1195d67482SBill Paul * 2. Redistributions in binary form must reproduce the above copyright
1295d67482SBill Paul * notice, this list of conditions and the following disclaimer in the
1395d67482SBill Paul * documentation and/or other materials provided with the distribution.
1495d67482SBill Paul * 3. All advertising materials mentioning features or use of this software
1595d67482SBill Paul * must display the following acknowledgement:
1695d67482SBill Paul * This product includes software developed by Bill Paul.
1795d67482SBill Paul * 4. Neither the name of the author nor the names of any co-contributors
1895d67482SBill Paul * may be used to endorse or promote products derived from this software
1995d67482SBill Paul * without specific prior written permission.
2095d67482SBill Paul *
2195d67482SBill Paul * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
2295d67482SBill Paul * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2395d67482SBill Paul * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2495d67482SBill Paul * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
2595d67482SBill Paul * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2695d67482SBill Paul * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2795d67482SBill Paul * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2895d67482SBill Paul * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2995d67482SBill Paul * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3095d67482SBill Paul * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
3195d67482SBill Paul * THE POSSIBILITY OF SUCH DAMAGE.
3295d67482SBill Paul */
3395d67482SBill Paul
34aad970f1SDavid E. O'Brien #include <sys/cdefs.h>
35aad970f1SDavid E. O'Brien __FBSDID("$FreeBSD$");
36aad970f1SDavid E. O'Brien
3795d67482SBill Paul /*
3895d67482SBill Paul * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
3995d67482SBill Paul *
4095d67482SBill Paul * The Broadcom BCM5700 is based on technology originally developed by
4195d67482SBill Paul * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
4295d67482SBill Paul * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
4395d67482SBill Paul * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
4495d67482SBill Paul * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
4595d67482SBill Paul * frames, highly configurable RX filtering, and 16 RX and TX queues
4695d67482SBill Paul * (which, along with RX filter rules, can be used for QOS applications).
4795d67482SBill Paul * Other features, such as TCP segmentation, may be available as part
4895d67482SBill Paul * of value-added firmware updates.
Unlike the Tigon I and Tigon II, 4995d67482SBill Paul * firmware images can be stored in hardware and need not be compiled 5095d67482SBill Paul * into the driver. 5195d67482SBill Paul * 5295d67482SBill Paul * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will 5395d67482SBill Paul * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. 5495d67482SBill Paul * 5595d67482SBill Paul * The BCM5701 is a single-chip solution incorporating both the BCM5700 5698b28ee5SBill Paul * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 5795d67482SBill Paul * does not support external SSRAM. 5895d67482SBill Paul * 5995d67482SBill Paul * Broadcom also produces a variation of the BCM5700 under the "Altima" 6095d67482SBill Paul * brand name, which is functionally similar but lacks PCI-X support. 6195d67482SBill Paul * 6295d67482SBill Paul * Without external SSRAM, you can only have at most 4 TX rings, 6395d67482SBill Paul * and the use of the mini RX ring is disabled. This seems to imply 6495d67482SBill Paul * that these features are simply not available on the BCM5701. As a 6595d67482SBill Paul * result, this driver does not implement any support for the mini RX 6695d67482SBill Paul * ring. 6795d67482SBill Paul */ 6895d67482SBill Paul 6975719184SGleb Smirnoff #ifdef HAVE_KERNEL_OPTION_HEADERS 7075719184SGleb Smirnoff #include "opt_device_polling.h" 7175719184SGleb Smirnoff #endif 7275719184SGleb Smirnoff 7395d67482SBill Paul #include <sys/param.h> 74f41ac2beSBill Paul #include <sys/endian.h> 7595d67482SBill Paul #include <sys/systm.h> 7695d67482SBill Paul #include <sys/sockio.h> 7795d67482SBill Paul #include <sys/mbuf.h> 7895d67482SBill Paul #include <sys/malloc.h> 7995d67482SBill Paul #include <sys/kernel.h> 80fe12f24bSPoul-Henning Kamp #include <sys/module.h> 8195d67482SBill Paul #include <sys/socket.h> 82f1a7e6d5SScott Long #include <sys/sysctl.h> 8395d67482SBill Paul 8495d67482SBill Paul #include <net/if.h> 8595d67482SBill Paul #include <net/if_arp.h> 8695d67482SBill Paul #include <net/ethernet.h> 8795d67482SBill Paul #include <net/if_dl.h> 8895d67482SBill Paul #include <net/if_media.h> 8995d67482SBill Paul 9095d67482SBill Paul #include <net/bpf.h> 9195d67482SBill Paul 9295d67482SBill Paul #include <net/if_types.h> 9395d67482SBill Paul #include <net/if_vlan_var.h> 9495d67482SBill Paul 9595d67482SBill Paul #include <netinet/in_systm.h> 9695d67482SBill Paul #include <netinet/in.h> 9795d67482SBill Paul #include <netinet/ip.h> 9895d67482SBill Paul 9995d67482SBill Paul #include <machine/bus.h> 10095d67482SBill Paul #include <machine/resource.h> 10195d67482SBill Paul #include <sys/bus.h> 10295d67482SBill Paul #include <sys/rman.h> 10395d67482SBill Paul 10495d67482SBill Paul #include <dev/mii/mii.h> 10595d67482SBill Paul #include <dev/mii/miivar.h> 1062d3ce713SDavid E. 
O'Brien #include "miidevs.h"
10795d67482SBill Paul #include <dev/mii/brgphyreg.h>
10895d67482SBill Paul
10908013fd3SMarius Strobl #ifdef __sparc64__
11008013fd3SMarius Strobl #include <dev/ofw/ofw_bus.h>
11108013fd3SMarius Strobl #include <dev/ofw/openfirm.h>
11208013fd3SMarius Strobl #include <machine/ofw_machdep.h>
11308013fd3SMarius Strobl #include <machine/ver.h>
11408013fd3SMarius Strobl #endif
11508013fd3SMarius Strobl
1164fbd232cSWarner Losh #include <dev/pci/pcireg.h>
1174fbd232cSWarner Losh #include <dev/pci/pcivar.h>
11895d67482SBill Paul
11995d67482SBill Paul #include <dev/bge/if_bgereg.h>
12095d67482SBill Paul
1215ddc5794SJohn Polstra #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
122d375e524SGleb Smirnoff #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
12395d67482SBill Paul
124f246e4a1SMatthew N. Dodd MODULE_DEPEND(bge, pci, 1, 1, 1);
125f246e4a1SMatthew N. Dodd MODULE_DEPEND(bge, ether, 1, 1, 1);
12695d67482SBill Paul MODULE_DEPEND(bge, miibus, 1, 1, 1);
12795d67482SBill Paul
1287b279558SWarner Losh /* "device miibus" required. See GENERIC if you get errors here. */
12995d67482SBill Paul #include "miibus_if.h"
13095d67482SBill Paul
13195d67482SBill Paul /*
13295d67482SBill Paul * Various supported device vendors/types and their names. Note: the
13395d67482SBill Paul * spec seems to indicate that the hardware still has Alteon's vendor
13495d67482SBill Paul * ID burned into it, though it will always be overridden by the vendor
13595d67482SBill Paul * ID in the EEPROM. Just to be safe, we cover all possibilities.
13695d67482SBill Paul */
137852c67f9SMarius Strobl static const struct bge_type {
1384c0da0ffSGleb Smirnoff uint16_t bge_vid;
1394c0da0ffSGleb Smirnoff uint16_t bge_did;
1404c0da0ffSGleb Smirnoff } bge_devs[] = {
1414c0da0ffSGleb Smirnoff { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
1424c0da0ffSGleb Smirnoff { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
14395d67482SBill Paul
1444c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
1454c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
1464c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
1474c0da0ffSGleb Smirnoff
1484c0da0ffSGleb Smirnoff { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
1494c0da0ffSGleb Smirnoff
1504c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
1514c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
1524c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
1534c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
1544c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
1554c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
1564c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
1574c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
1584c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
1594c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
1604c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
1614c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
1624c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
1634c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
1644c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
1654c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
1664c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
1674c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
1684c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, 1694c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, 1704c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, 1714c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, 172effef978SRemko Lodder { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, 173a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, 1744c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, 1754c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, 1764c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, 1774c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, 1784c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, 1794c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, 1804c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, 1814c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, 1824c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, 1834c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, 1849e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, 1859e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, 1869e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, 1879e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, 188a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, 189a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, 190a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, 191a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, 192a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, 1934c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, 1944c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, 1954c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, 1964c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, 197a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, 198a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, 199a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, 2009e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, 2019e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, 202a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, 2039e86676bSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, 2044c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, 2054c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, 2064c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, 2074c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, 2084c0da0ffSGleb Smirnoff { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, 20938cc658fSJohn Baldwin { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, 21038cc658fSJohn Baldwin { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, 211a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, 212a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, 213a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, 214a5779553SStanislav Sedov { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, 2154c0da0ffSGleb Smirnoff 2164c0da0ffSGleb Smirnoff { SK_VENDORID, SK_DEVICEID_ALTIMA }, 2174c0da0ffSGleb Smirnoff 2184c0da0ffSGleb Smirnoff { TC_VENDORID, TC_DEVICEID_3C996 }, 2194c0da0ffSGleb Smirnoff 220a5779553SStanislav Sedov { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, 221a5779553SStanislav Sedov { 
FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, 222a5779553SStanislav Sedov { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 }, 223a5779553SStanislav Sedov 2244c0da0ffSGleb Smirnoff { 0, 0 } 22595d67482SBill Paul }; 22695d67482SBill Paul 2274c0da0ffSGleb Smirnoff static const struct bge_vendor { 2284c0da0ffSGleb Smirnoff uint16_t v_id; 2294c0da0ffSGleb Smirnoff const char *v_name; 2304c0da0ffSGleb Smirnoff } bge_vendors[] = { 2314c0da0ffSGleb Smirnoff { ALTEON_VENDORID, "Alteon" }, 2324c0da0ffSGleb Smirnoff { ALTIMA_VENDORID, "Altima" }, 2334c0da0ffSGleb Smirnoff { APPLE_VENDORID, "Apple" }, 2344c0da0ffSGleb Smirnoff { BCOM_VENDORID, "Broadcom" }, 2354c0da0ffSGleb Smirnoff { SK_VENDORID, "SysKonnect" }, 2364c0da0ffSGleb Smirnoff { TC_VENDORID, "3Com" }, 237a5779553SStanislav Sedov { FJTSU_VENDORID, "Fujitsu" }, 2384c0da0ffSGleb Smirnoff 2394c0da0ffSGleb Smirnoff { 0, NULL } 2404c0da0ffSGleb Smirnoff }; 2414c0da0ffSGleb Smirnoff 2424c0da0ffSGleb Smirnoff static const struct bge_revision { 2434c0da0ffSGleb Smirnoff uint32_t br_chipid; 2444c0da0ffSGleb Smirnoff const char *br_name; 2454c0da0ffSGleb Smirnoff } bge_revisions[] = { 2464c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, 2474c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, 2484c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, 2494c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, 2504c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, 2514c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, 2524c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, 2534c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, 2544c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, 2554c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, 2564c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, 2574c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, 2584c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, 2594c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, 2604c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, 2614c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, 2629e86676bSGleb Smirnoff { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, 2634c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, 2644c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, 2654c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, 2664c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, 2674c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, 2684c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, 2694c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, 2704c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, 2714c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, 2724c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, 2734c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, 2744c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, 2754c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, 2764c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, 2774c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, 2784c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, 27942787b76SGleb Smirnoff { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, 2804c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, 2814c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, 
2824c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, 2834c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, 2844c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, 2854c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, 2864c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, 2874c0da0ffSGleb Smirnoff { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, 2880c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, 2890c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, 2900c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, 2910c4a1ef8SJung-uk Kim { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, 292bcc20328SJohn Baldwin { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, 293a5779553SStanislav Sedov { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, 294a5779553SStanislav Sedov { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, 295a5779553SStanislav Sedov { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, 296a5779553SStanislav Sedov { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, 29781179070SJung-uk Kim /* 5754 and 5787 share the same ASIC ID */ 2986f8718a3SScott Long { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, 2996f8718a3SScott Long { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, 3006f8718a3SScott Long { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, 30138cc658fSJohn Baldwin { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, 30238cc658fSJohn Baldwin { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, 303a5779553SStanislav Sedov { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, 304a5779553SStanislav Sedov { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, 3054c0da0ffSGleb Smirnoff 3064c0da0ffSGleb Smirnoff { 0, NULL } 3074c0da0ffSGleb Smirnoff }; 3084c0da0ffSGleb Smirnoff 3094c0da0ffSGleb Smirnoff /* 3104c0da0ffSGleb Smirnoff * Some defaults for major revisions, so that newer steppings 3114c0da0ffSGleb Smirnoff * that we don't know about have a shot at working. 
3124c0da0ffSGleb Smirnoff */ 3134c0da0ffSGleb Smirnoff static const struct bge_revision bge_majorrevs[] = { 3149e86676bSGleb Smirnoff { BGE_ASICREV_BCM5700, "unknown BCM5700" }, 3159e86676bSGleb Smirnoff { BGE_ASICREV_BCM5701, "unknown BCM5701" }, 3169e86676bSGleb Smirnoff { BGE_ASICREV_BCM5703, "unknown BCM5703" }, 3179e86676bSGleb Smirnoff { BGE_ASICREV_BCM5704, "unknown BCM5704" }, 3189e86676bSGleb Smirnoff { BGE_ASICREV_BCM5705, "unknown BCM5705" }, 3199e86676bSGleb Smirnoff { BGE_ASICREV_BCM5750, "unknown BCM5750" }, 3209e86676bSGleb Smirnoff { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, 3219e86676bSGleb Smirnoff { BGE_ASICREV_BCM5752, "unknown BCM5752" }, 3229e86676bSGleb Smirnoff { BGE_ASICREV_BCM5780, "unknown BCM5780" }, 3239e86676bSGleb Smirnoff { BGE_ASICREV_BCM5714, "unknown BCM5714" }, 3249e86676bSGleb Smirnoff { BGE_ASICREV_BCM5755, "unknown BCM5755" }, 325a5779553SStanislav Sedov { BGE_ASICREV_BCM5761, "unknown BCM5761" }, 326a5779553SStanislav Sedov { BGE_ASICREV_BCM5784, "unknown BCM5784" }, 327a5779553SStanislav Sedov { BGE_ASICREV_BCM5785, "unknown BCM5785" }, 32881179070SJung-uk Kim /* 5754 and 5787 share the same ASIC ID */ 3296f8718a3SScott Long { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, 33038cc658fSJohn Baldwin { BGE_ASICREV_BCM5906, "unknown BCM5906" }, 331a5779553SStanislav Sedov { BGE_ASICREV_BCM57780, "unknown BCM57780" }, 3324c0da0ffSGleb Smirnoff 3334c0da0ffSGleb Smirnoff { 0, NULL } 3344c0da0ffSGleb Smirnoff }; 3354c0da0ffSGleb Smirnoff 3360c8aa4eaSJung-uk Kim #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) 3370c8aa4eaSJung-uk Kim #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) 3380c8aa4eaSJung-uk Kim #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) 3390c8aa4eaSJung-uk Kim #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) 3400c8aa4eaSJung-uk Kim #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) 341a5779553SStanislav Sedov #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) 3424c0da0ffSGleb Smirnoff 3434c0da0ffSGleb Smirnoff const struct bge_revision * bge_lookup_rev(uint32_t); 3444c0da0ffSGleb Smirnoff const struct bge_vendor * bge_lookup_vendor(uint16_t); 34538cc658fSJohn Baldwin 34638cc658fSJohn Baldwin typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); 34738cc658fSJohn Baldwin 348e51a25f8SAlfred Perlstein static int bge_probe(device_t); 349e51a25f8SAlfred Perlstein static int bge_attach(device_t); 350e51a25f8SAlfred Perlstein static int bge_detach(device_t); 35114afefa3SPawel Jakub Dawidek static int bge_suspend(device_t); 35214afefa3SPawel Jakub Dawidek static int bge_resume(device_t); 3533f74909aSGleb Smirnoff static void bge_release_resources(struct bge_softc *); 354f41ac2beSBill Paul static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); 355f41ac2beSBill Paul static int bge_dma_alloc(device_t); 356f41ac2beSBill Paul static void bge_dma_free(struct bge_softc *); 357f41ac2beSBill Paul 3585fea260fSMarius Strobl static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); 35938cc658fSJohn Baldwin static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); 36038cc658fSJohn Baldwin static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); 36138cc658fSJohn Baldwin static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); 36238cc658fSJohn Baldwin static int bge_get_eaddr(struct bge_softc *, uint8_t[]); 36338cc658fSJohn Baldwin 364e51a25f8SAlfred Perlstein static void bge_txeof(struct 
bge_softc *); 3651abcdbd1SAttilio Rao static int bge_rxeof(struct bge_softc *); 36695d67482SBill Paul 3678cb1383cSDoug Ambrisko static void bge_asf_driver_up (struct bge_softc *); 368e51a25f8SAlfred Perlstein static void bge_tick(void *); 369e51a25f8SAlfred Perlstein static void bge_stats_update(struct bge_softc *); 3703f74909aSGleb Smirnoff static void bge_stats_update_regs(struct bge_softc *); 371676ad2c9SGleb Smirnoff static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); 37295d67482SBill Paul 373e51a25f8SAlfred Perlstein static void bge_intr(void *); 3740f9bd73bSSam Leffler static void bge_start_locked(struct ifnet *); 375e51a25f8SAlfred Perlstein static void bge_start(struct ifnet *); 376e51a25f8SAlfred Perlstein static int bge_ioctl(struct ifnet *, u_long, caddr_t); 3770f9bd73bSSam Leffler static void bge_init_locked(struct bge_softc *); 378e51a25f8SAlfred Perlstein static void bge_init(void *); 379e51a25f8SAlfred Perlstein static void bge_stop(struct bge_softc *); 380b74e67fbSGleb Smirnoff static void bge_watchdog(struct bge_softc *); 381b6c974e8SWarner Losh static int bge_shutdown(device_t); 38267d5e043SOleg Bulyzhin static int bge_ifmedia_upd_locked(struct ifnet *); 383e51a25f8SAlfred Perlstein static int bge_ifmedia_upd(struct ifnet *); 384e51a25f8SAlfred Perlstein static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); 38595d67482SBill Paul 38638cc658fSJohn Baldwin static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); 38738cc658fSJohn Baldwin static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); 38838cc658fSJohn Baldwin 3893f74909aSGleb Smirnoff static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); 390e51a25f8SAlfred Perlstein static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); 39195d67482SBill Paul 3923e9b1bcaSJung-uk Kim static void bge_setpromisc(struct bge_softc *); 393e51a25f8SAlfred Perlstein static void bge_setmulti(struct bge_softc *); 394cb2eacc7SYaroslav Tykhiy static void bge_setvlan(struct bge_softc *); 39595d67482SBill Paul 396943787f3SPyun YongHyeon static int bge_newbuf_std(struct bge_softc *, int); 397943787f3SPyun YongHyeon static int bge_newbuf_jumbo(struct bge_softc *, int); 398e51a25f8SAlfred Perlstein static int bge_init_rx_ring_std(struct bge_softc *); 399e51a25f8SAlfred Perlstein static void bge_free_rx_ring_std(struct bge_softc *); 400e51a25f8SAlfred Perlstein static int bge_init_rx_ring_jumbo(struct bge_softc *); 401e51a25f8SAlfred Perlstein static void bge_free_rx_ring_jumbo(struct bge_softc *); 402e51a25f8SAlfred Perlstein static void bge_free_tx_ring(struct bge_softc *); 403e51a25f8SAlfred Perlstein static int bge_init_tx_ring(struct bge_softc *); 40495d67482SBill Paul 405e51a25f8SAlfred Perlstein static int bge_chipinit(struct bge_softc *); 406e51a25f8SAlfred Perlstein static int bge_blockinit(struct bge_softc *); 40795d67482SBill Paul 4085fea260fSMarius Strobl static int bge_has_eaddr(struct bge_softc *); 4093f74909aSGleb Smirnoff static uint32_t bge_readmem_ind(struct bge_softc *, int); 410e51a25f8SAlfred Perlstein static void bge_writemem_ind(struct bge_softc *, int, int); 41138cc658fSJohn Baldwin static void bge_writembx(struct bge_softc *, int, int); 41295d67482SBill Paul #ifdef notdef 4133f74909aSGleb Smirnoff static uint32_t bge_readreg_ind(struct bge_softc *, int); 41495d67482SBill Paul #endif 4159ba784dbSScott Long static void bge_writemem_direct(struct bge_softc *, int, int); 416e51a25f8SAlfred Perlstein static void bge_writereg_ind(struct bge_softc *, int, 
int); 4170aaf1057SPyun YongHyeon static void bge_set_max_readrq(struct bge_softc *); 41895d67482SBill Paul 419e51a25f8SAlfred Perlstein static int bge_miibus_readreg(device_t, int, int); 420e51a25f8SAlfred Perlstein static int bge_miibus_writereg(device_t, int, int, int); 421e51a25f8SAlfred Perlstein static void bge_miibus_statchg(device_t); 42275719184SGleb Smirnoff #ifdef DEVICE_POLLING 4231abcdbd1SAttilio Rao static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 42475719184SGleb Smirnoff #endif 42595d67482SBill Paul 4268cb1383cSDoug Ambrisko #define BGE_RESET_START 1 4278cb1383cSDoug Ambrisko #define BGE_RESET_STOP 2 4288cb1383cSDoug Ambrisko static void bge_sig_post_reset(struct bge_softc *, int); 4298cb1383cSDoug Ambrisko static void bge_sig_legacy(struct bge_softc *, int); 4308cb1383cSDoug Ambrisko static void bge_sig_pre_reset(struct bge_softc *, int); 4318cb1383cSDoug Ambrisko static int bge_reset(struct bge_softc *); 432dab5cd05SOleg Bulyzhin static void bge_link_upd(struct bge_softc *); 43395d67482SBill Paul 4346f8718a3SScott Long /* 4356f8718a3SScott Long * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may 4366f8718a3SScott Long * leak information to untrusted users. It is also known to cause alignment 4376f8718a3SScott Long * traps on certain architectures. 4386f8718a3SScott Long */ 4396f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG 4406f8718a3SScott Long static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 4416f8718a3SScott Long static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); 4426f8718a3SScott Long static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); 4436f8718a3SScott Long #endif 4446f8718a3SScott Long static void bge_add_sysctls(struct bge_softc *); 445763757b2SScott Long static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); 4466f8718a3SScott Long 44795d67482SBill Paul static device_method_t bge_methods[] = { 44895d67482SBill Paul /* Device interface */ 44995d67482SBill Paul DEVMETHOD(device_probe, bge_probe), 45095d67482SBill Paul DEVMETHOD(device_attach, bge_attach), 45195d67482SBill Paul DEVMETHOD(device_detach, bge_detach), 45295d67482SBill Paul DEVMETHOD(device_shutdown, bge_shutdown), 45314afefa3SPawel Jakub Dawidek DEVMETHOD(device_suspend, bge_suspend), 45414afefa3SPawel Jakub Dawidek DEVMETHOD(device_resume, bge_resume), 45595d67482SBill Paul 45695d67482SBill Paul /* bus interface */ 45795d67482SBill Paul DEVMETHOD(bus_print_child, bus_generic_print_child), 45895d67482SBill Paul DEVMETHOD(bus_driver_added, bus_generic_driver_added), 45995d67482SBill Paul 46095d67482SBill Paul /* MII interface */ 46195d67482SBill Paul DEVMETHOD(miibus_readreg, bge_miibus_readreg), 46295d67482SBill Paul DEVMETHOD(miibus_writereg, bge_miibus_writereg), 46395d67482SBill Paul DEVMETHOD(miibus_statchg, bge_miibus_statchg), 46495d67482SBill Paul 46595d67482SBill Paul { 0, 0 } 46695d67482SBill Paul }; 46795d67482SBill Paul 46895d67482SBill Paul static driver_t bge_driver = { 46995d67482SBill Paul "bge", 47095d67482SBill Paul bge_methods, 47195d67482SBill Paul sizeof(struct bge_softc) 47295d67482SBill Paul }; 47395d67482SBill Paul 47495d67482SBill Paul static devclass_t bge_devclass; 47595d67482SBill Paul 476f246e4a1SMatthew N. 
Dodd DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); 47795d67482SBill Paul DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); 47895d67482SBill Paul 479f1a7e6d5SScott Long static int bge_allow_asf = 1; 480f1a7e6d5SScott Long 481f1a7e6d5SScott Long TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); 482f1a7e6d5SScott Long 483f1a7e6d5SScott Long SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); 484f1a7e6d5SScott Long SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0, 485f1a7e6d5SScott Long "Allow ASF mode if available"); 486c4529f41SMichael Reifenberger 48708013fd3SMarius Strobl #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" 48808013fd3SMarius Strobl #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" 48908013fd3SMarius Strobl #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" 49008013fd3SMarius Strobl #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" 49108013fd3SMarius Strobl #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" 49208013fd3SMarius Strobl 49308013fd3SMarius Strobl static int 4945fea260fSMarius Strobl bge_has_eaddr(struct bge_softc *sc) 49508013fd3SMarius Strobl { 49608013fd3SMarius Strobl #ifdef __sparc64__ 49708013fd3SMarius Strobl char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; 49808013fd3SMarius Strobl device_t dev; 49908013fd3SMarius Strobl uint32_t subvendor; 50008013fd3SMarius Strobl 50108013fd3SMarius Strobl dev = sc->bge_dev; 50208013fd3SMarius Strobl 50308013fd3SMarius Strobl /* 50408013fd3SMarius Strobl * The on-board BGEs found in sun4u machines aren't fitted with 50508013fd3SMarius Strobl * an EEPROM which means that we have to obtain the MAC address 50608013fd3SMarius Strobl * via OFW and that some tests will always fail. We distinguish 50708013fd3SMarius Strobl * such BGEs by the subvendor ID, which also has to be obtained 50808013fd3SMarius Strobl * from OFW instead of the PCI configuration space as the latter 50908013fd3SMarius Strobl * indicates Broadcom as the subvendor of the netboot interface. 51008013fd3SMarius Strobl * For early Blade 1500 and 2500 we even have to check the OFW 51108013fd3SMarius Strobl * device path as the subvendor ID always defaults to Broadcom 51208013fd3SMarius Strobl * there. 
51308013fd3SMarius Strobl */ 51408013fd3SMarius Strobl if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, 51508013fd3SMarius Strobl &subvendor, sizeof(subvendor)) == sizeof(subvendor) && 51608013fd3SMarius Strobl subvendor == SUN_VENDORID) 51708013fd3SMarius Strobl return (0); 51808013fd3SMarius Strobl memset(buf, 0, sizeof(buf)); 51908013fd3SMarius Strobl if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { 52008013fd3SMarius Strobl if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && 52108013fd3SMarius Strobl strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) 52208013fd3SMarius Strobl return (0); 52308013fd3SMarius Strobl if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && 52408013fd3SMarius Strobl strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) 52508013fd3SMarius Strobl return (0); 52608013fd3SMarius Strobl } 52708013fd3SMarius Strobl #endif 52808013fd3SMarius Strobl return (1); 52908013fd3SMarius Strobl } 53008013fd3SMarius Strobl 5313f74909aSGleb Smirnoff static uint32_t 5323f74909aSGleb Smirnoff bge_readmem_ind(struct bge_softc *sc, int off) 53395d67482SBill Paul { 53495d67482SBill Paul device_t dev; 5356f8718a3SScott Long uint32_t val; 53695d67482SBill Paul 53795d67482SBill Paul dev = sc->bge_dev; 53895d67482SBill Paul 53995d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 5406f8718a3SScott Long val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); 5416f8718a3SScott Long pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 5426f8718a3SScott Long return (val); 54395d67482SBill Paul } 54495d67482SBill Paul 54595d67482SBill Paul static void 5463f74909aSGleb Smirnoff bge_writemem_ind(struct bge_softc *sc, int off, int val) 54795d67482SBill Paul { 54895d67482SBill Paul device_t dev; 54995d67482SBill Paul 55095d67482SBill Paul dev = sc->bge_dev; 55195d67482SBill Paul 55295d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); 55395d67482SBill Paul pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); 5546f8718a3SScott Long pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); 55595d67482SBill Paul } 55695d67482SBill Paul 5574f09c4c7SMarius Strobl /* 5584f09c4c7SMarius Strobl * PCI Express only 5594f09c4c7SMarius Strobl */ 5604f09c4c7SMarius Strobl static void 5610aaf1057SPyun YongHyeon bge_set_max_readrq(struct bge_softc *sc) 5624f09c4c7SMarius Strobl { 5634f09c4c7SMarius Strobl device_t dev; 5644f09c4c7SMarius Strobl uint16_t val; 5654f09c4c7SMarius Strobl 5664f09c4c7SMarius Strobl dev = sc->bge_dev; 5674f09c4c7SMarius Strobl 5680aaf1057SPyun YongHyeon val = pci_read_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2); 5690aaf1057SPyun YongHyeon if ((val & PCIM_EXP_CTL_MAX_READ_REQUEST) != 5704f09c4c7SMarius Strobl BGE_PCIE_DEVCTL_MAX_READRQ_4096) { 5714f09c4c7SMarius Strobl if (bootverbose) 5724f09c4c7SMarius Strobl device_printf(dev, "adjust device control 0x%04x ", 5734f09c4c7SMarius Strobl val); 5740aaf1057SPyun YongHyeon val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST; 5754f09c4c7SMarius Strobl val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; 5760aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 5770aaf1057SPyun YongHyeon val, 2); 5784f09c4c7SMarius Strobl if (bootverbose) 5794f09c4c7SMarius Strobl printf("-> 0x%04x\n", val); 5804f09c4c7SMarius Strobl } 5814f09c4c7SMarius Strobl } 5824f09c4c7SMarius Strobl 58395d67482SBill Paul #ifdef notdef 5843f74909aSGleb Smirnoff static uint32_t 5853f74909aSGleb Smirnoff bge_readreg_ind(struct bge_softc *sc, int off) 58695d67482SBill Paul { 
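/* Indirect access: point BGE_PCI_REG_BASEADDR at the target offset via PCI config space, then read the value back through BGE_PCI_REG_DATA. */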
58795d67482SBill Paul device_t dev; 58895d67482SBill Paul 58995d67482SBill Paul dev = sc->bge_dev; 59095d67482SBill Paul 59195d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 59295d67482SBill Paul return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); 59395d67482SBill Paul } 59495d67482SBill Paul #endif 59595d67482SBill Paul 59695d67482SBill Paul static void 5973f74909aSGleb Smirnoff bge_writereg_ind(struct bge_softc *sc, int off, int val) 59895d67482SBill Paul { 59995d67482SBill Paul device_t dev; 60095d67482SBill Paul 60195d67482SBill Paul dev = sc->bge_dev; 60295d67482SBill Paul 60395d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); 60495d67482SBill Paul pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); 60595d67482SBill Paul } 60695d67482SBill Paul 6076f8718a3SScott Long static void 6086f8718a3SScott Long bge_writemem_direct(struct bge_softc *sc, int off, int val) 6096f8718a3SScott Long { 6106f8718a3SScott Long CSR_WRITE_4(sc, off, val); 6116f8718a3SScott Long } 6126f8718a3SScott Long 61338cc658fSJohn Baldwin static void 61438cc658fSJohn Baldwin bge_writembx(struct bge_softc *sc, int off, int val) 61538cc658fSJohn Baldwin { 61638cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) 61738cc658fSJohn Baldwin off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; 61838cc658fSJohn Baldwin 61938cc658fSJohn Baldwin CSR_WRITE_4(sc, off, val); 62038cc658fSJohn Baldwin } 62138cc658fSJohn Baldwin 622f41ac2beSBill Paul /* 623f41ac2beSBill Paul * Map a single buffer address. 624f41ac2beSBill Paul */ 625f41ac2beSBill Paul 626f41ac2beSBill Paul static void 6273f74909aSGleb Smirnoff bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 628f41ac2beSBill Paul { 629f41ac2beSBill Paul struct bge_dmamap_arg *ctx; 630f41ac2beSBill Paul 631f41ac2beSBill Paul if (error) 632f41ac2beSBill Paul return; 633f41ac2beSBill Paul 634f41ac2beSBill Paul ctx = arg; 635f41ac2beSBill Paul 636f41ac2beSBill Paul if (nseg > ctx->bge_maxsegs) { 637f41ac2beSBill Paul ctx->bge_maxsegs = 0; 638f41ac2beSBill Paul return; 639f41ac2beSBill Paul } 640f41ac2beSBill Paul 641f41ac2beSBill Paul ctx->bge_busaddr = segs->ds_addr; 642f41ac2beSBill Paul } 643f41ac2beSBill Paul 64438cc658fSJohn Baldwin static uint8_t 64538cc658fSJohn Baldwin bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 64638cc658fSJohn Baldwin { 64738cc658fSJohn Baldwin uint32_t access, byte = 0; 64838cc658fSJohn Baldwin int i; 64938cc658fSJohn Baldwin 65038cc658fSJohn Baldwin /* Lock. */ 65138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); 65238cc658fSJohn Baldwin for (i = 0; i < 8000; i++) { 65338cc658fSJohn Baldwin if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) 65438cc658fSJohn Baldwin break; 65538cc658fSJohn Baldwin DELAY(20); 65638cc658fSJohn Baldwin } 65738cc658fSJohn Baldwin if (i == 8000) 65838cc658fSJohn Baldwin return (1); 65938cc658fSJohn Baldwin 66038cc658fSJohn Baldwin /* Enable access. 
*/ 66138cc658fSJohn Baldwin access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); 66238cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); 66338cc658fSJohn Baldwin 66438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); 66538cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); 66638cc658fSJohn Baldwin for (i = 0; i < BGE_TIMEOUT * 10; i++) { 66738cc658fSJohn Baldwin DELAY(10); 66838cc658fSJohn Baldwin if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { 66938cc658fSJohn Baldwin DELAY(10); 67038cc658fSJohn Baldwin break; 67138cc658fSJohn Baldwin } 67238cc658fSJohn Baldwin } 67338cc658fSJohn Baldwin 67438cc658fSJohn Baldwin if (i == BGE_TIMEOUT * 10) { 67538cc658fSJohn Baldwin if_printf(sc->bge_ifp, "nvram read timed out\n"); 67638cc658fSJohn Baldwin return (1); 67738cc658fSJohn Baldwin } 67838cc658fSJohn Baldwin 67938cc658fSJohn Baldwin /* Get result. */ 68038cc658fSJohn Baldwin byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); 68138cc658fSJohn Baldwin 68238cc658fSJohn Baldwin *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; 68338cc658fSJohn Baldwin 68438cc658fSJohn Baldwin /* Disable access. */ 68538cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); 68638cc658fSJohn Baldwin 68738cc658fSJohn Baldwin /* Unlock. */ 68838cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); 68938cc658fSJohn Baldwin CSR_READ_4(sc, BGE_NVRAM_SWARB); 69038cc658fSJohn Baldwin 69138cc658fSJohn Baldwin return (0); 69238cc658fSJohn Baldwin } 69338cc658fSJohn Baldwin 69438cc658fSJohn Baldwin /* 69538cc658fSJohn Baldwin * Read a sequence of bytes from NVRAM. 69638cc658fSJohn Baldwin */ 69738cc658fSJohn Baldwin static int 69838cc658fSJohn Baldwin bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) 69938cc658fSJohn Baldwin { 70038cc658fSJohn Baldwin int err = 0, i; 70138cc658fSJohn Baldwin uint8_t byte = 0; 70238cc658fSJohn Baldwin 70338cc658fSJohn Baldwin if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 70438cc658fSJohn Baldwin return (1); 70538cc658fSJohn Baldwin 70638cc658fSJohn Baldwin for (i = 0; i < cnt; i++) { 70738cc658fSJohn Baldwin err = bge_nvram_getbyte(sc, off + i, &byte); 70838cc658fSJohn Baldwin if (err) 70938cc658fSJohn Baldwin break; 71038cc658fSJohn Baldwin *(dest + i) = byte; 71138cc658fSJohn Baldwin } 71238cc658fSJohn Baldwin 71338cc658fSJohn Baldwin return (err ? 1 : 0); 71438cc658fSJohn Baldwin } 71538cc658fSJohn Baldwin 71695d67482SBill Paul /* 71795d67482SBill Paul * Read a byte of data stored in the EEPROM at address 'addr.' The 71895d67482SBill Paul * BCM570x supports both the traditional bitbang interface and an 71995d67482SBill Paul * auto access interface for reading the EEPROM. We use the auto 72095d67482SBill Paul * access method. 72195d67482SBill Paul */ 7223f74909aSGleb Smirnoff static uint8_t 7233f74909aSGleb Smirnoff bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) 72495d67482SBill Paul { 72595d67482SBill Paul int i; 7263f74909aSGleb Smirnoff uint32_t byte = 0; 72795d67482SBill Paul 72895d67482SBill Paul /* 72995d67482SBill Paul * Enable use of auto EEPROM access so we can avoid 73095d67482SBill Paul * having to use the bitbang method. 73195d67482SBill Paul */ 73295d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); 73395d67482SBill Paul 73495d67482SBill Paul /* Reset the EEPROM, load the clock period. 
*/
73595d67482SBill Paul CSR_WRITE_4(sc, BGE_EE_ADDR,
73695d67482SBill Paul BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
73795d67482SBill Paul DELAY(20);
73895d67482SBill Paul
73995d67482SBill Paul /* Issue the read EEPROM command. */
74095d67482SBill Paul CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
74195d67482SBill Paul
74295d67482SBill Paul /* Wait for completion */
74395d67482SBill Paul for(i = 0; i < BGE_TIMEOUT * 10; i++) {
74495d67482SBill Paul DELAY(10);
74595d67482SBill Paul if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
74695d67482SBill Paul break;
74795d67482SBill Paul }
74895d67482SBill Paul
749d5d23857SJung-uk Kim if (i == BGE_TIMEOUT * 10) {
750fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "EEPROM read timed out\n");
751f6789fbaSPyun YongHyeon return (1);
75295d67482SBill Paul }
75395d67482SBill Paul
75495d67482SBill Paul /* Get result. */
75595d67482SBill Paul byte = CSR_READ_4(sc, BGE_EE_DATA);
75695d67482SBill Paul
7570c8aa4eaSJung-uk Kim *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
75895d67482SBill Paul
75995d67482SBill Paul return (0);
76095d67482SBill Paul }
76195d67482SBill Paul
76295d67482SBill Paul /*
76395d67482SBill Paul * Read a sequence of bytes from the EEPROM.
76495d67482SBill Paul */
76595d67482SBill Paul static int
7663f74909aSGleb Smirnoff bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
76795d67482SBill Paul {
7683f74909aSGleb Smirnoff int i, error = 0;
7693f74909aSGleb Smirnoff uint8_t byte = 0;
77095d67482SBill Paul
77195d67482SBill Paul for (i = 0; i < cnt; i++) {
7723f74909aSGleb Smirnoff error = bge_eeprom_getbyte(sc, off + i, &byte);
7733f74909aSGleb Smirnoff if (error)
77495d67482SBill Paul break;
77595d67482SBill Paul *(dest + i) = byte;
77695d67482SBill Paul }
77795d67482SBill Paul
7783f74909aSGleb Smirnoff return (error ? 1 : 0);
77995d67482SBill Paul }
78095d67482SBill Paul
78195d67482SBill Paul static int
7823f74909aSGleb Smirnoff bge_miibus_readreg(device_t dev, int phy, int reg)
78395d67482SBill Paul {
78495d67482SBill Paul struct bge_softc *sc;
7853f74909aSGleb Smirnoff uint32_t val, autopoll;
78695d67482SBill Paul int i;
78795d67482SBill Paul
78895d67482SBill Paul sc = device_get_softc(dev);
78995d67482SBill Paul
7900434d1b8SBill Paul /*
7910434d1b8SBill Paul * Broadcom's own driver always assumes the internal
7920434d1b8SBill Paul * PHY is at GMII address 1. On some chips, the PHY responds
7930434d1b8SBill Paul * to accesses at all addresses, which could cause us to
7940434d1b8SBill Paul * bogusly attach the PHY 32 times at probe time. Always
7950434d1b8SBill Paul * restricting the lookup to address 1 is simpler than
7960434d1b8SBill Paul * trying to figure out which chip revisions should be
7970434d1b8SBill Paul * special-cased.
7980434d1b8SBill Paul */ 799b1265c1aSJohn Polstra if (phy != 1) 80098b28ee5SBill Paul return (0); 80198b28ee5SBill Paul 80237ceeb4dSPaul Saab /* Reading with autopolling on may trigger PCI errors */ 80337ceeb4dSPaul Saab autopoll = CSR_READ_4(sc, BGE_MI_MODE); 80437ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 80537ceeb4dSPaul Saab BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 80637ceeb4dSPaul Saab DELAY(40); 80737ceeb4dSPaul Saab } 80837ceeb4dSPaul Saab 80995d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | 81095d67482SBill Paul BGE_MIPHY(phy) | BGE_MIREG(reg)); 81195d67482SBill Paul 81295d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 813d5d23857SJung-uk Kim DELAY(10); 81495d67482SBill Paul val = CSR_READ_4(sc, BGE_MI_COMM); 81595d67482SBill Paul if (!(val & BGE_MICOMM_BUSY)) 81695d67482SBill Paul break; 81795d67482SBill Paul } 81895d67482SBill Paul 81995d67482SBill Paul if (i == BGE_TIMEOUT) { 8205fea260fSMarius Strobl device_printf(sc->bge_dev, 8215fea260fSMarius Strobl "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", 8225fea260fSMarius Strobl phy, reg, val); 82337ceeb4dSPaul Saab val = 0; 82437ceeb4dSPaul Saab goto done; 82595d67482SBill Paul } 82695d67482SBill Paul 82738cc658fSJohn Baldwin DELAY(5); 82895d67482SBill Paul val = CSR_READ_4(sc, BGE_MI_COMM); 82995d67482SBill Paul 83037ceeb4dSPaul Saab done: 83137ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 83237ceeb4dSPaul Saab BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 83337ceeb4dSPaul Saab DELAY(40); 83437ceeb4dSPaul Saab } 83537ceeb4dSPaul Saab 83695d67482SBill Paul if (val & BGE_MICOMM_READFAIL) 83795d67482SBill Paul return (0); 83895d67482SBill Paul 8390c8aa4eaSJung-uk Kim return (val & 0xFFFF); 84095d67482SBill Paul } 84195d67482SBill Paul 84295d67482SBill Paul static int 8433f74909aSGleb Smirnoff bge_miibus_writereg(device_t dev, int phy, int reg, int val) 84495d67482SBill Paul { 84595d67482SBill Paul struct bge_softc *sc; 8463f74909aSGleb Smirnoff uint32_t autopoll; 84795d67482SBill Paul int i; 84895d67482SBill Paul 84995d67482SBill Paul sc = device_get_softc(dev); 85095d67482SBill Paul 85138cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && 85238cc658fSJohn Baldwin (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) 85338cc658fSJohn Baldwin return(0); 85438cc658fSJohn Baldwin 85537ceeb4dSPaul Saab /* Reading with autopolling on may trigger PCI errors */ 85637ceeb4dSPaul Saab autopoll = CSR_READ_4(sc, BGE_MI_MODE); 85737ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) { 85837ceeb4dSPaul Saab BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); 85937ceeb4dSPaul Saab DELAY(40); 86037ceeb4dSPaul Saab } 86137ceeb4dSPaul Saab 86295d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | 86395d67482SBill Paul BGE_MIPHY(phy) | BGE_MIREG(reg) | val); 86495d67482SBill Paul 86595d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 866d5d23857SJung-uk Kim DELAY(10); 86738cc658fSJohn Baldwin if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { 86838cc658fSJohn Baldwin DELAY(5); 86938cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ 87095d67482SBill Paul break; 871d5d23857SJung-uk Kim } 87238cc658fSJohn Baldwin } 873d5d23857SJung-uk Kim 874d5d23857SJung-uk Kim if (i == BGE_TIMEOUT) { 87538cc658fSJohn Baldwin device_printf(sc->bge_dev, 87638cc658fSJohn Baldwin "PHY write timed out (phy %d, reg %d, val %d)\n", 87738cc658fSJohn Baldwin phy, reg, val); 878d5d23857SJung-uk Kim return (0); 87995d67482SBill Paul } 
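/* Restore automatic PHY polling if it was enabled on entry. */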
88095d67482SBill Paul
88137ceeb4dSPaul Saab if (autopoll & BGE_MIMODE_AUTOPOLL) {
88237ceeb4dSPaul Saab BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
88337ceeb4dSPaul Saab DELAY(40);
88437ceeb4dSPaul Saab }
88537ceeb4dSPaul Saab
88695d67482SBill Paul return (0);
88795d67482SBill Paul }
88895d67482SBill Paul
88995d67482SBill Paul static void
8903f74909aSGleb Smirnoff bge_miibus_statchg(device_t dev)
89195d67482SBill Paul {
89295d67482SBill Paul struct bge_softc *sc;
89395d67482SBill Paul struct mii_data *mii;
89495d67482SBill Paul sc = device_get_softc(dev);
89595d67482SBill Paul mii = device_get_softc(sc->bge_miibus);
89695d67482SBill Paul
89795d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
8983f74909aSGleb Smirnoff if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
89995d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
9003f74909aSGleb Smirnoff else
90195d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
90295d67482SBill Paul
9033f74909aSGleb Smirnoff if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
90495d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
9053f74909aSGleb Smirnoff else
90695d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
90795d67482SBill Paul }
90895d67482SBill Paul
90995d67482SBill Paul /*
91095d67482SBill Paul * Initialize a standard receive ring descriptor.
91195d67482SBill Paul */
91295d67482SBill Paul static int
913943787f3SPyun YongHyeon bge_newbuf_std(struct bge_softc *sc, int i)
91495d67482SBill Paul {
915943787f3SPyun YongHyeon struct mbuf *m;
91695d67482SBill Paul struct bge_rx_bd *r;
917a23634a1SPyun YongHyeon bus_dma_segment_t segs[1];
918943787f3SPyun YongHyeon bus_dmamap_t map;
919a23634a1SPyun YongHyeon int error, nsegs;
92095d67482SBill Paul
921943787f3SPyun YongHyeon m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
922943787f3SPyun YongHyeon if (m == NULL)
92395d67482SBill Paul return (ENOBUFS);
924943787f3SPyun YongHyeon m->m_len = m->m_pkthdr.len = MCLBYTES;
925652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
926943787f3SPyun YongHyeon m_adj(m, ETHER_ALIGN);
927943787f3SPyun YongHyeon
9280ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
929943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
930a23634a1SPyun YongHyeon if (error != 0) {
931943787f3SPyun YongHyeon m_freem(m);
932a23634a1SPyun YongHyeon return (error);
933f41ac2beSBill Paul }
934943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
935943787f3SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
936943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
937943787f3SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
938943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i]);
939943787f3SPyun YongHyeon }
940943787f3SPyun YongHyeon map = sc->bge_cdata.bge_rx_std_dmamap[i];
941943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
942943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap = map;
943943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_chain[i] = m;
944943787f3SPyun YongHyeon r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
945a23634a1SPyun YongHyeon r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
946a23634a1SPyun YongHyeon r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
947e907febfSPyun YongHyeon r->bge_flags = BGE_RXBDFLAG_END;
948a23634a1SPyun YongHyeon r->bge_len =
segs[0].ds_len; 949e907febfSPyun YongHyeon r->bge_idx = i; 950f41ac2beSBill Paul 9510ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 952943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); 95395d67482SBill Paul 95495d67482SBill Paul return (0); 95595d67482SBill Paul } 95695d67482SBill Paul 95795d67482SBill Paul /* 95895d67482SBill Paul * Initialize a jumbo receive ring descriptor. This allocates 95995d67482SBill Paul * a jumbo buffer from the pool managed internally by the driver. 96095d67482SBill Paul */ 96195d67482SBill Paul static int 962943787f3SPyun YongHyeon bge_newbuf_jumbo(struct bge_softc *sc, int i) 96395d67482SBill Paul { 9641be6acb7SGleb Smirnoff bus_dma_segment_t segs[BGE_NSEG_JUMBO]; 965943787f3SPyun YongHyeon bus_dmamap_t map; 9661be6acb7SGleb Smirnoff struct bge_extrx_bd *r; 967943787f3SPyun YongHyeon struct mbuf *m; 968943787f3SPyun YongHyeon int error, nsegs; 96995d67482SBill Paul 970943787f3SPyun YongHyeon MGETHDR(m, M_DONTWAIT, MT_DATA); 971943787f3SPyun YongHyeon if (m == NULL) 97295d67482SBill Paul return (ENOBUFS); 97395d67482SBill Paul 974943787f3SPyun YongHyeon m_cljget(m, M_DONTWAIT, MJUM9BYTES); 975943787f3SPyun YongHyeon if (!(m->m_flags & M_EXT)) { 976943787f3SPyun YongHyeon m_freem(m); 97795d67482SBill Paul return (ENOBUFS); 97895d67482SBill Paul } 979943787f3SPyun YongHyeon m->m_len = m->m_pkthdr.len = MJUM9BYTES; 980652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) 981943787f3SPyun YongHyeon m_adj(m, ETHER_ALIGN); 9821be6acb7SGleb Smirnoff 9831be6acb7SGleb Smirnoff error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, 984943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); 985943787f3SPyun YongHyeon if (error != 0) { 986943787f3SPyun YongHyeon m_freem(m); 9871be6acb7SGleb Smirnoff return (error); 988f7cea149SGleb Smirnoff } 9891be6acb7SGleb Smirnoff 990943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_chain[i] == NULL) { 991943787f3SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 992943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); 993943787f3SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 994943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 995943787f3SPyun YongHyeon } 996943787f3SPyun YongHyeon map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; 997943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i] = 998943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap; 999943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap = map; 1000943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_chain[i] = m; 10011be6acb7SGleb Smirnoff /* 10021be6acb7SGleb Smirnoff * Fill in the extended RX buffer descriptor. 
10031be6acb7SGleb Smirnoff */
1004943787f3SPyun YongHyeon r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
10054e7ba1abSGleb Smirnoff r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
10064e7ba1abSGleb Smirnoff r->bge_idx = i;
10074e7ba1abSGleb Smirnoff r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
10084e7ba1abSGleb Smirnoff switch (nsegs) {
10094e7ba1abSGleb Smirnoff case 4:
10104e7ba1abSGleb Smirnoff r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
10114e7ba1abSGleb Smirnoff r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
10124e7ba1abSGleb Smirnoff r->bge_len3 = segs[3].ds_len;
10134e7ba1abSGleb Smirnoff case 3:
1014e907febfSPyun YongHyeon r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1015e907febfSPyun YongHyeon r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1016e907febfSPyun YongHyeon r->bge_len2 = segs[2].ds_len;
10174e7ba1abSGleb Smirnoff case 2:
10184e7ba1abSGleb Smirnoff r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
10194e7ba1abSGleb Smirnoff r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
10204e7ba1abSGleb Smirnoff r->bge_len1 = segs[1].ds_len;
10214e7ba1abSGleb Smirnoff case 1:
10224e7ba1abSGleb Smirnoff r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
10234e7ba1abSGleb Smirnoff r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
10244e7ba1abSGleb Smirnoff r->bge_len0 = segs[0].ds_len;
10254e7ba1abSGleb Smirnoff break;
10264e7ba1abSGleb Smirnoff default:
10274e7ba1abSGleb Smirnoff panic("%s: %d segments\n", __func__, nsegs);
10284e7ba1abSGleb Smirnoff }
1029f41ac2beSBill Paul
1030a41504a9SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1031943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
103295d67482SBill Paul
103395d67482SBill Paul return (0);
103495d67482SBill Paul }
103595d67482SBill Paul
103695d67482SBill Paul /*
103795d67482SBill Paul * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
103895d67482SBill Paul * that's 1MB of memory, which is a lot. For now, we fill only the first
103995d67482SBill Paul * 256 ring entries and hope that our CPU is fast enough to keep up with
104095d67482SBill Paul * the NIC.
104195d67482SBill Paul */ 104295d67482SBill Paul static int 10433f74909aSGleb Smirnoff bge_init_rx_ring_std(struct bge_softc *sc) 104495d67482SBill Paul { 10453ee5d7daSPyun YongHyeon int error, i; 104695d67482SBill Paul 1047e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 104803e78bd0SPyun YongHyeon sc->bge_std = 0; 104995d67482SBill Paul for (i = 0; i < BGE_SSLOTS; i++) { 1050943787f3SPyun YongHyeon if ((error = bge_newbuf_std(sc, i)) != 0) 10513ee5d7daSPyun YongHyeon return (error); 105203e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 105395d67482SBill Paul }; 105495d67482SBill Paul 1055f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1056d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 1057f41ac2beSBill Paul 105895d67482SBill Paul sc->bge_std = i - 1; 105938cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 106095d67482SBill Paul 106195d67482SBill Paul return (0); 106295d67482SBill Paul } 106395d67482SBill Paul 106495d67482SBill Paul static void 10653f74909aSGleb Smirnoff bge_free_rx_ring_std(struct bge_softc *sc) 106695d67482SBill Paul { 106795d67482SBill Paul int i; 106895d67482SBill Paul 106995d67482SBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 107095d67482SBill Paul if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { 10710ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, 1072e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_dmamap[i], 1073e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 10740ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, 1075f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 1076e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_std_chain[i]); 1077e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_chain[i] = NULL; 107895d67482SBill Paul } 1079f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], 108095d67482SBill Paul sizeof(struct bge_rx_bd)); 108195d67482SBill Paul } 108295d67482SBill Paul } 108395d67482SBill Paul 108495d67482SBill Paul static int 10853f74909aSGleb Smirnoff bge_init_rx_ring_jumbo(struct bge_softc *sc) 108695d67482SBill Paul { 108795d67482SBill Paul struct bge_rcb *rcb; 10883ee5d7daSPyun YongHyeon int error, i; 108995d67482SBill Paul 1090e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); 109103e78bd0SPyun YongHyeon sc->bge_jumbo = 0; 109295d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1093943787f3SPyun YongHyeon if ((error = bge_newbuf_jumbo(sc, i)) != 0) 10943ee5d7daSPyun YongHyeon return (error); 109503e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 109695d67482SBill Paul }; 109795d67482SBill Paul 1098f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1099d77e9fa7SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 1100f41ac2beSBill Paul 110195d67482SBill Paul sc->bge_jumbo = i - 1; 110295d67482SBill Paul 1103f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; 11041be6acb7SGleb Smirnoff rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 11051be6acb7SGleb Smirnoff BGE_RCB_FLAG_USE_EXT_RX_BD); 110667111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 110795d67482SBill Paul 110838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 110995d67482SBill Paul 111095d67482SBill Paul return (0); 111195d67482SBill Paul } 111295d67482SBill Paul 111395d67482SBill Paul static void 
11143f74909aSGleb Smirnoff bge_free_rx_ring_jumbo(struct bge_softc *sc) 111595d67482SBill Paul { 111695d67482SBill Paul int i; 111795d67482SBill Paul 111895d67482SBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 111995d67482SBill Paul if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { 1120e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 1121e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_dmamap[i], 1122e65bed95SPyun YongHyeon BUS_DMASYNC_POSTREAD); 1123f41ac2beSBill Paul bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 1124f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1125e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); 1126e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; 112795d67482SBill Paul } 1128f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], 11291be6acb7SGleb Smirnoff sizeof(struct bge_extrx_bd)); 113095d67482SBill Paul } 113195d67482SBill Paul } 113295d67482SBill Paul 113395d67482SBill Paul static void 11343f74909aSGleb Smirnoff bge_free_tx_ring(struct bge_softc *sc) 113595d67482SBill Paul { 113695d67482SBill Paul int i; 113795d67482SBill Paul 1138f41ac2beSBill Paul if (sc->bge_ldata.bge_tx_ring == NULL) 113995d67482SBill Paul return; 114095d67482SBill Paul 114195d67482SBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 114295d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[i] != NULL) { 11430ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 1144e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[i], 1145e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 11460ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 1147f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 1148e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[i]); 1149e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[i] = NULL; 115095d67482SBill Paul } 1151f41ac2beSBill Paul bzero((char *)&sc->bge_ldata.bge_tx_ring[i], 115295d67482SBill Paul sizeof(struct bge_tx_bd)); 115395d67482SBill Paul } 115495d67482SBill Paul } 115595d67482SBill Paul 115695d67482SBill Paul static int 11573f74909aSGleb Smirnoff bge_init_tx_ring(struct bge_softc *sc) 115895d67482SBill Paul { 115995d67482SBill Paul sc->bge_txcnt = 0; 116095d67482SBill Paul sc->bge_tx_saved_considx = 0; 11613927098fSPaul Saab 1162e6bf277eSPyun YongHyeon bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 1163e6bf277eSPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 11645c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 1165e6bf277eSPyun YongHyeon 116614bbd30fSGleb Smirnoff /* Initialize transmit producer index for host-memory send ring. */ 116714bbd30fSGleb Smirnoff sc->bge_tx_prodidx = 0; 116838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 116914bbd30fSGleb Smirnoff 11703927098fSPaul Saab /* 5700 b2 errata */ 1171e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 117238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); 11733927098fSPaul Saab 117414bbd30fSGleb Smirnoff /* NIC-memory send ring not used; initialize to zero. 
*/ 117538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 11763927098fSPaul Saab /* 5700 b2 errata */ 1177e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 117838cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); 117995d67482SBill Paul 118095d67482SBill Paul return (0); 118195d67482SBill Paul } 118295d67482SBill Paul 118395d67482SBill Paul static void 11843e9b1bcaSJung-uk Kim bge_setpromisc(struct bge_softc *sc) 11853e9b1bcaSJung-uk Kim { 11863e9b1bcaSJung-uk Kim struct ifnet *ifp; 11873e9b1bcaSJung-uk Kim 11883e9b1bcaSJung-uk Kim BGE_LOCK_ASSERT(sc); 11893e9b1bcaSJung-uk Kim 11903e9b1bcaSJung-uk Kim ifp = sc->bge_ifp; 11913e9b1bcaSJung-uk Kim 119245ee6ab3SJung-uk Kim /* Enable or disable promiscuous mode as needed. */ 11933e9b1bcaSJung-uk Kim if (ifp->if_flags & IFF_PROMISC) 119445ee6ab3SJung-uk Kim BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 11953e9b1bcaSJung-uk Kim else 119645ee6ab3SJung-uk Kim BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); 11973e9b1bcaSJung-uk Kim } 11983e9b1bcaSJung-uk Kim 11993e9b1bcaSJung-uk Kim static void 12003f74909aSGleb Smirnoff bge_setmulti(struct bge_softc *sc) 120195d67482SBill Paul { 120295d67482SBill Paul struct ifnet *ifp; 120395d67482SBill Paul struct ifmultiaddr *ifma; 12043f74909aSGleb Smirnoff uint32_t hashes[4] = { 0, 0, 0, 0 }; 120595d67482SBill Paul int h, i; 120695d67482SBill Paul 12070f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 12080f9bd73bSSam Leffler 1209fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 121095d67482SBill Paul 121195d67482SBill Paul if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 121295d67482SBill Paul for (i = 0; i < 4; i++) 12130c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); 121495d67482SBill Paul return; 121595d67482SBill Paul } 121695d67482SBill Paul 121795d67482SBill Paul /* First, zot all the existing filters. */ 121895d67482SBill Paul for (i = 0; i < 4; i++) 121995d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); 122095d67482SBill Paul 122195d67482SBill Paul /* Now program new ones. */ 1222eb956cd0SRobert Watson if_maddr_rlock(ifp); 122395d67482SBill Paul TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 122495d67482SBill Paul if (ifma->ifma_addr->sa_family != AF_LINK) 122595d67482SBill Paul continue; 12260e939c0cSChristian Weisgerber h = ether_crc32_le(LLADDR((struct sockaddr_dl *) 12270c8aa4eaSJung-uk Kim ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; 12280c8aa4eaSJung-uk Kim hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); 122995d67482SBill Paul } 1230eb956cd0SRobert Watson if_maddr_runlock(ifp); 123195d67482SBill Paul 123295d67482SBill Paul for (i = 0; i < 4; i++) 123395d67482SBill Paul CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); 123495d67482SBill Paul } 123595d67482SBill Paul 12368cb1383cSDoug Ambrisko static void 1237cb2eacc7SYaroslav Tykhiy bge_setvlan(struct bge_softc *sc) 1238cb2eacc7SYaroslav Tykhiy { 1239cb2eacc7SYaroslav Tykhiy struct ifnet *ifp; 1240cb2eacc7SYaroslav Tykhiy 1241cb2eacc7SYaroslav Tykhiy BGE_LOCK_ASSERT(sc); 1242cb2eacc7SYaroslav Tykhiy 1243cb2eacc7SYaroslav Tykhiy ifp = sc->bge_ifp; 1244cb2eacc7SYaroslav Tykhiy 1245cb2eacc7SYaroslav Tykhiy /* Enable or disable VLAN tag stripping as needed. 
*/ 1246cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 1247cb2eacc7SYaroslav Tykhiy BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1248cb2eacc7SYaroslav Tykhiy else 1249cb2eacc7SYaroslav Tykhiy BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); 1250cb2eacc7SYaroslav Tykhiy } 1251cb2eacc7SYaroslav Tykhiy 1252cb2eacc7SYaroslav Tykhiy static void 12538cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, type) 12548cb1383cSDoug Ambrisko struct bge_softc *sc; 12558cb1383cSDoug Ambrisko int type; 12568cb1383cSDoug Ambrisko { 12578cb1383cSDoug Ambrisko /* 12588cb1383cSDoug Ambrisko * Some chips don't like this so only do this if ASF is enabled 12598cb1383cSDoug Ambrisko */ 12608cb1383cSDoug Ambrisko if (sc->bge_asf_mode) 12618cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 12628cb1383cSDoug Ambrisko 12638cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12648cb1383cSDoug Ambrisko switch (type) { 12658cb1383cSDoug Ambrisko case BGE_RESET_START: 12668cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 12678cb1383cSDoug Ambrisko break; 12688cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12698cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 12708cb1383cSDoug Ambrisko break; 12718cb1383cSDoug Ambrisko } 12728cb1383cSDoug Ambrisko } 12738cb1383cSDoug Ambrisko } 12748cb1383cSDoug Ambrisko 12758cb1383cSDoug Ambrisko static void 12768cb1383cSDoug Ambrisko bge_sig_post_reset(sc, type) 12778cb1383cSDoug Ambrisko struct bge_softc *sc; 12788cb1383cSDoug Ambrisko int type; 12798cb1383cSDoug Ambrisko { 12808cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { 12818cb1383cSDoug Ambrisko switch (type) { 12828cb1383cSDoug Ambrisko case BGE_RESET_START: 12838cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001); 12848cb1383cSDoug Ambrisko /* START DONE */ 12858cb1383cSDoug Ambrisko break; 12868cb1383cSDoug Ambrisko case BGE_RESET_STOP: 12878cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); 12888cb1383cSDoug Ambrisko break; 12898cb1383cSDoug Ambrisko } 12908cb1383cSDoug Ambrisko } 12918cb1383cSDoug Ambrisko } 12928cb1383cSDoug Ambrisko 12938cb1383cSDoug Ambrisko static void 12948cb1383cSDoug Ambrisko bge_sig_legacy(sc, type) 12958cb1383cSDoug Ambrisko struct bge_softc *sc; 12968cb1383cSDoug Ambrisko int type; 12978cb1383cSDoug Ambrisko { 12988cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 12998cb1383cSDoug Ambrisko switch (type) { 13008cb1383cSDoug Ambrisko case BGE_RESET_START: 13018cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ 13028cb1383cSDoug Ambrisko break; 13038cb1383cSDoug Ambrisko case BGE_RESET_STOP: 13048cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ 13058cb1383cSDoug Ambrisko break; 13068cb1383cSDoug Ambrisko } 13078cb1383cSDoug Ambrisko } 13088cb1383cSDoug Ambrisko } 13098cb1383cSDoug Ambrisko 13108cb1383cSDoug Ambrisko void bge_stop_fw(struct bge_softc *); 13118cb1383cSDoug Ambrisko void 13128cb1383cSDoug Ambrisko bge_stop_fw(sc) 13138cb1383cSDoug Ambrisko struct bge_softc *sc; 13148cb1383cSDoug Ambrisko { 13158cb1383cSDoug Ambrisko int i; 13168cb1383cSDoug Ambrisko 13178cb1383cSDoug Ambrisko if (sc->bge_asf_mode) { 13188cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); 13198cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 132039153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 13218cb1383cSDoug Ambrisko 
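		/*
		 * Descriptive note (added): the BGE_CPU_EVENT write above
		 * raises event bit 14 to ask the firmware to pause.  The
		 * loop below polls for up to 100 * 10us (roughly 1ms),
		 * waiting for that bit to read back clear, which presumably
		 * indicates the firmware has acknowledged the pause request.
		 */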
13228cb1383cSDoug Ambrisko for (i = 0; i < 100; i++ ) { 13238cb1383cSDoug Ambrisko if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) 13248cb1383cSDoug Ambrisko break; 13258cb1383cSDoug Ambrisko DELAY(10); 13268cb1383cSDoug Ambrisko } 13278cb1383cSDoug Ambrisko } 13288cb1383cSDoug Ambrisko } 13298cb1383cSDoug Ambrisko 133095d67482SBill Paul /* 1331c9ffd9f0SMarius Strobl * Do endian, PCI and DMA initialization. 133295d67482SBill Paul */ 133395d67482SBill Paul static int 13343f74909aSGleb Smirnoff bge_chipinit(struct bge_softc *sc) 133595d67482SBill Paul { 13363f74909aSGleb Smirnoff uint32_t dma_rw_ctl; 133795d67482SBill Paul int i; 133895d67482SBill Paul 13398cb1383cSDoug Ambrisko /* Set endianness before we access any non-PCI registers. */ 1340e907febfSPyun YongHyeon pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); 134195d67482SBill Paul 134295d67482SBill Paul /* Clear the MAC control register */ 134395d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 134495d67482SBill Paul 134595d67482SBill Paul /* 134695d67482SBill Paul * Clear the MAC statistics block in the NIC's 134795d67482SBill Paul * internal memory. 134895d67482SBill Paul */ 134995d67482SBill Paul for (i = BGE_STATS_BLOCK; 13503f74909aSGleb Smirnoff i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) 135195d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 135295d67482SBill Paul 135395d67482SBill Paul for (i = BGE_STATUS_BLOCK; 13543f74909aSGleb Smirnoff i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) 135595d67482SBill Paul BGE_MEMWIN_WRITE(sc, i, 0); 135695d67482SBill Paul 1357186f842bSJung-uk Kim /* 1358186f842bSJung-uk Kim * Set up the PCI DMA control register. 1359186f842bSJung-uk Kim */ 1360186f842bSJung-uk Kim dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | 1361186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); 1362652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) { 1363186f842bSJung-uk Kim /* Read watermark not used, 128 bytes for write. */ 1364186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1365652ae483SGleb Smirnoff } else if (sc->bge_flags & BGE_FLAG_PCIX) { 13664c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) { 1367186f842bSJung-uk Kim /* 256 bytes for read and write. */ 1368186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | 1369186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); 1370186f842bSJung-uk Kim dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? 1371186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : 1372186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; 1373186f842bSJung-uk Kim } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 1374186f842bSJung-uk Kim /* 1536 bytes for read, 384 bytes for write. */ 1375186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1376186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); 1377186f842bSJung-uk Kim } else { 1378186f842bSJung-uk Kim /* 384 bytes for read and write. */ 1379186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | 1380186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 13810c8aa4eaSJung-uk Kim 0x0F; 1382186f842bSJung-uk Kim } 1383e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1384e0ced696SPaul Saab sc->bge_asicrev == BGE_ASICREV_BCM5704) { 13853f74909aSGleb Smirnoff uint32_t tmp; 13865cba12d3SPaul Saab 1387186f842bSJung-uk Kim /* Set ONE_DMA_AT_ONCE for hardware workaround. 
*/ 13880c8aa4eaSJung-uk Kim tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; 1389186f842bSJung-uk Kim if (tmp == 6 || tmp == 7) 1390186f842bSJung-uk Kim dma_rw_ctl |= 1391186f842bSJung-uk Kim BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; 13925cba12d3SPaul Saab 1393186f842bSJung-uk Kim /* Set PCI-X DMA write workaround. */ 1394186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; 1395186f842bSJung-uk Kim } 1396186f842bSJung-uk Kim } else { 1397186f842bSJung-uk Kim /* Conventional PCI bus: 256 bytes for read and write. */ 1398186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | 1399186f842bSJung-uk Kim BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); 1400186f842bSJung-uk Kim 1401186f842bSJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && 1402186f842bSJung-uk Kim sc->bge_asicrev != BGE_ASICREV_BCM5750) 1403186f842bSJung-uk Kim dma_rw_ctl |= 0x0F; 1404186f842bSJung-uk Kim } 1405186f842bSJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 1406186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5701) 1407186f842bSJung-uk Kim dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | 1408186f842bSJung-uk Kim BGE_PCIDMARWCTL_ASRT_ALL_BE; 1409e0ced696SPaul Saab if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || 1410186f842bSJung-uk Kim sc->bge_asicrev == BGE_ASICREV_BCM5704) 14115cba12d3SPaul Saab dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; 14125cba12d3SPaul Saab pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); 141395d67482SBill Paul 141495d67482SBill Paul /* 141595d67482SBill Paul * Set up general mode register. 141695d67482SBill Paul */ 1417e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 141895d67482SBill Paul BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | 1419ee7ef91cSOleg Bulyzhin BGE_MODECTL_TX_NO_PHDR_CSUM); 142095d67482SBill Paul 142195d67482SBill Paul /* 142290447aadSMarius Strobl * BCM5701 B5 have a bug causing data corruption when using 142390447aadSMarius Strobl * 64-bit DMA reads, which can be terminated early and then 142490447aadSMarius Strobl * completed later as 32-bit accesses, in combination with 142590447aadSMarius Strobl * certain bridges. 142690447aadSMarius Strobl */ 142790447aadSMarius Strobl if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 142890447aadSMarius Strobl sc->bge_chipid == BGE_CHIPID_BCM5701_B5) 142990447aadSMarius Strobl BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32); 143090447aadSMarius Strobl 143190447aadSMarius Strobl /* 14328cb1383cSDoug Ambrisko * Tell the firmware the driver is running 14338cb1383cSDoug Ambrisko */ 14348cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 14358cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 14368cb1383cSDoug Ambrisko 14378cb1383cSDoug Ambrisko /* 1438ea13bdd5SJohn Polstra * Disable memory write invalidate. Apparently it is not supported 1439c9ffd9f0SMarius Strobl * properly by these devices. Also ensure that INTx isn't disabled, 1440c9ffd9f0SMarius Strobl * as these chips need it even when using MSI. 144195d67482SBill Paul */ 1442c9ffd9f0SMarius Strobl PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, 1443c9ffd9f0SMarius Strobl PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4); 144495d67482SBill Paul 144595d67482SBill Paul /* Set the timer prescaler (always 66Mhz) */ 14460c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 144795d67482SBill Paul 144838cc658fSJohn Baldwin /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. 
*/ 144938cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 145038cc658fSJohn Baldwin DELAY(40); /* XXX */ 145138cc658fSJohn Baldwin 145238cc658fSJohn Baldwin /* Put PHY into ready state */ 145338cc658fSJohn Baldwin BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); 145438cc658fSJohn Baldwin CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ 145538cc658fSJohn Baldwin DELAY(40); 145638cc658fSJohn Baldwin } 145738cc658fSJohn Baldwin 145895d67482SBill Paul return (0); 145995d67482SBill Paul } 146095d67482SBill Paul 146195d67482SBill Paul static int 14623f74909aSGleb Smirnoff bge_blockinit(struct bge_softc *sc) 146395d67482SBill Paul { 146495d67482SBill Paul struct bge_rcb *rcb; 1465e907febfSPyun YongHyeon bus_size_t vrcb; 1466e907febfSPyun YongHyeon bge_hostaddr taddr; 14676f8718a3SScott Long uint32_t val; 146895d67482SBill Paul int i; 146995d67482SBill Paul 147095d67482SBill Paul /* 147195d67482SBill Paul * Initialize the memory window pointer register so that 147295d67482SBill Paul * we can access the first 32K of internal NIC RAM. This will 147395d67482SBill Paul * allow us to set up the TX send ring RCBs and the RX return 147495d67482SBill Paul * ring RCBs, plus other things which live in NIC memory. 147595d67482SBill Paul */ 147695d67482SBill Paul CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); 147795d67482SBill Paul 1478822f63fcSBill Paul /* Note: the BCM5704 has a smaller mbuf space than other chips. */ 1479822f63fcSBill Paul 14807ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 148195d67482SBill Paul /* Configure mbuf memory pool */ 14820dae9719SJung-uk Kim CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); 1483822f63fcSBill Paul if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 1484822f63fcSBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); 1485822f63fcSBill Paul else 148695d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); 148795d67482SBill Paul 148895d67482SBill Paul /* Configure DMA resource pool */ 14890434d1b8SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, 14900434d1b8SBill Paul BGE_DMA_DESCRIPTORS); 149195d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); 14920434d1b8SBill Paul } 149395d67482SBill Paul 149495d67482SBill Paul /* Configure mbuf pool watermarks */ 149538cc658fSJohn Baldwin if (!BGE_IS_5705_PLUS(sc)) { 1496fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); 1497fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); 1498fa228020SPaul Saab CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 149938cc658fSJohn Baldwin } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 150038cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 150138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); 150238cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); 150338cc658fSJohn Baldwin } else { 150438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); 150538cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); 150638cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); 150738cc658fSJohn Baldwin } 150895d67482SBill Paul 150995d67482SBill Paul /* Configure DMA resource watermarks */ 151095d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); 151195d67482SBill Paul CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); 151295d67482SBill Paul 151395d67482SBill Paul /* Enable buffer manager */ 15147ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 151595d67482SBill Paul 
CSR_WRITE_4(sc, BGE_BMAN_MODE, 151695d67482SBill Paul BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); 151795d67482SBill Paul 151895d67482SBill Paul /* Poll for buffer manager start indication */ 151995d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1520d5d23857SJung-uk Kim DELAY(10); 15210c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) 152295d67482SBill Paul break; 152395d67482SBill Paul } 152495d67482SBill Paul 152595d67482SBill Paul if (i == BGE_TIMEOUT) { 1526fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1527fe806fdaSPyun YongHyeon "buffer manager failed to start\n"); 152895d67482SBill Paul return (ENXIO); 152995d67482SBill Paul } 15300434d1b8SBill Paul } 153195d67482SBill Paul 153295d67482SBill Paul /* Enable flow-through queues */ 15330c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 153495d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 153595d67482SBill Paul 153695d67482SBill Paul /* Wait until queue initialization is complete */ 153795d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1538d5d23857SJung-uk Kim DELAY(10); 153995d67482SBill Paul if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) 154095d67482SBill Paul break; 154195d67482SBill Paul } 154295d67482SBill Paul 154395d67482SBill Paul if (i == BGE_TIMEOUT) { 1544fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "flow-through queue init failed\n"); 154595d67482SBill Paul return (ENXIO); 154695d67482SBill Paul } 154795d67482SBill Paul 154895d67482SBill Paul /* Initialize the standard RX ring control block */ 1549f41ac2beSBill Paul rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; 1550f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_lo = 1551f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); 1552f41ac2beSBill Paul rcb->bge_hostaddr.bge_addr_hi = 1553f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); 1554f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 1555f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); 15567ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 15570434d1b8SBill Paul rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); 15580434d1b8SBill Paul else 15590434d1b8SBill Paul rcb->bge_maxlen_flags = 15600434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); 156195d67482SBill Paul rcb->bge_nicaddr = BGE_STD_RX_RINGS; 15620c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); 15630c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); 1564f41ac2beSBill Paul 156567111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); 156667111612SJohn Polstra CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); 156795d67482SBill Paul 156895d67482SBill Paul /* 156995d67482SBill Paul * Initialize the jumbo RX ring control block 157095d67482SBill Paul * We set the 'ring disabled' bit in the flags 157195d67482SBill Paul * field until we're actually ready to start 157295d67482SBill Paul * using this ring (i.e. once we set the MTU 157395d67482SBill Paul * high enough to require it). 
157495d67482SBill Paul  */
15754c0da0ffSGleb Smirnoff 	if (BGE_IS_JUMBO_CAPABLE(sc)) {
1576f41ac2beSBill Paul 		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1577f41ac2beSBill Paul 
1578f41ac2beSBill Paul 		rcb->bge_hostaddr.bge_addr_lo =
1579f41ac2beSBill Paul 		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1580f41ac2beSBill Paul 		rcb->bge_hostaddr.bge_addr_hi =
1581f41ac2beSBill Paul 		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
1582f41ac2beSBill Paul 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1583f41ac2beSBill Paul 		    sc->bge_cdata.bge_rx_jumbo_ring_map,
1584f41ac2beSBill Paul 		    BUS_DMASYNC_PREREAD);
15851be6acb7SGleb Smirnoff 		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
15861be6acb7SGleb Smirnoff 		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
158795d67482SBill Paul 		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
158867111612SJohn Polstra 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
158967111612SJohn Polstra 		    rcb->bge_hostaddr.bge_addr_hi);
159067111612SJohn Polstra 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
159167111612SJohn Polstra 		    rcb->bge_hostaddr.bge_addr_lo);
1592f41ac2beSBill Paul 
15930434d1b8SBill Paul 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
15940434d1b8SBill Paul 		    rcb->bge_maxlen_flags);
159567111612SJohn Polstra 		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
159695d67482SBill Paul 
159795d67482SBill Paul 		/* Set up dummy disabled mini ring RCB */
1598f41ac2beSBill Paul 		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
159967111612SJohn Polstra 		rcb->bge_maxlen_flags =
160067111612SJohn Polstra 		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
16010434d1b8SBill Paul 		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
16020434d1b8SBill Paul 		    rcb->bge_maxlen_flags);
16030434d1b8SBill Paul 	}
160495d67482SBill Paul 
160595d67482SBill Paul 	/*
160695d67482SBill Paul 	 * Set the BD ring replenish thresholds. The recommended
160795d67482SBill Paul 	 * values are 1/8th the number of descriptors allocated to
160895d67482SBill Paul 	 * each ring.
16099ba784dbSScott Long 	 * XXX The 5754 requires a lower threshold, so it might be a
16109ba784dbSScott Long 	 * requirement of all 575x family chips. The Linux driver sets
16119ba784dbSScott Long 	 * the lower threshold for all 5705 family chips as well, but there
16129ba784dbSScott Long 	 * are reports that it might not need to be so strict.
161338cc658fSJohn Baldwin 	 *
161438cc658fSJohn Baldwin 	 * XXX Linux does some extra fiddling here for the 5906 parts as
161538cc658fSJohn Baldwin 	 * well.
161695d67482SBill Paul 	 */
16175345bad0SScott Long 	if (BGE_IS_5705_PLUS(sc))
16186f8718a3SScott Long 		val = 8;
16196f8718a3SScott Long 	else
16206f8718a3SScott Long 		val = BGE_STD_RX_RING_CNT / 8;
16216f8718a3SScott Long 	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
162295d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
162395d67482SBill Paul 
162495d67482SBill Paul 	/*
162595d67482SBill Paul 	 * Disable all unused send rings by setting the 'ring disabled'
162695d67482SBill Paul 	 * bit in the flags field of all the TX send ring control blocks.
162795d67482SBill Paul 	 * These are located in NIC memory.
162895d67482SBill Paul */ 1629e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 163095d67482SBill Paul for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1631e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1632e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1633e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1634e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 163595d67482SBill Paul } 163695d67482SBill Paul 163795d67482SBill Paul /* Configure TX RCB 0 (we use only the first ring) */ 1638e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1639e907febfSPyun YongHyeon BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1640e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1641e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1642e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1643e907febfSPyun YongHyeon BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 16447ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 1645e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1646e907febfSPyun YongHyeon BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 164795d67482SBill Paul 164895d67482SBill Paul /* Disable all unused RX return rings */ 1649e907febfSPyun YongHyeon vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 165095d67482SBill Paul for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1651e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1652e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1653e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 16540434d1b8SBill Paul BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1655e907febfSPyun YongHyeon BGE_RCB_FLAG_RING_DISABLED)); 1656e907febfSPyun YongHyeon RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 165738cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO + 16583f74909aSGleb Smirnoff (i * (sizeof(uint64_t))), 0); 1659e907febfSPyun YongHyeon vrcb += sizeof(struct bge_rcb); 166095d67482SBill Paul } 166195d67482SBill Paul 166295d67482SBill Paul /* Initialize RX ring indexes */ 166338cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); 166438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 166538cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 166695d67482SBill Paul 166795d67482SBill Paul /* 166895d67482SBill Paul * Set up RX return ring 0 166995d67482SBill Paul * Note that the NIC address for RX return rings is 0x00000000. 167095d67482SBill Paul * The return rings live entirely within the host, so the 167195d67482SBill Paul * nicaddr field in the RCB isn't used. 
167295d67482SBill Paul  */
1673e907febfSPyun YongHyeon 	vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1674e907febfSPyun YongHyeon 	BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
1675e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1676e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1677e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000);
1678e907febfSPyun YongHyeon 	RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
1679e907febfSPyun YongHyeon 	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
168095d67482SBill Paul 
168195d67482SBill Paul 	/* Set random backoff seed for TX */
168295d67482SBill Paul 	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
16834a0d6638SRuslan Ermilov 	    IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] +
16844a0d6638SRuslan Ermilov 	    IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] +
16854a0d6638SRuslan Ermilov 	    IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] +
168695d67482SBill Paul 	    BGE_TX_BACKOFF_SEED_MASK);
168795d67482SBill Paul 
168895d67482SBill Paul 	/* Set inter-packet gap */
168995d67482SBill Paul 	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
169095d67482SBill Paul 
169195d67482SBill Paul 	/*
169295d67482SBill Paul 	 * Specify which ring to use for packets that don't match
169395d67482SBill Paul 	 * any RX rules.
169495d67482SBill Paul 	 */
169595d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
169695d67482SBill Paul 
169795d67482SBill Paul 	/*
169895d67482SBill Paul 	 * Configure number of RX lists. One interrupt distribution
169995d67482SBill Paul 	 * list, sixteen active lists, one bad frames class.
170095d67482SBill Paul 	 */
170195d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
170295d67482SBill Paul 
170395d67482SBill Paul 	/* Initialize RX list placement stats mask. */
17040c8aa4eaSJung-uk Kim 	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
170595d67482SBill Paul 	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
170695d67482SBill Paul 
170795d67482SBill Paul 	/* Disable host coalescing until we get it set up */
170895d67482SBill Paul 	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
170995d67482SBill Paul 
171095d67482SBill Paul 	/* Poll to make sure it's shut down.
*/ 171195d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 1712d5d23857SJung-uk Kim DELAY(10); 171395d67482SBill Paul if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) 171495d67482SBill Paul break; 171595d67482SBill Paul } 171695d67482SBill Paul 171795d67482SBill Paul if (i == BGE_TIMEOUT) { 1718fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 1719fe806fdaSPyun YongHyeon "host coalescing engine failed to idle\n"); 172095d67482SBill Paul return (ENXIO); 172195d67482SBill Paul } 172295d67482SBill Paul 172395d67482SBill Paul /* Set up host coalescing defaults */ 172495d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); 172595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); 172695d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); 172795d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); 17287ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 172995d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); 173095d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); 17310434d1b8SBill Paul } 1732b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 1733b64728e5SBruce Evans CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 173495d67482SBill Paul 173595d67482SBill Paul /* Set up address of statistics block */ 17367ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 1737f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 1738f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); 173995d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, 1740f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); 17410434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); 174295d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); 17430434d1b8SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); 17440434d1b8SBill Paul } 17450434d1b8SBill Paul 17460434d1b8SBill Paul /* Set up address of status block */ 1747f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 1748f41ac2beSBill Paul BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); 174995d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, 1750f41ac2beSBill Paul BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); 1751f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; 1752f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; 175395d67482SBill Paul 175495d67482SBill Paul /* Turn on host coalescing state machine */ 175595d67482SBill Paul CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 175695d67482SBill Paul 175795d67482SBill Paul /* Turn on RX BD completion state machine and enable attentions */ 175895d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDC_MODE, 175995d67482SBill Paul BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); 176095d67482SBill Paul 176195d67482SBill Paul /* Turn on RX list placement state machine */ 176295d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 176395d67482SBill Paul 176495d67482SBill Paul /* Turn on RX list selector state machine. 
*/ 17657ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 176695d67482SBill Paul CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 176795d67482SBill Paul 176895d67482SBill Paul /* Turn on DMA, clear stats */ 176995d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB | 177095d67482SBill Paul BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | 177195d67482SBill Paul BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | 177295d67482SBill Paul BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB | 1773652ae483SGleb Smirnoff ((sc->bge_flags & BGE_FLAG_TBI) ? 1774652ae483SGleb Smirnoff BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); 177595d67482SBill Paul 177695d67482SBill Paul /* Set misc. local control, enable interrupts on attentions */ 177795d67482SBill Paul CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); 177895d67482SBill Paul 177995d67482SBill Paul #ifdef notdef 178095d67482SBill Paul /* Assert GPIO pins for PHY reset */ 178195d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | 178295d67482SBill Paul BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); 178395d67482SBill Paul BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | 178495d67482SBill Paul BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); 178595d67482SBill Paul #endif 178695d67482SBill Paul 178795d67482SBill Paul /* Turn on DMA completion state machine */ 17887ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 178995d67482SBill Paul CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 179095d67482SBill Paul 17916f8718a3SScott Long val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; 17926f8718a3SScott Long 17936f8718a3SScott Long /* Enable host coalescing bug fix. */ 1794a5779553SStanislav Sedov if (BGE_IS_5755_PLUS(sc)) 17953889907fSStanislav Sedov val |= BGE_WDMAMODE_STATUS_TAG_FIX; 17966f8718a3SScott Long 179795d67482SBill Paul /* Turn on write DMA state machine */ 17986f8718a3SScott Long CSR_WRITE_4(sc, BGE_WDMA_MODE, val); 17994f09c4c7SMarius Strobl DELAY(40); 180095d67482SBill Paul 180195d67482SBill Paul /* Turn on read DMA state machine */ 18024f09c4c7SMarius Strobl val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; 1803a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || 1804a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5785 || 1805a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM57780) 1806a5779553SStanislav Sedov val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | 1807a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | 1808a5779553SStanislav Sedov BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; 18094f09c4c7SMarius Strobl if (sc->bge_flags & BGE_FLAG_PCIE) 18104f09c4c7SMarius Strobl val |= BGE_RDMAMODE_FIFO_LONG_BURST; 18114f09c4c7SMarius Strobl CSR_WRITE_4(sc, BGE_RDMA_MODE, val); 18124f09c4c7SMarius Strobl DELAY(40); 181395d67482SBill Paul 181495d67482SBill Paul /* Turn on RX data completion state machine */ 181595d67482SBill Paul CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 181695d67482SBill Paul 181795d67482SBill Paul /* Turn on RX BD initiator state machine */ 181895d67482SBill Paul CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 181995d67482SBill Paul 182095d67482SBill Paul /* Turn on RX data and RX BD initiator state machine */ 182195d67482SBill Paul CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); 182295d67482SBill Paul 182395d67482SBill Paul /* Turn on Mbuf cluster free state machine */ 18247ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 182595d67482SBill Paul CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 182695d67482SBill Paul 
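	/*
	 * Descriptive note (added): the remaining transmit-side engines are
	 * enabled below in sequence: send BD completion, send data
	 * completion, send data initiator, send BD initiator and send BD
	 * selector, followed by the send data initiator statistics controls.
	 */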
182795d67482SBill Paul /* Turn on send BD completion state machine */ 182895d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 182995d67482SBill Paul 183095d67482SBill Paul /* Turn on send data completion state machine */ 1831a5779553SStanislav Sedov val = BGE_SDCMODE_ENABLE; 1832a5779553SStanislav Sedov if (sc->bge_asicrev == BGE_ASICREV_BCM5761) 1833a5779553SStanislav Sedov val |= BGE_SDCMODE_CDELAY; 1834a5779553SStanislav Sedov CSR_WRITE_4(sc, BGE_SDC_MODE, val); 183595d67482SBill Paul 183695d67482SBill Paul /* Turn on send data initiator state machine */ 183795d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 183895d67482SBill Paul 183995d67482SBill Paul /* Turn on send BD initiator state machine */ 184095d67482SBill Paul CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 184195d67482SBill Paul 184295d67482SBill Paul /* Turn on send BD selector state machine */ 184395d67482SBill Paul CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 184495d67482SBill Paul 18450c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); 184695d67482SBill Paul CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 184795d67482SBill Paul BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); 184895d67482SBill Paul 184995d67482SBill Paul /* ack/clear link change events */ 185095d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 18510434d1b8SBill Paul BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 18520434d1b8SBill Paul BGE_MACSTAT_LINK_CHANGED); 1853f41ac2beSBill Paul CSR_WRITE_4(sc, BGE_MI_STS, 0); 185495d67482SBill Paul 185595d67482SBill Paul /* Enable PHY auto polling (for MII/GMII only) */ 1856652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 185795d67482SBill Paul CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); 1858a1d52896SBill Paul } else { 18596098821cSJung-uk Kim BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); 18601f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 18614c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) 1862a1d52896SBill Paul CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 1863a1d52896SBill Paul BGE_EVTENB_MI_INTERRUPT); 1864a1d52896SBill Paul } 186595d67482SBill Paul 18661f313773SOleg Bulyzhin /* 18671f313773SOleg Bulyzhin * Clear any pending link state attention. 18681f313773SOleg Bulyzhin * Otherwise some link state change events may be lost until attention 18691f313773SOleg Bulyzhin * is cleared by bge_intr() -> bge_link_upd() sequence. 18701f313773SOleg Bulyzhin * It's not necessary on newer BCM chips - perhaps enabling link 18711f313773SOleg Bulyzhin * state change attentions implies clearing pending attention. 18721f313773SOleg Bulyzhin */ 18731f313773SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | 18741f313773SOleg Bulyzhin BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | 18751f313773SOleg Bulyzhin BGE_MACSTAT_LINK_CHANGED); 18761f313773SOleg Bulyzhin 187795d67482SBill Paul /* Enable link state change attentions. 
*/ 187895d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); 187995d67482SBill Paul 188095d67482SBill Paul return (0); 188195d67482SBill Paul } 188295d67482SBill Paul 18834c0da0ffSGleb Smirnoff const struct bge_revision * 18844c0da0ffSGleb Smirnoff bge_lookup_rev(uint32_t chipid) 18854c0da0ffSGleb Smirnoff { 18864c0da0ffSGleb Smirnoff const struct bge_revision *br; 18874c0da0ffSGleb Smirnoff 18884c0da0ffSGleb Smirnoff for (br = bge_revisions; br->br_name != NULL; br++) { 18894c0da0ffSGleb Smirnoff if (br->br_chipid == chipid) 18904c0da0ffSGleb Smirnoff return (br); 18914c0da0ffSGleb Smirnoff } 18924c0da0ffSGleb Smirnoff 18934c0da0ffSGleb Smirnoff for (br = bge_majorrevs; br->br_name != NULL; br++) { 18944c0da0ffSGleb Smirnoff if (br->br_chipid == BGE_ASICREV(chipid)) 18954c0da0ffSGleb Smirnoff return (br); 18964c0da0ffSGleb Smirnoff } 18974c0da0ffSGleb Smirnoff 18984c0da0ffSGleb Smirnoff return (NULL); 18994c0da0ffSGleb Smirnoff } 19004c0da0ffSGleb Smirnoff 19014c0da0ffSGleb Smirnoff const struct bge_vendor * 19024c0da0ffSGleb Smirnoff bge_lookup_vendor(uint16_t vid) 19034c0da0ffSGleb Smirnoff { 19044c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19054c0da0ffSGleb Smirnoff 19064c0da0ffSGleb Smirnoff for (v = bge_vendors; v->v_name != NULL; v++) 19074c0da0ffSGleb Smirnoff if (v->v_id == vid) 19084c0da0ffSGleb Smirnoff return (v); 19094c0da0ffSGleb Smirnoff 19104c0da0ffSGleb Smirnoff panic("%s: unknown vendor %d", __func__, vid); 19114c0da0ffSGleb Smirnoff return (NULL); 19124c0da0ffSGleb Smirnoff } 19134c0da0ffSGleb Smirnoff 191495d67482SBill Paul /* 191595d67482SBill Paul * Probe for a Broadcom chip. Check the PCI vendor and device IDs 19164c0da0ffSGleb Smirnoff * against our list and return its name if we find a match. 19174c0da0ffSGleb Smirnoff * 19184c0da0ffSGleb Smirnoff * Note that since the Broadcom controller contains VPD support, we 19197c929cf9SJung-uk Kim * try to get the device name string from the controller itself instead 19207c929cf9SJung-uk Kim * of the compiled-in string. It guarantees we'll always announce the 19217c929cf9SJung-uk Kim * right product name. We fall back to the compiled-in string when 19227c929cf9SJung-uk Kim * VPD is unavailable or corrupt. 
192395d67482SBill Paul */ 192495d67482SBill Paul static int 19253f74909aSGleb Smirnoff bge_probe(device_t dev) 192695d67482SBill Paul { 1927852c67f9SMarius Strobl const struct bge_type *t = bge_devs; 19284c0da0ffSGleb Smirnoff struct bge_softc *sc = device_get_softc(dev); 19297c929cf9SJung-uk Kim uint16_t vid, did; 193095d67482SBill Paul 193195d67482SBill Paul sc->bge_dev = dev; 19327c929cf9SJung-uk Kim vid = pci_get_vendor(dev); 19337c929cf9SJung-uk Kim did = pci_get_device(dev); 19344c0da0ffSGleb Smirnoff while(t->bge_vid != 0) { 19357c929cf9SJung-uk Kim if ((vid == t->bge_vid) && (did == t->bge_did)) { 19367c929cf9SJung-uk Kim char model[64], buf[96]; 19374c0da0ffSGleb Smirnoff const struct bge_revision *br; 19384c0da0ffSGleb Smirnoff const struct bge_vendor *v; 19394c0da0ffSGleb Smirnoff uint32_t id; 19404c0da0ffSGleb Smirnoff 1941a5779553SStanislav Sedov id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 1942a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 1943a5779553SStanislav Sedov if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) 1944a5779553SStanislav Sedov id = pci_read_config(dev, 1945a5779553SStanislav Sedov BGE_PCI_PRODID_ASICREV, 4); 19464c0da0ffSGleb Smirnoff br = bge_lookup_rev(id); 19477c929cf9SJung-uk Kim v = bge_lookup_vendor(vid); 19484e35d186SJung-uk Kim { 19494e35d186SJung-uk Kim #if __FreeBSD_version > 700024 19504e35d186SJung-uk Kim const char *pname; 19514e35d186SJung-uk Kim 1952852c67f9SMarius Strobl if (bge_has_eaddr(sc) && 1953852c67f9SMarius Strobl pci_get_vpd_ident(dev, &pname) == 0) 19544e35d186SJung-uk Kim snprintf(model, 64, "%s", pname); 19554e35d186SJung-uk Kim else 19564e35d186SJung-uk Kim #endif 19577c929cf9SJung-uk Kim snprintf(model, 64, "%s %s", 19587c929cf9SJung-uk Kim v->v_name, 19597c929cf9SJung-uk Kim br != NULL ? br->br_name : 19607c929cf9SJung-uk Kim "NetXtreme Ethernet Controller"); 19614e35d186SJung-uk Kim } 1962a5779553SStanislav Sedov snprintf(buf, 96, "%s, %sASIC rev. %#08x", model, 1963a5779553SStanislav Sedov br != NULL ? "" : "unknown ", id); 19644c0da0ffSGleb Smirnoff device_set_desc_copy(dev, buf); 19656d2a9bd6SDoug Ambrisko if (pci_get_subvendor(dev) == DELL_VENDORID) 19665ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_NO_3LED; 196708bf8bb7SJung-uk Kim if (did == BCOM_DEVICEID_BCM5755M) 196808bf8bb7SJung-uk Kim sc->bge_flags |= BGE_FLAG_ADJUST_TRIM; 196995d67482SBill Paul return (0); 197095d67482SBill Paul } 197195d67482SBill Paul t++; 197295d67482SBill Paul } 197395d67482SBill Paul 197495d67482SBill Paul return (ENXIO); 197595d67482SBill Paul } 197695d67482SBill Paul 1977f41ac2beSBill Paul static void 19783f74909aSGleb Smirnoff bge_dma_free(struct bge_softc *sc) 1979f41ac2beSBill Paul { 1980f41ac2beSBill Paul int i; 1981f41ac2beSBill Paul 19823f74909aSGleb Smirnoff /* Destroy DMA maps for RX buffers. */ 1983f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1984f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_dmamap[i]) 19850ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 1986f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_dmamap[i]); 1987f41ac2beSBill Paul } 1988943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_sparemap) 1989943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, 1990943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_std_sparemap); 1991f41ac2beSBill Paul 19923f74909aSGleb Smirnoff /* Destroy DMA maps for jumbo RX buffers. 
*/ 1993f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1994f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) 1995f41ac2beSBill Paul bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 1996f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1997f41ac2beSBill Paul } 1998943787f3SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_sparemap) 1999943787f3SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, 2000943787f3SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_sparemap); 2001f41ac2beSBill Paul 20023f74909aSGleb Smirnoff /* Destroy DMA maps for TX buffers. */ 2003f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 2004f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_dmamap[i]) 20050ac56796SPyun YongHyeon bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, 2006f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[i]); 2007f41ac2beSBill Paul } 2008f41ac2beSBill Paul 20090ac56796SPyun YongHyeon if (sc->bge_cdata.bge_rx_mtag) 20100ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); 20110ac56796SPyun YongHyeon if (sc->bge_cdata.bge_tx_mtag) 20120ac56796SPyun YongHyeon bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); 2013f41ac2beSBill Paul 2014f41ac2beSBill Paul 20153f74909aSGleb Smirnoff /* Destroy standard RX ring. */ 2016e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map) 2017e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 2018e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map); 2019e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 2020f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 2021f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring, 2022f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map); 2023f41ac2beSBill Paul 2024f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_std_ring_tag) 2025f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 2026f41ac2beSBill Paul 20273f74909aSGleb Smirnoff /* Destroy jumbo RX ring. */ 2028e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map) 2029e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2030e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_jumbo_ring_map); 2031e65bed95SPyun YongHyeon 2032e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_jumbo_ring_map && 2033e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_jumbo_ring) 2034f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2035f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, 2036f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map); 2037f41ac2beSBill Paul 2038f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 2039f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 2040f41ac2beSBill Paul 20413f74909aSGleb Smirnoff /* Destroy RX return ring. 
*/ 2042e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map) 2043e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 2044e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map); 2045e65bed95SPyun YongHyeon 2046e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_rx_return_ring_map && 2047e65bed95SPyun YongHyeon sc->bge_ldata.bge_rx_return_ring) 2048f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 2049f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, 2050f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map); 2051f41ac2beSBill Paul 2052f41ac2beSBill Paul if (sc->bge_cdata.bge_rx_return_ring_tag) 2053f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 2054f41ac2beSBill Paul 20553f74909aSGleb Smirnoff /* Destroy TX ring. */ 2056e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map) 2057e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 2058e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_ring_map); 2059e65bed95SPyun YongHyeon 2060e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 2061f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 2062f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring, 2063f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map); 2064f41ac2beSBill Paul 2065f41ac2beSBill Paul if (sc->bge_cdata.bge_tx_ring_tag) 2066f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 2067f41ac2beSBill Paul 20683f74909aSGleb Smirnoff /* Destroy status block. */ 2069e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map) 2070e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 2071e65bed95SPyun YongHyeon sc->bge_cdata.bge_status_map); 2072e65bed95SPyun YongHyeon 2073e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 2074f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_status_tag, 2075f41ac2beSBill Paul sc->bge_ldata.bge_status_block, 2076f41ac2beSBill Paul sc->bge_cdata.bge_status_map); 2077f41ac2beSBill Paul 2078f41ac2beSBill Paul if (sc->bge_cdata.bge_status_tag) 2079f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 2080f41ac2beSBill Paul 20813f74909aSGleb Smirnoff /* Destroy statistics block. */ 2082e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map) 2083e65bed95SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 2084e65bed95SPyun YongHyeon sc->bge_cdata.bge_stats_map); 2085e65bed95SPyun YongHyeon 2086e65bed95SPyun YongHyeon if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 2087f41ac2beSBill Paul bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 2088f41ac2beSBill Paul sc->bge_ldata.bge_stats, 2089f41ac2beSBill Paul sc->bge_cdata.bge_stats_map); 2090f41ac2beSBill Paul 2091f41ac2beSBill Paul if (sc->bge_cdata.bge_stats_tag) 2092f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 2093f41ac2beSBill Paul 20943f74909aSGleb Smirnoff /* Destroy the parent tag. 
*/ 2095f41ac2beSBill Paul if (sc->bge_cdata.bge_parent_tag) 2096f41ac2beSBill Paul bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 2097f41ac2beSBill Paul } 2098f41ac2beSBill Paul 2099f41ac2beSBill Paul static int 21003f74909aSGleb Smirnoff bge_dma_alloc(device_t dev) 2101f41ac2beSBill Paul { 21023f74909aSGleb Smirnoff struct bge_dmamap_arg ctx; 2103f41ac2beSBill Paul struct bge_softc *sc; 21041be6acb7SGleb Smirnoff int i, error; 2105f41ac2beSBill Paul 2106f41ac2beSBill Paul sc = device_get_softc(dev); 2107f41ac2beSBill Paul 2108f41ac2beSBill Paul /* 2109f41ac2beSBill Paul * Allocate the parent bus DMA tag appropriate for PCI. 2110f41ac2beSBill Paul */ 21114eee14cbSMarius Strobl error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 21124eee14cbSMarius Strobl 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 21134eee14cbSMarius Strobl NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 21144eee14cbSMarius Strobl 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); 2115f41ac2beSBill Paul 2116e65bed95SPyun YongHyeon if (error != 0) { 2117fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2118fe806fdaSPyun YongHyeon "could not allocate parent dma tag\n"); 2119e65bed95SPyun YongHyeon return (ENOMEM); 2120e65bed95SPyun YongHyeon } 2121e65bed95SPyun YongHyeon 2122f41ac2beSBill Paul /* 21230ac56796SPyun YongHyeon * Create tag for Tx mbufs. 2124f41ac2beSBill Paul */ 21258a2e22deSScott Long error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 2126f41ac2beSBill Paul 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 21271be6acb7SGleb Smirnoff NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, 21280ac56796SPyun YongHyeon BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_tx_mtag); 2129f41ac2beSBill Paul 2130f41ac2beSBill Paul if (error) { 21310ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); 21320ac56796SPyun YongHyeon return (ENOMEM); 21330ac56796SPyun YongHyeon } 21340ac56796SPyun YongHyeon 21350ac56796SPyun YongHyeon /* 21360ac56796SPyun YongHyeon * Create tag for Rx mbufs. 21370ac56796SPyun YongHyeon */ 21380ac56796SPyun YongHyeon error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, 21390ac56796SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 21400ac56796SPyun YongHyeon MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); 21410ac56796SPyun YongHyeon 21420ac56796SPyun YongHyeon if (error) { 21430ac56796SPyun YongHyeon device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); 2144f41ac2beSBill Paul return (ENOMEM); 2145f41ac2beSBill Paul } 2146f41ac2beSBill Paul 21473f74909aSGleb Smirnoff /* Create DMA maps for RX buffers. 
*/ 2148943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2149943787f3SPyun YongHyeon &sc->bge_cdata.bge_rx_std_sparemap); 2150943787f3SPyun YongHyeon if (error) { 2151943787f3SPyun YongHyeon device_printf(sc->bge_dev, 2152943787f3SPyun YongHyeon "can't create spare DMA map for RX\n"); 2153943787f3SPyun YongHyeon return (ENOMEM); 2154943787f3SPyun YongHyeon } 2155f41ac2beSBill Paul for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 21560ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, 2157f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_dmamap[i]); 2158f41ac2beSBill Paul if (error) { 2159fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2160fe806fdaSPyun YongHyeon "can't create DMA map for RX\n"); 2161f41ac2beSBill Paul return (ENOMEM); 2162f41ac2beSBill Paul } 2163f41ac2beSBill Paul } 2164f41ac2beSBill Paul 21653f74909aSGleb Smirnoff /* Create DMA maps for TX buffers. */ 2166f41ac2beSBill Paul for (i = 0; i < BGE_TX_RING_CNT; i++) { 21670ac56796SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, 2168f41ac2beSBill Paul &sc->bge_cdata.bge_tx_dmamap[i]); 2169f41ac2beSBill Paul if (error) { 2170fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 21710ac56796SPyun YongHyeon "can't create DMA map for TX\n"); 2172f41ac2beSBill Paul return (ENOMEM); 2173f41ac2beSBill Paul } 2174f41ac2beSBill Paul } 2175f41ac2beSBill Paul 21763f74909aSGleb Smirnoff /* Create tag for standard RX ring. */ 2177f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2178f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2179f41ac2beSBill Paul NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 2180f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 2181f41ac2beSBill Paul 2182f41ac2beSBill Paul if (error) { 2183fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2184f41ac2beSBill Paul return (ENOMEM); 2185f41ac2beSBill Paul } 2186f41ac2beSBill Paul 21873f74909aSGleb Smirnoff /* Allocate DMA'able memory for standard RX ring. */ 2188f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 2189f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 2190f41ac2beSBill Paul &sc->bge_cdata.bge_rx_std_ring_map); 2191f41ac2beSBill Paul if (error) 2192f41ac2beSBill Paul return (ENOMEM); 2193f41ac2beSBill Paul 2194f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 2195f41ac2beSBill Paul 21963f74909aSGleb Smirnoff /* Load the address of the standard RX ring. */ 2197f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2198f41ac2beSBill Paul ctx.sc = sc; 2199f41ac2beSBill Paul 2200f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 2201f41ac2beSBill Paul sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 2202f41ac2beSBill Paul BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2203f41ac2beSBill Paul 2204f41ac2beSBill Paul if (error) 2205f41ac2beSBill Paul return (ENOMEM); 2206f41ac2beSBill Paul 2207f41ac2beSBill Paul sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 2208f41ac2beSBill Paul 22093f74909aSGleb Smirnoff /* Create tags for jumbo mbufs. 
*/ 22104c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) { 2211f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 22128a2e22deSScott Long 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 22131be6acb7SGleb Smirnoff NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 22141be6acb7SGleb Smirnoff 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 2215f41ac2beSBill Paul if (error) { 2216fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22173f74909aSGleb Smirnoff "could not allocate jumbo dma tag\n"); 2218f41ac2beSBill Paul return (ENOMEM); 2219f41ac2beSBill Paul } 2220f41ac2beSBill Paul 22213f74909aSGleb Smirnoff /* Create tag for jumbo RX ring. */ 2222f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2223f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2224f41ac2beSBill Paul NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 2225f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 2226f41ac2beSBill Paul 2227f41ac2beSBill Paul if (error) { 2228fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22293f74909aSGleb Smirnoff "could not allocate jumbo ring dma tag\n"); 2230f41ac2beSBill Paul return (ENOMEM); 2231f41ac2beSBill Paul } 2232f41ac2beSBill Paul 22333f74909aSGleb Smirnoff /* Allocate DMA'able memory for jumbo RX ring. */ 2234f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 22351be6acb7SGleb Smirnoff (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 22361be6acb7SGleb Smirnoff BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2237f41ac2beSBill Paul &sc->bge_cdata.bge_rx_jumbo_ring_map); 2238f41ac2beSBill Paul if (error) 2239f41ac2beSBill Paul return (ENOMEM); 2240f41ac2beSBill Paul 22413f74909aSGleb Smirnoff /* Load the address of the jumbo RX ring. */ 2242f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2243f41ac2beSBill Paul ctx.sc = sc; 2244f41ac2beSBill Paul 2245f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2246f41ac2beSBill Paul sc->bge_cdata.bge_rx_jumbo_ring_map, 2247f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 2248f41ac2beSBill Paul bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2249f41ac2beSBill Paul 2250f41ac2beSBill Paul if (error) 2251f41ac2beSBill Paul return (ENOMEM); 2252f41ac2beSBill Paul 2253f41ac2beSBill Paul sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 2254f41ac2beSBill Paul 22553f74909aSGleb Smirnoff /* Create DMA maps for jumbo RX buffers. */ 2256943787f3SPyun YongHyeon error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2257943787f3SPyun YongHyeon 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); 2258943787f3SPyun YongHyeon if (error) { 2259943787f3SPyun YongHyeon device_printf(sc->bge_dev, 22601b90d0bdSPyun YongHyeon "can't create spare DMA map for jumbo RX\n"); 2261943787f3SPyun YongHyeon return (ENOMEM); 2262943787f3SPyun YongHyeon } 2263f41ac2beSBill Paul for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2264f41ac2beSBill Paul error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2265f41ac2beSBill Paul 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2266f41ac2beSBill Paul if (error) { 2267fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 22683f74909aSGleb Smirnoff "can't create DMA map for jumbo RX\n"); 2269f41ac2beSBill Paul return (ENOMEM); 2270f41ac2beSBill Paul } 2271f41ac2beSBill Paul } 2272f41ac2beSBill Paul 2273f41ac2beSBill Paul } 2274f41ac2beSBill Paul 22753f74909aSGleb Smirnoff /* Create tag for RX return ring. 
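 * The return ring is where the chip posts completed RX descriptors.
 * Its size follows bge_return_ring_cnt, which is cut down to 512
 * entries on 5705 and later parts (see bge_attach()).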
*/ 2276f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2277f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2278f41ac2beSBill Paul NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 2279f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 2280f41ac2beSBill Paul 2281f41ac2beSBill Paul if (error) { 2282fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2283f41ac2beSBill Paul return (ENOMEM); 2284f41ac2beSBill Paul } 2285f41ac2beSBill Paul 22863f74909aSGleb Smirnoff /* Allocate DMA'able memory for RX return ring. */ 2287f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 2288f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 2289f41ac2beSBill Paul &sc->bge_cdata.bge_rx_return_ring_map); 2290f41ac2beSBill Paul if (error) 2291f41ac2beSBill Paul return (ENOMEM); 2292f41ac2beSBill Paul 2293f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_rx_return_ring, 2294f41ac2beSBill Paul BGE_RX_RTN_RING_SZ(sc)); 2295f41ac2beSBill Paul 22963f74909aSGleb Smirnoff /* Load the address of the RX return ring. */ 2297f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2298f41ac2beSBill Paul ctx.sc = sc; 2299f41ac2beSBill Paul 2300f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 2301f41ac2beSBill Paul sc->bge_cdata.bge_rx_return_ring_map, 2302f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 2303f41ac2beSBill Paul bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2304f41ac2beSBill Paul 2305f41ac2beSBill Paul if (error) 2306f41ac2beSBill Paul return (ENOMEM); 2307f41ac2beSBill Paul 2308f41ac2beSBill Paul sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2309f41ac2beSBill Paul 23103f74909aSGleb Smirnoff /* Create tag for TX ring. */ 2311f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2312f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2313f41ac2beSBill Paul NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2314f41ac2beSBill Paul &sc->bge_cdata.bge_tx_ring_tag); 2315f41ac2beSBill Paul 2316f41ac2beSBill Paul if (error) { 2317fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2318f41ac2beSBill Paul return (ENOMEM); 2319f41ac2beSBill Paul } 2320f41ac2beSBill Paul 23213f74909aSGleb Smirnoff /* Allocate DMA'able memory for TX ring. */ 2322f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2323f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2324f41ac2beSBill Paul &sc->bge_cdata.bge_tx_ring_map); 2325f41ac2beSBill Paul if (error) 2326f41ac2beSBill Paul return (ENOMEM); 2327f41ac2beSBill Paul 2328f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2329f41ac2beSBill Paul 23303f74909aSGleb Smirnoff /* Load the address of the TX ring. */ 2331f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2332f41ac2beSBill Paul ctx.sc = sc; 2333f41ac2beSBill Paul 2334f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2335f41ac2beSBill Paul sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2336f41ac2beSBill Paul BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2337f41ac2beSBill Paul 2338f41ac2beSBill Paul if (error) 2339f41ac2beSBill Paul return (ENOMEM); 2340f41ac2beSBill Paul 2341f41ac2beSBill Paul sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2342f41ac2beSBill Paul 23433f74909aSGleb Smirnoff /* Create tag for status block. 
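 * The status block is a small host-memory structure the chip updates
 * by DMA with producer/consumer indices and link-state bits; the
 * interrupt and RX paths read it instead of polling registers.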
*/
2344f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2345f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2346f41ac2beSBill Paul NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2347f41ac2beSBill Paul NULL, NULL, &sc->bge_cdata.bge_status_tag);
2348f41ac2beSBill Paul
2349f41ac2beSBill Paul if (error) {
2350fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n");
2351f41ac2beSBill Paul return (ENOMEM);
2352f41ac2beSBill Paul }
2353f41ac2beSBill Paul
23543f74909aSGleb Smirnoff /* Allocate DMA'able memory for status block. */
2355f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2356f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2357f41ac2beSBill Paul &sc->bge_cdata.bge_status_map);
2358f41ac2beSBill Paul if (error)
2359f41ac2beSBill Paul return (ENOMEM);
2360f41ac2beSBill Paul
2361f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2362f41ac2beSBill Paul
23633f74909aSGleb Smirnoff /* Load the address of the status block. */
2364f41ac2beSBill Paul ctx.sc = sc;
2365f41ac2beSBill Paul ctx.bge_maxsegs = 1;
2366f41ac2beSBill Paul
2367f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2368f41ac2beSBill Paul sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2369f41ac2beSBill Paul BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2370f41ac2beSBill Paul
2371f41ac2beSBill Paul if (error)
2372f41ac2beSBill Paul return (ENOMEM);
2373f41ac2beSBill Paul
2374f41ac2beSBill Paul sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2375f41ac2beSBill Paul
23763f74909aSGleb Smirnoff /* Create tag for statistics block. */
2377f41ac2beSBill Paul error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2378f41ac2beSBill Paul PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2379f41ac2beSBill Paul NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2380f41ac2beSBill Paul &sc->bge_cdata.bge_stats_tag);
2381f41ac2beSBill Paul
2382f41ac2beSBill Paul if (error) {
2383fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "could not allocate dma tag\n");
2384f41ac2beSBill Paul return (ENOMEM);
2385f41ac2beSBill Paul }
2386f41ac2beSBill Paul
23873f74909aSGleb Smirnoff /* Allocate DMA'able memory for statistics block. */
2388f41ac2beSBill Paul error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2389f41ac2beSBill Paul (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2390f41ac2beSBill Paul &sc->bge_cdata.bge_stats_map);
2391f41ac2beSBill Paul if (error)
2392f41ac2beSBill Paul return (ENOMEM);
2393f41ac2beSBill Paul
2394f41ac2beSBill Paul bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2395f41ac2beSBill Paul
23963f74909aSGleb Smirnoff /* Load the address of the statistics block.
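 * As with the rings above, bge_dma_map_addr() records the single
 * segment's bus address in ctx.bge_busaddr for use just below.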
*/ 2397f41ac2beSBill Paul ctx.sc = sc; 2398f41ac2beSBill Paul ctx.bge_maxsegs = 1; 2399f41ac2beSBill Paul 2400f41ac2beSBill Paul error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2401f41ac2beSBill Paul sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2402f41ac2beSBill Paul BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2403f41ac2beSBill Paul 2404f41ac2beSBill Paul if (error) 2405f41ac2beSBill Paul return (ENOMEM); 2406f41ac2beSBill Paul 2407f41ac2beSBill Paul sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2408f41ac2beSBill Paul 2409f41ac2beSBill Paul return (0); 2410f41ac2beSBill Paul } 2411f41ac2beSBill Paul 2412bf6ef57aSJohn Polstra /* 2413bf6ef57aSJohn Polstra * Return true if this device has more than one port. 2414bf6ef57aSJohn Polstra */ 2415bf6ef57aSJohn Polstra static int 2416bf6ef57aSJohn Polstra bge_has_multiple_ports(struct bge_softc *sc) 2417bf6ef57aSJohn Polstra { 2418bf6ef57aSJohn Polstra device_t dev = sc->bge_dev; 241955aaf894SMarius Strobl u_int b, d, f, fscan, s; 2420bf6ef57aSJohn Polstra 242155aaf894SMarius Strobl d = pci_get_domain(dev); 2422bf6ef57aSJohn Polstra b = pci_get_bus(dev); 2423bf6ef57aSJohn Polstra s = pci_get_slot(dev); 2424bf6ef57aSJohn Polstra f = pci_get_function(dev); 2425bf6ef57aSJohn Polstra for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) 242655aaf894SMarius Strobl if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) 2427bf6ef57aSJohn Polstra return (1); 2428bf6ef57aSJohn Polstra return (0); 2429bf6ef57aSJohn Polstra } 2430bf6ef57aSJohn Polstra 2431bf6ef57aSJohn Polstra /* 2432bf6ef57aSJohn Polstra * Return true if MSI can be used with this device. 2433bf6ef57aSJohn Polstra */ 2434bf6ef57aSJohn Polstra static int 2435bf6ef57aSJohn Polstra bge_can_use_msi(struct bge_softc *sc) 2436bf6ef57aSJohn Polstra { 2437bf6ef57aSJohn Polstra int can_use_msi = 0; 2438bf6ef57aSJohn Polstra 2439bf6ef57aSJohn Polstra switch (sc->bge_asicrev) { 2440a8376f70SMarius Strobl case BGE_ASICREV_BCM5714_A0: 2441bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5714: 2442bf6ef57aSJohn Polstra /* 2443a8376f70SMarius Strobl * Apparently, MSI doesn't work when these chips are 2444a8376f70SMarius Strobl * configured in single-port mode. 2445bf6ef57aSJohn Polstra */ 2446bf6ef57aSJohn Polstra if (bge_has_multiple_ports(sc)) 2447bf6ef57aSJohn Polstra can_use_msi = 1; 2448bf6ef57aSJohn Polstra break; 2449bf6ef57aSJohn Polstra case BGE_ASICREV_BCM5750: 2450bf6ef57aSJohn Polstra if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && 2451bf6ef57aSJohn Polstra sc->bge_chiprev != BGE_CHIPREV_5750_BX) 2452bf6ef57aSJohn Polstra can_use_msi = 1; 2453bf6ef57aSJohn Polstra break; 2454a8376f70SMarius Strobl default: 2455a8376f70SMarius Strobl if (BGE_IS_575X_PLUS(sc)) 2456bf6ef57aSJohn Polstra can_use_msi = 1; 2457bf6ef57aSJohn Polstra } 2458bf6ef57aSJohn Polstra return (can_use_msi); 2459bf6ef57aSJohn Polstra } 2460bf6ef57aSJohn Polstra 246195d67482SBill Paul static int 24623f74909aSGleb Smirnoff bge_attach(device_t dev) 246395d67482SBill Paul { 246495d67482SBill Paul struct ifnet *ifp; 246595d67482SBill Paul struct bge_softc *sc; 24664f0794ffSBjoern A. Zeeb uint32_t hwcfg = 0, misccfg; 246708013fd3SMarius Strobl u_char eaddr[ETHER_ADDR_LEN]; 2468d648358bSPyun YongHyeon int error, msicount, reg, rid, trys; 246995d67482SBill Paul 247095d67482SBill Paul sc = device_get_softc(dev); 247195d67482SBill Paul sc->bge_dev = dev; 247295d67482SBill Paul 247395d67482SBill Paul /* 247495d67482SBill Paul * Map control/status registers. 
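 * The controller's registers live in a single memory BAR
 * (BGE_PCI_BAR0), which is mapped before anything else is touched.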
247595d67482SBill Paul */ 247695d67482SBill Paul pci_enable_busmaster(dev); 247795d67482SBill Paul 247895d67482SBill Paul rid = BGE_PCI_BAR0; 24795f96beb9SNate Lawson sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 248044f8f2fcSMarius Strobl RF_ACTIVE); 248195d67482SBill Paul 248295d67482SBill Paul if (sc->bge_res == NULL) { 2483fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, "couldn't map memory\n"); 248495d67482SBill Paul error = ENXIO; 248595d67482SBill Paul goto fail; 248695d67482SBill Paul } 248795d67482SBill Paul 24884f09c4c7SMarius Strobl /* Save various chip information. */ 2489e53d81eeSPaul Saab sc->bge_chipid = 2490a5779553SStanislav Sedov pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 2491a5779553SStanislav Sedov BGE_PCIMISCCTL_ASICREV_SHIFT; 2492a5779553SStanislav Sedov if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_USE_PRODID_REG) 2493a5779553SStanislav Sedov sc->bge_chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 2494a5779553SStanislav Sedov 4); 2495e53d81eeSPaul Saab sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2496e53d81eeSPaul Saab sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2497e53d81eeSPaul Saab 249886543395SJung-uk Kim /* 249938cc658fSJohn Baldwin * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the 250086543395SJung-uk Kim * 5705 A0 and A1 chips. 250186543395SJung-uk Kim */ 250286543395SJung-uk Kim if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && 250338cc658fSJohn Baldwin sc->bge_asicrev != BGE_ASICREV_BCM5906 && 250486543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && 250586543395SJung-uk Kim sc->bge_chipid != BGE_CHIPID_BCM5705_A1) 250686543395SJung-uk Kim sc->bge_flags |= BGE_FLAG_WIRESPEED; 250786543395SJung-uk Kim 25085fea260fSMarius Strobl if (bge_has_eaddr(sc)) 25095fea260fSMarius Strobl sc->bge_flags |= BGE_FLAG_EADDR; 251008013fd3SMarius Strobl 25110dae9719SJung-uk Kim /* Save chipset family. 
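 * The family flags set below (5700, 5714, 575X, 5705 and 5755 plus)
 * gate most of the feature and workaround paths in the rest of the
 * driver.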
*/ 25120dae9719SJung-uk Kim switch (sc->bge_asicrev) { 2513a5779553SStanislav Sedov case BGE_ASICREV_BCM5755: 2514a5779553SStanislav Sedov case BGE_ASICREV_BCM5761: 2515a5779553SStanislav Sedov case BGE_ASICREV_BCM5784: 2516a5779553SStanislav Sedov case BGE_ASICREV_BCM5785: 2517a5779553SStanislav Sedov case BGE_ASICREV_BCM5787: 2518a5779553SStanislav Sedov case BGE_ASICREV_BCM57780: 2519a5779553SStanislav Sedov sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | 2520a5779553SStanislav Sedov BGE_FLAG_5705_PLUS; 2521a5779553SStanislav Sedov break; 25220dae9719SJung-uk Kim case BGE_ASICREV_BCM5700: 25230dae9719SJung-uk Kim case BGE_ASICREV_BCM5701: 25240dae9719SJung-uk Kim case BGE_ASICREV_BCM5703: 25250dae9719SJung-uk Kim case BGE_ASICREV_BCM5704: 25267ee00338SJung-uk Kim sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; 25270dae9719SJung-uk Kim break; 25280dae9719SJung-uk Kim case BGE_ASICREV_BCM5714_A0: 25290dae9719SJung-uk Kim case BGE_ASICREV_BCM5780: 25300dae9719SJung-uk Kim case BGE_ASICREV_BCM5714: 25317ee00338SJung-uk Kim sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */; 25329fe569d8SXin LI /* FALLTHROUGH */ 25330dae9719SJung-uk Kim case BGE_ASICREV_BCM5750: 25340dae9719SJung-uk Kim case BGE_ASICREV_BCM5752: 253538cc658fSJohn Baldwin case BGE_ASICREV_BCM5906: 25360dae9719SJung-uk Kim sc->bge_flags |= BGE_FLAG_575X_PLUS; 25379fe569d8SXin LI /* FALLTHROUGH */ 25380dae9719SJung-uk Kim case BGE_ASICREV_BCM5705: 25390dae9719SJung-uk Kim sc->bge_flags |= BGE_FLAG_5705_PLUS; 25400dae9719SJung-uk Kim break; 25410dae9719SJung-uk Kim } 25420dae9719SJung-uk Kim 25435ee49a3aSJung-uk Kim /* Set various bug flags. */ 25441ec4c3a8SJung-uk Kim if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || 25451ec4c3a8SJung-uk Kim sc->bge_chipid == BGE_CHIPID_BCM5701_B0) 25461ec4c3a8SJung-uk Kim sc->bge_flags |= BGE_FLAG_CRC_BUG; 25475ee49a3aSJung-uk Kim if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || 25485ee49a3aSJung-uk Kim sc->bge_chiprev == BGE_CHIPREV_5704_AX) 25495ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_ADC_BUG; 25505ee49a3aSJung-uk Kim if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) 25515ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_5704_A0_BUG; 255208bf8bb7SJung-uk Kim if (BGE_IS_5705_PLUS(sc) && 255308bf8bb7SJung-uk Kim !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) { 25545ee49a3aSJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || 2555a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5761 || 2556a5779553SStanislav Sedov sc->bge_asicrev == BGE_ASICREV_BCM5784 || 25574fcf220bSJohn Baldwin sc->bge_asicrev == BGE_ASICREV_BCM5787) { 25584fcf220bSJohn Baldwin if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0) 25595ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_JITTER_BUG; 256038cc658fSJohn Baldwin } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) 25615ee49a3aSJung-uk Kim sc->bge_flags |= BGE_FLAG_BER_BUG; 25625ee49a3aSJung-uk Kim } 25635ee49a3aSJung-uk Kim 25644f0794ffSBjoern A. Zeeb 25654f0794ffSBjoern A. Zeeb /* 25664f0794ffSBjoern A. Zeeb * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe() 25674f0794ffSBjoern A. Zeeb * but I do not know the DEVICEID for the 5788M. 25684f0794ffSBjoern A. Zeeb */ 25694f0794ffSBjoern A. Zeeb misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID; 25704f0794ffSBjoern A. Zeeb if (misccfg == BGE_MISCCFG_BOARD_ID_5788 || 25714f0794ffSBjoern A. Zeeb misccfg == BGE_MISCCFG_BOARD_ID_5788M) 25724f0794ffSBjoern A. Zeeb sc->bge_flags |= BGE_FLAG_5788; 25734f0794ffSBjoern A. 
Zeeb
2574e53d81eeSPaul Saab /*
25756f8718a3SScott Long * Check if this is a PCI-X or PCI Express device.
2576e53d81eeSPaul Saab */
25776f8718a3SScott Long if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
25784c0da0ffSGleb Smirnoff /*
25796f8718a3SScott Long * Found a PCI Express capabilities register, this
25806f8718a3SScott Long * must be a PCI Express device.
25816f8718a3SScott Long */
25826f8718a3SScott Long sc->bge_flags |= BGE_FLAG_PCIE;
25830aaf1057SPyun YongHyeon sc->bge_expcap = reg;
25840aaf1057SPyun YongHyeon bge_set_max_readrq(sc);
25856f8718a3SScott Long } else {
25866f8718a3SScott Long /*
25876f8718a3SScott Long * Check if the device is in PCI-X Mode.
25886f8718a3SScott Long * (This bit is not valid on PCI Express controllers.)
25894c0da0ffSGleb Smirnoff */
25900aaf1057SPyun YongHyeon if (pci_find_extcap(dev, PCIY_PCIX, &reg) == 0)
25910aaf1057SPyun YongHyeon sc->bge_pcixcap = reg;
259290447aadSMarius Strobl if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
25934c0da0ffSGleb Smirnoff BGE_PCISTATE_PCI_BUSMODE) == 0)
2594652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_PCIX;
25956f8718a3SScott Long }
25964c0da0ffSGleb Smirnoff
2597bf6ef57aSJohn Polstra /*
2598bf6ef57aSJohn Polstra * Allocate the interrupt, using MSI if possible. These devices
2599bf6ef57aSJohn Polstra * support 8 MSI messages, but only the first one is used in
2600bf6ef57aSJohn Polstra * normal operation.
2601bf6ef57aSJohn Polstra */
26020aaf1057SPyun YongHyeon rid = 0;
26030aaf1057SPyun YongHyeon if (pci_find_extcap(sc->bge_dev, PCIY_MSI, &reg) != 0) {
26040aaf1057SPyun YongHyeon sc->bge_msicap = reg;
2605bf6ef57aSJohn Polstra if (bge_can_use_msi(sc)) {
2606bf6ef57aSJohn Polstra msicount = pci_msi_count(dev);
2607bf6ef57aSJohn Polstra if (msicount > 1)
2608bf6ef57aSJohn Polstra msicount = 1;
2609bf6ef57aSJohn Polstra } else
2610bf6ef57aSJohn Polstra msicount = 0;
2611bf6ef57aSJohn Polstra if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) {
2612bf6ef57aSJohn Polstra rid = 1;
2613bf6ef57aSJohn Polstra sc->bge_flags |= BGE_FLAG_MSI;
26140aaf1057SPyun YongHyeon }
26150aaf1057SPyun YongHyeon }
2616bf6ef57aSJohn Polstra
2617bf6ef57aSJohn Polstra sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2618bf6ef57aSJohn Polstra RF_SHAREABLE | RF_ACTIVE);
2619bf6ef57aSJohn Polstra
2620bf6ef57aSJohn Polstra if (sc->bge_irq == NULL) {
2621bf6ef57aSJohn Polstra device_printf(sc->bge_dev, "couldn't map interrupt\n");
2622bf6ef57aSJohn Polstra error = ENXIO;
2623bf6ef57aSJohn Polstra goto fail;
2624bf6ef57aSJohn Polstra }
2625bf6ef57aSJohn Polstra
26264f09c4c7SMarius Strobl if (bootverbose)
26274f09c4c7SMarius Strobl device_printf(dev,
26284f09c4c7SMarius Strobl "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n",
26294f09c4c7SMarius Strobl sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev,
26304f09c4c7SMarius Strobl (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" :
26314f09c4c7SMarius Strobl ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI"));
26324f09c4c7SMarius Strobl
2633bf6ef57aSJohn Polstra BGE_LOCK_INIT(sc, device_get_nameunit(dev));
2634bf6ef57aSJohn Polstra
263595d67482SBill Paul /* Try to reset the chip.
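 * This first reset happens before the ASF state is known; once the
 * ASF/IPMI firmware has been probed, the chip is reset again below
 * with the proper firmware handshake.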
*/ 26368cb1383cSDoug Ambrisko if (bge_reset(sc)) { 26378cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "chip reset failed\n"); 26388cb1383cSDoug Ambrisko error = ENXIO; 26398cb1383cSDoug Ambrisko goto fail; 26408cb1383cSDoug Ambrisko } 26418cb1383cSDoug Ambrisko 26428cb1383cSDoug Ambrisko sc->bge_asf_mode = 0; 2643f1a7e6d5SScott Long if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2644f1a7e6d5SScott Long == BGE_MAGIC_NUMBER)) { 26458cb1383cSDoug Ambrisko if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 26468cb1383cSDoug Ambrisko & BGE_HWCFG_ASF) { 26478cb1383cSDoug Ambrisko sc->bge_asf_mode |= ASF_ENABLE; 26488cb1383cSDoug Ambrisko sc->bge_asf_mode |= ASF_STACKUP; 26498cb1383cSDoug Ambrisko if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 26508cb1383cSDoug Ambrisko sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 26518cb1383cSDoug Ambrisko } 26528cb1383cSDoug Ambrisko } 26538cb1383cSDoug Ambrisko } 26548cb1383cSDoug Ambrisko 26558cb1383cSDoug Ambrisko /* Try to reset the chip again the nice way. */ 26568cb1383cSDoug Ambrisko bge_stop_fw(sc); 26578cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_STOP); 26588cb1383cSDoug Ambrisko if (bge_reset(sc)) { 26598cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "chip reset failed\n"); 26608cb1383cSDoug Ambrisko error = ENXIO; 26618cb1383cSDoug Ambrisko goto fail; 26628cb1383cSDoug Ambrisko } 26638cb1383cSDoug Ambrisko 26648cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 26658cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 266695d67482SBill Paul 266795d67482SBill Paul if (bge_chipinit(sc)) { 2668fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "chip initialization failed\n"); 266995d67482SBill Paul error = ENXIO; 267095d67482SBill Paul goto fail; 267195d67482SBill Paul } 267295d67482SBill Paul 267338cc658fSJohn Baldwin error = bge_get_eaddr(sc, eaddr); 267438cc658fSJohn Baldwin if (error) { 267508013fd3SMarius Strobl device_printf(sc->bge_dev, 267608013fd3SMarius Strobl "failed to read station address\n"); 267795d67482SBill Paul error = ENXIO; 267895d67482SBill Paul goto fail; 267995d67482SBill Paul } 268095d67482SBill Paul 2681f41ac2beSBill Paul /* 5705 limits RX return ring to 512 entries. */ 26827ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 2683f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2684f41ac2beSBill Paul else 2685f41ac2beSBill Paul sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2686f41ac2beSBill Paul 2687f41ac2beSBill Paul if (bge_dma_alloc(dev)) { 2688fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, 2689fe806fdaSPyun YongHyeon "failed to allocate DMA resources\n"); 2690f41ac2beSBill Paul error = ENXIO; 2691f41ac2beSBill Paul goto fail; 2692f41ac2beSBill Paul } 2693f41ac2beSBill Paul 269495d67482SBill Paul /* Set default tuneable values. 
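 * These are the host coalescing parameters: how many ticks and how
 * many buffer descriptors the chip may accumulate before it raises
 * an interrupt.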
*/ 269595d67482SBill Paul sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 269695d67482SBill Paul sc->bge_rx_coal_ticks = 150; 269795d67482SBill Paul sc->bge_tx_coal_ticks = 150; 26986f8718a3SScott Long sc->bge_rx_max_coal_bds = 10; 26996f8718a3SScott Long sc->bge_tx_max_coal_bds = 10; 270095d67482SBill Paul 270195d67482SBill Paul /* Set up ifnet structure */ 2702fc74a9f9SBrooks Davis ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2703fc74a9f9SBrooks Davis if (ifp == NULL) { 2704fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2705fc74a9f9SBrooks Davis error = ENXIO; 2706fc74a9f9SBrooks Davis goto fail; 2707fc74a9f9SBrooks Davis } 270895d67482SBill Paul ifp->if_softc = sc; 27099bf40edeSBrooks Davis if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 271095d67482SBill Paul ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 271195d67482SBill Paul ifp->if_ioctl = bge_ioctl; 271295d67482SBill Paul ifp->if_start = bge_start; 271395d67482SBill Paul ifp->if_init = bge_init; 27144d665c4dSDag-Erling Smørgrav ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 27154d665c4dSDag-Erling Smørgrav IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 27164d665c4dSDag-Erling Smørgrav IFQ_SET_READY(&ifp->if_snd); 271795d67482SBill Paul ifp->if_hwassist = BGE_CSUM_FEATURES; 2718d375e524SGleb Smirnoff ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 27194e35d186SJung-uk Kim IFCAP_VLAN_MTU; 27204e35d186SJung-uk Kim #ifdef IFCAP_VLAN_HWCSUM 27214e35d186SJung-uk Kim ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 27224e35d186SJung-uk Kim #endif 272395d67482SBill Paul ifp->if_capenable = ifp->if_capabilities; 272475719184SGleb Smirnoff #ifdef DEVICE_POLLING 272575719184SGleb Smirnoff ifp->if_capabilities |= IFCAP_POLLING; 272675719184SGleb Smirnoff #endif 272795d67482SBill Paul 2728a1d52896SBill Paul /* 2729d375e524SGleb Smirnoff * 5700 B0 chips do not support checksumming correctly due 2730d375e524SGleb Smirnoff * to hardware bugs. 2731d375e524SGleb Smirnoff */ 2732d375e524SGleb Smirnoff if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2733d375e524SGleb Smirnoff ifp->if_capabilities &= ~IFCAP_HWCSUM; 27344d3a629cSPyun YongHyeon ifp->if_capenable &= ~IFCAP_HWCSUM; 2735d375e524SGleb Smirnoff ifp->if_hwassist = 0; 2736d375e524SGleb Smirnoff } 2737d375e524SGleb Smirnoff 2738d375e524SGleb Smirnoff /* 2739a1d52896SBill Paul * Figure out what sort of media we have by checking the 274041abcc1bSPaul Saab * hardware config word in the first 32k of NIC internal memory, 274141abcc1bSPaul Saab * or fall back to examining the EEPROM if necessary. 274241abcc1bSPaul Saab * Note: on some BCM5700 cards, this value appears to be unset. 274341abcc1bSPaul Saab * If that's the case, we have to rely on identifying the NIC 274441abcc1bSPaul Saab * by its PCI subsystem ID, as we do below for the SysKonnect 274541abcc1bSPaul Saab * SK-9D41. 
2746a1d52896SBill Paul */ 274741abcc1bSPaul Saab if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 274841abcc1bSPaul Saab hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 27495fea260fSMarius Strobl else if ((sc->bge_flags & BGE_FLAG_EADDR) && 27505fea260fSMarius Strobl (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 2751f6789fbaSPyun YongHyeon if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2752f6789fbaSPyun YongHyeon sizeof(hwcfg))) { 2753fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "failed to read EEPROM\n"); 2754f6789fbaSPyun YongHyeon error = ENXIO; 2755f6789fbaSPyun YongHyeon goto fail; 2756f6789fbaSPyun YongHyeon } 275741abcc1bSPaul Saab hwcfg = ntohl(hwcfg); 275841abcc1bSPaul Saab } 275941abcc1bSPaul Saab 276041abcc1bSPaul Saab if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2761652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_TBI; 2762a1d52896SBill Paul 276395d67482SBill Paul /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 27640c8aa4eaSJung-uk Kim if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 2765652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_TBI; 276695d67482SBill Paul 2767652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 27680c8aa4eaSJung-uk Kim ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, 27690c8aa4eaSJung-uk Kim bge_ifmedia_sts); 27700c8aa4eaSJung-uk Kim ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); 27716098821cSJung-uk Kim ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 27726098821cSJung-uk Kim 0, NULL); 277395d67482SBill Paul ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); 277495d67482SBill Paul ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); 2775da3003f0SBill Paul sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 277695d67482SBill Paul } else { 277795d67482SBill Paul /* 27788cb1383cSDoug Ambrisko * Do transceiver setup and tell the firmware the 27798cb1383cSDoug Ambrisko * driver is down so we can try to get access the 27808cb1383cSDoug Ambrisko * probe if ASF is running. Retry a couple of times 27818cb1383cSDoug Ambrisko * if we get a conflict with the ASF firmware accessing 27828cb1383cSDoug Ambrisko * the PHY. 
278395d67482SBill Paul */ 27844012d104SMarius Strobl trys = 0; 27858cb1383cSDoug Ambrisko BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 27868cb1383cSDoug Ambrisko again: 27878cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 27888cb1383cSDoug Ambrisko 278995d67482SBill Paul if (mii_phy_probe(dev, &sc->bge_miibus, 279095d67482SBill Paul bge_ifmedia_upd, bge_ifmedia_sts)) { 27918cb1383cSDoug Ambrisko if (trys++ < 4) { 27928cb1383cSDoug Ambrisko device_printf(sc->bge_dev, "Try again\n"); 27934e35d186SJung-uk Kim bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, 27944e35d186SJung-uk Kim BMCR_RESET); 27958cb1383cSDoug Ambrisko goto again; 27968cb1383cSDoug Ambrisko } 27978cb1383cSDoug Ambrisko 2798fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "MII without any PHY!\n"); 279995d67482SBill Paul error = ENXIO; 280095d67482SBill Paul goto fail; 280195d67482SBill Paul } 28028cb1383cSDoug Ambrisko 28038cb1383cSDoug Ambrisko /* 28048cb1383cSDoug Ambrisko * Now tell the firmware we are going up after probing the PHY 28058cb1383cSDoug Ambrisko */ 28068cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 28078cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 280895d67482SBill Paul } 280995d67482SBill Paul 281095d67482SBill Paul /* 2811e255b776SJohn Polstra * When using the BCM5701 in PCI-X mode, data corruption has 2812e255b776SJohn Polstra * been observed in the first few bytes of some received packets. 2813e255b776SJohn Polstra * Aligning the packet buffer in memory eliminates the corruption. 2814e255b776SJohn Polstra * Unfortunately, this misaligns the packet payloads. On platforms 2815e255b776SJohn Polstra * which do not support unaligned accesses, we will realign the 2816e255b776SJohn Polstra * payloads by copying the received packets. 2817e255b776SJohn Polstra */ 2818652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2819652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_PCIX) 2820652ae483SGleb Smirnoff sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2821e255b776SJohn Polstra 2822e255b776SJohn Polstra /* 282395d67482SBill Paul * Call MI attach routine. 282495d67482SBill Paul */ 2825fc74a9f9SBrooks Davis ether_ifattach(ifp, eaddr); 2826b74e67fbSGleb Smirnoff callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); 28270f9bd73bSSam Leffler 282861ccb9daSPyun YongHyeon /* Tell upper layer we support long frames. */ 282961ccb9daSPyun YongHyeon ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 283061ccb9daSPyun YongHyeon 28310f9bd73bSSam Leffler /* 28320f9bd73bSSam Leffler * Hookup IRQ last. 
28330f9bd73bSSam Leffler */ 28344e35d186SJung-uk Kim #if __FreeBSD_version > 700030 28350f9bd73bSSam Leffler error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2836ef544f63SPaolo Pisati NULL, bge_intr, sc, &sc->bge_intrhand); 28374e35d186SJung-uk Kim #else 28384e35d186SJung-uk Kim error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 28394e35d186SJung-uk Kim bge_intr, sc, &sc->bge_intrhand); 28404e35d186SJung-uk Kim #endif 28410f9bd73bSSam Leffler 28420f9bd73bSSam Leffler if (error) { 2843fc74a9f9SBrooks Davis bge_detach(dev); 2844fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "couldn't set up irq\n"); 28450f9bd73bSSam Leffler } 284695d67482SBill Paul 28476f8718a3SScott Long bge_add_sysctls(sc); 28486f8718a3SScott Long 284908013fd3SMarius Strobl return (0); 285008013fd3SMarius Strobl 285195d67482SBill Paul fail: 285208013fd3SMarius Strobl bge_release_resources(sc); 285308013fd3SMarius Strobl 285495d67482SBill Paul return (error); 285595d67482SBill Paul } 285695d67482SBill Paul 285795d67482SBill Paul static int 28583f74909aSGleb Smirnoff bge_detach(device_t dev) 285995d67482SBill Paul { 286095d67482SBill Paul struct bge_softc *sc; 286195d67482SBill Paul struct ifnet *ifp; 286295d67482SBill Paul 286395d67482SBill Paul sc = device_get_softc(dev); 2864fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 286595d67482SBill Paul 286675719184SGleb Smirnoff #ifdef DEVICE_POLLING 286775719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) 286875719184SGleb Smirnoff ether_poll_deregister(ifp); 286975719184SGleb Smirnoff #endif 287075719184SGleb Smirnoff 28710f9bd73bSSam Leffler BGE_LOCK(sc); 287295d67482SBill Paul bge_stop(sc); 287395d67482SBill Paul bge_reset(sc); 28740f9bd73bSSam Leffler BGE_UNLOCK(sc); 28750f9bd73bSSam Leffler 28765dda8085SOleg Bulyzhin callout_drain(&sc->bge_stat_ch); 28775dda8085SOleg Bulyzhin 28780f9bd73bSSam Leffler ether_ifdetach(ifp); 287995d67482SBill Paul 2880652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 288195d67482SBill Paul ifmedia_removeall(&sc->bge_ifmedia); 288295d67482SBill Paul } else { 288395d67482SBill Paul bus_generic_detach(dev); 288495d67482SBill Paul device_delete_child(dev, sc->bge_miibus); 288595d67482SBill Paul } 288695d67482SBill Paul 288795d67482SBill Paul bge_release_resources(sc); 288895d67482SBill Paul 288995d67482SBill Paul return (0); 289095d67482SBill Paul } 289195d67482SBill Paul 289295d67482SBill Paul static void 28933f74909aSGleb Smirnoff bge_release_resources(struct bge_softc *sc) 289495d67482SBill Paul { 289595d67482SBill Paul device_t dev; 289695d67482SBill Paul 289795d67482SBill Paul dev = sc->bge_dev; 289895d67482SBill Paul 289995d67482SBill Paul if (sc->bge_intrhand != NULL) 290095d67482SBill Paul bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 290195d67482SBill Paul 290295d67482SBill Paul if (sc->bge_irq != NULL) 2903724bd939SJohn Polstra bus_release_resource(dev, SYS_RES_IRQ, 2904724bd939SJohn Polstra sc->bge_flags & BGE_FLAG_MSI ? 
1 : 0, sc->bge_irq); 2905724bd939SJohn Polstra 2906724bd939SJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) 2907724bd939SJohn Polstra pci_release_msi(dev); 290895d67482SBill Paul 290995d67482SBill Paul if (sc->bge_res != NULL) 291095d67482SBill Paul bus_release_resource(dev, SYS_RES_MEMORY, 291195d67482SBill Paul BGE_PCI_BAR0, sc->bge_res); 291295d67482SBill Paul 2913ad61f896SRuslan Ermilov if (sc->bge_ifp != NULL) 2914ad61f896SRuslan Ermilov if_free(sc->bge_ifp); 2915ad61f896SRuslan Ermilov 2916f41ac2beSBill Paul bge_dma_free(sc); 291795d67482SBill Paul 29180f9bd73bSSam Leffler if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 29190f9bd73bSSam Leffler BGE_LOCK_DESTROY(sc); 292095d67482SBill Paul } 292195d67482SBill Paul 29228cb1383cSDoug Ambrisko static int 29233f74909aSGleb Smirnoff bge_reset(struct bge_softc *sc) 292495d67482SBill Paul { 292595d67482SBill Paul device_t dev; 29265fea260fSMarius Strobl uint32_t cachesize, command, pcistate, reset, val; 29276f8718a3SScott Long void (*write_op)(struct bge_softc *, int, int); 29280aaf1057SPyun YongHyeon uint16_t devctl; 29295fea260fSMarius Strobl int i; 293095d67482SBill Paul 293195d67482SBill Paul dev = sc->bge_dev; 293295d67482SBill Paul 293338cc658fSJohn Baldwin if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && 293438cc658fSJohn Baldwin (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { 29356f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) 29366f8718a3SScott Long write_op = bge_writemem_direct; 29376f8718a3SScott Long else 29386f8718a3SScott Long write_op = bge_writemem_ind; 29399ba784dbSScott Long } else 29406f8718a3SScott Long write_op = bge_writereg_ind; 29416f8718a3SScott Long 294295d67482SBill Paul /* Save some important PCI state. */ 294395d67482SBill Paul cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 294495d67482SBill Paul command = pci_read_config(dev, BGE_PCI_CMD, 4); 294595d67482SBill Paul pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 294695d67482SBill Paul 294795d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL, 294895d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 2949e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 295095d67482SBill Paul 29516f8718a3SScott Long /* Disable fastboot on controllers that support it. */ 29526f8718a3SScott Long if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || 2953a5779553SStanislav Sedov BGE_IS_5755_PLUS(sc)) { 29546f8718a3SScott Long if (bootverbose) 29559ba784dbSScott Long device_printf(sc->bge_dev, "Disabling fastboot\n"); 29566f8718a3SScott Long CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); 29576f8718a3SScott Long } 29586f8718a3SScott Long 29596f8718a3SScott Long /* 29606f8718a3SScott Long * Write the magic number to SRAM at offset 0xB50. 29616f8718a3SScott Long * When firmware finishes its initialization it will 29626f8718a3SScott Long * write ~BGE_MAGIC_NUMBER to the same location. 29636f8718a3SScott Long */ 29646f8718a3SScott Long bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 29656f8718a3SScott Long 29660c8aa4eaSJung-uk Kim reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 2967e53d81eeSPaul Saab 2968e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. 
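 * These undocumented register writes are lifted from Broadcom's
 * Linux driver; they keep PCI Express link training from interfering
 * with the global reset issued below.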
*/
2969652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) {
29700c8aa4eaSJung-uk Kim if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
29710c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, 0x7E2C, 0x20);
2972e53d81eeSPaul Saab if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2973e53d81eeSPaul Saab /* Prevent PCIE link training during global reset */
29740c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
29750c8aa4eaSJung-uk Kim reset |= 1 << 29;
2976e53d81eeSPaul Saab }
2977e53d81eeSPaul Saab }
2978e53d81eeSPaul Saab
297921c9e407SDavid Christensen /*
29806f8718a3SScott Long * Set GPHY Power Down Override to leave GPHY
29816f8718a3SScott Long * powered up in D0 uninitialized.
29826f8718a3SScott Long */
29835345bad0SScott Long if (BGE_IS_5705_PLUS(sc))
29846f8718a3SScott Long reset |= 0x04000000;
29856f8718a3SScott Long
298695d67482SBill Paul /* Issue global reset */
29876f8718a3SScott Long write_op(sc, BGE_MISC_CFG, reset);
298895d67482SBill Paul
298938cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
29905fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_STATUS);
299138cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_STATUS,
29925fea260fSMarius Strobl val | BGE_VCPU_STATUS_DRV_RESET);
29935fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
299438cc658fSJohn Baldwin CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
29955fea260fSMarius Strobl val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
299638cc658fSJohn Baldwin }
299738cc658fSJohn Baldwin
299895d67482SBill Paul DELAY(1000);
299995d67482SBill Paul
3000e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */
3001652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE) {
3002e53d81eeSPaul Saab if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3003e53d81eeSPaul Saab DELAY(500000); /* wait for link training to complete */
30045fea260fSMarius Strobl val = pci_read_config(dev, 0xC4, 4);
30055fea260fSMarius Strobl pci_write_config(dev, 0xC4, val | (1 << 15), 4);
3006e53d81eeSPaul Saab }
30070aaf1057SPyun YongHyeon devctl = pci_read_config(dev,
30080aaf1057SPyun YongHyeon sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
30090aaf1057SPyun YongHyeon /* Clear enable no snoop and disable relaxed ordering. */
30100aaf1057SPyun YongHyeon devctl &= ~(0x0010 | 0x0800);
30110aaf1057SPyun YongHyeon /* Set PCIE max payload size to 128. */
30120aaf1057SPyun YongHyeon devctl &= ~PCIM_EXP_CTL_MAX_PAYLOAD;
30130aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
30140aaf1057SPyun YongHyeon devctl, 2);
30150aaf1057SPyun YongHyeon /* Clear error status. */
30160aaf1057SPyun YongHyeon pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_STA,
30170aaf1057SPyun YongHyeon 0, 2);
3018e53d81eeSPaul Saab }
3019e53d81eeSPaul Saab
30203f74909aSGleb Smirnoff /* Reset some of the PCI state that got zapped by reset. */
302195d67482SBill Paul pci_write_config(dev, BGE_PCI_MISC_CTL,
302295d67482SBill Paul BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
3023e907febfSPyun YongHyeon BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
302495d67482SBill Paul pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
302595d67482SBill Paul pci_write_config(dev, BGE_PCI_CMD, command, 4);
30260c8aa4eaSJung-uk Kim write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
302795d67482SBill Paul
3028bf6ef57aSJohn Polstra /* Re-enable MSI, if necessary, and enable the memory arbiter. */
30294c0da0ffSGleb Smirnoff if (BGE_IS_5714_FAMILY(sc)) {
3030bf6ef57aSJohn Polstra /* This chip disables MSI on reset.
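 * The MSI enable bit in the PCI MSI capability and the BGE_MSI_MODE
 * register must therefore be switched back on by hand after every
 * reset.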
*/ 3031bf6ef57aSJohn Polstra if (sc->bge_flags & BGE_FLAG_MSI) { 30320aaf1057SPyun YongHyeon val = pci_read_config(dev, 30330aaf1057SPyun YongHyeon sc->bge_msicap + PCIR_MSI_CTRL, 2); 30340aaf1057SPyun YongHyeon pci_write_config(dev, 30350aaf1057SPyun YongHyeon sc->bge_msicap + PCIR_MSI_CTRL, 3036bf6ef57aSJohn Polstra val | PCIM_MSICTRL_MSI_ENABLE, 2); 3037bf6ef57aSJohn Polstra val = CSR_READ_4(sc, BGE_MSI_MODE); 3038bf6ef57aSJohn Polstra CSR_WRITE_4(sc, BGE_MSI_MODE, 3039bf6ef57aSJohn Polstra val | BGE_MSIMODE_ENABLE); 3040bf6ef57aSJohn Polstra } 30414c0da0ffSGleb Smirnoff val = CSR_READ_4(sc, BGE_MARB_MODE); 30424c0da0ffSGleb Smirnoff CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 30434c0da0ffSGleb Smirnoff } else 3044a7b0c314SPaul Saab CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 3045a7b0c314SPaul Saab 304638cc658fSJohn Baldwin if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { 304738cc658fSJohn Baldwin for (i = 0; i < BGE_TIMEOUT; i++) { 304838cc658fSJohn Baldwin val = CSR_READ_4(sc, BGE_VCPU_STATUS); 304938cc658fSJohn Baldwin if (val & BGE_VCPU_STATUS_INIT_DONE) 305038cc658fSJohn Baldwin break; 305138cc658fSJohn Baldwin DELAY(100); 305238cc658fSJohn Baldwin } 305338cc658fSJohn Baldwin if (i == BGE_TIMEOUT) { 305438cc658fSJohn Baldwin device_printf(sc->bge_dev, "reset timed out\n"); 305538cc658fSJohn Baldwin return (1); 305638cc658fSJohn Baldwin } 305738cc658fSJohn Baldwin } else { 305895d67482SBill Paul /* 30596f8718a3SScott Long * Poll until we see the 1's complement of the magic number. 306008013fd3SMarius Strobl * This indicates that the firmware initialization is complete. 30615fea260fSMarius Strobl * We expect this to fail if no chip containing the Ethernet 30625fea260fSMarius Strobl * address is fitted though. 306395d67482SBill Paul */ 306495d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 3065d5d23857SJung-uk Kim DELAY(10); 306695d67482SBill Paul val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 306795d67482SBill Paul if (val == ~BGE_MAGIC_NUMBER) 306895d67482SBill Paul break; 306995d67482SBill Paul } 307095d67482SBill Paul 30715fea260fSMarius Strobl if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) 30729ba784dbSScott Long device_printf(sc->bge_dev, "firmware handshake timed out, " 30739ba784dbSScott Long "found 0x%08x\n", val); 307438cc658fSJohn Baldwin } 307595d67482SBill Paul 307695d67482SBill Paul /* 307795d67482SBill Paul * XXX Wait for the value of the PCISTATE register to 307895d67482SBill Paul * return to its original pre-reset state. This is a 307995d67482SBill Paul * fairly good indicator of reset completion. If we don't 308095d67482SBill Paul * wait for the reset to fully complete, trying to read 308195d67482SBill Paul * from the device's non-PCI registers may yield garbage 308295d67482SBill Paul * results. 308395d67482SBill Paul */ 308495d67482SBill Paul for (i = 0; i < BGE_TIMEOUT; i++) { 308595d67482SBill Paul if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 308695d67482SBill Paul break; 308795d67482SBill Paul DELAY(10); 308895d67482SBill Paul } 308995d67482SBill Paul 30906f8718a3SScott Long if (sc->bge_flags & BGE_FLAG_PCIE) { 30910c8aa4eaSJung-uk Kim reset = bge_readmem_ind(sc, 0x7C00); 30920c8aa4eaSJung-uk Kim bge_writemem_ind(sc, 0x7C00, reset | (1 << 25)); 30936f8718a3SScott Long } 30946f8718a3SScott Long 30953f74909aSGleb Smirnoff /* Fix up byte swapping. 
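 * After the reset, the DMA byte-swap options appropriate for this
 * host's endianness are restored in BGE_MODE_CTL.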
*/ 3096e907febfSPyun YongHyeon CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 309795d67482SBill Paul BGE_MODECTL_BYTESWAP_DATA); 309895d67482SBill Paul 30998cb1383cSDoug Ambrisko /* Tell the ASF firmware we are up */ 31008cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 31018cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 31028cb1383cSDoug Ambrisko 310395d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 310495d67482SBill Paul 3105da3003f0SBill Paul /* 3106da3003f0SBill Paul * The 5704 in TBI mode apparently needs some special 3107da3003f0SBill Paul * adjustment to insure the SERDES drive level is set 3108da3003f0SBill Paul * to 1.2V. 3109da3003f0SBill Paul */ 3110652ae483SGleb Smirnoff if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 3111652ae483SGleb Smirnoff sc->bge_flags & BGE_FLAG_TBI) { 31125fea260fSMarius Strobl val = CSR_READ_4(sc, BGE_SERDES_CFG); 31135fea260fSMarius Strobl val = (val & ~0xFFF) | 0x880; 31145fea260fSMarius Strobl CSR_WRITE_4(sc, BGE_SERDES_CFG, val); 3115da3003f0SBill Paul } 3116da3003f0SBill Paul 3117e53d81eeSPaul Saab /* XXX: Broadcom Linux driver. */ 3118652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_PCIE && 3119652ae483SGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 31205fea260fSMarius Strobl val = CSR_READ_4(sc, 0x7C00); 31215fea260fSMarius Strobl CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); 3122e53d81eeSPaul Saab } 312395d67482SBill Paul DELAY(10000); 31248cb1383cSDoug Ambrisko 31258cb1383cSDoug Ambrisko return(0); 312695d67482SBill Paul } 312795d67482SBill Paul 312895d67482SBill Paul /* 312995d67482SBill Paul * Frame reception handling. This is called if there's a frame 313095d67482SBill Paul * on the receive return list. 313195d67482SBill Paul * 313295d67482SBill Paul * Note: we have to be able to handle two possibilities here: 31331be6acb7SGleb Smirnoff * 1) the frame is from the jumbo receive ring 313495d67482SBill Paul * 2) the frame is from the standard receive ring 313595d67482SBill Paul */ 313695d67482SBill Paul 31371abcdbd1SAttilio Rao static int 31383f74909aSGleb Smirnoff bge_rxeof(struct bge_softc *sc) 313995d67482SBill Paul { 314095d67482SBill Paul struct ifnet *ifp; 31411abcdbd1SAttilio Rao int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; 31427f21e273SStanislav Sedov uint16_t rx_prod, rx_cons; 314395d67482SBill Paul 31440f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 31457f21e273SStanislav Sedov rx_cons = sc->bge_rx_saved_considx; 31467f21e273SStanislav Sedov rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; 31470f9bd73bSSam Leffler 31483f74909aSGleb Smirnoff /* Nothing to do. 
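 * The saved consumer index already matches the producer index from
 * the status block, so no new frames are pending.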
*/ 31497f21e273SStanislav Sedov if (rx_cons == rx_prod) 31501abcdbd1SAttilio Rao return (rx_npkts); 3151cfcb5025SOleg Bulyzhin 3152fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 315395d67482SBill Paul 3154f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 3155e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 3156f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 315715eda801SStanislav Sedov sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); 3158c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 3159c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) 3160f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 316115eda801SStanislav Sedov sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); 3162f41ac2beSBill Paul 31637f21e273SStanislav Sedov while (rx_cons != rx_prod) { 316495d67482SBill Paul struct bge_rx_bd *cur_rx; 31653f74909aSGleb Smirnoff uint32_t rxidx; 316695d67482SBill Paul struct mbuf *m = NULL; 31673f74909aSGleb Smirnoff uint16_t vlan_tag = 0; 316895d67482SBill Paul int have_tag = 0; 316995d67482SBill Paul 317075719184SGleb Smirnoff #ifdef DEVICE_POLLING 317175719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 317275719184SGleb Smirnoff if (sc->rxcycles <= 0) 317375719184SGleb Smirnoff break; 317475719184SGleb Smirnoff sc->rxcycles--; 317575719184SGleb Smirnoff } 317675719184SGleb Smirnoff #endif 317775719184SGleb Smirnoff 31787f21e273SStanislav Sedov cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; 317995d67482SBill Paul 318095d67482SBill Paul rxidx = cur_rx->bge_idx; 31817f21e273SStanislav Sedov BGE_INC(rx_cons, sc->bge_return_ring_cnt); 318295d67482SBill Paul 3183cb2eacc7SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING && 3184cb2eacc7SYaroslav Tykhiy cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 318595d67482SBill Paul have_tag = 1; 318695d67482SBill Paul vlan_tag = cur_rx->bge_vlan_tag; 318795d67482SBill Paul } 318895d67482SBill Paul 318995d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 319095d67482SBill Paul jumbocnt++; 3191943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 319295d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3193943787f3SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 319495d67482SBill Paul continue; 319595d67482SBill Paul } 3196943787f3SPyun YongHyeon if (bge_newbuf_jumbo(sc, rxidx) != 0) { 3197943787f3SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 3198943787f3SPyun YongHyeon ifp->if_iqdrops++; 319995d67482SBill Paul continue; 320095d67482SBill Paul } 320103e78bd0SPyun YongHyeon BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 320295d67482SBill Paul } else { 320395d67482SBill Paul stdcnt++; 320495d67482SBill Paul if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 3205943787f3SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 320695d67482SBill Paul continue; 320795d67482SBill Paul } 3208943787f3SPyun YongHyeon m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 3209943787f3SPyun YongHyeon if (bge_newbuf_std(sc, rxidx) != 0) { 3210943787f3SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 3211943787f3SPyun YongHyeon ifp->if_iqdrops++; 321295d67482SBill Paul continue; 321395d67482SBill Paul } 321403e78bd0SPyun YongHyeon BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 321595d67482SBill Paul } 321695d67482SBill Paul 321795d67482SBill Paul ifp->if_ipackets++; 3218e65bed95SPyun YongHyeon #ifndef __NO_STRICT_ALIGNMENT 
3219e255b776SJohn Polstra /* 3220e65bed95SPyun YongHyeon * For architectures with strict alignment we must make sure 3221e65bed95SPyun YongHyeon * the payload is aligned. 3222e255b776SJohn Polstra */ 3223652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 3224e255b776SJohn Polstra bcopy(m->m_data, m->m_data + ETHER_ALIGN, 3225e255b776SJohn Polstra cur_rx->bge_len); 3226e255b776SJohn Polstra m->m_data += ETHER_ALIGN; 3227e255b776SJohn Polstra } 3228e255b776SJohn Polstra #endif 3229473851baSPaul Saab m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 323095d67482SBill Paul m->m_pkthdr.rcvif = ifp; 323195d67482SBill Paul 3232b874fdd4SYaroslav Tykhiy if (ifp->if_capenable & IFCAP_RXCSUM) { 323378178cd1SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 323495d67482SBill Paul m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 32350c8aa4eaSJung-uk Kim if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) 32360c8aa4eaSJung-uk Kim m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 323778178cd1SGleb Smirnoff } 3238d375e524SGleb Smirnoff if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3239d375e524SGleb Smirnoff m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 324095d67482SBill Paul m->m_pkthdr.csum_data = 324195d67482SBill Paul cur_rx->bge_tcp_udp_csum; 3242ee7ef91cSOleg Bulyzhin m->m_pkthdr.csum_flags |= 3243ee7ef91cSOleg Bulyzhin CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 324495d67482SBill Paul } 324595d67482SBill Paul } 324695d67482SBill Paul 324795d67482SBill Paul /* 3248673d9191SSam Leffler * If we received a packet with a vlan tag, 3249673d9191SSam Leffler * attach that information to the packet. 325095d67482SBill Paul */ 3251d147662cSGleb Smirnoff if (have_tag) { 32524e35d186SJung-uk Kim #if __FreeBSD_version > 700022 325378ba57b9SAndre Oppermann m->m_pkthdr.ether_vtag = vlan_tag; 325478ba57b9SAndre Oppermann m->m_flags |= M_VLANTAG; 32554e35d186SJung-uk Kim #else 32564e35d186SJung-uk Kim VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); 32574e35d186SJung-uk Kim if (m == NULL) 32584e35d186SJung-uk Kim continue; 32594e35d186SJung-uk Kim #endif 3260d147662cSGleb Smirnoff } 326195d67482SBill Paul 32620f9bd73bSSam Leffler BGE_UNLOCK(sc); 3263673d9191SSam Leffler (*ifp->if_input)(ifp, m); 32640f9bd73bSSam Leffler BGE_LOCK(sc); 3265d4da719cSAttilio Rao rx_npkts++; 326625e13e68SXin LI 326725e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 32688cf7d13dSAttilio Rao return (rx_npkts); 326995d67482SBill Paul } 327095d67482SBill Paul 327115eda801SStanislav Sedov bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 327215eda801SStanislav Sedov sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); 3273e65bed95SPyun YongHyeon if (stdcnt > 0) 3274f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 3275e65bed95SPyun YongHyeon sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 32764c0da0ffSGleb Smirnoff 3277c215fd77SPyun YongHyeon if (jumbocnt > 0) 3278f41ac2beSBill Paul bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 32794c0da0ffSGleb Smirnoff sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 3280f41ac2beSBill Paul 32817f21e273SStanislav Sedov sc->bge_rx_saved_considx = rx_cons; 328238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 328395d67482SBill Paul if (stdcnt) 328438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 328595d67482SBill Paul if (jumbocnt) 328638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3287f5a034f9SPyun YongHyeon #ifdef notyet 3288f5a034f9SPyun YongHyeon /* 
3289f5a034f9SPyun YongHyeon * This register wraps very quickly under heavy packet drops. 3290f5a034f9SPyun YongHyeon * If you need correct statistics, you can enable this check. 3291f5a034f9SPyun YongHyeon */ 3292f5a034f9SPyun YongHyeon if (BGE_IS_5705_PLUS(sc)) 3293f5a034f9SPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3294f5a034f9SPyun YongHyeon #endif 32951abcdbd1SAttilio Rao return (rx_npkts); 329695d67482SBill Paul } 329795d67482SBill Paul 329895d67482SBill Paul static void 32993f74909aSGleb Smirnoff bge_txeof(struct bge_softc *sc) 330095d67482SBill Paul { 330195d67482SBill Paul struct bge_tx_bd *cur_tx = NULL; 330295d67482SBill Paul struct ifnet *ifp; 330395d67482SBill Paul 33040f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 33050f9bd73bSSam Leffler 33063f74909aSGleb Smirnoff /* Nothing to do. */ 3307cfcb5025SOleg Bulyzhin if (sc->bge_tx_saved_considx == 3308cfcb5025SOleg Bulyzhin sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) 3309cfcb5025SOleg Bulyzhin return; 3310cfcb5025SOleg Bulyzhin 3311fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 331295d67482SBill Paul 3313e65bed95SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 33145c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); 331595d67482SBill Paul /* 331695d67482SBill Paul * Go through our tx ring and free mbufs for those 331795d67482SBill Paul * frames that have been sent. 331895d67482SBill Paul */ 331995d67482SBill Paul while (sc->bge_tx_saved_considx != 3320f41ac2beSBill Paul sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { 33213f74909aSGleb Smirnoff uint32_t idx = 0; 332295d67482SBill Paul 332395d67482SBill Paul idx = sc->bge_tx_saved_considx; 3324f41ac2beSBill Paul cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 332595d67482SBill Paul if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 332695d67482SBill Paul ifp->if_opackets++; 332795d67482SBill Paul if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 33280ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, 3329e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_dmamap[idx], 3330e65bed95SPyun YongHyeon BUS_DMASYNC_POSTWRITE); 33310ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, 3332f41ac2beSBill Paul sc->bge_cdata.bge_tx_dmamap[idx]); 3333e65bed95SPyun YongHyeon m_freem(sc->bge_cdata.bge_tx_chain[idx]); 3334e65bed95SPyun YongHyeon sc->bge_cdata.bge_tx_chain[idx] = NULL; 333595d67482SBill Paul } 333695d67482SBill Paul sc->bge_txcnt--; 333795d67482SBill Paul BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 333895d67482SBill Paul } 333995d67482SBill Paul 334095d67482SBill Paul if (cur_tx != NULL) 334113f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 33425b01e77cSBruce Evans if (sc->bge_txcnt == 0) 33435b01e77cSBruce Evans sc->bge_timer = 0; 334495d67482SBill Paul } 334595d67482SBill Paul 334675719184SGleb Smirnoff #ifdef DEVICE_POLLING 33471abcdbd1SAttilio Rao static int 334875719184SGleb Smirnoff bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 334975719184SGleb Smirnoff { 335075719184SGleb Smirnoff struct bge_softc *sc = ifp->if_softc; 3351366454f2SOleg Bulyzhin uint32_t statusword; 33521abcdbd1SAttilio Rao int rx_npkts = 0; 335375719184SGleb Smirnoff 33543f74909aSGleb Smirnoff BGE_LOCK(sc); 33553f74909aSGleb Smirnoff if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 33563f74909aSGleb Smirnoff BGE_UNLOCK(sc); 33571abcdbd1SAttilio Rao return (rx_npkts); 33583f74909aSGleb Smirnoff } 335975719184SGleb Smirnoff 3360dab5cd05SOleg Bulyzhin 
bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3361e65bed95SPyun YongHyeon sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 3362dab5cd05SOleg Bulyzhin 33633f74909aSGleb Smirnoff statusword = atomic_readandclear_32( 33643f74909aSGleb Smirnoff &sc->bge_ldata.bge_status_block->bge_status); 3365dab5cd05SOleg Bulyzhin 3366dab5cd05SOleg Bulyzhin bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3367e65bed95SPyun YongHyeon sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 3368366454f2SOleg Bulyzhin 33690c8aa4eaSJung-uk Kim /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */ 3370366454f2SOleg Bulyzhin if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) 3371366454f2SOleg Bulyzhin sc->bge_link_evt++; 3372366454f2SOleg Bulyzhin 3373366454f2SOleg Bulyzhin if (cmd == POLL_AND_CHECK_STATUS) 3374366454f2SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 33754c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3376652ae483SGleb Smirnoff sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) 3377366454f2SOleg Bulyzhin bge_link_upd(sc); 3378366454f2SOleg Bulyzhin 3379366454f2SOleg Bulyzhin sc->rxcycles = count; 33801abcdbd1SAttilio Rao rx_npkts = bge_rxeof(sc); 338125e13e68SXin LI if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 338225e13e68SXin LI BGE_UNLOCK(sc); 33838cf7d13dSAttilio Rao return (rx_npkts); 338425e13e68SXin LI } 3385366454f2SOleg Bulyzhin bge_txeof(sc); 3386366454f2SOleg Bulyzhin if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3387366454f2SOleg Bulyzhin bge_start_locked(ifp); 33883f74909aSGleb Smirnoff 33893f74909aSGleb Smirnoff BGE_UNLOCK(sc); 33901abcdbd1SAttilio Rao return (rx_npkts); 339175719184SGleb Smirnoff } 339275719184SGleb Smirnoff #endif /* DEVICE_POLLING */ 339375719184SGleb Smirnoff 339495d67482SBill Paul static void 33953f74909aSGleb Smirnoff bge_intr(void *xsc) 339695d67482SBill Paul { 339795d67482SBill Paul struct bge_softc *sc; 339895d67482SBill Paul struct ifnet *ifp; 3399dab5cd05SOleg Bulyzhin uint32_t statusword; 340095d67482SBill Paul 340195d67482SBill Paul sc = xsc; 3402f41ac2beSBill Paul 34030f9bd73bSSam Leffler BGE_LOCK(sc); 34040f9bd73bSSam Leffler 3405dab5cd05SOleg Bulyzhin ifp = sc->bge_ifp; 3406dab5cd05SOleg Bulyzhin 340775719184SGleb Smirnoff #ifdef DEVICE_POLLING 340875719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 340975719184SGleb Smirnoff BGE_UNLOCK(sc); 341075719184SGleb Smirnoff return; 341175719184SGleb Smirnoff } 341275719184SGleb Smirnoff #endif 341375719184SGleb Smirnoff 3414f30cbfc6SScott Long /* 3415b848e032SBruce Evans * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't 3416b848e032SBruce Evans * disable interrupts by writing nonzero like we used to, since with 3417b848e032SBruce Evans * our current organization this just gives complications and 3418b848e032SBruce Evans * pessimizations for re-enabling interrupts. We used to have races 3419b848e032SBruce Evans * instead of the necessary complications. Disabling interrupts 3420b848e032SBruce Evans * would just reduce the chance of a status update while we are 3421b848e032SBruce Evans * running (by switching to the interrupt-mode coalescence 3422b848e032SBruce Evans * parameters), but this chance is already very low so it is more 3423b848e032SBruce Evans * efficient to get another interrupt than prevent it. 3424b848e032SBruce Evans * 3425b848e032SBruce Evans * We do the ack first to ensure another interrupt if there is a 3426b848e032SBruce Evans * status update after the ack. 
We don't check for the status 3427b848e032SBruce Evans * changing later because it is more efficient to get another 3428b848e032SBruce Evans * interrupt than prevent it, not quite as above (not checking is 3429b848e032SBruce Evans * a smaller optimization than not toggling the interrupt enable, 3430b848e032SBruce Evans * since checking doesn't involve PCI accesses and toggling require 3431b848e032SBruce Evans * the status check). So toggling would probably be a pessimization 3432b848e032SBruce Evans * even with MSI. It would only be needed for using a task queue. 3433b848e032SBruce Evans */ 343438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 3435b848e032SBruce Evans 3436b848e032SBruce Evans /* 3437f30cbfc6SScott Long * Do the mandatory PCI flush as well as get the link status. 3438f30cbfc6SScott Long */ 3439f30cbfc6SScott Long statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; 3440f41ac2beSBill Paul 3441f30cbfc6SScott Long /* Make sure the descriptor ring indexes are coherent. */ 3442f30cbfc6SScott Long bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 3443f30cbfc6SScott Long sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 3444f30cbfc6SScott Long 34451f313773SOleg Bulyzhin if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 34464c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 3447f30cbfc6SScott Long statusword || sc->bge_link_evt) 3448dab5cd05SOleg Bulyzhin bge_link_upd(sc); 344995d67482SBill Paul 345013f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 34513f74909aSGleb Smirnoff /* Check RX return ring producer/consumer. */ 345295d67482SBill Paul bge_rxeof(sc); 345325e13e68SXin LI } 345495d67482SBill Paul 345525e13e68SXin LI if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 34563f74909aSGleb Smirnoff /* Check TX ring producer/consumer. */ 345795d67482SBill Paul bge_txeof(sc); 345895d67482SBill Paul } 345995d67482SBill Paul 346013f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING && 346113f4c340SRobert Watson !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 34620f9bd73bSSam Leffler bge_start_locked(ifp); 34630f9bd73bSSam Leffler 346415eda801SStanislav Sedov bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 346515eda801SStanislav Sedov sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 346615eda801SStanislav Sedov 34670f9bd73bSSam Leffler BGE_UNLOCK(sc); 346895d67482SBill Paul } 346995d67482SBill Paul 347095d67482SBill Paul static void 34718cb1383cSDoug Ambrisko bge_asf_driver_up(struct bge_softc *sc) 34728cb1383cSDoug Ambrisko { 34738cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) { 34748cb1383cSDoug Ambrisko /* Send ASF heartbeat aprox. 
every 2s */ 34758cb1383cSDoug Ambrisko if (sc->bge_asf_count) 34768cb1383cSDoug Ambrisko sc->bge_asf_count --; 34778cb1383cSDoug Ambrisko else { 34788cb1383cSDoug Ambrisko sc->bge_asf_count = 5; 34798cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 34808cb1383cSDoug Ambrisko BGE_FW_DRV_ALIVE); 34818cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 34828cb1383cSDoug Ambrisko bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 34838cb1383cSDoug Ambrisko CSR_WRITE_4(sc, BGE_CPU_EVENT, 348439153c5aSJung-uk Kim CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 34858cb1383cSDoug Ambrisko } 34868cb1383cSDoug Ambrisko } 34878cb1383cSDoug Ambrisko } 34888cb1383cSDoug Ambrisko 34898cb1383cSDoug Ambrisko static void 3490b74e67fbSGleb Smirnoff bge_tick(void *xsc) 34910f9bd73bSSam Leffler { 3492b74e67fbSGleb Smirnoff struct bge_softc *sc = xsc; 349395d67482SBill Paul struct mii_data *mii = NULL; 349495d67482SBill Paul 34950f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 349695d67482SBill Paul 34975dda8085SOleg Bulyzhin /* Synchronize with possible callout reset/stop. */ 34985dda8085SOleg Bulyzhin if (callout_pending(&sc->bge_stat_ch) || 34995dda8085SOleg Bulyzhin !callout_active(&sc->bge_stat_ch)) 35005dda8085SOleg Bulyzhin return; 35015dda8085SOleg Bulyzhin 35027ee00338SJung-uk Kim if (BGE_IS_5705_PLUS(sc)) 35030434d1b8SBill Paul bge_stats_update_regs(sc); 35040434d1b8SBill Paul else 350595d67482SBill Paul bge_stats_update(sc); 350695d67482SBill Paul 3507652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 350895d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 350982b67c01SOleg Bulyzhin /* 351082b67c01SOleg Bulyzhin * Do not touch PHY if we have link up. This could break 351182b67c01SOleg Bulyzhin * IPMI/ASF mode or produce extra input errors 351282b67c01SOleg Bulyzhin * (extra errors was reported for bcm5701 & bcm5704). 351382b67c01SOleg Bulyzhin */ 351482b67c01SOleg Bulyzhin if (!sc->bge_link) 351595d67482SBill Paul mii_tick(mii); 35167b97099dSOleg Bulyzhin } else { 35177b97099dSOleg Bulyzhin /* 35187b97099dSOleg Bulyzhin * Since in TBI mode auto-polling can't be used we should poll 35197b97099dSOleg Bulyzhin * link status manually. Here we register pending link event 35207b97099dSOleg Bulyzhin * and trigger interrupt. 35217b97099dSOleg Bulyzhin */ 35227b97099dSOleg Bulyzhin #ifdef DEVICE_POLLING 35233f74909aSGleb Smirnoff /* In polling mode we poll link state in bge_poll(). */ 35247b97099dSOleg Bulyzhin if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 35257b97099dSOleg Bulyzhin #endif 35267b97099dSOleg Bulyzhin { 35277b97099dSOleg Bulyzhin sc->bge_link_evt++; 35284f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 35294f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 35307b97099dSOleg Bulyzhin BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 35314f0794ffSBjoern A. Zeeb else 35324f0794ffSBjoern A. 
Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 35337b97099dSOleg Bulyzhin } 3534dab5cd05SOleg Bulyzhin } 353595d67482SBill Paul 35368cb1383cSDoug Ambrisko bge_asf_driver_up(sc); 3537b74e67fbSGleb Smirnoff bge_watchdog(sc); 35388cb1383cSDoug Ambrisko 3539dab5cd05SOleg Bulyzhin callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 354095d67482SBill Paul } 354195d67482SBill Paul 354295d67482SBill Paul static void 35433f74909aSGleb Smirnoff bge_stats_update_regs(struct bge_softc *sc) 35440434d1b8SBill Paul { 35453f74909aSGleb Smirnoff struct ifnet *ifp; 35460434d1b8SBill Paul 3547fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 35480434d1b8SBill Paul 35496b037352SJung-uk Kim ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 35507e6e2507SJung-uk Kim offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 35517e6e2507SJung-uk Kim 3552e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); 35536b037352SJung-uk Kim ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3554e238d4eaSPyun YongHyeon ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); 35550434d1b8SBill Paul } 35560434d1b8SBill Paul 35570434d1b8SBill Paul static void 35583f74909aSGleb Smirnoff bge_stats_update(struct bge_softc *sc) 355995d67482SBill Paul { 356095d67482SBill Paul struct ifnet *ifp; 3561e907febfSPyun YongHyeon bus_size_t stats; 35627e6e2507SJung-uk Kim uint32_t cnt; /* current register value */ 356395d67482SBill Paul 3564fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 356595d67482SBill Paul 3566e907febfSPyun YongHyeon stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3567e907febfSPyun YongHyeon 3568e907febfSPyun YongHyeon #define READ_STAT(sc, stats, stat) \ 3569e907febfSPyun YongHyeon CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 357095d67482SBill Paul 35718634dfffSJung-uk Kim cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); 35726b037352SJung-uk Kim ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions); 35736fb34dd2SOleg Bulyzhin sc->bge_tx_collisions = cnt; 35746fb34dd2SOleg Bulyzhin 35756fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 35766b037352SJung-uk Kim ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards); 35776fb34dd2SOleg Bulyzhin sc->bge_rx_discards = cnt; 35786fb34dd2SOleg Bulyzhin 35796fb34dd2SOleg Bulyzhin cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 35806b037352SJung-uk Kim ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards); 35816fb34dd2SOleg Bulyzhin sc->bge_tx_discards = cnt; 358295d67482SBill Paul 3583e907febfSPyun YongHyeon #undef READ_STAT 358495d67482SBill Paul } 358595d67482SBill Paul 358695d67482SBill Paul /* 3587d375e524SGleb Smirnoff * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3588d375e524SGleb Smirnoff * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3589d375e524SGleb Smirnoff * but when such padded frames employ the bge IP/TCP checksum offload, 3590d375e524SGleb Smirnoff * the hardware checksum assist gives incorrect results (possibly 3591d375e524SGleb Smirnoff * from incorporating its own padding into the UDP/TCP checksum; who knows). 3592d375e524SGleb Smirnoff * If we pad such runts with zeros, the onboard checksum comes out correct. 
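 * (ETHER_MIN_NOPAD is the 64-byte Ethernet minimum frame length less the 4-byte CRC, i.e. 60 bytes, so the pad added below is simply 60 - m_pkthdr.len zero bytes appended to the end of the chain.)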
3593d375e524SGleb Smirnoff */ 3594d375e524SGleb Smirnoff static __inline int 3595d375e524SGleb Smirnoff bge_cksum_pad(struct mbuf *m) 3596d375e524SGleb Smirnoff { 3597d375e524SGleb Smirnoff int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 3598d375e524SGleb Smirnoff struct mbuf *last; 3599d375e524SGleb Smirnoff 3600d375e524SGleb Smirnoff /* If there's only the packet-header and we can pad there, use it. */ 3601d375e524SGleb Smirnoff if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 3602d375e524SGleb Smirnoff M_TRAILINGSPACE(m) >= padlen) { 3603d375e524SGleb Smirnoff last = m; 3604d375e524SGleb Smirnoff } else { 3605d375e524SGleb Smirnoff /* 3606d375e524SGleb Smirnoff * Walk packet chain to find last mbuf. We will either 3607d375e524SGleb Smirnoff * pad there, or append a new mbuf and pad it. 3608d375e524SGleb Smirnoff */ 3609d375e524SGleb Smirnoff for (last = m; last->m_next != NULL; last = last->m_next); 3610d375e524SGleb Smirnoff if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 3611d375e524SGleb Smirnoff /* Allocate new empty mbuf, pad it. Compact later. */ 3612d375e524SGleb Smirnoff struct mbuf *n; 3613d375e524SGleb Smirnoff 3614d375e524SGleb Smirnoff MGET(n, M_DONTWAIT, MT_DATA); 3615d375e524SGleb Smirnoff if (n == NULL) 3616d375e524SGleb Smirnoff return (ENOBUFS); 3617d375e524SGleb Smirnoff n->m_len = 0; 3618d375e524SGleb Smirnoff last->m_next = n; 3619d375e524SGleb Smirnoff last = n; 3620d375e524SGleb Smirnoff } 3621d375e524SGleb Smirnoff } 3622d375e524SGleb Smirnoff 3623d375e524SGleb Smirnoff /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 3624d375e524SGleb Smirnoff memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3625d375e524SGleb Smirnoff last->m_len += padlen; 3626d375e524SGleb Smirnoff m->m_pkthdr.len += padlen; 3627d375e524SGleb Smirnoff 3628d375e524SGleb Smirnoff return (0); 3629d375e524SGleb Smirnoff } 3630d375e524SGleb Smirnoff 3631d375e524SGleb Smirnoff /* 363295d67482SBill Paul * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 363395d67482SBill Paul * pointers to descriptors. 
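 * Returns zero on success. On failure the frame has either been freed and *m_head cleared, or left untouched for the caller to re-queue (see bge_start_locked() below).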
363495d67482SBill Paul */ 363595d67482SBill Paul static int 3636676ad2c9SGleb Smirnoff bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) 363795d67482SBill Paul { 36387e27542aSGleb Smirnoff bus_dma_segment_t segs[BGE_NSEG_NEW]; 3639f41ac2beSBill Paul bus_dmamap_t map; 3640676ad2c9SGleb Smirnoff struct bge_tx_bd *d; 3641676ad2c9SGleb Smirnoff struct mbuf *m = *m_head; 36427e27542aSGleb Smirnoff uint32_t idx = *txidx; 3643676ad2c9SGleb Smirnoff uint16_t csum_flags; 36447e27542aSGleb Smirnoff int nsegs, i, error; 364595d67482SBill Paul 36466909dc43SGleb Smirnoff csum_flags = 0; 36476909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags) { 36486909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & CSUM_IP) 36496909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_CSUM; 36506909dc43SGleb Smirnoff if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { 36516909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 36526909dc43SGleb Smirnoff if (m->m_pkthdr.len < ETHER_MIN_NOPAD && 36536909dc43SGleb Smirnoff (error = bge_cksum_pad(m)) != 0) { 36546909dc43SGleb Smirnoff m_freem(m); 36556909dc43SGleb Smirnoff *m_head = NULL; 36566909dc43SGleb Smirnoff return (error); 36576909dc43SGleb Smirnoff } 36586909dc43SGleb Smirnoff } 36596909dc43SGleb Smirnoff if (m->m_flags & M_LASTFRAG) 36606909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 36616909dc43SGleb Smirnoff else if (m->m_flags & M_FRAG) 36626909dc43SGleb Smirnoff csum_flags |= BGE_TXBDFLAG_IP_FRAG; 36636909dc43SGleb Smirnoff } 36646909dc43SGleb Smirnoff 36657e27542aSGleb Smirnoff map = sc->bge_cdata.bge_tx_dmamap[idx]; 36660ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, 3667676ad2c9SGleb Smirnoff &nsegs, BUS_DMA_NOWAIT); 36687e27542aSGleb Smirnoff if (error == EFBIG) { 36694eee14cbSMarius Strobl m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW); 3670676ad2c9SGleb Smirnoff if (m == NULL) { 3671676ad2c9SGleb Smirnoff m_freem(*m_head); 3672676ad2c9SGleb Smirnoff *m_head = NULL; 36737e27542aSGleb Smirnoff return (ENOBUFS); 36747e27542aSGleb Smirnoff } 3675676ad2c9SGleb Smirnoff *m_head = m; 36760ac56796SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, 36770ac56796SPyun YongHyeon m, segs, &nsegs, BUS_DMA_NOWAIT); 3678676ad2c9SGleb Smirnoff if (error) { 3679676ad2c9SGleb Smirnoff m_freem(m); 3680676ad2c9SGleb Smirnoff *m_head = NULL; 36817e27542aSGleb Smirnoff return (error); 36827e27542aSGleb Smirnoff } 3683676ad2c9SGleb Smirnoff } else if (error != 0) 3684676ad2c9SGleb Smirnoff return (error); 36857e27542aSGleb Smirnoff 368695d67482SBill Paul /* 368795d67482SBill Paul * Sanity check: avoid coming within 16 descriptors 368895d67482SBill Paul * of the end of the ring. 
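 * (Presumably this keeps the producer index from ever catching up with the consumer index, since a completely full ring would be indistinguishable from an empty one, and it leaves a little slack for the next frame.)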
368995d67482SBill Paul */ 36907e27542aSGleb Smirnoff if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 36910ac56796SPyun YongHyeon bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); 369295d67482SBill Paul return (ENOBUFS); 36937e27542aSGleb Smirnoff } 36947e27542aSGleb Smirnoff 36950ac56796SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); 3696e65bed95SPyun YongHyeon 36977e27542aSGleb Smirnoff for (i = 0; ; i++) { 36987e27542aSGleb Smirnoff d = &sc->bge_ldata.bge_tx_ring[idx]; 36997e27542aSGleb Smirnoff d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 37007e27542aSGleb Smirnoff d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 37017e27542aSGleb Smirnoff d->bge_len = segs[i].ds_len; 37027e27542aSGleb Smirnoff d->bge_flags = csum_flags; 37037e27542aSGleb Smirnoff if (i == nsegs - 1) 37047e27542aSGleb Smirnoff break; 37057e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 37067e27542aSGleb Smirnoff } 37077e27542aSGleb Smirnoff 37087e27542aSGleb Smirnoff /* Mark the last segment as end of packet... */ 37097e27542aSGleb Smirnoff d->bge_flags |= BGE_TXBDFLAG_END; 3710676ad2c9SGleb Smirnoff 37117e27542aSGleb Smirnoff /* ... and put VLAN tag into first segment. */ 37127e27542aSGleb Smirnoff d = &sc->bge_ldata.bge_tx_ring[*txidx]; 37134e35d186SJung-uk Kim #if __FreeBSD_version > 700022 371478ba57b9SAndre Oppermann if (m->m_flags & M_VLANTAG) { 37157e27542aSGleb Smirnoff d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 371678ba57b9SAndre Oppermann d->bge_vlan_tag = m->m_pkthdr.ether_vtag; 37177e27542aSGleb Smirnoff } else 37187e27542aSGleb Smirnoff d->bge_vlan_tag = 0; 37194e35d186SJung-uk Kim #else 37204e35d186SJung-uk Kim { 37214e35d186SJung-uk Kim struct m_tag *mtag; 37224e35d186SJung-uk Kim 37234e35d186SJung-uk Kim if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) { 37244e35d186SJung-uk Kim d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 37254e35d186SJung-uk Kim d->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 37264e35d186SJung-uk Kim } else 37274e35d186SJung-uk Kim d->bge_vlan_tag = 0; 37284e35d186SJung-uk Kim } 37294e35d186SJung-uk Kim #endif 3730f41ac2beSBill Paul 3731f41ac2beSBill Paul /* 3732f41ac2beSBill Paul * Insure that the map for this transmission 3733f41ac2beSBill Paul * is placed at the array index of the last descriptor 3734f41ac2beSBill Paul * in this chain. 3735f41ac2beSBill Paul */ 37367e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 37377e27542aSGleb Smirnoff sc->bge_cdata.bge_tx_dmamap[idx] = map; 3738676ad2c9SGleb Smirnoff sc->bge_cdata.bge_tx_chain[idx] = m; 37397e27542aSGleb Smirnoff sc->bge_txcnt += nsegs; 374095d67482SBill Paul 37417e27542aSGleb Smirnoff BGE_INC(idx, BGE_TX_RING_CNT); 37427e27542aSGleb Smirnoff *txidx = idx; 374395d67482SBill Paul 374495d67482SBill Paul return (0); 374595d67482SBill Paul } 374695d67482SBill Paul 374795d67482SBill Paul /* 374895d67482SBill Paul * Main transmit routine. To avoid having to do mbuf copies, we put pointers 374995d67482SBill Paul * to the mbuf data regions directly in the transmit descriptors. 
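 * (This is the locked worker; bge_start() further below is the unlocked wrapper that just takes BGE_LOCK around it.)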
375095d67482SBill Paul */ 375195d67482SBill Paul static void 37523f74909aSGleb Smirnoff bge_start_locked(struct ifnet *ifp) 375395d67482SBill Paul { 375495d67482SBill Paul struct bge_softc *sc; 375595d67482SBill Paul struct mbuf *m_head = NULL; 375614bbd30fSGleb Smirnoff uint32_t prodidx; 3757303a718cSDag-Erling Smørgrav int count = 0; 375895d67482SBill Paul 375995d67482SBill Paul sc = ifp->if_softc; 376095d67482SBill Paul 3761dab5cd05SOleg Bulyzhin if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 376295d67482SBill Paul return; 376395d67482SBill Paul 376414bbd30fSGleb Smirnoff prodidx = sc->bge_tx_prodidx; 376595d67482SBill Paul 376695d67482SBill Paul while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 37674d665c4dSDag-Erling Smørgrav IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 376895d67482SBill Paul if (m_head == NULL) 376995d67482SBill Paul break; 377095d67482SBill Paul 377195d67482SBill Paul /* 377295d67482SBill Paul * XXX 3773b874fdd4SYaroslav Tykhiy * The code inside the if() block is never reached since we 3774b874fdd4SYaroslav Tykhiy * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3775b874fdd4SYaroslav Tykhiy * requests to checksum TCP/UDP in a fragmented packet. 3776b874fdd4SYaroslav Tykhiy * 3777b874fdd4SYaroslav Tykhiy * XXX 377895d67482SBill Paul * safety overkill. If this is a fragmented packet chain 377995d67482SBill Paul * with delayed TCP/UDP checksums, then only encapsulate 378095d67482SBill Paul * it if we have enough descriptors to handle the entire 378195d67482SBill Paul * chain at once. 378295d67482SBill Paul * (paranoia -- may not actually be needed) 378395d67482SBill Paul */ 378495d67482SBill Paul if (m_head->m_flags & M_FIRSTFRAG && 378595d67482SBill Paul m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 378695d67482SBill Paul if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 378795d67482SBill Paul m_head->m_pkthdr.csum_data + 16) { 37884d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 378913f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 379095d67482SBill Paul break; 379195d67482SBill Paul } 379295d67482SBill Paul } 379395d67482SBill Paul 379495d67482SBill Paul /* 379595d67482SBill Paul * Pack the data into the transmit ring. If we 379695d67482SBill Paul * don't have room, set the OACTIVE flag and wait 379795d67482SBill Paul * for the NIC to drain the ring. 379895d67482SBill Paul */ 3799676ad2c9SGleb Smirnoff if (bge_encap(sc, &m_head, &prodidx)) { 3800676ad2c9SGleb Smirnoff if (m_head == NULL) 3801676ad2c9SGleb Smirnoff break; 38024d665c4dSDag-Erling Smørgrav IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 380313f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_OACTIVE; 380495d67482SBill Paul break; 380595d67482SBill Paul } 3806303a718cSDag-Erling Smørgrav ++count; 380795d67482SBill Paul 380895d67482SBill Paul /* 380995d67482SBill Paul * If there's a BPF listener, bounce a copy of this frame 381095d67482SBill Paul * to him. 381195d67482SBill Paul */ 38124e35d186SJung-uk Kim #ifdef ETHER_BPF_MTAP 381345ee6ab3SJung-uk Kim ETHER_BPF_MTAP(ifp, m_head); 38144e35d186SJung-uk Kim #else 38154e35d186SJung-uk Kim BPF_MTAP(ifp, m_head); 38164e35d186SJung-uk Kim #endif 381795d67482SBill Paul } 381895d67482SBill Paul 38193f74909aSGleb Smirnoff if (count == 0) 38203f74909aSGleb Smirnoff /* No packets were dequeued. 
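 * Nothing was handed to the chip, so skip the ring sync, the producer mailbox update and the watchdog timer below.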
*/ 3821303a718cSDag-Erling Smørgrav return; 3822303a718cSDag-Erling Smørgrav 3823aa94f333SPyun YongHyeon bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 38245c1da2faSPyun YongHyeon sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); 38253f74909aSGleb Smirnoff /* Transmit. */ 382638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 38273927098fSPaul Saab /* 5700 b2 errata */ 3828e0ced696SPaul Saab if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 382938cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 383095d67482SBill Paul 383114bbd30fSGleb Smirnoff sc->bge_tx_prodidx = prodidx; 383214bbd30fSGleb Smirnoff 383395d67482SBill Paul /* 383495d67482SBill Paul * Set a timeout in case the chip goes out to lunch. 383595d67482SBill Paul */ 3836b74e67fbSGleb Smirnoff sc->bge_timer = 5; 383795d67482SBill Paul } 383895d67482SBill Paul 38390f9bd73bSSam Leffler /* 38400f9bd73bSSam Leffler * Main transmit routine. To avoid having to do mbuf copies, we put pointers 38410f9bd73bSSam Leffler * to the mbuf data regions directly in the transmit descriptors. 38420f9bd73bSSam Leffler */ 384395d67482SBill Paul static void 38443f74909aSGleb Smirnoff bge_start(struct ifnet *ifp) 384595d67482SBill Paul { 38460f9bd73bSSam Leffler struct bge_softc *sc; 38470f9bd73bSSam Leffler 38480f9bd73bSSam Leffler sc = ifp->if_softc; 38490f9bd73bSSam Leffler BGE_LOCK(sc); 38500f9bd73bSSam Leffler bge_start_locked(ifp); 38510f9bd73bSSam Leffler BGE_UNLOCK(sc); 38520f9bd73bSSam Leffler } 38530f9bd73bSSam Leffler 38540f9bd73bSSam Leffler static void 38553f74909aSGleb Smirnoff bge_init_locked(struct bge_softc *sc) 38560f9bd73bSSam Leffler { 385795d67482SBill Paul struct ifnet *ifp; 38583f74909aSGleb Smirnoff uint16_t *m; 385995d67482SBill Paul 38600f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 386195d67482SBill Paul 3862fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 386395d67482SBill Paul 386413f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) 386595d67482SBill Paul return; 386695d67482SBill Paul 386795d67482SBill Paul /* Cancel pending I/O and flush buffers. */ 386895d67482SBill Paul bge_stop(sc); 38698cb1383cSDoug Ambrisko 38708cb1383cSDoug Ambrisko bge_stop_fw(sc); 38718cb1383cSDoug Ambrisko bge_sig_pre_reset(sc, BGE_RESET_START); 387295d67482SBill Paul bge_reset(sc); 38738cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_START); 38748cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_START); 38758cb1383cSDoug Ambrisko 387695d67482SBill Paul bge_chipinit(sc); 387795d67482SBill Paul 387895d67482SBill Paul /* 387995d67482SBill Paul * Init the various state machines, ring 388095d67482SBill Paul * control blocks and firmware. 388195d67482SBill Paul */ 388295d67482SBill Paul if (bge_blockinit(sc)) { 3883fe806fdaSPyun YongHyeon device_printf(sc->bge_dev, "initialization failure\n"); 388495d67482SBill Paul return; 388595d67482SBill Paul } 388695d67482SBill Paul 3887fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 388895d67482SBill Paul 388995d67482SBill Paul /* Specify MTU. */ 389095d67482SBill Paul CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3891cb2eacc7SYaroslav Tykhiy ETHER_HDR_LEN + ETHER_CRC_LEN + 3892cb2eacc7SYaroslav Tykhiy (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); 389395d67482SBill Paul 389495d67482SBill Paul /* Load our MAC address. 
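 * The six address bytes are handed to the chip as three htons()-swapped 16-bit words spread across the two BGE_MAC_ADDR1 registers below.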
*/ 38953f74909aSGleb Smirnoff m = (uint16_t *)IF_LLADDR(sc->bge_ifp); 389695d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 389795d67482SBill Paul CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 389895d67482SBill Paul 38993e9b1bcaSJung-uk Kim /* Program promiscuous mode. */ 39003e9b1bcaSJung-uk Kim bge_setpromisc(sc); 390195d67482SBill Paul 390295d67482SBill Paul /* Program multicast filter. */ 390395d67482SBill Paul bge_setmulti(sc); 390495d67482SBill Paul 3905cb2eacc7SYaroslav Tykhiy /* Program VLAN tag stripping. */ 3906cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 3907cb2eacc7SYaroslav Tykhiy 390895d67482SBill Paul /* Init RX ring. */ 39093ee5d7daSPyun YongHyeon if (bge_init_rx_ring_std(sc) != 0) { 39103ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); 39113ee5d7daSPyun YongHyeon bge_stop(sc); 39123ee5d7daSPyun YongHyeon return; 39133ee5d7daSPyun YongHyeon } 391495d67482SBill Paul 39150434d1b8SBill Paul /* 39160434d1b8SBill Paul * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 39170434d1b8SBill Paul * memory to insure that the chip has in fact read the first 39180434d1b8SBill Paul * entry of the ring. 39190434d1b8SBill Paul */ 39200434d1b8SBill Paul if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 39213f74909aSGleb Smirnoff uint32_t v, i; 39220434d1b8SBill Paul for (i = 0; i < 10; i++) { 39230434d1b8SBill Paul DELAY(20); 39240434d1b8SBill Paul v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 39250434d1b8SBill Paul if (v == (MCLBYTES - ETHER_ALIGN)) 39260434d1b8SBill Paul break; 39270434d1b8SBill Paul } 39280434d1b8SBill Paul if (i == 10) 3929fe806fdaSPyun YongHyeon device_printf (sc->bge_dev, 3930fe806fdaSPyun YongHyeon "5705 A0 chip failed to load RX ring\n"); 39310434d1b8SBill Paul } 39320434d1b8SBill Paul 393395d67482SBill Paul /* Init jumbo RX ring. */ 3934c215fd77SPyun YongHyeon if (ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > 3935c215fd77SPyun YongHyeon (MCLBYTES - ETHER_ALIGN)) { 39363ee5d7daSPyun YongHyeon if (bge_init_rx_ring_jumbo(sc) != 0) { 39373ee5d7daSPyun YongHyeon device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n"); 39383ee5d7daSPyun YongHyeon bge_stop(sc); 39393ee5d7daSPyun YongHyeon return; 39403ee5d7daSPyun YongHyeon } 39413ee5d7daSPyun YongHyeon } 394295d67482SBill Paul 39433f74909aSGleb Smirnoff /* Init our RX return ring index. */ 394495d67482SBill Paul sc->bge_rx_saved_considx = 0; 394595d67482SBill Paul 39467e6e2507SJung-uk Kim /* Init our RX/TX stat counters. */ 39477e6e2507SJung-uk Kim sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; 39487e6e2507SJung-uk Kim 394995d67482SBill Paul /* Init TX ring. */ 395095d67482SBill Paul bge_init_tx_ring(sc); 395195d67482SBill Paul 39523f74909aSGleb Smirnoff /* Turn on transmitter. */ 395395d67482SBill Paul BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 395495d67482SBill Paul 39553f74909aSGleb Smirnoff /* Turn on receiver. */ 395695d67482SBill Paul BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 395795d67482SBill Paul 395895d67482SBill Paul /* Tell firmware we're alive. */ 395995d67482SBill Paul BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 396095d67482SBill Paul 396175719184SGleb Smirnoff #ifdef DEVICE_POLLING 396275719184SGleb Smirnoff /* Disable interrupts if we are polling. 
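 * The PCI interrupt is masked and the IRQ0 mailbox is left non-zero, which (as noted in bge_intr() above) holds off further host interrupts while polling is active.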
*/ 396375719184SGleb Smirnoff if (ifp->if_capenable & IFCAP_POLLING) { 396475719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 396575719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 396638cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 396775719184SGleb Smirnoff } else 396875719184SGleb Smirnoff #endif 396975719184SGleb Smirnoff 397095d67482SBill Paul /* Enable host interrupts. */ 397175719184SGleb Smirnoff { 397295d67482SBill Paul BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 397395d67482SBill Paul BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 397438cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 397575719184SGleb Smirnoff } 397695d67482SBill Paul 397767d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(ifp); 397895d67482SBill Paul 397913f4c340SRobert Watson ifp->if_drv_flags |= IFF_DRV_RUNNING; 398013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 398195d67482SBill Paul 39820f9bd73bSSam Leffler callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 39830f9bd73bSSam Leffler } 39840f9bd73bSSam Leffler 39850f9bd73bSSam Leffler static void 39863f74909aSGleb Smirnoff bge_init(void *xsc) 39870f9bd73bSSam Leffler { 39880f9bd73bSSam Leffler struct bge_softc *sc = xsc; 39890f9bd73bSSam Leffler 39900f9bd73bSSam Leffler BGE_LOCK(sc); 39910f9bd73bSSam Leffler bge_init_locked(sc); 39920f9bd73bSSam Leffler BGE_UNLOCK(sc); 399395d67482SBill Paul } 399495d67482SBill Paul 399595d67482SBill Paul /* 399695d67482SBill Paul * Set media options. 399795d67482SBill Paul */ 399895d67482SBill Paul static int 39993f74909aSGleb Smirnoff bge_ifmedia_upd(struct ifnet *ifp) 400095d67482SBill Paul { 400167d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 400267d5e043SOleg Bulyzhin int res; 400367d5e043SOleg Bulyzhin 400467d5e043SOleg Bulyzhin BGE_LOCK(sc); 400567d5e043SOleg Bulyzhin res = bge_ifmedia_upd_locked(ifp); 400667d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 400767d5e043SOleg Bulyzhin 400867d5e043SOleg Bulyzhin return (res); 400967d5e043SOleg Bulyzhin } 401067d5e043SOleg Bulyzhin 401167d5e043SOleg Bulyzhin static int 401267d5e043SOleg Bulyzhin bge_ifmedia_upd_locked(struct ifnet *ifp) 401367d5e043SOleg Bulyzhin { 401467d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 401595d67482SBill Paul struct mii_data *mii; 40164f09c4c7SMarius Strobl struct mii_softc *miisc; 401795d67482SBill Paul struct ifmedia *ifm; 401895d67482SBill Paul 401967d5e043SOleg Bulyzhin BGE_LOCK_ASSERT(sc); 402067d5e043SOleg Bulyzhin 402195d67482SBill Paul ifm = &sc->bge_ifmedia; 402295d67482SBill Paul 402395d67482SBill Paul /* If this is a 1000baseX NIC, enable the TBI port. */ 4024652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 402595d67482SBill Paul if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 402695d67482SBill Paul return (EINVAL); 402795d67482SBill Paul switch(IFM_SUBTYPE(ifm->ifm_media)) { 402895d67482SBill Paul case IFM_AUTO: 4029ff50922bSDoug White /* 4030ff50922bSDoug White * The BCM5704 ASIC appears to have a special 4031ff50922bSDoug White * mechanism for programming the autoneg 4032ff50922bSDoug White * advertisement registers in TBI mode. 
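 * (Once BGE_SGDIGSTS_DONE is reported, the code below re-writes the SGDIG configuration with autoneg and pause capabilities advertised, pulses BGE_SGDIGCFG_SEND to push the new advertisement out, and then restores the configuration word.)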
4033ff50922bSDoug White */ 40340f89fde2SJung-uk Kim if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { 4035ff50922bSDoug White uint32_t sgdig; 40360f89fde2SJung-uk Kim sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); 40370f89fde2SJung-uk Kim if (sgdig & BGE_SGDIGSTS_DONE) { 4038ff50922bSDoug White CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 4039ff50922bSDoug White sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 4040ff50922bSDoug White sgdig |= BGE_SGDIGCFG_AUTO | 4041ff50922bSDoug White BGE_SGDIGCFG_PAUSE_CAP | 4042ff50922bSDoug White BGE_SGDIGCFG_ASYM_PAUSE; 4043ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, 4044ff50922bSDoug White sgdig | BGE_SGDIGCFG_SEND); 4045ff50922bSDoug White DELAY(5); 4046ff50922bSDoug White CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 4047ff50922bSDoug White } 40480f89fde2SJung-uk Kim } 404995d67482SBill Paul break; 405095d67482SBill Paul case IFM_1000_SX: 405195d67482SBill Paul if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 405295d67482SBill Paul BGE_CLRBIT(sc, BGE_MAC_MODE, 405395d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 405495d67482SBill Paul } else { 405595d67482SBill Paul BGE_SETBIT(sc, BGE_MAC_MODE, 405695d67482SBill Paul BGE_MACMODE_HALF_DUPLEX); 405795d67482SBill Paul } 405895d67482SBill Paul break; 405995d67482SBill Paul default: 406095d67482SBill Paul return (EINVAL); 406195d67482SBill Paul } 406295d67482SBill Paul return (0); 406395d67482SBill Paul } 406495d67482SBill Paul 40651493e883SOleg Bulyzhin sc->bge_link_evt++; 406695d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 40674f09c4c7SMarius Strobl if (mii->mii_instance) 40684f09c4c7SMarius Strobl LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 406995d67482SBill Paul mii_phy_reset(miisc); 407095d67482SBill Paul mii_mediachg(mii); 407195d67482SBill Paul 4072902827f6SBjoern A. Zeeb /* 4073902827f6SBjoern A. Zeeb * Force an interrupt so that we will call bge_link_upd 4074902827f6SBjoern A. Zeeb * if needed and clear any pending link state attention. 4075902827f6SBjoern A. Zeeb * Without this we are not getting any further interrupts 4076902827f6SBjoern A. Zeeb * for link state changes and thus will not UP the link and 4077902827f6SBjoern A. Zeeb * not be able to send in bge_start_locked. The only 4078902827f6SBjoern A. Zeeb * way to get things working was to receive a packet and 4079902827f6SBjoern A. Zeeb * get an RX intr. 4080902827f6SBjoern A. Zeeb * bge_tick should help for fiber cards and we might not 4081902827f6SBjoern A. Zeeb * need to do this here if BGE_FLAG_TBI is set but as 4082902827f6SBjoern A. Zeeb * we poll for fiber anyway it should not harm. 4083902827f6SBjoern A. Zeeb */ 40844f0794ffSBjoern A. Zeeb if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || 40854f0794ffSBjoern A. Zeeb sc->bge_flags & BGE_FLAG_5788) 4086902827f6SBjoern A. Zeeb BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 40874f0794ffSBjoern A. Zeeb else 408863ccfe30SBjoern A. Zeeb BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); 4089902827f6SBjoern A. Zeeb 409095d67482SBill Paul return (0); 409195d67482SBill Paul } 409295d67482SBill Paul 409395d67482SBill Paul /* 409495d67482SBill Paul * Report current media status. 
409595d67482SBill Paul */ 409695d67482SBill Paul static void 40973f74909aSGleb Smirnoff bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 409895d67482SBill Paul { 409967d5e043SOleg Bulyzhin struct bge_softc *sc = ifp->if_softc; 410095d67482SBill Paul struct mii_data *mii; 410195d67482SBill Paul 410267d5e043SOleg Bulyzhin BGE_LOCK(sc); 410395d67482SBill Paul 4104652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 410595d67482SBill Paul ifmr->ifm_status = IFM_AVALID; 410695d67482SBill Paul ifmr->ifm_active = IFM_ETHER; 410795d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_STS) & 410895d67482SBill Paul BGE_MACSTAT_TBI_PCS_SYNCHED) 410995d67482SBill Paul ifmr->ifm_status |= IFM_ACTIVE; 41104c0da0ffSGleb Smirnoff else { 41114c0da0ffSGleb Smirnoff ifmr->ifm_active |= IFM_NONE; 411267d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 41134c0da0ffSGleb Smirnoff return; 41144c0da0ffSGleb Smirnoff } 411595d67482SBill Paul ifmr->ifm_active |= IFM_1000_SX; 411695d67482SBill Paul if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 411795d67482SBill Paul ifmr->ifm_active |= IFM_HDX; 411895d67482SBill Paul else 411995d67482SBill Paul ifmr->ifm_active |= IFM_FDX; 412067d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 412195d67482SBill Paul return; 412295d67482SBill Paul } 412395d67482SBill Paul 412495d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 412595d67482SBill Paul mii_pollstat(mii); 412695d67482SBill Paul ifmr->ifm_active = mii->mii_media_active; 412795d67482SBill Paul ifmr->ifm_status = mii->mii_media_status; 412867d5e043SOleg Bulyzhin 412967d5e043SOleg Bulyzhin BGE_UNLOCK(sc); 413095d67482SBill Paul } 413195d67482SBill Paul 413295d67482SBill Paul static int 41333f74909aSGleb Smirnoff bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 413495d67482SBill Paul { 413595d67482SBill Paul struct bge_softc *sc = ifp->if_softc; 413695d67482SBill Paul struct ifreq *ifr = (struct ifreq *) data; 413795d67482SBill Paul struct mii_data *mii; 4138f9004b6dSJung-uk Kim int flags, mask, error = 0; 413995d67482SBill Paul 414095d67482SBill Paul switch (command) { 414195d67482SBill Paul case SIOCSIFMTU: 41424c0da0ffSGleb Smirnoff if (ifr->ifr_mtu < ETHERMIN || 41434c0da0ffSGleb Smirnoff ((BGE_IS_JUMBO_CAPABLE(sc)) && 41444c0da0ffSGleb Smirnoff ifr->ifr_mtu > BGE_JUMBO_MTU) || 41454c0da0ffSGleb Smirnoff ((!BGE_IS_JUMBO_CAPABLE(sc)) && 41464c0da0ffSGleb Smirnoff ifr->ifr_mtu > ETHERMTU)) 414795d67482SBill Paul error = EINVAL; 41484c0da0ffSGleb Smirnoff else if (ifp->if_mtu != ifr->ifr_mtu) { 414995d67482SBill Paul ifp->if_mtu = ifr->ifr_mtu; 415013f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 415195d67482SBill Paul bge_init(sc); 415295d67482SBill Paul } 415395d67482SBill Paul break; 415495d67482SBill Paul case SIOCSIFFLAGS: 41550f9bd73bSSam Leffler BGE_LOCK(sc); 415695d67482SBill Paul if (ifp->if_flags & IFF_UP) { 415795d67482SBill Paul /* 415895d67482SBill Paul * If only the state of the PROMISC flag changed, 415995d67482SBill Paul * then just use the 'set promisc mode' command 416095d67482SBill Paul * instead of reinitializing the entire NIC. Doing 416195d67482SBill Paul * a full re-init means reloading the firmware and 416295d67482SBill Paul * waiting for it to start up, which may take a 4163d183af7fSRuslan Ermilov * second or two. Similarly for ALLMULTI. 
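 * Any other flag change while the interface is running is simply recorded in bge_if_flags without touching the hardware.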
416495d67482SBill Paul */ 4165f9004b6dSJung-uk Kim if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 4166f9004b6dSJung-uk Kim flags = ifp->if_flags ^ sc->bge_if_flags; 41673e9b1bcaSJung-uk Kim if (flags & IFF_PROMISC) 41683e9b1bcaSJung-uk Kim bge_setpromisc(sc); 4169f9004b6dSJung-uk Kim if (flags & IFF_ALLMULTI) 4170d183af7fSRuslan Ermilov bge_setmulti(sc); 417195d67482SBill Paul } else 41720f9bd73bSSam Leffler bge_init_locked(sc); 417395d67482SBill Paul } else { 417413f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 417595d67482SBill Paul bge_stop(sc); 417695d67482SBill Paul } 417795d67482SBill Paul } 417895d67482SBill Paul sc->bge_if_flags = ifp->if_flags; 41790f9bd73bSSam Leffler BGE_UNLOCK(sc); 418095d67482SBill Paul error = 0; 418195d67482SBill Paul break; 418295d67482SBill Paul case SIOCADDMULTI: 418395d67482SBill Paul case SIOCDELMULTI: 418413f4c340SRobert Watson if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 41850f9bd73bSSam Leffler BGE_LOCK(sc); 418695d67482SBill Paul bge_setmulti(sc); 41870f9bd73bSSam Leffler BGE_UNLOCK(sc); 418895d67482SBill Paul error = 0; 418995d67482SBill Paul } 419095d67482SBill Paul break; 419195d67482SBill Paul case SIOCSIFMEDIA: 419295d67482SBill Paul case SIOCGIFMEDIA: 4193652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 419495d67482SBill Paul error = ifmedia_ioctl(ifp, ifr, 419595d67482SBill Paul &sc->bge_ifmedia, command); 419695d67482SBill Paul } else { 419795d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 419895d67482SBill Paul error = ifmedia_ioctl(ifp, ifr, 419995d67482SBill Paul &mii->mii_media, command); 420095d67482SBill Paul } 420195d67482SBill Paul break; 420295d67482SBill Paul case SIOCSIFCAP: 420395d67482SBill Paul mask = ifr->ifr_reqcap ^ ifp->if_capenable; 420475719184SGleb Smirnoff #ifdef DEVICE_POLLING 420575719184SGleb Smirnoff if (mask & IFCAP_POLLING) { 420675719184SGleb Smirnoff if (ifr->ifr_reqcap & IFCAP_POLLING) { 420775719184SGleb Smirnoff error = ether_poll_register(bge_poll, ifp); 420875719184SGleb Smirnoff if (error) 420975719184SGleb Smirnoff return (error); 421075719184SGleb Smirnoff BGE_LOCK(sc); 421175719184SGleb Smirnoff BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 421275719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 421338cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 421475719184SGleb Smirnoff ifp->if_capenable |= IFCAP_POLLING; 421575719184SGleb Smirnoff BGE_UNLOCK(sc); 421675719184SGleb Smirnoff } else { 421775719184SGleb Smirnoff error = ether_poll_deregister(ifp); 421875719184SGleb Smirnoff /* Enable interrupt even in error case */ 421975719184SGleb Smirnoff BGE_LOCK(sc); 422075719184SGleb Smirnoff BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, 422175719184SGleb Smirnoff BGE_PCIMISCCTL_MASK_PCI_INTR); 422238cc658fSJohn Baldwin bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); 422375719184SGleb Smirnoff ifp->if_capenable &= ~IFCAP_POLLING; 422475719184SGleb Smirnoff BGE_UNLOCK(sc); 422575719184SGleb Smirnoff } 422675719184SGleb Smirnoff } 422775719184SGleb Smirnoff #endif 4228d375e524SGleb Smirnoff if (mask & IFCAP_HWCSUM) { 4229d375e524SGleb Smirnoff ifp->if_capenable ^= IFCAP_HWCSUM; 4230d375e524SGleb Smirnoff if (IFCAP_HWCSUM & ifp->if_capenable && 4231d375e524SGleb Smirnoff IFCAP_HWCSUM & ifp->if_capabilities) 4232b874fdd4SYaroslav Tykhiy ifp->if_hwassist = BGE_CSUM_FEATURES; 423395d67482SBill Paul else 4234b874fdd4SYaroslav Tykhiy ifp->if_hwassist = 0; 42354e35d186SJung-uk Kim #ifdef VLAN_CAPABILITIES 4236479b23b7SGleb Smirnoff VLAN_CAPABILITIES(ifp); 42374e35d186SJung-uk Kim #endif 423895d67482SBill Paul } 
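/* Toggling IFCAP_VLAN_MTU below forces a full reinit: bge_init_locked() programs the chip's RX MTU register from if_mtu plus the optional VLAN header, so it has to be reloaded. */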
4239cb2eacc7SYaroslav Tykhiy 4240cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_MTU) { 4241cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_MTU; 4242cb2eacc7SYaroslav Tykhiy ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4243cb2eacc7SYaroslav Tykhiy bge_init(sc); 4244cb2eacc7SYaroslav Tykhiy } 4245cb2eacc7SYaroslav Tykhiy 4246cb2eacc7SYaroslav Tykhiy if (mask & IFCAP_VLAN_HWTAGGING) { 4247cb2eacc7SYaroslav Tykhiy ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 4248cb2eacc7SYaroslav Tykhiy BGE_LOCK(sc); 4249cb2eacc7SYaroslav Tykhiy bge_setvlan(sc); 4250cb2eacc7SYaroslav Tykhiy BGE_UNLOCK(sc); 4251cb2eacc7SYaroslav Tykhiy #ifdef VLAN_CAPABILITIES 4252cb2eacc7SYaroslav Tykhiy VLAN_CAPABILITIES(ifp); 4253cb2eacc7SYaroslav Tykhiy #endif 4254cb2eacc7SYaroslav Tykhiy } 4255cb2eacc7SYaroslav Tykhiy 425695d67482SBill Paul break; 425795d67482SBill Paul default: 4258673d9191SSam Leffler error = ether_ioctl(ifp, command, data); 425995d67482SBill Paul break; 426095d67482SBill Paul } 426195d67482SBill Paul 426295d67482SBill Paul return (error); 426395d67482SBill Paul } 426495d67482SBill Paul 426595d67482SBill Paul static void 4266b74e67fbSGleb Smirnoff bge_watchdog(struct bge_softc *sc) 426795d67482SBill Paul { 4268b74e67fbSGleb Smirnoff struct ifnet *ifp; 426995d67482SBill Paul 4270b74e67fbSGleb Smirnoff BGE_LOCK_ASSERT(sc); 4271b74e67fbSGleb Smirnoff 4272b74e67fbSGleb Smirnoff if (sc->bge_timer == 0 || --sc->bge_timer) 4273b74e67fbSGleb Smirnoff return; 4274b74e67fbSGleb Smirnoff 4275b74e67fbSGleb Smirnoff ifp = sc->bge_ifp; 427695d67482SBill Paul 4277fe806fdaSPyun YongHyeon if_printf(ifp, "watchdog timeout -- resetting\n"); 427895d67482SBill Paul 427913f4c340SRobert Watson ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4280426742bfSGleb Smirnoff bge_init_locked(sc); 428195d67482SBill Paul 428295d67482SBill Paul ifp->if_oerrors++; 428395d67482SBill Paul } 428495d67482SBill Paul 428595d67482SBill Paul /* 428695d67482SBill Paul * Stop the adapter and free any mbufs allocated to the 428795d67482SBill Paul * RX and TX lists. 428895d67482SBill Paul */ 428995d67482SBill Paul static void 42903f74909aSGleb Smirnoff bge_stop(struct bge_softc *sc) 429195d67482SBill Paul { 429295d67482SBill Paul struct ifnet *ifp; 429395d67482SBill Paul struct ifmedia_entry *ifm; 429495d67482SBill Paul struct mii_data *mii = NULL; 429595d67482SBill Paul int mtmp, itmp; 429695d67482SBill Paul 42970f9bd73bSSam Leffler BGE_LOCK_ASSERT(sc); 42980f9bd73bSSam Leffler 4299fc74a9f9SBrooks Davis ifp = sc->bge_ifp; 430095d67482SBill Paul 4301652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) 430295d67482SBill Paul mii = device_get_softc(sc->bge_miibus); 430395d67482SBill Paul 43040f9bd73bSSam Leffler callout_stop(&sc->bge_stat_ch); 430595d67482SBill Paul 430644b63691SBjoern A. Zeeb /* Disable host interrupts. */ 430744b63691SBjoern A. Zeeb BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 430844b63691SBjoern A. Zeeb bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); 430944b63691SBjoern A. Zeeb 431044b63691SBjoern A. Zeeb /* 431144b63691SBjoern A. Zeeb * Tell firmware we're shutting down. 431244b63691SBjoern A. Zeeb */ 431344b63691SBjoern A. Zeeb bge_stop_fw(sc); 431444b63691SBjoern A. Zeeb bge_sig_pre_reset(sc, BGE_RESET_STOP); 431544b63691SBjoern A. Zeeb 431695d67482SBill Paul /* 43173f74909aSGleb Smirnoff * Disable all of the receiver blocks. 
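 * (The shutdown order below is the receive side first, then the transmit blocks, then the host coalescing, DMA and memory manager engines, and finally bge_reset().)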
431895d67482SBill Paul */ 431995d67482SBill Paul BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 432095d67482SBill Paul BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 432195d67482SBill Paul BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 43227ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 432395d67482SBill Paul BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 432495d67482SBill Paul BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 432595d67482SBill Paul BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 432695d67482SBill Paul BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 432795d67482SBill Paul 432895d67482SBill Paul /* 43293f74909aSGleb Smirnoff * Disable all of the transmit blocks. 433095d67482SBill Paul */ 433195d67482SBill Paul BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 433295d67482SBill Paul BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 433395d67482SBill Paul BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 433495d67482SBill Paul BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 433595d67482SBill Paul BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 43367ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 433795d67482SBill Paul BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 433895d67482SBill Paul BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 433995d67482SBill Paul 434095d67482SBill Paul /* 434195d67482SBill Paul * Shut down all of the memory managers and related 434295d67482SBill Paul * state machines. 434395d67482SBill Paul */ 434495d67482SBill Paul BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 434595d67482SBill Paul BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 43467ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) 434795d67482SBill Paul BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 43480c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 434995d67482SBill Paul CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 43507ee00338SJung-uk Kim if (!(BGE_IS_5705_PLUS(sc))) { 435195d67482SBill Paul BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 435295d67482SBill Paul BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 43530434d1b8SBill Paul } 435495d67482SBill Paul 43558cb1383cSDoug Ambrisko bge_reset(sc); 43568cb1383cSDoug Ambrisko bge_sig_legacy(sc, BGE_RESET_STOP); 43578cb1383cSDoug Ambrisko bge_sig_post_reset(sc, BGE_RESET_STOP); 43588cb1383cSDoug Ambrisko 43598cb1383cSDoug Ambrisko /* 43608cb1383cSDoug Ambrisko * Keep the ASF firmware running if up. 43618cb1383cSDoug Ambrisko */ 43628cb1383cSDoug Ambrisko if (sc->bge_asf_mode & ASF_STACKUP) 43638cb1383cSDoug Ambrisko BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 43648cb1383cSDoug Ambrisko else 436595d67482SBill Paul BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 436695d67482SBill Paul 436795d67482SBill Paul /* Free the RX lists. */ 436895d67482SBill Paul bge_free_rx_ring_std(sc); 436995d67482SBill Paul 437095d67482SBill Paul /* Free jumbo RX list. */ 43714c0da0ffSGleb Smirnoff if (BGE_IS_JUMBO_CAPABLE(sc)) 437295d67482SBill Paul bge_free_rx_ring_jumbo(sc); 437395d67482SBill Paul 437495d67482SBill Paul /* Free TX buffers. */ 437595d67482SBill Paul bge_free_tx_ring(sc); 437695d67482SBill Paul 437795d67482SBill Paul /* 437895d67482SBill Paul * Isolate/power down the PHY, but leave the media selection 437995d67482SBill Paul * unchanged so that things will be put back to normal when 438095d67482SBill Paul * we bring the interface back up. 
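 * (This is done below by saving ifm_media and if_flags, forcing IFM_ETHER|IFM_NONE through mii_mediachg(), and then restoring the saved values.)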
438195d67482SBill Paul */ 4382652ae483SGleb Smirnoff if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 438395d67482SBill Paul itmp = ifp->if_flags; 438495d67482SBill Paul ifp->if_flags |= IFF_UP; 4385dcc34049SPawel Jakub Dawidek /* 4386dcc34049SPawel Jakub Dawidek * If we are called from bge_detach(), mii is already NULL. 4387dcc34049SPawel Jakub Dawidek */ 4388dcc34049SPawel Jakub Dawidek if (mii != NULL) { 438995d67482SBill Paul ifm = mii->mii_media.ifm_cur; 439095d67482SBill Paul mtmp = ifm->ifm_media; 439195d67482SBill Paul ifm->ifm_media = IFM_ETHER | IFM_NONE; 439295d67482SBill Paul mii_mediachg(mii); 439395d67482SBill Paul ifm->ifm_media = mtmp; 4394dcc34049SPawel Jakub Dawidek } 439595d67482SBill Paul ifp->if_flags = itmp; 439695d67482SBill Paul } 439795d67482SBill Paul 439895d67482SBill Paul sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 439995d67482SBill Paul 44005dda8085SOleg Bulyzhin /* Clear MAC's link state (PHY may still have link UP). */ 44011493e883SOleg Bulyzhin if (bootverbose && sc->bge_link) 44021493e883SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 44031493e883SOleg Bulyzhin sc->bge_link = 0; 440495d67482SBill Paul 44051493e883SOleg Bulyzhin ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 440695d67482SBill Paul } 440795d67482SBill Paul 440895d67482SBill Paul /* 440995d67482SBill Paul * Stop all chip I/O so that the kernel's probe routines don't 441095d67482SBill Paul * get confused by errant DMAs when rebooting. 441195d67482SBill Paul */ 4412b6c974e8SWarner Losh static int 44133f74909aSGleb Smirnoff bge_shutdown(device_t dev) 441495d67482SBill Paul { 441595d67482SBill Paul struct bge_softc *sc; 441695d67482SBill Paul 441795d67482SBill Paul sc = device_get_softc(dev); 44180f9bd73bSSam Leffler BGE_LOCK(sc); 441995d67482SBill Paul bge_stop(sc); 442095d67482SBill Paul bge_reset(sc); 44210f9bd73bSSam Leffler BGE_UNLOCK(sc); 4422b6c974e8SWarner Losh 4423b6c974e8SWarner Losh return (0); 442495d67482SBill Paul } 442514afefa3SPawel Jakub Dawidek 442614afefa3SPawel Jakub Dawidek static int 442714afefa3SPawel Jakub Dawidek bge_suspend(device_t dev) 442814afefa3SPawel Jakub Dawidek { 442914afefa3SPawel Jakub Dawidek struct bge_softc *sc; 443014afefa3SPawel Jakub Dawidek 443114afefa3SPawel Jakub Dawidek sc = device_get_softc(dev); 443214afefa3SPawel Jakub Dawidek BGE_LOCK(sc); 443314afefa3SPawel Jakub Dawidek bge_stop(sc); 443414afefa3SPawel Jakub Dawidek BGE_UNLOCK(sc); 443514afefa3SPawel Jakub Dawidek 443614afefa3SPawel Jakub Dawidek return (0); 443714afefa3SPawel Jakub Dawidek } 443814afefa3SPawel Jakub Dawidek 443914afefa3SPawel Jakub Dawidek static int 444014afefa3SPawel Jakub Dawidek bge_resume(device_t dev) 444114afefa3SPawel Jakub Dawidek { 444214afefa3SPawel Jakub Dawidek struct bge_softc *sc; 444314afefa3SPawel Jakub Dawidek struct ifnet *ifp; 444414afefa3SPawel Jakub Dawidek 444514afefa3SPawel Jakub Dawidek sc = device_get_softc(dev); 444614afefa3SPawel Jakub Dawidek BGE_LOCK(sc); 444714afefa3SPawel Jakub Dawidek ifp = sc->bge_ifp; 444814afefa3SPawel Jakub Dawidek if (ifp->if_flags & IFF_UP) { 444914afefa3SPawel Jakub Dawidek bge_init_locked(sc); 445014afefa3SPawel Jakub Dawidek if (ifp->if_drv_flags & IFF_DRV_RUNNING) 445114afefa3SPawel Jakub Dawidek bge_start_locked(ifp); 445214afefa3SPawel Jakub Dawidek } 445314afefa3SPawel Jakub Dawidek BGE_UNLOCK(sc); 445414afefa3SPawel Jakub Dawidek 445514afefa3SPawel Jakub Dawidek return (0); 445614afefa3SPawel Jakub Dawidek } 4457dab5cd05SOleg Bulyzhin 4458dab5cd05SOleg Bulyzhin static void 44593f74909aSGleb Smirnoff 
bge_link_upd(struct bge_softc *sc) 4460dab5cd05SOleg Bulyzhin { 44611f313773SOleg Bulyzhin struct mii_data *mii; 44621f313773SOleg Bulyzhin uint32_t link, status; 4463dab5cd05SOleg Bulyzhin 4464dab5cd05SOleg Bulyzhin BGE_LOCK_ASSERT(sc); 44651f313773SOleg Bulyzhin 44663f74909aSGleb Smirnoff /* Clear 'pending link event' flag. */ 44677b97099dSOleg Bulyzhin sc->bge_link_evt = 0; 44687b97099dSOleg Bulyzhin 4469dab5cd05SOleg Bulyzhin /* 4470dab5cd05SOleg Bulyzhin * Process link state changes. 4471dab5cd05SOleg Bulyzhin * Grrr. The link status word in the status block does 4472dab5cd05SOleg Bulyzhin * not work correctly on the BCM5700 rev AX and BX chips, 4473dab5cd05SOleg Bulyzhin * according to all available information. Hence, we have 4474dab5cd05SOleg Bulyzhin * to enable MII interrupts in order to properly obtain 4475dab5cd05SOleg Bulyzhin * async link changes. Unfortunately, this also means that 4476dab5cd05SOleg Bulyzhin * we have to read the MAC status register to detect link 4477dab5cd05SOleg Bulyzhin * changes, thereby adding an additional register access to 4478dab5cd05SOleg Bulyzhin * the interrupt handler. 44791f313773SOleg Bulyzhin * 44801f313773SOleg Bulyzhin * XXX: perhaps link state detection procedure used for 44814c0da0ffSGleb Smirnoff * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions. 4482dab5cd05SOleg Bulyzhin */ 4483dab5cd05SOleg Bulyzhin 44841f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 44854c0da0ffSGleb Smirnoff sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 4486dab5cd05SOleg Bulyzhin status = CSR_READ_4(sc, BGE_MAC_STS); 4487dab5cd05SOleg Bulyzhin if (status & BGE_MACSTAT_MI_INTERRUPT) { 44881f313773SOleg Bulyzhin mii = device_get_softc(sc->bge_miibus); 44895dda8085SOleg Bulyzhin mii_pollstat(mii); 44901f313773SOleg Bulyzhin if (!sc->bge_link && 44911f313773SOleg Bulyzhin mii->mii_media_status & IFM_ACTIVE && 44921f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 44931f313773SOleg Bulyzhin sc->bge_link++; 44941f313773SOleg Bulyzhin if (bootverbose) 44951f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 44961f313773SOleg Bulyzhin } else if (sc->bge_link && 44971f313773SOleg Bulyzhin (!(mii->mii_media_status & IFM_ACTIVE) || 44981f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 44991f313773SOleg Bulyzhin sc->bge_link = 0; 45001f313773SOleg Bulyzhin if (bootverbose) 45011f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 45021f313773SOleg Bulyzhin } 45031f313773SOleg Bulyzhin 45043f74909aSGleb Smirnoff /* Clear the interrupt. 
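 * Re-enable the MI interrupt event, then ack the PHY by reading its interrupt status register and re-writing the interrupt mask.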
*/ 4505dab5cd05SOleg Bulyzhin CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4506dab5cd05SOleg Bulyzhin BGE_EVTENB_MI_INTERRUPT); 4507dab5cd05SOleg Bulyzhin bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4508dab5cd05SOleg Bulyzhin bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 4509dab5cd05SOleg Bulyzhin BRGPHY_INTRS); 4510dab5cd05SOleg Bulyzhin } 4511dab5cd05SOleg Bulyzhin return; 4512dab5cd05SOleg Bulyzhin } 4513dab5cd05SOleg Bulyzhin 4514652ae483SGleb Smirnoff if (sc->bge_flags & BGE_FLAG_TBI) { 45151f313773SOleg Bulyzhin status = CSR_READ_4(sc, BGE_MAC_STS); 45167b97099dSOleg Bulyzhin if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 45177b97099dSOleg Bulyzhin if (!sc->bge_link) { 45181f313773SOleg Bulyzhin sc->bge_link++; 45191f313773SOleg Bulyzhin if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 45201f313773SOleg Bulyzhin BGE_CLRBIT(sc, BGE_MAC_MODE, 45211f313773SOleg Bulyzhin BGE_MACMODE_TBI_SEND_CFGS); 45220c8aa4eaSJung-uk Kim CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 45231f313773SOleg Bulyzhin if (bootverbose) 45241f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 45253f74909aSGleb Smirnoff if_link_state_change(sc->bge_ifp, 45263f74909aSGleb Smirnoff LINK_STATE_UP); 45277b97099dSOleg Bulyzhin } 45281f313773SOleg Bulyzhin } else if (sc->bge_link) { 4529dab5cd05SOleg Bulyzhin sc->bge_link = 0; 45301f313773SOleg Bulyzhin if (bootverbose) 45311f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 45327b97099dSOleg Bulyzhin if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); 45331f313773SOleg Bulyzhin } 45341493e883SOleg Bulyzhin } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) { 45351f313773SOleg Bulyzhin /* 45360c8aa4eaSJung-uk Kim * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit 45370c8aa4eaSJung-uk Kim * in status word always set. Workaround this bug by reading 45380c8aa4eaSJung-uk Kim * PHY link status directly. 45391f313773SOleg Bulyzhin */ 45401f313773SOleg Bulyzhin link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0; 45411f313773SOleg Bulyzhin 45421f313773SOleg Bulyzhin if (link != sc->bge_link || 45431f313773SOleg Bulyzhin sc->bge_asicrev == BGE_ASICREV_BCM5700) { 45441f313773SOleg Bulyzhin mii = device_get_softc(sc->bge_miibus); 45455dda8085SOleg Bulyzhin mii_pollstat(mii); 45461f313773SOleg Bulyzhin if (!sc->bge_link && 45471f313773SOleg Bulyzhin mii->mii_media_status & IFM_ACTIVE && 45481f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 45491f313773SOleg Bulyzhin sc->bge_link++; 45501f313773SOleg Bulyzhin if (bootverbose) 45511f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link UP\n"); 45521f313773SOleg Bulyzhin } else if (sc->bge_link && 45531f313773SOleg Bulyzhin (!(mii->mii_media_status & IFM_ACTIVE) || 45541f313773SOleg Bulyzhin IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 45551f313773SOleg Bulyzhin sc->bge_link = 0; 45561f313773SOleg Bulyzhin if (bootverbose) 45571f313773SOleg Bulyzhin if_printf(sc->bge_ifp, "link DOWN\n"); 45581f313773SOleg Bulyzhin } 45591f313773SOleg Bulyzhin } 45600c8aa4eaSJung-uk Kim } else { 45610c8aa4eaSJung-uk Kim /* 45620c8aa4eaSJung-uk Kim * Discard link events for MII/GMII controllers 45630c8aa4eaSJung-uk Kim * if MI auto-polling is disabled. 45640c8aa4eaSJung-uk Kim */ 4565dab5cd05SOleg Bulyzhin } 4566dab5cd05SOleg Bulyzhin 45673f74909aSGleb Smirnoff /* Clear the attention. 
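 * A single write to BGE_MAC_STS below acknowledges the sync, config, MI-complete and link-changed attention bits regardless of which of the paths above handled the event.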
45786f8718a3SScott Long static void
45796f8718a3SScott Long bge_add_sysctls(struct bge_softc *sc)
45806f8718a3SScott Long {
45816f8718a3SScott Long 	struct sysctl_ctx_list *ctx;
4582763757b2SScott Long 	struct sysctl_oid_list *children, *schildren;
4583763757b2SScott Long 	struct sysctl_oid *tree;
45846f8718a3SScott Long 
45856f8718a3SScott Long 	ctx = device_get_sysctl_ctx(sc->bge_dev);
45866f8718a3SScott Long 	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
45876f8718a3SScott Long 
45886f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG
45896f8718a3SScott Long 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
45906f8718a3SScott Long 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
45916f8718a3SScott Long 	    "Debug Information");
45926f8718a3SScott Long 
45936f8718a3SScott Long 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
45946f8718a3SScott Long 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
45956f8718a3SScott Long 	    "Register Read");
45966f8718a3SScott Long 
45976f8718a3SScott Long 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
45986f8718a3SScott Long 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
45996f8718a3SScott Long 	    "Memory Read");
46006f8718a3SScott Long 
46016f8718a3SScott Long #endif
4602763757b2SScott Long 
4603d949071dSJung-uk Kim 	if (BGE_IS_5705_PLUS(sc))
4604d949071dSJung-uk Kim 		return;
4605d949071dSJung-uk Kim 
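	/* Hardware statistics counters, attached for pre-5705 chips only. */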
4606763757b2SScott Long 	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4607763757b2SScott Long 	    NULL, "BGE Statistics");
4608763757b2SScott Long 	schildren = children = SYSCTL_CHILDREN(tree);
4609763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
4610763757b2SScott Long 	    children, COSFramesDroppedDueToFilters,
4611763757b2SScott Long 	    "FramesDroppedDueToFilters");
4612763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
4613763757b2SScott Long 	    children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
4614763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
4615763757b2SScott Long 	    children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
4616763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
4617763757b2SScott Long 	    children, nicNoMoreRxBDs, "NoMoreRxBDs");
461806e83c7eSScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
461906e83c7eSScott Long 	    children, ifInDiscards, "InputDiscards");
462006e83c7eSScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
462106e83c7eSScott Long 	    children, ifInErrors, "InputErrors");
4622763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
4623763757b2SScott Long 	    children, nicRecvThresholdHit, "RecvThresholdHit");
4624763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
4625763757b2SScott Long 	    children, nicDmaReadQueueFull, "DmaReadQueueFull");
4626763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
4627763757b2SScott Long 	    children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
4628763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
4629763757b2SScott Long 	    children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
4630763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
4631763757b2SScott Long 	    children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
4632763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
4633763757b2SScott Long 	    children, nicRingStatusUpdate, "RingStatusUpdate");
4634763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
4635763757b2SScott Long 	    children, nicInterrupts, "Interrupts");
4636763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
4637763757b2SScott Long 	    children, nicAvoidedInterrupts, "AvoidedInterrupts");
4638763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
4639763757b2SScott Long 	    children, nicSendThresholdHit, "SendThresholdHit");
4640763757b2SScott Long 
4641763757b2SScott Long 	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD,
4642763757b2SScott Long 	    NULL, "BGE RX Statistics");
4643763757b2SScott Long 	children = SYSCTL_CHILDREN(tree);
4644763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
4645763757b2SScott Long 	    children, rxstats.ifHCInOctets, "Octets");
4646763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Fragments",
4647763757b2SScott Long 	    children, rxstats.etherStatsFragments, "Fragments");
4648763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
4649763757b2SScott Long 	    children, rxstats.ifHCInUcastPkts, "UcastPkts");
4650763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
4651763757b2SScott Long 	    children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
4652763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
4653763757b2SScott Long 	    children, rxstats.dot3StatsFCSErrors, "FCSErrors");
4654763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
4655763757b2SScott Long 	    children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
4656763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
4657763757b2SScott Long 	    children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
4658763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
4659763757b2SScott Long 	    children, rxstats.xoffPauseFramesReceived,
4660763757b2SScott Long 	    "xoffPauseFramesReceived");
4661763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
4662763757b2SScott Long 	    children, rxstats.macControlFramesReceived,
4663763757b2SScott Long 	    "ControlFramesReceived");
4664763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
4665763757b2SScott Long 	    children, rxstats.xoffStateEntered, "xoffStateEntered");
4666763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
4667763757b2SScott Long 	    children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
4668763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
4669763757b2SScott Long 	    children, rxstats.etherStatsJabbers, "Jabbers");
4670763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
4671763757b2SScott Long 	    children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
4672763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
467306e83c7eSScott Long 	    children, rxstats.inRangeLengthError, "inRangeLengthError");
4674763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
467506e83c7eSScott Long 	    children, rxstats.outRangeLengthError, "outRangeLengthError");
4676763757b2SScott Long 
4677763757b2SScott Long 	tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD,
4678763757b2SScott Long 	    NULL, "BGE TX Statistics");
4679763757b2SScott Long 	children = SYSCTL_CHILDREN(tree);
4680763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
4681763757b2SScott Long 	    children, txstats.ifHCOutOctets, "Octets");
4682763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
4683763757b2SScott Long 	    children, txstats.etherStatsCollisions, "Collisions");
4684763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
4685763757b2SScott Long 	    children, txstats.outXonSent, "XonSent");
4686763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
4687763757b2SScott Long 	    children, txstats.outXoffSent, "XoffSent");
4688763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
4689763757b2SScott Long 	    children, txstats.flowControlDone, "flowControlDone");
4690763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
4691763757b2SScott Long 	    children, txstats.dot3StatsInternalMacTransmitErrors,
4692763757b2SScott Long 	    "InternalMacTransmitErrors");
4693763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
4694763757b2SScott Long 	    children, txstats.dot3StatsSingleCollisionFrames,
4695763757b2SScott Long 	    "SingleCollisionFrames");
4696763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
4697763757b2SScott Long 	    children, txstats.dot3StatsMultipleCollisionFrames,
4698763757b2SScott Long 	    "MultipleCollisionFrames");
4699763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
4700763757b2SScott Long 	    children, txstats.dot3StatsDeferredTransmissions,
4701763757b2SScott Long 	    "DeferredTransmissions");
4702763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
4703763757b2SScott Long 	    children, txstats.dot3StatsExcessiveCollisions,
4704763757b2SScott Long 	    "ExcessiveCollisions");
4705763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
470606e83c7eSScott Long 	    children, txstats.dot3StatsLateCollisions,
470706e83c7eSScott Long 	    "LateCollisions");
4708763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
4709763757b2SScott Long 	    children, txstats.ifHCOutUcastPkts, "UcastPkts");
4710763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
4711763757b2SScott Long 	    children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
4712763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
4713763757b2SScott Long 	    children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
4714763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
4715763757b2SScott Long 	    children, txstats.dot3StatsCarrierSenseErrors,
4716763757b2SScott Long 	    "CarrierSenseErrors");
4717763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
4718763757b2SScott Long 	    children, txstats.ifOutDiscards, "Discards");
4719763757b2SScott Long 	BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
4720763757b2SScott Long 	    children, txstats.ifOutErrors, "Errors");
4721763757b2SScott Long }
4722763757b2SScott Long 
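/*
 * Sysctl handler for a single statistics counter.  The counter's offset
 * within struct bge_stats is passed in arg2; the value is read from the
 * NIC's statistics block through the register memory window.
 */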
4723763757b2SScott Long static int
4724763757b2SScott Long bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
4725763757b2SScott Long {
4726763757b2SScott Long 	struct bge_softc *sc;
472706e83c7eSScott Long 	uint32_t result;
4728d949071dSJung-uk Kim 	int offset;
4729763757b2SScott Long 
4730763757b2SScott Long 	sc = (struct bge_softc *)arg1;
4731763757b2SScott Long 	offset = arg2;
4732d949071dSJung-uk Kim 	result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
4733d949071dSJung-uk Kim 	    offsetof(bge_hostaddr, bge_addr_lo));
4734041b706bSDavid Malone 	return (sysctl_handle_int(oidp, &result, 0, req));
47356f8718a3SScott Long }
47366f8718a3SScott Long 
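/*
 * Debug-only sysctl handlers (BGE_REGISTER_DEBUG): dump the status block,
 * device registers and hardware flags, or read a single register or indirect
 * memory location named by the value written to the node.
 */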
47376f8718a3SScott Long #ifdef BGE_REGISTER_DEBUG
47386f8718a3SScott Long static int
47396f8718a3SScott Long bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
47406f8718a3SScott Long {
47416f8718a3SScott Long 	struct bge_softc *sc;
47426f8718a3SScott Long 	uint16_t *sbdata;
47436f8718a3SScott Long 	int error;
47446f8718a3SScott Long 	int result;
47456f8718a3SScott Long 	int i, j;
47466f8718a3SScott Long 
47476f8718a3SScott Long 	result = -1;
47486f8718a3SScott Long 	error = sysctl_handle_int(oidp, &result, 0, req);
47496f8718a3SScott Long 	if (error || (req->newptr == NULL))
47506f8718a3SScott Long 		return (error);
47516f8718a3SScott Long 
47526f8718a3SScott Long 	if (result == 1) {
47536f8718a3SScott Long 		sc = (struct bge_softc *)arg1;
47546f8718a3SScott Long 
47556f8718a3SScott Long 		sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
47566f8718a3SScott Long 		printf("Status Block:\n");
47576f8718a3SScott Long 		for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) {
47586f8718a3SScott Long 			printf("%06x:", i);
47596f8718a3SScott Long 			for (j = 0; j < 8; j++) {
47606f8718a3SScott Long 				printf(" %04x", sbdata[i]);
47616f8718a3SScott Long 				i += 4;
47626f8718a3SScott Long 			}
47636f8718a3SScott Long 			printf("\n");
47646f8718a3SScott Long 		}
47656f8718a3SScott Long 
47666f8718a3SScott Long 		printf("Registers:\n");
47670c8aa4eaSJung-uk Kim 		for (i = 0x800; i < 0xA00; ) {
47686f8718a3SScott Long 			printf("%06x:", i);
47696f8718a3SScott Long 			for (j = 0; j < 8; j++) {
47706f8718a3SScott Long 				printf(" %08x", CSR_READ_4(sc, i));
47716f8718a3SScott Long 				i += 4;
47726f8718a3SScott Long 			}
47736f8718a3SScott Long 			printf("\n");
47746f8718a3SScott Long 		}
47756f8718a3SScott Long 
47766f8718a3SScott Long 		printf("Hardware Flags:\n");
4777a5779553SStanislav Sedov 		if (BGE_IS_5755_PLUS(sc))
4778a5779553SStanislav Sedov 			printf(" - 5755 Plus\n");
47795345bad0SScott Long 		if (BGE_IS_575X_PLUS(sc))
47806f8718a3SScott Long 			printf(" - 575X Plus\n");
47815345bad0SScott Long 		if (BGE_IS_5705_PLUS(sc))
47826f8718a3SScott Long 			printf(" - 5705 Plus\n");
47835345bad0SScott Long 		if (BGE_IS_5714_FAMILY(sc))
47845345bad0SScott Long 			printf(" - 5714 Family\n");
47855345bad0SScott Long 		if (BGE_IS_5700_FAMILY(sc))
47865345bad0SScott Long 			printf(" - 5700 Family\n");
47876f8718a3SScott Long 		if (sc->bge_flags & BGE_FLAG_JUMBO)
47886f8718a3SScott Long 			printf(" - Supports Jumbo Frames\n");
47896f8718a3SScott Long 		if (sc->bge_flags & BGE_FLAG_PCIX)
47906f8718a3SScott Long 			printf(" - PCI-X Bus\n");
47916f8718a3SScott Long 		if (sc->bge_flags & BGE_FLAG_PCIE)
47926f8718a3SScott Long 			printf(" - PCI Express Bus\n");
47935ee49a3aSJung-uk Kim 		if (sc->bge_flags & BGE_FLAG_NO_3LED)
47946f8718a3SScott Long 			printf(" - No 3 LEDs\n");
47956f8718a3SScott Long 		if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
47966f8718a3SScott Long 			printf(" - RX Alignment Bug\n");
47976f8718a3SScott Long 	}
47986f8718a3SScott Long 
47996f8718a3SScott Long 	return (error);
48006f8718a3SScott Long }
48016f8718a3SScott Long 
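/*
 * Read and print a single device register; the register offset is taken from
 * the value written to the sysctl node.
 */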
48026f8718a3SScott Long static int
48036f8718a3SScott Long bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
48046f8718a3SScott Long {
48056f8718a3SScott Long 	struct bge_softc *sc;
48066f8718a3SScott Long 	int error;
48076f8718a3SScott Long 	uint16_t result;
48086f8718a3SScott Long 	uint32_t val;
48096f8718a3SScott Long 
48106f8718a3SScott Long 	result = -1;
48116f8718a3SScott Long 	error = sysctl_handle_int(oidp, &result, 0, req);
48126f8718a3SScott Long 	if (error || (req->newptr == NULL))
48136f8718a3SScott Long 		return (error);
48146f8718a3SScott Long 
48156f8718a3SScott Long 	if (result < 0x8000) {
48166f8718a3SScott Long 		sc = (struct bge_softc *)arg1;
48176f8718a3SScott Long 		val = CSR_READ_4(sc, result);
48186f8718a3SScott Long 		printf("reg 0x%06X = 0x%08X\n", result, val);
48196f8718a3SScott Long 	}
48206f8718a3SScott Long 
48216f8718a3SScott Long 	return (error);
48226f8718a3SScott Long }
48236f8718a3SScott Long 
48246f8718a3SScott Long static int
48256f8718a3SScott Long bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
48266f8718a3SScott Long {
48276f8718a3SScott Long 	struct bge_softc *sc;
48286f8718a3SScott Long 	int error;
48296f8718a3SScott Long 	uint16_t result;
48306f8718a3SScott Long 	uint32_t val;
48316f8718a3SScott Long 
48326f8718a3SScott Long 	result = -1;
48336f8718a3SScott Long 	error = sysctl_handle_int(oidp, &result, 0, req);
48346f8718a3SScott Long 	if (error || (req->newptr == NULL))
48356f8718a3SScott Long 		return (error);
48366f8718a3SScott Long 
48376f8718a3SScott Long 	if (result < 0x8000) {
48386f8718a3SScott Long 		sc = (struct bge_softc *)arg1;
48396f8718a3SScott Long 		val = bge_readmem_ind(sc, result);
48406f8718a3SScott Long 		printf("mem 0x%06X = 0x%08X\n", result, val);
48416f8718a3SScott Long 	}
48426f8718a3SScott Long 
48436f8718a3SScott Long 	return (error);
48446f8718a3SScott Long }
48456f8718a3SScott Long #endif
484638cc658fSJohn Baldwin 
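/*
 * Ethernet address discovery helpers.  bge_get_eaddr() tries each source in
 * turn (firmware/Open Firmware, NIC shared memory, NVRAM, then EEPROM) and
 * stops at the first one that succeeds, i.e. returns 0.
 */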
484738cc658fSJohn Baldwin static int
48485fea260fSMarius Strobl bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
48495fea260fSMarius Strobl {
48505fea260fSMarius Strobl 
48515fea260fSMarius Strobl 	if (sc->bge_flags & BGE_FLAG_EADDR)
48525fea260fSMarius Strobl 		return (1);
48535fea260fSMarius Strobl 
48545fea260fSMarius Strobl #ifdef __sparc64__
48555fea260fSMarius Strobl 	OF_getetheraddr(sc->bge_dev, ether_addr);
48565fea260fSMarius Strobl 	return (0);
48575fea260fSMarius Strobl #endif
48585fea260fSMarius Strobl 	return (1);
48595fea260fSMarius Strobl }
48605fea260fSMarius Strobl 
48615fea260fSMarius Strobl static int
486238cc658fSJohn Baldwin bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
486338cc658fSJohn Baldwin {
486438cc658fSJohn Baldwin 	uint32_t mac_addr;
486538cc658fSJohn Baldwin 
486638cc658fSJohn Baldwin 	mac_addr = bge_readmem_ind(sc, 0x0c14);
486738cc658fSJohn Baldwin 	if ((mac_addr >> 16) == 0x484b) {
486838cc658fSJohn Baldwin 		ether_addr[0] = (uint8_t)(mac_addr >> 8);
486938cc658fSJohn Baldwin 		ether_addr[1] = (uint8_t)mac_addr;
487038cc658fSJohn Baldwin 		mac_addr = bge_readmem_ind(sc, 0x0c18);
487138cc658fSJohn Baldwin 		ether_addr[2] = (uint8_t)(mac_addr >> 24);
487238cc658fSJohn Baldwin 		ether_addr[3] = (uint8_t)(mac_addr >> 16);
487338cc658fSJohn Baldwin 		ether_addr[4] = (uint8_t)(mac_addr >> 8);
487438cc658fSJohn Baldwin 		ether_addr[5] = (uint8_t)mac_addr;
48755fea260fSMarius Strobl 		return (0);
487638cc658fSJohn Baldwin 	}
48775fea260fSMarius Strobl 	return (1);
487838cc658fSJohn Baldwin }
487938cc658fSJohn Baldwin 
488038cc658fSJohn Baldwin static int
488138cc658fSJohn Baldwin bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
488238cc658fSJohn Baldwin {
488338cc658fSJohn Baldwin 	int mac_offset = BGE_EE_MAC_OFFSET;
488438cc658fSJohn Baldwin 
488538cc658fSJohn Baldwin 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
488638cc658fSJohn Baldwin 		mac_offset = BGE_EE_MAC_OFFSET_5906;
488738cc658fSJohn Baldwin 
48885fea260fSMarius Strobl 	return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
48895fea260fSMarius Strobl 	    ETHER_ADDR_LEN));
489038cc658fSJohn Baldwin }
489138cc658fSJohn Baldwin 
489238cc658fSJohn Baldwin static int
489338cc658fSJohn Baldwin bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
489438cc658fSJohn Baldwin {
489538cc658fSJohn Baldwin 
48965fea260fSMarius Strobl 	if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
48975fea260fSMarius Strobl 		return (1);
48985fea260fSMarius Strobl 
48995fea260fSMarius Strobl 	return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
49005fea260fSMarius Strobl 	    ETHER_ADDR_LEN));
490138cc658fSJohn Baldwin }
490238cc658fSJohn Baldwin 
490338cc658fSJohn Baldwin static int
490438cc658fSJohn Baldwin bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
490538cc658fSJohn Baldwin {
490638cc658fSJohn Baldwin 	static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
490738cc658fSJohn Baldwin 		/* NOTE: Order is critical */
49085fea260fSMarius Strobl 		bge_get_eaddr_fw,
490938cc658fSJohn Baldwin 		bge_get_eaddr_mem,
491038cc658fSJohn Baldwin 		bge_get_eaddr_nvram,
491138cc658fSJohn Baldwin 		bge_get_eaddr_eeprom,
491238cc658fSJohn Baldwin 		NULL
491338cc658fSJohn Baldwin 	};
491438cc658fSJohn Baldwin 	const bge_eaddr_fcn_t *func;
491538cc658fSJohn Baldwin 
491638cc658fSJohn Baldwin 	for (func = bge_eaddr_funcs; *func != NULL; ++func) {
491738cc658fSJohn Baldwin 		if ((*func)(sc, eaddr) == 0)
491838cc658fSJohn Baldwin 			break;
491938cc658fSJohn Baldwin 	}
492038cc658fSJohn Baldwin 	return (*func == NULL ? ENXIO : 0);
492138cc658fSJohn Baldwin }
4922