/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can have at most 4 TX rings, and the use
 * of the mini RX ring is disabled. This seems to imply that these
 * features are simply not available on the BCM5701. As a result, this
 * driver does not implement any support for the mini RX ring.
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#ifdef __sparc64__
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <machine/ver.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define	BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define	ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5722 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },

	{ 0, NULL }
};

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },
	{ BGE_CHIPID_BCM5715_A3,	"BCM5715 A3" },
	{ BGE_CHIPID_BCM5755_A0,	"BCM5755 A0" },
	{ BGE_CHIPID_BCM5755_A1,	"BCM5755 A1" },
	{ BGE_CHIPID_BCM5755_A2,	"BCM5755 A2" },
	{ BGE_CHIPID_BCM5722_A0,	"BCM5722 A0" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_CHIPID_BCM5787_A0,	"BCM5754/5787 A0" },
	{ BGE_CHIPID_BCM5787_A1,	"BCM5754/5787 A1" },
	{ BGE_CHIPID_BCM5787_A2,	"BCM5754/5787 A2" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	/* 5754 and 5787 share the same ASIC ID */
	{ BGE_ASICREV_BCM5787,		"unknown BCM5754/5787" },

	{ 0, NULL }
};

#define	BGE_IS_JUMBO_CAPABLE(sc)	((sc)->bge_flags & BGE_FLAG_JUMBO)
#define	BGE_IS_5700_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
#define	BGE_IS_5705_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_5705_PLUS)
#define	BGE_IS_5714_FAMILY(sc)		((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
#define	BGE_IS_575X_PLUS(sc)		((sc)->bge_flags & BGE_FLAG_575X_PLUS)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct bge_softc *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);
static void bge_setvlan(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static int bge_has_eeprom(struct bge_softc *);
static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define	BGE_RESET_START	1
#define	BGE_RESET_STOP	2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

/*
 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
 * leak information to untrusted users. It is also known to cause alignment
 * traps on certain architectures.
 */
#ifdef BGE_REGISTER_DEBUG
static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
#endif
static void bge_add_sysctls(struct bge_softc *);
static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_allow_asf = 1;

TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
    "Allow ASF mode if available");
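/*
 * Being declared via TUNABLE_INT() and a read-only sysctl, the ASF knob
 * above can only be changed at boot time. A minimal sketch of disabling
 * it from loader.conf(5), assuming the default of 1 is not wanted:
 *
 *	hw.bge.allow_asf="0"
 */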
#define	SPARC64_BLADE_1500_MODEL	"SUNW,Sun-Blade-1500"
#define	SPARC64_BLADE_1500_PATH_BGE	"/pci@1f,700000/network@2"
#define	SPARC64_BLADE_2500_MODEL	"SUNW,Sun-Blade-2500"
#define	SPARC64_BLADE_2500_PATH_BGE	"/pci@1c,600000/network@3"
#define	SPARC64_OFW_SUBVENDOR		"subsystem-vendor-id"

static int
bge_has_eeprom(struct bge_softc *sc)
{
#ifdef __sparc64__
	char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)];
	device_t dev;
	uint32_t subvendor;

	dev = sc->bge_dev;

	/*
	 * The on-board BGEs found in sun4u machines aren't fitted with
	 * an EEPROM, which means that we have to obtain the MAC address
	 * via OFW and that some tests will always fail. We distinguish
	 * such BGEs by the subvendor ID, which also has to be obtained
	 * from OFW instead of the PCI configuration space as the latter
	 * indicates Broadcom as the subvendor of the netboot interface.
	 * For early Blade 1500 and 2500 we even have to check the OFW
	 * device path as the subvendor ID always defaults to Broadcom
	 * there.
	 */
	if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR,
	    &subvendor, sizeof(subvendor)) == sizeof(subvendor) &&
	    subvendor == SUN_VENDORID)
		return (0);
	memset(buf, 0, sizeof(buf));
	if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) {
		if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0)
			return (0);
		if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 &&
		    strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0)
			return (0);
	}
#endif
	return (1);
}

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;
	uint32_t val;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
	return (val);
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
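/*
 * Illustrative use of the indirect memory window helpers above (a
 * sketch, not new functionality): the ASF handshake code later in this
 * file publishes the driver magic number through this window with
 *
 *	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 *
 * Each helper restores BGE_PCI_MEMWIN_BASEADDR to 0 afterwards, so
 * callers never have to save and restore the window base themselves.
 */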
#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

static void
bge_writemem_direct(struct bge_softc *sc, int off, int val)
{
	CSR_WRITE_4(sc, off, val);
}

/*
 * Map a single buffer address.
 */
static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}
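/*
 * Typical usage sketch: the attach path is expected to fetch the
 * station address with something like
 *
 *	bge_read_eeprom(sc, (caddr_t)eaddr, BGE_EE_MAC_OFFSET + 2,
 *	    ETHER_ADDR_LEN);
 *
 * where BGE_EE_MAC_OFFSET comes from if_bgereg.h. A non-zero return
 * means at least one byte read timed out.
 */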
static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors. */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Accessing the PHY with autopolling on may trigger PCI errors. */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY write timed out\n");
		return (0);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return (ENOMEM);
	}
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}
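/*
 * A note on the m_adj(m_new, ETHER_ALIGN) adjustment above: offsetting
 * the payload start by two bytes puts the IP header on a 32-bit
 * boundary after the 14-byte Ethernet header. Chips flagged with
 * BGE_FLAG_RX_ALIGNBUG cannot tolerate the resulting unaligned DMA
 * start address, so the offset is skipped for them.
 */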
/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}
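/*
 * The extended RX buffer descriptor filled in above has room for four
 * address/length pairs. That is what allows a 9KB jumbo cluster that
 * is not physically contiguous to be posted as a single ring entry:
 * bus_dmamap_load_mbuf_sg() may return up to BGE_NSEG_JUMBO segments,
 * and the deliberately fall-through switch fills exactly the first
 * nsegs pairs, the unused lengths having been zeroed beforehand.
 */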
/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf
 * cluster, that's 1MB of memory, which is a lot. For now, we fill
 * only the first 256 ring entries and hope that our CPU is fast
 * enough to keep up with the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
	    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return (0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
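/*
 * Worked example of the hash computed above: the multicast filter is a
 * 128-bit table spread across four 32-bit registers (BGE_MAR0 through
 * BGE_MAR0 + 12). For a CRC32 whose low seven bits are, say, 0x5f,
 * bits 5-6 ((0x5f & 0x60) >> 5 == 2) select the third register and
 * bits 0-4 (0x5f & 0x1f == 31) select bit 31 within it.
 */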
static void
bge_setvlan(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/* Enable or disable VLAN tag stripping as needed. */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{

	/*
	 * Some chips don't like this so only do this if ASF is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			/* UNLOAD DONE */
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

void bge_stop_fw(struct bge_softc *);
void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}
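/*
 * Sketch of how the handshake helpers above are expected to bracket a
 * controller reset (shown for the BGE_RESET_START case; BGE_RESET_STOP
 * is used on the way down):
 *
 *	bge_stop_fw(sc);
 *	bge_sig_pre_reset(sc, BGE_RESET_START);
 *	bge_reset(sc);
 *	bge_sig_legacy(sc, BGE_RESET_START);
 *	bge_sig_post_reset(sc, BGE_RESET_START);
 *
 * This keeps ASF/IPMI firmware informed, so it can survive driver
 * initiated resets.
 */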
/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed. Skip this check when there's no
	 * EEPROM fitted, since in that case it will always
	 * fail.
	 */
	if ((sc->bge_flags & BGE_FLAG_EEPROM) &&
	    CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/*
	 * Set up the PCI DMA control register.
	 */
	dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
	    BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* Read watermark not used, 128 bytes for write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		if (BGE_IS_5714_FAMILY(sc)) {
			/* 256 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
			dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
			    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
			    BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			/* 1536 bytes for read, 384 bytes for write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
		} else {
			/* 384 bytes for read and write. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
			    BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
			    0x0F;
		}
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			/* Set ONE_DMA_AT_ONCE for hardware workaround. */
			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
			if (tmp == 6 || tmp == 7)
				dma_rw_ctl |=
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;

			/* Set PCI-X DMA write workaround. */
			dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
		}
	} else {
		/* Conventional PCI bus: 256 bytes for read and write. */
		dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
		    BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);

		if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
		    sc->bge_asicrev != BGE_ASICREV_BCM5750)
			dma_rw_ctl |= 0x0F;
	}
	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5701)
		dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
		    BGE_PCIDMARWCTL_ASRT_ALL_BE;
	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS |
	    BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);

	return (0);
}
static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	uint32_t val;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_PLUS(sc))) {
		/* Configure mbuf memory pool */
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
		else
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (BGE_IS_5705_PLUS(sc)) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			DELAY(10);
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return (ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_PLUS(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block.
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}
	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 * XXX The 5754 requires a lower threshold, so it might be a
	 * requirement of all 575x family chips. The Linux driver sets
	 * the lower threshold for all 5705 family chips as well, but there
	 * are reports that it might not need to be so strict.
	 */
	if (BGE_IS_5705_PLUS(sc))
		val = 8;
	else
		val = BGE_STD_RX_RING_CNT / 8;
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
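	/*
	 * Worked example of the 1/8th rule above: the standard RX ring
	 * has BGE_STD_RX_RING_CNT (512) descriptors, giving a replenish
	 * threshold of 512 / 8 = 64 buffers on pre-5705 chips; 5705 and
	 * newer parts use the fixed, stricter value of 8 instead.
	 */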
1438 */ 1439 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1440 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1441 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1442 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1443 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1444 vrcb += sizeof(struct bge_rcb); 1445 } 1446 1447 /* Configure TX RCB 0 (we use only the first ring) */ 1448 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1449 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1450 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1451 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1452 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1453 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1454 if (!(BGE_IS_5705_PLUS(sc))) 1455 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1456 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1457 1458 /* Disable all unused RX return rings */ 1459 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1460 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1461 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1462 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1463 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1464 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1465 BGE_RCB_FLAG_RING_DISABLED)); 1466 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1467 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1468 (i * (sizeof(uint64_t))), 0); 1469 vrcb += sizeof(struct bge_rcb); 1470 } 1471 1472 /* Initialize RX ring indexes */ 1473 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1474 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1475 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1476 1477 /* 1478 * Set up RX return ring 0 1479 * Note that the NIC address for RX return rings is 0x00000000. 1480 * The return rings live entirely within the host, so the 1481 * nicaddr field in the RCB isn't used. 1482 */ 1483 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1484 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1485 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1486 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1487 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); 1488 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1489 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1490 1491 /* Set random backoff seed for TX */ 1492 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1493 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + 1494 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + 1495 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] + 1496 BGE_TX_BACKOFF_SEED_MASK); 1497 1498 /* Set inter-packet gap */ 1499 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1500 1501 /* 1502 * Specify which ring to use for packets that don't match 1503 * any RX rules. 1504 */ 1505 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1506 1507 /* 1508 * Configure number of RX lists. One interrupt distribution 1509 * list, sixteen active lists, one bad frames class. 1510 */ 1511 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1512 1513 /* Inialize RX list placement stats mask. */ 1514 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1515 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1516 1517 /* Disable host coalescing until we get it set up */ 1518 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1519 1520 /* Poll to make sure it's shut down. 
	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_PLUS(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB |
	    BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR |
	    BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB |
	    BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB |
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
	    BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
	    BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;

	/* Enable host coalescing bug fix. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5787)
		val |= 1 << 29;

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE, val);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_PLUS(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16));
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_id == vid)
			return (v);

	panic("%s: unknown vendor %d", __func__, vid);
	return (NULL);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 *
 * Note that since the Broadcom controller contains VPD support, we
 * try to get the device name string from the controller itself instead
 * of the compiled-in string. It guarantees we'll always announce the
 * right product name. We fall back to the compiled-in string when
 * VPD is unavailable or corrupt.
 */
static int
bge_probe(device_t dev)
{
	struct bge_type *t = bge_devs;
	struct bge_softc *sc = device_get_softc(dev);
	uint16_t vid, did;

	sc->bge_dev = dev;
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	while (t->bge_vid != 0) {
		if ((vid == t->bge_vid) && (did == t->bge_did)) {
			char model[64], buf[96];
			const struct bge_revision *br;
			const struct bge_vendor *v;
			uint32_t id;

			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
			    BGE_PCIMISCCTL_ASICREV;
			br = bge_lookup_rev(id);
			v = bge_lookup_vendor(vid);
			{
#if __FreeBSD_version > 700024
				const char *pname;

				if (pci_get_vpd_ident(dev, &pname) == 0)
					snprintf(model, 64, "%s", pname);
				else
#endif
					snprintf(model, 64, "%s %s",
					    v->v_name,
					    br != NULL ? br->br_name :
					    "NetXtreme Ethernet Controller");
			}
			snprintf(buf, 96, "%s, %sASIC rev. %#04x", model,
			    br != NULL ? "" : "unknown ", id >> 16);
			device_set_desc_copy(dev, buf);
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_flags |= BGE_FLAG_NO_3LED;
			if (did == BCOM_DEVICEID_BCM5755M)
				sc->bge_flags |= BGE_FLAG_ADJUST_TRIM;
			return (0);
		}
		t++;
	}

	return (ENXIO);
}
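/*
 * Example of the two-stage revision lookup used by bge_probe() above:
 * a chip id equal to BGE_CHIPID_BCM5703_A2 hits bge_revisions[] and the
 * device is announced as "BCM5703 A2"; an unrecognized 5703 stepping
 * misses that table, matches BGE_ASICREV_BCM5703 in bge_majorrevs[],
 * and is announced as "unknown BCM5703" with its numeric ASIC revision
 * still appended to the description.
 */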
*/ 1802 if (sc->bge_cdata.bge_rx_std_ring_map) 1803 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1804 sc->bge_cdata.bge_rx_std_ring_map); 1805 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 1806 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1807 sc->bge_ldata.bge_rx_std_ring, 1808 sc->bge_cdata.bge_rx_std_ring_map); 1809 1810 if (sc->bge_cdata.bge_rx_std_ring_tag) 1811 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1812 1813 /* Destroy jumbo RX ring. */ 1814 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 1815 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1816 sc->bge_cdata.bge_rx_jumbo_ring_map); 1817 1818 if (sc->bge_cdata.bge_rx_jumbo_ring_map && 1819 sc->bge_ldata.bge_rx_jumbo_ring) 1820 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1821 sc->bge_ldata.bge_rx_jumbo_ring, 1822 sc->bge_cdata.bge_rx_jumbo_ring_map); 1823 1824 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1825 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1826 1827 /* Destroy RX return ring. */ 1828 if (sc->bge_cdata.bge_rx_return_ring_map) 1829 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1830 sc->bge_cdata.bge_rx_return_ring_map); 1831 1832 if (sc->bge_cdata.bge_rx_return_ring_map && 1833 sc->bge_ldata.bge_rx_return_ring) 1834 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1835 sc->bge_ldata.bge_rx_return_ring, 1836 sc->bge_cdata.bge_rx_return_ring_map); 1837 1838 if (sc->bge_cdata.bge_rx_return_ring_tag) 1839 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1840 1841 /* Destroy TX ring. */ 1842 if (sc->bge_cdata.bge_tx_ring_map) 1843 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1844 sc->bge_cdata.bge_tx_ring_map); 1845 1846 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 1847 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1848 sc->bge_ldata.bge_tx_ring, 1849 sc->bge_cdata.bge_tx_ring_map); 1850 1851 if (sc->bge_cdata.bge_tx_ring_tag) 1852 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1853 1854 /* Destroy status block. */ 1855 if (sc->bge_cdata.bge_status_map) 1856 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1857 sc->bge_cdata.bge_status_map); 1858 1859 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 1860 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1861 sc->bge_ldata.bge_status_block, 1862 sc->bge_cdata.bge_status_map); 1863 1864 if (sc->bge_cdata.bge_status_tag) 1865 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1866 1867 /* Destroy statistics block. */ 1868 if (sc->bge_cdata.bge_stats_map) 1869 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1870 sc->bge_cdata.bge_stats_map); 1871 1872 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 1873 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1874 sc->bge_ldata.bge_stats, 1875 sc->bge_cdata.bge_stats_map); 1876 1877 if (sc->bge_cdata.bge_stats_tag) 1878 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1879 1880 /* Destroy the parent tag. */ 1881 if (sc->bge_cdata.bge_parent_tag) 1882 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1883 } 1884 1885 static int 1886 bge_dma_alloc(device_t dev) 1887 { 1888 struct bge_dmamap_arg ctx; 1889 struct bge_softc *sc; 1890 int i, error; 1891 1892 sc = device_get_softc(dev); 1893 1894 /* 1895 * Allocate the parent bus DMA tag appropriate for PCI. 
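 * Tags created with this one as their parent inherit its address
 * and boundary restrictions (see bus_dma(9)), so the per-ring tags
 * below only need to specify their own size and alignment.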
1896 */
1897 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), /* parent */
1898 1, 0, /* alignment, boundary */
1899 BUS_SPACE_MAXADDR, /* lowaddr */
1900 BUS_SPACE_MAXADDR, /* highaddr */
1901 NULL, NULL, /* filter, filterarg */
1902 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */
1903 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1904 0, /* flags */
1905 NULL, NULL, /* lockfunc, lockarg */
1906 &sc->bge_cdata.bge_parent_tag);
1907
1908 if (error != 0) {
1909 device_printf(sc->bge_dev,
1910 "could not allocate parent dma tag\n");
1911 return (ENOMEM);
1912 }
1913
1914 /*
1915 * Create tag for RX mbufs.
1916 */
1917 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
1918 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1919 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
1920 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
1921
1922 if (error) {
1923 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1924 return (ENOMEM);
1925 }
1926
1927 /* Create DMA maps for RX buffers. */
1928 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1929 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1930 &sc->bge_cdata.bge_rx_std_dmamap[i]);
1931 if (error) {
1932 device_printf(sc->bge_dev,
1933 "can't create DMA map for RX\n");
1934 return (ENOMEM);
1935 }
1936 }
1937
1938 /* Create DMA maps for TX buffers. */
1939 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1940 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
1941 &sc->bge_cdata.bge_tx_dmamap[i]);
1942 if (error) {
1943 device_printf(sc->bge_dev,
1944 "can't create DMA map for TX\n");
1945 return (ENOMEM);
1946 }
1947 }
1948
1949 /* Create tag for standard RX ring. */
1950 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1951 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1952 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0,
1953 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag);
1954
1955 if (error) {
1956 device_printf(sc->bge_dev, "could not allocate dma tag\n");
1957 return (ENOMEM);
1958 }
1959
1960 /* Allocate DMA'able memory for standard RX ring. */
1961 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag,
1962 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT,
1963 &sc->bge_cdata.bge_rx_std_ring_map);
1964 if (error)
1965 return (ENOMEM);
1966
1967 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1968
1969 /* Load the address of the standard RX ring. */
1970 ctx.bge_maxsegs = 1;
1971 ctx.sc = sc;
1972
1973 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag,
1974 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring,
1975 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
1976
1977 if (error)
1978 return (ENOMEM);
1979
1980 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr;
1981
1982 /* Create tags for jumbo mbufs. */
1983 if (BGE_IS_JUMBO_CAPABLE(sc)) {
1984 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
1985 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
1986 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
1987 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
1988 if (error) {
1989 device_printf(sc->bge_dev,
1990 "could not allocate jumbo dma tag\n");
1991 return (ENOMEM);
1992 }
1993
1994 /* Create tag for jumbo RX ring.
*/ 1995 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1996 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1997 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1998 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1999 2000 if (error) { 2001 device_printf(sc->bge_dev, 2002 "could not allocate jumbo ring dma tag\n"); 2003 return (ENOMEM); 2004 } 2005 2006 /* Allocate DMA'able memory for jumbo RX ring. */ 2007 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2008 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 2009 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 2010 &sc->bge_cdata.bge_rx_jumbo_ring_map); 2011 if (error) 2012 return (ENOMEM); 2013 2014 /* Load the address of the jumbo RX ring. */ 2015 ctx.bge_maxsegs = 1; 2016 ctx.sc = sc; 2017 2018 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2019 sc->bge_cdata.bge_rx_jumbo_ring_map, 2020 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 2021 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2022 2023 if (error) 2024 return (ENOMEM); 2025 2026 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 2027 2028 /* Create DMA maps for jumbo RX buffers. */ 2029 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 2030 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 2031 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 2032 if (error) { 2033 device_printf(sc->bge_dev, 2034 "can't create DMA map for jumbo RX\n"); 2035 return (ENOMEM); 2036 } 2037 } 2038 2039 } 2040 2041 /* Create tag for RX return ring. */ 2042 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2043 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2044 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 2045 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 2046 2047 if (error) { 2048 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2049 return (ENOMEM); 2050 } 2051 2052 /* Allocate DMA'able memory for RX return ring. */ 2053 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 2054 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 2055 &sc->bge_cdata.bge_rx_return_ring_map); 2056 if (error) 2057 return (ENOMEM); 2058 2059 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 2060 BGE_RX_RTN_RING_SZ(sc)); 2061 2062 /* Load the address of the RX return ring. */ 2063 ctx.bge_maxsegs = 1; 2064 ctx.sc = sc; 2065 2066 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 2067 sc->bge_cdata.bge_rx_return_ring_map, 2068 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 2069 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2070 2071 if (error) 2072 return (ENOMEM); 2073 2074 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2075 2076 /* Create tag for TX ring. */ 2077 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2078 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2079 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2080 &sc->bge_cdata.bge_tx_ring_tag); 2081 2082 if (error) { 2083 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2084 return (ENOMEM); 2085 } 2086 2087 /* Allocate DMA'able memory for TX ring. */ 2088 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2089 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2090 &sc->bge_cdata.bge_tx_ring_map); 2091 if (error) 2092 return (ENOMEM); 2093 2094 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2095 2096 /* Load the address of the TX ring. 
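 * As with the ring loads above, memory obtained from
 * bus_dmamem_alloc() maps as a single segment, and the
 * bge_dma_map_addr() callback (defined earlier in this file)
 * stashes that segment's bus address in ctx.bge_busaddr.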
*/
2097 ctx.bge_maxsegs = 1;
2098 ctx.sc = sc;
2099
2100 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
2101 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
2102 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2103
2104 if (error)
2105 return (ENOMEM);
2106
2107 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;
2108
2109 /* Create tag for status block. */
2110 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2111 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2112 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
2113 NULL, NULL, &sc->bge_cdata.bge_status_tag);
2114
2115 if (error) {
2116 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2117 return (ENOMEM);
2118 }
2119
2120 /* Allocate DMA'able memory for status block. */
2121 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
2122 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
2123 &sc->bge_cdata.bge_status_map);
2124 if (error)
2125 return (ENOMEM);
2126
2127 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2128
2129 /* Load the address of the status block. */
2130 ctx.sc = sc;
2131 ctx.bge_maxsegs = 1;
2132
2133 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
2134 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
2135 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2136
2137 if (error)
2138 return (ENOMEM);
2139
2140 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;
2141
2142 /* Create tag for statistics block. */
2143 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2144 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
2145 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
2146 &sc->bge_cdata.bge_stats_tag);
2147
2148 if (error) {
2149 device_printf(sc->bge_dev, "could not allocate dma tag\n");
2150 return (ENOMEM);
2151 }
2152
2153 /* Allocate DMA'able memory for statistics block. */
2154 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
2155 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
2156 &sc->bge_cdata.bge_stats_map);
2157 if (error)
2158 return (ENOMEM);
2159
2160 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);
2161
2162 /* Load the address of the statistics block. */
2163 ctx.sc = sc;
2164 ctx.bge_maxsegs = 1;
2165
2166 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
2167 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
2168 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
2169
2170 if (error)
2171 return (ENOMEM);
2172
2173 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr;
2174
2175 return (0);
2176 }
2177
2178 #if __FreeBSD_version > 602105
2179 /*
2180 * Return true if this device has more than one port.
2181 */
2182 static int
2183 bge_has_multiple_ports(struct bge_softc *sc)
2184 {
2185 device_t dev = sc->bge_dev;
2186 u_int b, d, f, fscan, s;
2187
2188 d = pci_get_domain(dev);
2189 b = pci_get_bus(dev);
2190 s = pci_get_slot(dev);
2191 f = pci_get_function(dev);
2192 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
2193 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
2194 return (1);
2195 return (0);
2196 }
2197
2198 /*
2199 * Return true if MSI can be used with this device.
2200 */
2201 static int
2202 bge_can_use_msi(struct bge_softc *sc)
2203 {
2204 int can_use_msi = 0;
2205
2206 switch (sc->bge_asicrev) {
2207 case BGE_ASICREV_BCM5714:
2208 /*
2209 * Apparently, MSI doesn't work when this chip is configured
2210 * in single-port mode.
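 * In other words, MSI is only advertised below when
 * bge_has_multiple_ports() finds a sibling PCI function.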
2211 */
2212 if (bge_has_multiple_ports(sc))
2213 can_use_msi = 1;
2214 break;
2215 case BGE_ASICREV_BCM5750:
2216 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
2217 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
2218 can_use_msi = 1;
2219 break;
2220 case BGE_ASICREV_BCM5752:
2221 case BGE_ASICREV_BCM5780:
2222 can_use_msi = 1;
2223 break;
2224 }
2225 return (can_use_msi);
2226 }
2227 #endif
2228
2229 static int
2230 bge_attach(device_t dev)
2231 {
2232 struct ifnet *ifp;
2233 struct bge_softc *sc;
2234 uint32_t hwcfg = 0;
2235 uint32_t mac_tmp = 0;
2236 u_char eaddr[ETHER_ADDR_LEN];
2237 int error, reg, rid, trys;
2238
2239 sc = device_get_softc(dev);
2240 sc->bge_dev = dev;
2241
2242 /*
2243 * Map control/status registers.
2244 */
2245 pci_enable_busmaster(dev);
2246
2247 rid = BGE_PCI_BAR0;
2248 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
2249 RF_ACTIVE | PCI_RF_DENSE);
2250
2251 if (sc->bge_res == NULL) {
2252 device_printf(sc->bge_dev, "couldn't map memory\n");
2253 error = ENXIO;
2254 goto fail;
2255 }
2256
2257 sc->bge_btag = rman_get_bustag(sc->bge_res);
2258 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
2259
2260 /* Save ASIC rev. */
2261
2262 sc->bge_chipid =
2263 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
2264 BGE_PCIMISCCTL_ASICREV;
2265 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
2266 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
2267
2268 /*
2269 * Don't enable Ethernet@WireSpeed for the 5700 or the
2270 * 5705 A0 and A1 chips.
2271 */
2272 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
2273 sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
2274 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)
2275 sc->bge_flags |= BGE_FLAG_WIRESPEED;
2276
2277 if (bge_has_eeprom(sc))
2278 sc->bge_flags |= BGE_FLAG_EEPROM;
2279
2280 /* Save chipset family. */
2281 switch (sc->bge_asicrev) {
2282 case BGE_ASICREV_BCM5700:
2283 case BGE_ASICREV_BCM5701:
2284 case BGE_ASICREV_BCM5703:
2285 case BGE_ASICREV_BCM5704:
2286 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
2287 break;
2288 case BGE_ASICREV_BCM5714_A0:
2289 case BGE_ASICREV_BCM5780:
2290 case BGE_ASICREV_BCM5714:
2291 sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */;
2292 /* FALLTHRU */
2293 case BGE_ASICREV_BCM5750:
2294 case BGE_ASICREV_BCM5752:
2295 case BGE_ASICREV_BCM5755:
2296 case BGE_ASICREV_BCM5787:
2297 sc->bge_flags |= BGE_FLAG_575X_PLUS;
2298 /* FALLTHRU */
2299 case BGE_ASICREV_BCM5705:
2300 sc->bge_flags |= BGE_FLAG_5705_PLUS;
2301 break;
2302 }
2303
2304 /* Set various bug flags. */
2305 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
2306 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
2307 sc->bge_flags |= BGE_FLAG_CRC_BUG;
2308 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
2309 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
2310 sc->bge_flags |= BGE_FLAG_ADC_BUG;
2311 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
2312 sc->bge_flags |= BGE_FLAG_5704_A0_BUG;
2313 if (BGE_IS_5705_PLUS(sc) &&
2314 !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) {
2315 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2316 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2317 if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0)
2318 sc->bge_flags |= BGE_FLAG_JITTER_BUG;
2319 } else
2320 sc->bge_flags |= BGE_FLAG_BER_BUG;
2321 }
2322
2323 /*
2324 * Check if this is a PCI-X or PCI Express device.
2325 */
2326 #if __FreeBSD_version > 602101
2327 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
2328 /*
2329 * Found a PCI Express capabilities register, this
2330 * must be a PCI Express device.
2331 */ 2332 if (reg != 0) 2333 sc->bge_flags |= BGE_FLAG_PCIE; 2334 } else if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 2335 if (reg != 0) 2336 sc->bge_flags |= BGE_FLAG_PCIX; 2337 } 2338 2339 #else 2340 if (BGE_IS_5705_PLUS(sc)) { 2341 reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); 2342 if ((reg & 0xFF) == BGE_PCIE_CAPID) 2343 sc->bge_flags |= BGE_FLAG_PCIE; 2344 } else { 2345 /* 2346 * Check if the device is in PCI-X Mode. 2347 * (This bit is not valid on PCI Express controllers.) 2348 */ 2349 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 2350 BGE_PCISTATE_PCI_BUSMODE) == 0) 2351 sc->bge_flags |= BGE_FLAG_PCIX; 2352 } 2353 #endif 2354 2355 #if __FreeBSD_version > 602105 2356 { 2357 int msicount; 2358 2359 /* 2360 * Allocate the interrupt, using MSI if possible. These devices 2361 * support 8 MSI messages, but only the first one is used in 2362 * normal operation. 2363 */ 2364 if (bge_can_use_msi(sc)) { 2365 msicount = pci_msi_count(dev); 2366 if (msicount > 1) 2367 msicount = 1; 2368 } else 2369 msicount = 0; 2370 if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) { 2371 rid = 1; 2372 sc->bge_flags |= BGE_FLAG_MSI; 2373 } else 2374 rid = 0; 2375 } 2376 #else 2377 rid = 0; 2378 #endif 2379 2380 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2381 RF_SHAREABLE | RF_ACTIVE); 2382 2383 if (sc->bge_irq == NULL) { 2384 device_printf(sc->bge_dev, "couldn't map interrupt\n"); 2385 error = ENXIO; 2386 goto fail; 2387 } 2388 2389 BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2390 2391 /* Try to reset the chip. */ 2392 if (bge_reset(sc)) { 2393 device_printf(sc->bge_dev, "chip reset failed\n"); 2394 error = ENXIO; 2395 goto fail; 2396 } 2397 2398 sc->bge_asf_mode = 0; 2399 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2400 == BGE_MAGIC_NUMBER)) { 2401 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 2402 & BGE_HWCFG_ASF) { 2403 sc->bge_asf_mode |= ASF_ENABLE; 2404 sc->bge_asf_mode |= ASF_STACKUP; 2405 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 2406 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2407 } 2408 } 2409 } 2410 2411 /* Try to reset the chip again the nice way. */ 2412 bge_stop_fw(sc); 2413 bge_sig_pre_reset(sc, BGE_RESET_STOP); 2414 if (bge_reset(sc)) { 2415 device_printf(sc->bge_dev, "chip reset failed\n"); 2416 error = ENXIO; 2417 goto fail; 2418 } 2419 2420 bge_sig_legacy(sc, BGE_RESET_STOP); 2421 bge_sig_post_reset(sc, BGE_RESET_STOP); 2422 2423 if (bge_chipinit(sc)) { 2424 device_printf(sc->bge_dev, "chip initialization failed\n"); 2425 error = ENXIO; 2426 goto fail; 2427 } 2428 2429 #ifdef __sparc64__ 2430 if ((sc->bge_flags & BGE_FLAG_EEPROM) == 0) 2431 OF_getetheraddr(dev, eaddr); 2432 else 2433 #endif 2434 { 2435 mac_tmp = bge_readmem_ind(sc, 0x0C14); 2436 if ((mac_tmp >> 16) == 0x484B) { 2437 eaddr[0] = (u_char)(mac_tmp >> 8); 2438 eaddr[1] = (u_char)mac_tmp; 2439 mac_tmp = bge_readmem_ind(sc, 0x0C18); 2440 eaddr[2] = (u_char)(mac_tmp >> 24); 2441 eaddr[3] = (u_char)(mac_tmp >> 16); 2442 eaddr[4] = (u_char)(mac_tmp >> 8); 2443 eaddr[5] = (u_char)mac_tmp; 2444 } else if (bge_read_eeprom(sc, eaddr, 2445 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2446 device_printf(sc->bge_dev, 2447 "failed to read station address\n"); 2448 error = ENXIO; 2449 goto fail; 2450 } 2451 } 2452 2453 /* 5705 limits RX return ring to 512 entries. 
*/
2454 if (BGE_IS_5705_PLUS(sc))
2455 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
2456 else
2457 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
2458
2459 if (bge_dma_alloc(dev)) {
2460 device_printf(sc->bge_dev,
2461 "failed to allocate DMA resources\n");
2462 error = ENXIO;
2463 goto fail;
2464 }
2465
2466 /* Set default tuneable values. */
2467 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2468 sc->bge_rx_coal_ticks = 150;
2469 sc->bge_tx_coal_ticks = 150;
2470 sc->bge_rx_max_coal_bds = 10;
2471 sc->bge_tx_max_coal_bds = 10;
2472
2473 /* Set up ifnet structure */
2474 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
2475 if (ifp == NULL) {
2476 device_printf(sc->bge_dev, "failed to if_alloc()\n");
2477 error = ENXIO;
2478 goto fail;
2479 }
2480 ifp->if_softc = sc;
2481 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2482 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2483 ifp->if_ioctl = bge_ioctl;
2484 ifp->if_start = bge_start;
2485 ifp->if_init = bge_init;
2486 ifp->if_mtu = ETHERMTU;
2487 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
2488 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
2489 IFQ_SET_READY(&ifp->if_snd);
2490 ifp->if_hwassist = BGE_CSUM_FEATURES;
2491 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
2492 IFCAP_VLAN_MTU;
2493 #ifdef IFCAP_VLAN_HWCSUM
2494 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
2495 #endif
2496 ifp->if_capenable = ifp->if_capabilities;
2497 #ifdef DEVICE_POLLING
2498 ifp->if_capabilities |= IFCAP_POLLING;
2499 #endif
2500
2501 /*
2502 * 5700 B0 chips do not support checksumming correctly due
2503 * to hardware bugs.
2504 */
2505 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
2506 ifp->if_capabilities &= ~IFCAP_HWCSUM;
2507 ifp->if_capenable &= ~IFCAP_HWCSUM;
2508 ifp->if_hwassist = 0;
2509 }
2510
2511 /*
2512 * Figure out what sort of media we have by checking the
2513 * hardware config word in the first 32k of NIC internal memory,
2514 * or fall back to examining the EEPROM if necessary.
2515 * Note: on some BCM5700 cards, this value appears to be unset.
2516 * If that's the case, we have to rely on identifying the NIC
2517 * by its PCI subsystem ID, as we do below for the SysKonnect
2518 * SK-9D41.
2519 */
2520 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
2521 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2522 else if (sc->bge_flags & BGE_FLAG_EEPROM) {
2523 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
2524 sizeof(hwcfg))) {
2525 device_printf(sc->bge_dev, "failed to read EEPROM\n");
2526 error = ENXIO;
2527 goto fail;
2528 }
2529 hwcfg = ntohl(hwcfg);
2530 }
2531
2532 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2533 sc->bge_flags |= BGE_FLAG_TBI;
2534
2535 /* The SysKonnect SK-9D41 is a 1000baseSX card.
*/
2536 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
2537 sc->bge_flags |= BGE_FLAG_TBI;
2538
2539 if (sc->bge_flags & BGE_FLAG_TBI) {
2540 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2541 bge_ifmedia_sts);
2542 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
2543 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2544 0, NULL);
2545 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
2546 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
2547 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2548 } else {
2549 /*
2550 * Do transceiver setup and tell the firmware the
2551 * driver is down so we can try to get access to the
2552 * PHY during the probe if ASF is running. Retry a
2553 * couple of times if we get a conflict with the ASF
2554 * firmware accessing the PHY.
2555 */
2556 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2557 trys = 0;
2558 again:
2559 bge_asf_driver_up(sc);
2560
2561 if (mii_phy_probe(dev, &sc->bge_miibus,
2562 bge_ifmedia_upd, bge_ifmedia_sts)) {
2563 if (trys++ < 4) {
2564 device_printf(sc->bge_dev, "Try again\n");
2565 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR,
2566 BMCR_RESET);
2567 goto again;
2568 }
2569
2570 device_printf(sc->bge_dev, "MII without any PHY!\n");
2571 error = ENXIO;
2572 goto fail;
2573 }
2574
2575 /*
2576 * Now tell the firmware we are going up after probing the PHY.
2577 */
2578 if (sc->bge_asf_mode & ASF_STACKUP)
2579 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2580 }
2581
2582 /*
2583 * When using the BCM5701 in PCI-X mode, data corruption has
2584 * been observed in the first few bytes of some received packets.
2585 * Aligning the packet buffer in memory eliminates the corruption.
2586 * Unfortunately, this misaligns the packet payloads. On platforms
2587 * which do not support unaligned accesses, we will realign the
2588 * payloads by copying the received packets.
2589 */
2590 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
2591 sc->bge_flags & BGE_FLAG_PCIX)
2592 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
2593
2594 /*
2595 * Call MI attach routine.
2596 */
2597 ether_ifattach(ifp, eaddr);
2598 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
2599
2600 /*
2601 * Hookup IRQ last.
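 * The handler can fire as soon as bus_setup_intr() returns, so
 * everything it touches (rings, the ifnet, the driver lock) must
 * already be initialized at this point.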
2602 */
2603 #if __FreeBSD_version > 700030
2604 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2605 NULL, bge_intr, sc, &sc->bge_intrhand);
2606 #else
2607 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
2608 bge_intr, sc, &sc->bge_intrhand);
2609 #endif
2610
2611 if (error) {
2612 device_printf(sc->bge_dev, "couldn't set up irq\n");
2613 bge_detach(dev);
2614 return (error);
2615 }
2616
2617 bge_add_sysctls(sc);
2618
2619 return (0);
2620
2621 fail:
2622 bge_release_resources(sc);
2623
2624 return (error);
2625 }
2626
2627 static int
2628 bge_detach(device_t dev)
2629 {
2630 struct bge_softc *sc;
2631 struct ifnet *ifp;
2632
2633 sc = device_get_softc(dev);
2634 ifp = sc->bge_ifp;
2635
2636 #ifdef DEVICE_POLLING
2637 if (ifp->if_capenable & IFCAP_POLLING)
2638 ether_poll_deregister(ifp);
2639 #endif
2640
2641 BGE_LOCK(sc);
2642 bge_stop(sc);
2643 bge_reset(sc);
2644 BGE_UNLOCK(sc);
2645
2646 callout_drain(&sc->bge_stat_ch);
2647
2648 ether_ifdetach(ifp);
2649
2650 if (sc->bge_flags & BGE_FLAG_TBI) {
2651 ifmedia_removeall(&sc->bge_ifmedia);
2652 } else {
2653 bus_generic_detach(dev);
2654 device_delete_child(dev, sc->bge_miibus);
2655 }
2656
2657 bge_release_resources(sc);
2658
2659 return (0);
2660 }
2661
2662 static void
2663 bge_release_resources(struct bge_softc *sc)
2664 {
2665 device_t dev;
2666
2667 dev = sc->bge_dev;
2668
2669 if (sc->bge_intrhand != NULL)
2670 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
2671
2672 if (sc->bge_irq != NULL)
2673 bus_release_resource(dev, SYS_RES_IRQ,
2674 sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq);
2675
2676 #if __FreeBSD_version > 602105
2677 if (sc->bge_flags & BGE_FLAG_MSI)
2678 pci_release_msi(dev);
2679 #endif
2680
2681 if (sc->bge_res != NULL)
2682 bus_release_resource(dev, SYS_RES_MEMORY,
2683 BGE_PCI_BAR0, sc->bge_res);
2684
2685 if (sc->bge_ifp != NULL)
2686 if_free(sc->bge_ifp);
2687
2688 bge_dma_free(sc);
2689
2690 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
2691 BGE_LOCK_DESTROY(sc);
2692 }
2693
2694 static int
2695 bge_reset(struct bge_softc *sc)
2696 {
2697 device_t dev;
2698 uint32_t cachesize, command, pcistate, reset;
2699 void (*write_op)(struct bge_softc *, int, int);
2700 int i, val = 0;
2701
2702 dev = sc->bge_dev;
2703
2704 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc)) {
2705 if (sc->bge_flags & BGE_FLAG_PCIE)
2706 write_op = bge_writemem_direct;
2707 else
2708 write_op = bge_writemem_ind;
2709 } else
2710 write_op = bge_writereg_ind;
2711
2712 /* Save some important PCI state. */
2713 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
2714 command = pci_read_config(dev, BGE_PCI_CMD, 4);
2715 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
2716
2717 pci_write_config(dev, BGE_PCI_MISC_CTL,
2718 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
2719 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
2720
2721 /* Disable fastboot on controllers that support it. */
2722 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
2723 sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2724 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
2725 if (bootverbose)
2726 device_printf(sc->bge_dev, "Disabling fastboot\n");
2727 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
2728 }
2729
2730 /*
2731 * Write the magic number to SRAM at offset 0xB50.
2732 * When firmware finishes its initialization it will
2732 * write ~BGE_MAGIC_NUMBER to the same location.
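 * For example, assuming the stock if_bgereg.h definition of
 * BGE_MAGIC_NUMBER (0x4B657654), the completion value polled for
 * below is its one's complement, 0xB49A89AB.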
2733 */ 2734 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2735 2736 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; 2737 2738 /* XXX: Broadcom Linux driver. */ 2739 if (sc->bge_flags & BGE_FLAG_PCIE) { 2740 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ 2741 CSR_WRITE_4(sc, 0x7E2C, 0x20); 2742 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2743 /* Prevent PCIE link training during global reset */ 2744 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); 2745 reset |= 1 << 29; 2746 } 2747 } 2748 2749 /* 2750 * Set GPHY Power Down Override to leave GPHY 2751 * powered up in D0 uninitialized. 2752 */ 2753 if (BGE_IS_5705_PLUS(sc)) 2754 reset |= 0x04000000; 2755 2756 /* Issue global reset */ 2757 write_op(sc, BGE_MISC_CFG, reset); 2758 2759 DELAY(1000); 2760 2761 /* XXX: Broadcom Linux driver. */ 2762 if (sc->bge_flags & BGE_FLAG_PCIE) { 2763 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2764 uint32_t v; 2765 2766 DELAY(500000); /* wait for link training to complete */ 2767 v = pci_read_config(dev, 0xC4, 4); 2768 pci_write_config(dev, 0xC4, v | (1 << 15), 4); 2769 } 2770 /* 2771 * Set PCIE max payload size to 128 bytes and clear error 2772 * status. 2773 */ 2774 pci_write_config(dev, 0xD8, 0xF5000, 4); 2775 } 2776 2777 /* Reset some of the PCI state that got zapped by reset. */ 2778 pci_write_config(dev, BGE_PCI_MISC_CTL, 2779 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | 2780 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); 2781 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2782 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2783 write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); 2784 2785 /* Re-enable MSI, if neccesary, and enable the memory arbiter. */ 2786 if (BGE_IS_5714_FAMILY(sc)) { 2787 uint32_t val; 2788 2789 /* This chip disables MSI on reset. */ 2790 if (sc->bge_flags & BGE_FLAG_MSI) { 2791 val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2); 2792 pci_write_config(dev, BGE_PCI_MSI_CTL, 2793 val | PCIM_MSICTRL_MSI_ENABLE, 2); 2794 val = CSR_READ_4(sc, BGE_MSI_MODE); 2795 CSR_WRITE_4(sc, BGE_MSI_MODE, 2796 val | BGE_MSIMODE_ENABLE); 2797 } 2798 val = CSR_READ_4(sc, BGE_MARB_MODE); 2799 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2800 } else 2801 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2802 2803 /* 2804 * Poll until we see the 1's complement of the magic number. 2805 * This indicates that the firmware initialization is complete. 2806 * We expect this to fail if no EEPROM is fitted though. 2807 */ 2808 for (i = 0; i < BGE_TIMEOUT; i++) { 2809 DELAY(10); 2810 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2811 if (val == ~BGE_MAGIC_NUMBER) 2812 break; 2813 } 2814 2815 if ((sc->bge_flags & BGE_FLAG_EEPROM) && i == BGE_TIMEOUT) 2816 device_printf(sc->bge_dev, "firmware handshake timed out, " 2817 "found 0x%08x\n", val); 2818 2819 /* 2820 * XXX Wait for the value of the PCISTATE register to 2821 * return to its original pre-reset state. This is a 2822 * fairly good indicator of reset completion. If we don't 2823 * wait for the reset to fully complete, trying to read 2824 * from the device's non-PCI registers may yield garbage 2825 * results. 2826 */ 2827 for (i = 0; i < BGE_TIMEOUT; i++) { 2828 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2829 break; 2830 DELAY(10); 2831 } 2832 2833 if (sc->bge_flags & BGE_FLAG_PCIE) { 2834 reset = bge_readmem_ind(sc, 0x7C00); 2835 bge_writemem_ind(sc, 0x7C00, reset | (1 << 25)); 2836 } 2837 2838 /* Fix up byte swapping. 
*/ 2839 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | 2840 BGE_MODECTL_BYTESWAP_DATA); 2841 2842 /* Tell the ASF firmware we are up */ 2843 if (sc->bge_asf_mode & ASF_STACKUP) 2844 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2845 2846 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2847 2848 /* 2849 * The 5704 in TBI mode apparently needs some special 2850 * adjustment to insure the SERDES drive level is set 2851 * to 1.2V. 2852 */ 2853 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 2854 sc->bge_flags & BGE_FLAG_TBI) { 2855 uint32_t serdescfg; 2856 2857 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2858 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2859 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2860 } 2861 2862 /* XXX: Broadcom Linux driver. */ 2863 if (sc->bge_flags & BGE_FLAG_PCIE && 2864 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2865 uint32_t v; 2866 2867 v = CSR_READ_4(sc, 0x7C00); 2868 CSR_WRITE_4(sc, 0x7C00, v | (1 << 25)); 2869 } 2870 DELAY(10000); 2871 2872 return(0); 2873 } 2874 2875 /* 2876 * Frame reception handling. This is called if there's a frame 2877 * on the receive return list. 2878 * 2879 * Note: we have to be able to handle two possibilities here: 2880 * 1) the frame is from the jumbo receive ring 2881 * 2) the frame is from the standard receive ring 2882 */ 2883 2884 static void 2885 bge_rxeof(struct bge_softc *sc) 2886 { 2887 struct ifnet *ifp; 2888 int stdcnt = 0, jumbocnt = 0; 2889 2890 BGE_LOCK_ASSERT(sc); 2891 2892 /* Nothing to do. */ 2893 if (sc->bge_rx_saved_considx == 2894 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) 2895 return; 2896 2897 ifp = sc->bge_ifp; 2898 2899 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2900 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 2901 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2902 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2903 if (BGE_IS_JUMBO_CAPABLE(sc)) 2904 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2905 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD); 2906 2907 while(sc->bge_rx_saved_considx != 2908 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2909 struct bge_rx_bd *cur_rx; 2910 uint32_t rxidx; 2911 struct mbuf *m = NULL; 2912 uint16_t vlan_tag = 0; 2913 int have_tag = 0; 2914 2915 #ifdef DEVICE_POLLING 2916 if (ifp->if_capenable & IFCAP_POLLING) { 2917 if (sc->rxcycles <= 0) 2918 break; 2919 sc->rxcycles--; 2920 } 2921 #endif 2922 2923 cur_rx = 2924 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2925 2926 rxidx = cur_rx->bge_idx; 2927 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2928 2929 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING && 2930 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { 2931 have_tag = 1; 2932 vlan_tag = cur_rx->bge_vlan_tag; 2933 } 2934 2935 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2936 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2937 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2938 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2939 BUS_DMASYNC_POSTREAD); 2940 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2941 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2942 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2943 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2944 jumbocnt++; 2945 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2946 ifp->if_ierrors++; 2947 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2948 continue; 2949 } 2950 if (bge_newbuf_jumbo(sc, 2951 sc->bge_jumbo, NULL) == ENOBUFS) { 2952 ifp->if_ierrors++; 2953 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2954 continue; 2955 } 
2956 } else { 2957 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2958 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2959 sc->bge_cdata.bge_rx_std_dmamap[rxidx], 2960 BUS_DMASYNC_POSTREAD); 2961 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2962 sc->bge_cdata.bge_rx_std_dmamap[rxidx]); 2963 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2964 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2965 stdcnt++; 2966 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2967 ifp->if_ierrors++; 2968 bge_newbuf_std(sc, sc->bge_std, m); 2969 continue; 2970 } 2971 if (bge_newbuf_std(sc, sc->bge_std, 2972 NULL) == ENOBUFS) { 2973 ifp->if_ierrors++; 2974 bge_newbuf_std(sc, sc->bge_std, m); 2975 continue; 2976 } 2977 } 2978 2979 ifp->if_ipackets++; 2980 #ifndef __NO_STRICT_ALIGNMENT 2981 /* 2982 * For architectures with strict alignment we must make sure 2983 * the payload is aligned. 2984 */ 2985 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 2986 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2987 cur_rx->bge_len); 2988 m->m_data += ETHER_ALIGN; 2989 } 2990 #endif 2991 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2992 m->m_pkthdr.rcvif = ifp; 2993 2994 if (ifp->if_capenable & IFCAP_RXCSUM) { 2995 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2996 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2997 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) 2998 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2999 } 3000 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 3001 m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 3002 m->m_pkthdr.csum_data = 3003 cur_rx->bge_tcp_udp_csum; 3004 m->m_pkthdr.csum_flags |= 3005 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 3006 } 3007 } 3008 3009 /* 3010 * If we received a packet with a vlan tag, 3011 * attach that information to the packet. 3012 */ 3013 if (have_tag) { 3014 #if __FreeBSD_version > 700022 3015 m->m_pkthdr.ether_vtag = vlan_tag; 3016 m->m_flags |= M_VLANTAG; 3017 #else 3018 VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); 3019 if (m == NULL) 3020 continue; 3021 #endif 3022 } 3023 3024 BGE_UNLOCK(sc); 3025 (*ifp->if_input)(ifp, m); 3026 BGE_LOCK(sc); 3027 } 3028 3029 if (stdcnt > 0) 3030 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 3031 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 3032 3033 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) 3034 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 3035 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 3036 3037 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 3038 if (stdcnt) 3039 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 3040 if (jumbocnt) 3041 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 3042 #ifdef notyet 3043 /* 3044 * This register wraps very quickly under heavy packet drops. 3045 * If you need correct statistics, you can enable this check. 3046 */ 3047 if (BGE_IS_5705_PLUS(sc)) 3048 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3049 #endif 3050 } 3051 3052 static void 3053 bge_txeof(struct bge_softc *sc) 3054 { 3055 struct bge_tx_bd *cur_tx = NULL; 3056 struct ifnet *ifp; 3057 3058 BGE_LOCK_ASSERT(sc); 3059 3060 /* Nothing to do. */ 3061 if (sc->bge_tx_saved_considx == 3062 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) 3063 return; 3064 3065 ifp = sc->bge_ifp; 3066 3067 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 3068 sc->bge_cdata.bge_tx_ring_map, 3069 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3070 /* 3071 * Go through our tx ring and free mbufs for those 3072 * frames that have been sent. 
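 * The chip advances bge_tx_cons_idx in the status block as it
 * finishes DMA; every descriptor in [bge_tx_saved_considx,
 * cons_idx) is done, so its map can be unloaded and its mbuf freed.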
3073 */
3074 while (sc->bge_tx_saved_considx !=
3075 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) {
3076 uint32_t idx = 0;
3077
3078 idx = sc->bge_tx_saved_considx;
3079 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
3080 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3081 ifp->if_opackets++;
3082 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
3083 bus_dmamap_sync(sc->bge_cdata.bge_mtag,
3084 sc->bge_cdata.bge_tx_dmamap[idx],
3085 BUS_DMASYNC_POSTWRITE);
3086 bus_dmamap_unload(sc->bge_cdata.bge_mtag,
3087 sc->bge_cdata.bge_tx_dmamap[idx]);
3088 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
3089 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3090 }
3091 sc->bge_txcnt--;
3092 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3093 }
3094
3095 if (cur_tx != NULL)
3096 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3097 if (sc->bge_txcnt == 0)
3098 sc->bge_timer = 0;
3099 }
3100
3101 #ifdef DEVICE_POLLING
3102 static void
3103 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3104 {
3105 struct bge_softc *sc = ifp->if_softc;
3106 uint32_t statusword;
3107
3108 BGE_LOCK(sc);
3109 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3110 BGE_UNLOCK(sc);
3111 return;
3112 }
3113
3114 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3115 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3116
3117 statusword = atomic_readandclear_32(
3118 &sc->bge_ldata.bge_status_block->bge_status);
3119
3120 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3121 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3122
3123 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
3124 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
3125 sc->bge_link_evt++;
3126
3127 if (cmd == POLL_AND_CHECK_STATUS)
3128 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3129 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3130 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
3131 bge_link_upd(sc);
3132
3133 sc->rxcycles = count;
3134 bge_rxeof(sc);
3135 bge_txeof(sc);
3136 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3137 bge_start_locked(ifp);
3138
3139 BGE_UNLOCK(sc);
3140 }
3141 #endif /* DEVICE_POLLING */
3142
3143 static void
3144 bge_intr(void *xsc)
3145 {
3146 struct bge_softc *sc;
3147 struct ifnet *ifp;
3148 uint32_t statusword;
3149
3150 sc = xsc;
3151
3152 BGE_LOCK(sc);
3153
3154 ifp = sc->bge_ifp;
3155
3156 #ifdef DEVICE_POLLING
3157 if (ifp->if_capenable & IFCAP_POLLING) {
3158 BGE_UNLOCK(sc);
3159 return;
3160 }
3161 #endif
3162
3163 /*
3164 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
3165 * disable interrupts by writing nonzero like we used to, since with
3166 * our current organization this just gives complications and
3167 * pessimizations for re-enabling interrupts. We used to have races
3168 * instead of the necessary complications. Disabling interrupts
3169 * would just reduce the chance of a status update while we are
3170 * running (by switching to the interrupt-mode coalescence
3171 * parameters), but this chance is already very low so it is more
3172 * efficient to get another interrupt than prevent it.
3173 *
3174 * We do the ack first to ensure another interrupt if there is a
3175 * status update after the ack. We don't check for the status
3176 * changing later because it is more efficient to get another
3177 * interrupt than prevent it, not quite as above (not checking is
3178 * a smaller optimization than not toggling the interrupt enable,
3179 * since checking doesn't involve PCI accesses and toggling requires
3180 * the status check). So toggling would probably be a pessimization
3181 * even with MSI. It would only be needed for using a task queue.
3182 */
3183 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3184
3185 /*
3186 * Do the mandatory PCI flush as well as get the link status.
3187 */
3188 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
3189
3190 /* Make sure the descriptor ring indexes are coherent. */
3191 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3192 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD);
3193 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
3194 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD);
3195
3196 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
3197 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
3198 statusword || sc->bge_link_evt)
3199 bge_link_upd(sc);
3200
3201 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
3202 /* Check RX return ring producer/consumer. */
3203 bge_rxeof(sc);
3204
3205 /* Check TX ring producer/consumer. */
3206 bge_txeof(sc);
3207 }
3208
3209 if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
3210 !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
3211 bge_start_locked(ifp);
3212
3213 BGE_UNLOCK(sc);
3214 }
3215
3216 static void
3217 bge_asf_driver_up(struct bge_softc *sc)
3218 {
3219 if (sc->bge_asf_mode & ASF_STACKUP) {
3220 /* Send ASF heartbeat approx. every 2s */
3221 if (sc->bge_asf_count)
3222 sc->bge_asf_count--;
3223 else {
3224 sc->bge_asf_count = 5;
3225 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW,
3226 BGE_FW_DRV_ALIVE);
3227 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4);
3228 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3);
3229 CSR_WRITE_4(sc, BGE_CPU_EVENT,
3230 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));
3231 }
3232 }
3233 }
3234
3235 static void
3236 bge_tick(void *xsc)
3237 {
3238 struct bge_softc *sc = xsc;
3239 struct mii_data *mii = NULL;
3240
3241 BGE_LOCK_ASSERT(sc);
3242
3243 /* Synchronize with possible callout reset/stop. */
3244 if (callout_pending(&sc->bge_stat_ch) ||
3245 !callout_active(&sc->bge_stat_ch))
3246 return;
3247
3248 if (BGE_IS_5705_PLUS(sc))
3249 bge_stats_update_regs(sc);
3250 else
3251 bge_stats_update(sc);
3252
3253 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
3254 mii = device_get_softc(sc->bge_miibus);
3255 /* Don't mess with the PHY in IPMI/ASF mode */
3256 if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link)))
3257 mii_tick(mii);
3258 } else {
3259 /*
3260 * Since auto-polling can't be used in TBI mode, we poll link
3261 * status manually. Here we register a pending link event
3262 * and trigger an interrupt.
3263 */
3264 #ifdef DEVICE_POLLING
3265 /* In polling mode we poll link state in bge_poll().
*/ 3266 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 3267 #endif 3268 { 3269 sc->bge_link_evt++; 3270 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 3271 } 3272 } 3273 3274 bge_asf_driver_up(sc); 3275 bge_watchdog(sc); 3276 3277 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 3278 } 3279 3280 static void 3281 bge_stats_update_regs(struct bge_softc *sc) 3282 { 3283 struct ifnet *ifp; 3284 3285 ifp = sc->bge_ifp; 3286 3287 ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + 3288 offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); 3289 3290 ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); 3291 } 3292 3293 static void 3294 bge_stats_update(struct bge_softc *sc) 3295 { 3296 struct ifnet *ifp; 3297 bus_size_t stats; 3298 uint32_t cnt; /* current register value */ 3299 3300 ifp = sc->bge_ifp; 3301 3302 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3303 3304 #define READ_STAT(sc, stats, stat) \ 3305 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3306 3307 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); 3308 ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions); 3309 sc->bge_tx_collisions = cnt; 3310 3311 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 3312 ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards); 3313 sc->bge_rx_discards = cnt; 3314 3315 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 3316 ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards); 3317 sc->bge_tx_discards = cnt; 3318 3319 #undef READ_STAT 3320 } 3321 3322 /* 3323 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3324 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3325 * but when such padded frames employ the bge IP/TCP checksum offload, 3326 * the hardware checksum assist gives incorrect results (possibly 3327 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3328 * If we pad such runts with zeros, the onboard checksum comes out correct. 3329 */ 3330 static __inline int 3331 bge_cksum_pad(struct mbuf *m) 3332 { 3333 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 3334 struct mbuf *last; 3335 3336 /* If there's only the packet-header and we can pad there, use it. */ 3337 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 3338 M_TRAILINGSPACE(m) >= padlen) { 3339 last = m; 3340 } else { 3341 /* 3342 * Walk packet chain to find last mbuf. We will either 3343 * pad there, or append a new mbuf and pad it. 3344 */ 3345 for (last = m; last->m_next != NULL; last = last->m_next); 3346 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 3347 /* Allocate new empty mbuf, pad it. Compact later. */ 3348 struct mbuf *n; 3349 3350 MGET(n, M_DONTWAIT, MT_DATA); 3351 if (n == NULL) 3352 return (ENOBUFS); 3353 n->m_len = 0; 3354 last->m_next = n; 3355 last = n; 3356 } 3357 } 3358 3359 /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 3360 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3361 last->m_len += padlen; 3362 m->m_pkthdr.len += padlen; 3363 3364 return (0); 3365 } 3366 3367 /* 3368 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3369 * pointers to descriptors. 
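 * For illustration: a three-segment chain ends up in three
 * consecutive descriptors, each carrying one segment's bus address,
 * length and the shared checksum flags; only the last descriptor
 * gets BGE_TXBDFLAG_END, and the VLAN tag rides in the first.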
3370 */ 3371 static int 3372 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) 3373 { 3374 bus_dma_segment_t segs[BGE_NSEG_NEW]; 3375 bus_dmamap_t map; 3376 struct bge_tx_bd *d; 3377 struct mbuf *m = *m_head; 3378 uint32_t idx = *txidx; 3379 uint16_t csum_flags; 3380 int nsegs, i, error; 3381 3382 csum_flags = 0; 3383 if (m->m_pkthdr.csum_flags) { 3384 if (m->m_pkthdr.csum_flags & CSUM_IP) 3385 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3386 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { 3387 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3388 if (m->m_pkthdr.len < ETHER_MIN_NOPAD && 3389 (error = bge_cksum_pad(m)) != 0) { 3390 m_freem(m); 3391 *m_head = NULL; 3392 return (error); 3393 } 3394 } 3395 if (m->m_flags & M_LASTFRAG) 3396 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3397 else if (m->m_flags & M_FRAG) 3398 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3399 } 3400 3401 map = sc->bge_cdata.bge_tx_dmamap[idx]; 3402 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs, 3403 &nsegs, BUS_DMA_NOWAIT); 3404 if (error == EFBIG) { 3405 m = m_defrag(m, M_DONTWAIT); 3406 if (m == NULL) { 3407 m_freem(*m_head); 3408 *m_head = NULL; 3409 return (ENOBUFS); 3410 } 3411 *m_head = m; 3412 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, 3413 segs, &nsegs, BUS_DMA_NOWAIT); 3414 if (error) { 3415 m_freem(m); 3416 *m_head = NULL; 3417 return (error); 3418 } 3419 } else if (error != 0) 3420 return (error); 3421 3422 /* 3423 * Sanity check: avoid coming within 16 descriptors 3424 * of the end of the ring. 3425 */ 3426 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 3427 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map); 3428 return (ENOBUFS); 3429 } 3430 3431 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE); 3432 3433 for (i = 0; ; i++) { 3434 d = &sc->bge_ldata.bge_tx_ring[idx]; 3435 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3436 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3437 d->bge_len = segs[i].ds_len; 3438 d->bge_flags = csum_flags; 3439 if (i == nsegs - 1) 3440 break; 3441 BGE_INC(idx, BGE_TX_RING_CNT); 3442 } 3443 3444 /* Mark the last segment as end of packet... */ 3445 d->bge_flags |= BGE_TXBDFLAG_END; 3446 3447 /* ... and put VLAN tag into first segment. */ 3448 d = &sc->bge_ldata.bge_tx_ring[*txidx]; 3449 #if __FreeBSD_version > 700022 3450 if (m->m_flags & M_VLANTAG) { 3451 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3452 d->bge_vlan_tag = m->m_pkthdr.ether_vtag; 3453 } else 3454 d->bge_vlan_tag = 0; 3455 #else 3456 { 3457 struct m_tag *mtag; 3458 3459 if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) { 3460 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3461 d->bge_vlan_tag = VLAN_TAG_VALUE(mtag); 3462 } else 3463 d->bge_vlan_tag = 0; 3464 } 3465 #endif 3466 3467 /* 3468 * Insure that the map for this transmission 3469 * is placed at the array index of the last descriptor 3470 * in this chain. 3471 */ 3472 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 3473 sc->bge_cdata.bge_tx_dmamap[idx] = map; 3474 sc->bge_cdata.bge_tx_chain[idx] = m; 3475 sc->bge_txcnt += nsegs; 3476 3477 BGE_INC(idx, BGE_TX_RING_CNT); 3478 *txidx = idx; 3479 3480 return (0); 3481 } 3482 3483 /* 3484 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3485 * to the mbuf data regions directly in the transmit descriptors. 
3486 */ 3487 static void 3488 bge_start_locked(struct ifnet *ifp) 3489 { 3490 struct bge_softc *sc; 3491 struct mbuf *m_head = NULL; 3492 uint32_t prodidx; 3493 int count = 0; 3494 3495 sc = ifp->if_softc; 3496 3497 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3498 return; 3499 3500 prodidx = sc->bge_tx_prodidx; 3501 3502 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3503 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 3504 if (m_head == NULL) 3505 break; 3506 3507 /* 3508 * XXX 3509 * The code inside the if() block is never reached since we 3510 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3511 * requests to checksum TCP/UDP in a fragmented packet. 3512 * 3513 * XXX 3514 * safety overkill. If this is a fragmented packet chain 3515 * with delayed TCP/UDP checksums, then only encapsulate 3516 * it if we have enough descriptors to handle the entire 3517 * chain at once. 3518 * (paranoia -- may not actually be needed) 3519 */ 3520 if (m_head->m_flags & M_FIRSTFRAG && 3521 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3522 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3523 m_head->m_pkthdr.csum_data + 16) { 3524 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3525 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3526 break; 3527 } 3528 } 3529 3530 /* 3531 * Pack the data into the transmit ring. If we 3532 * don't have room, set the OACTIVE flag and wait 3533 * for the NIC to drain the ring. 3534 */ 3535 if (bge_encap(sc, &m_head, &prodidx)) { 3536 if (m_head == NULL) 3537 break; 3538 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3539 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3540 break; 3541 } 3542 ++count; 3543 3544 /* 3545 * If there's a BPF listener, bounce a copy of this frame 3546 * to him. 3547 */ 3548 #ifdef ETHER_BPF_MTAP 3549 ETHER_BPF_MTAP(ifp, m_head); 3550 #else 3551 BPF_MTAP(ifp, m_head); 3552 #endif 3553 } 3554 3555 if (count == 0) 3556 /* No packets were dequeued. */ 3557 return; 3558 3559 /* Transmit. */ 3560 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3561 /* 5700 b2 errata */ 3562 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 3563 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3564 3565 sc->bge_tx_prodidx = prodidx; 3566 3567 /* 3568 * Set a timeout in case the chip goes out to lunch. 3569 */ 3570 sc->bge_timer = 5; 3571 } 3572 3573 /* 3574 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3575 * to the mbuf data regions directly in the transmit descriptors. 3576 */ 3577 static void 3578 bge_start(struct ifnet *ifp) 3579 { 3580 struct bge_softc *sc; 3581 3582 sc = ifp->if_softc; 3583 BGE_LOCK(sc); 3584 bge_start_locked(ifp); 3585 BGE_UNLOCK(sc); 3586 } 3587 3588 static void 3589 bge_init_locked(struct bge_softc *sc) 3590 { 3591 struct ifnet *ifp; 3592 uint16_t *m; 3593 3594 BGE_LOCK_ASSERT(sc); 3595 3596 ifp = sc->bge_ifp; 3597 3598 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3599 return; 3600 3601 /* Cancel pending I/O and flush buffers. */ 3602 bge_stop(sc); 3603 3604 bge_stop_fw(sc); 3605 bge_sig_pre_reset(sc, BGE_RESET_START); 3606 bge_reset(sc); 3607 bge_sig_legacy(sc, BGE_RESET_START); 3608 bge_sig_post_reset(sc, BGE_RESET_START); 3609 3610 bge_chipinit(sc); 3611 3612 /* 3613 * Init the various state machines, ring 3614 * control blocks and firmware. 3615 */ 3616 if (bge_blockinit(sc)) { 3617 device_printf(sc->bge_dev, "initialization failure\n"); 3618 return; 3619 } 3620 3621 ifp = sc->bge_ifp; 3622 3623 /* Specify MTU. 
*/ 3624 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3625 ETHER_HDR_LEN + ETHER_CRC_LEN + 3626 (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); 3627 3628 /* Load our MAC address. */ 3629 m = (uint16_t *)IF_LLADDR(sc->bge_ifp); 3630 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3631 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3632 3633 /* Program promiscuous mode. */ 3634 bge_setpromisc(sc); 3635 3636 /* Program multicast filter. */ 3637 bge_setmulti(sc); 3638 3639 /* Program VLAN tag stripping. */ 3640 bge_setvlan(sc); 3641 3642 /* Init RX ring. */ 3643 bge_init_rx_ring_std(sc); 3644 3645 /* 3646 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 3647 * memory to insure that the chip has in fact read the first 3648 * entry of the ring. 3649 */ 3650 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 3651 uint32_t v, i; 3652 for (i = 0; i < 10; i++) { 3653 DELAY(20); 3654 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 3655 if (v == (MCLBYTES - ETHER_ALIGN)) 3656 break; 3657 } 3658 if (i == 10) 3659 device_printf (sc->bge_dev, 3660 "5705 A0 chip failed to load RX ring\n"); 3661 } 3662 3663 /* Init jumbo RX ring. */ 3664 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3665 bge_init_rx_ring_jumbo(sc); 3666 3667 /* Init our RX return ring index. */ 3668 sc->bge_rx_saved_considx = 0; 3669 3670 /* Init our RX/TX stat counters. */ 3671 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; 3672 3673 /* Init TX ring. */ 3674 bge_init_tx_ring(sc); 3675 3676 /* Turn on transmitter. */ 3677 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3678 3679 /* Turn on receiver. */ 3680 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3681 3682 /* Tell firmware we're alive. */ 3683 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3684 3685 #ifdef DEVICE_POLLING 3686 /* Disable interrupts if we are polling. */ 3687 if (ifp->if_capenable & IFCAP_POLLING) { 3688 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 3689 BGE_PCIMISCCTL_MASK_PCI_INTR); 3690 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3691 } else 3692 #endif 3693 3694 /* Enable host interrupts. */ 3695 { 3696 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3697 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3698 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3699 } 3700 3701 bge_ifmedia_upd_locked(ifp); 3702 3703 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3704 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3705 3706 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 3707 } 3708 3709 static void 3710 bge_init(void *xsc) 3711 { 3712 struct bge_softc *sc = xsc; 3713 3714 BGE_LOCK(sc); 3715 bge_init_locked(sc); 3716 BGE_UNLOCK(sc); 3717 } 3718 3719 /* 3720 * Set media options. 3721 */ 3722 static int 3723 bge_ifmedia_upd(struct ifnet *ifp) 3724 { 3725 struct bge_softc *sc = ifp->if_softc; 3726 int res; 3727 3728 BGE_LOCK(sc); 3729 res = bge_ifmedia_upd_locked(ifp); 3730 BGE_UNLOCK(sc); 3731 3732 return (res); 3733 } 3734 3735 static int 3736 bge_ifmedia_upd_locked(struct ifnet *ifp) 3737 { 3738 struct bge_softc *sc = ifp->if_softc; 3739 struct mii_data *mii; 3740 struct ifmedia *ifm; 3741 3742 BGE_LOCK_ASSERT(sc); 3743 3744 ifm = &sc->bge_ifmedia; 3745 3746 /* If this is a 1000baseX NIC, enable the TBI port. 
	/* If this is a 1000baseX NIC, enable the TBI port. */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/*
			 * The BCM5704 ASIC appears to have a special
			 * mechanism for programming the autoneg
			 * advertisement registers in TBI mode.
			 */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
				uint32_t sgdig;
				sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
				if (sgdig & BGE_SGDIGSTS_DONE) {
					CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
					sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
					sgdig |= BGE_SGDIGCFG_AUTO |
					    BGE_SGDIGCFG_PAUSE_CAP |
					    BGE_SGDIGCFG_ASYM_PAUSE;
					CSR_WRITE_4(sc, BGE_SGDIG_CFG,
					    sgdig | BGE_SGDIGCFG_SEND);
					DELAY(5);
					CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
				}
			}
			break;
		case IFM_1000_SX:
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
				BGE_CLRBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			} else {
				BGE_SETBIT(sc, BGE_MAC_MODE,
				    BGE_MACMODE_HALF_DUPLEX);
			}
			break;
		default:
			return (EINVAL);
		}
		return (0);
	}

	sc->bge_link_evt++;
	mii = device_get_softc(sc->bge_miibus);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	/*
	 * Force an interrupt so that we will call bge_link_upd
	 * if needed and clear any pending link state attention.
	 * Without this we get no further link-state interrupts, the
	 * link never comes UP, and we cannot transmit in
	 * bge_start_locked(); the only other way to get things working
	 * is to receive a packet and take an RX interrupt.
	 * bge_tick() polls the link for fiber cards, so this may be
	 * unnecessary when BGE_FLAG_TBI is set, but as we poll for
	 * fiber anyway it should do no harm.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
#ifdef notyet
	BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ATTN);
#endif

	return (0);
}
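
/*
 * Example: a media change on a TBI (fiber) card reaches
 * bge_ifmedia_upd_locked() above via the SIOCSIFMEDIA ioctl, e.g.
 * (hypothetical unit number):
 *
 *	ifconfig bge0 media 1000baseSX mediaopt full-duplex
 *	ifconfig bge0 media autoselect
 */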
/*
 * Report current media status.
 */
static void
bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bge_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	BGE_LOCK(sc);

	if (sc->bge_flags & BGE_FLAG_TBI) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (CSR_READ_4(sc, BGE_MAC_STS) &
		    BGE_MACSTAT_TBI_PCS_SYNCHED)
			ifmr->ifm_status |= IFM_ACTIVE;
		else {
			ifmr->ifm_active |= IFM_NONE;
			BGE_UNLOCK(sc);
			return;
		}
		ifmr->ifm_active |= IFM_1000_SX;
		if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		BGE_UNLOCK(sc);
		return;
	}

	mii = device_get_softc(sc->bge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	BGE_UNLOCK(sc);
}

static int
bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int flags, mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > BGE_JUMBO_MTU) ||
		    ((!BGE_IS_JUMBO_CAPABLE(sc)) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			bge_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		BGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.  Similarly for ALLMULTI.
			 */
3889 */ 3890 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3891 flags = ifp->if_flags ^ sc->bge_if_flags; 3892 if (flags & IFF_PROMISC) 3893 bge_setpromisc(sc); 3894 if (flags & IFF_ALLMULTI) 3895 bge_setmulti(sc); 3896 } else 3897 bge_init_locked(sc); 3898 } else { 3899 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3900 bge_stop(sc); 3901 } 3902 } 3903 sc->bge_if_flags = ifp->if_flags; 3904 BGE_UNLOCK(sc); 3905 error = 0; 3906 break; 3907 case SIOCADDMULTI: 3908 case SIOCDELMULTI: 3909 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3910 BGE_LOCK(sc); 3911 bge_setmulti(sc); 3912 BGE_UNLOCK(sc); 3913 error = 0; 3914 } 3915 break; 3916 case SIOCSIFMEDIA: 3917 case SIOCGIFMEDIA: 3918 if (sc->bge_flags & BGE_FLAG_TBI) { 3919 error = ifmedia_ioctl(ifp, ifr, 3920 &sc->bge_ifmedia, command); 3921 } else { 3922 mii = device_get_softc(sc->bge_miibus); 3923 error = ifmedia_ioctl(ifp, ifr, 3924 &mii->mii_media, command); 3925 } 3926 break; 3927 case SIOCSIFCAP: 3928 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3929 #ifdef DEVICE_POLLING 3930 if (mask & IFCAP_POLLING) { 3931 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3932 error = ether_poll_register(bge_poll, ifp); 3933 if (error) 3934 return (error); 3935 BGE_LOCK(sc); 3936 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 3937 BGE_PCIMISCCTL_MASK_PCI_INTR); 3938 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3939 ifp->if_capenable |= IFCAP_POLLING; 3940 BGE_UNLOCK(sc); 3941 } else { 3942 error = ether_poll_deregister(ifp); 3943 /* Enable interrupt even in error case */ 3944 BGE_LOCK(sc); 3945 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, 3946 BGE_PCIMISCCTL_MASK_PCI_INTR); 3947 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3948 ifp->if_capenable &= ~IFCAP_POLLING; 3949 BGE_UNLOCK(sc); 3950 } 3951 } 3952 #endif 3953 if (mask & IFCAP_HWCSUM) { 3954 ifp->if_capenable ^= IFCAP_HWCSUM; 3955 if (IFCAP_HWCSUM & ifp->if_capenable && 3956 IFCAP_HWCSUM & ifp->if_capabilities) 3957 ifp->if_hwassist = BGE_CSUM_FEATURES; 3958 else 3959 ifp->if_hwassist = 0; 3960 #ifdef VLAN_CAPABILITIES 3961 VLAN_CAPABILITIES(ifp); 3962 #endif 3963 } 3964 3965 if (mask & IFCAP_VLAN_MTU) { 3966 ifp->if_capenable ^= IFCAP_VLAN_MTU; 3967 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3968 bge_init(sc); 3969 } 3970 3971 if (mask & IFCAP_VLAN_HWTAGGING) { 3972 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3973 BGE_LOCK(sc); 3974 bge_setvlan(sc); 3975 BGE_UNLOCK(sc); 3976 #ifdef VLAN_CAPABILITIES 3977 VLAN_CAPABILITIES(ifp); 3978 #endif 3979 } 3980 3981 break; 3982 default: 3983 error = ether_ioctl(ifp, command, data); 3984 break; 3985 } 3986 3987 return (error); 3988 } 3989 3990 static void 3991 bge_watchdog(struct bge_softc *sc) 3992 { 3993 struct ifnet *ifp; 3994 3995 BGE_LOCK_ASSERT(sc); 3996 3997 if (sc->bge_timer == 0 || --sc->bge_timer) 3998 return; 3999 4000 ifp = sc->bge_ifp; 4001 4002 if_printf(ifp, "watchdog timeout -- resetting\n"); 4003 4004 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4005 bge_init_locked(sc); 4006 4007 ifp->if_oerrors++; 4008 } 4009 4010 /* 4011 * Stop the adapter and free any mbufs allocated to the 4012 * RX and TX lists. 4013 */ 4014 static void 4015 bge_stop(struct bge_softc *sc) 4016 { 4017 struct ifnet *ifp; 4018 struct ifmedia_entry *ifm; 4019 struct mii_data *mii = NULL; 4020 int mtmp, itmp; 4021 4022 BGE_LOCK_ASSERT(sc); 4023 4024 ifp = sc->bge_ifp; 4025 4026 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) 4027 mii = device_get_softc(sc->bge_miibus); 4028 4029 callout_stop(&sc->bge_stat_ch); 4030 4031 /* 4032 * Disable all of the receiver blocks. 
4033 */ 4034 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 4035 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); 4036 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); 4037 if (!(BGE_IS_5705_PLUS(sc))) 4038 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); 4039 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); 4040 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); 4041 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); 4042 4043 /* 4044 * Disable all of the transmit blocks. 4045 */ 4046 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); 4047 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); 4048 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); 4049 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); 4050 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); 4051 if (!(BGE_IS_5705_PLUS(sc))) 4052 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); 4053 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); 4054 4055 /* 4056 * Shut down all of the memory managers and related 4057 * state machines. 4058 */ 4059 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); 4060 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); 4061 if (!(BGE_IS_5705_PLUS(sc))) 4062 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); 4063 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); 4064 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); 4065 if (!(BGE_IS_5705_PLUS(sc))) { 4066 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); 4067 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 4068 } 4069 4070 /* Disable host interrupts. */ 4071 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 4072 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 4073 4074 /* 4075 * Tell firmware we're shutting down. 4076 */ 4077 4078 bge_stop_fw(sc); 4079 bge_sig_pre_reset(sc, BGE_RESET_STOP); 4080 bge_reset(sc); 4081 bge_sig_legacy(sc, BGE_RESET_STOP); 4082 bge_sig_post_reset(sc, BGE_RESET_STOP); 4083 4084 /* 4085 * Keep the ASF firmware running if up. 4086 */ 4087 if (sc->bge_asf_mode & ASF_STACKUP) 4088 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4089 else 4090 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 4091 4092 /* Free the RX lists. */ 4093 bge_free_rx_ring_std(sc); 4094 4095 /* Free jumbo RX list. */ 4096 if (BGE_IS_JUMBO_CAPABLE(sc)) 4097 bge_free_rx_ring_jumbo(sc); 4098 4099 /* Free TX buffers. */ 4100 bge_free_tx_ring(sc); 4101 4102 /* 4103 * Isolate/power down the PHY, but leave the media selection 4104 * unchanged so that things will be put back to normal when 4105 * we bring the interface back up. 4106 */ 4107 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 4108 itmp = ifp->if_flags; 4109 ifp->if_flags |= IFF_UP; 4110 /* 4111 * If we are called from bge_detach(), mii is already NULL. 4112 */ 4113 if (mii != NULL) { 4114 ifm = mii->mii_media.ifm_cur; 4115 mtmp = ifm->ifm_media; 4116 ifm->ifm_media = IFM_ETHER | IFM_NONE; 4117 mii_mediachg(mii); 4118 ifm->ifm_media = mtmp; 4119 } 4120 ifp->if_flags = itmp; 4121 } 4122 4123 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; 4124 4125 /* Clear MAC's link state (PHY may still have link UP). */ 4126 if (bootverbose && sc->bge_link) 4127 if_printf(sc->bge_ifp, "link DOWN\n"); 4128 sc->bge_link = 0; 4129 4130 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4131 } 4132 4133 /* 4134 * Stop all chip I/O so that the kernel's probe routines don't 4135 * get confused by errant DMAs when rebooting. 
4136 */ 4137 static void 4138 bge_shutdown(device_t dev) 4139 { 4140 struct bge_softc *sc; 4141 4142 sc = device_get_softc(dev); 4143 4144 BGE_LOCK(sc); 4145 bge_stop(sc); 4146 bge_reset(sc); 4147 BGE_UNLOCK(sc); 4148 } 4149 4150 static int 4151 bge_suspend(device_t dev) 4152 { 4153 struct bge_softc *sc; 4154 4155 sc = device_get_softc(dev); 4156 BGE_LOCK(sc); 4157 bge_stop(sc); 4158 BGE_UNLOCK(sc); 4159 4160 return (0); 4161 } 4162 4163 static int 4164 bge_resume(device_t dev) 4165 { 4166 struct bge_softc *sc; 4167 struct ifnet *ifp; 4168 4169 sc = device_get_softc(dev); 4170 BGE_LOCK(sc); 4171 ifp = sc->bge_ifp; 4172 if (ifp->if_flags & IFF_UP) { 4173 bge_init_locked(sc); 4174 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4175 bge_start_locked(ifp); 4176 } 4177 BGE_UNLOCK(sc); 4178 4179 return (0); 4180 } 4181 4182 static void 4183 bge_link_upd(struct bge_softc *sc) 4184 { 4185 struct mii_data *mii; 4186 uint32_t link, status; 4187 4188 BGE_LOCK_ASSERT(sc); 4189 4190 /* Clear 'pending link event' flag. */ 4191 sc->bge_link_evt = 0; 4192 4193 /* 4194 * Process link state changes. 4195 * Grrr. The link status word in the status block does 4196 * not work correctly on the BCM5700 rev AX and BX chips, 4197 * according to all available information. Hence, we have 4198 * to enable MII interrupts in order to properly obtain 4199 * async link changes. Unfortunately, this also means that 4200 * we have to read the MAC status register to detect link 4201 * changes, thereby adding an additional register access to 4202 * the interrupt handler. 4203 * 4204 * XXX: perhaps link state detection procedure used for 4205 * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. 4206 */ 4207 4208 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && 4209 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { 4210 status = CSR_READ_4(sc, BGE_MAC_STS); 4211 if (status & BGE_MACSTAT_MI_INTERRUPT) { 4212 mii = device_get_softc(sc->bge_miibus); 4213 mii_pollstat(mii); 4214 if (!sc->bge_link && 4215 mii->mii_media_status & IFM_ACTIVE && 4216 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 4217 sc->bge_link++; 4218 if (bootverbose) 4219 if_printf(sc->bge_ifp, "link UP\n"); 4220 } else if (sc->bge_link && 4221 (!(mii->mii_media_status & IFM_ACTIVE) || 4222 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { 4223 sc->bge_link = 0; 4224 if (bootverbose) 4225 if_printf(sc->bge_ifp, "link DOWN\n"); 4226 } 4227 4228 /* Clear the interrupt. */ 4229 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, 4230 BGE_EVTENB_MI_INTERRUPT); 4231 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); 4232 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, 4233 BRGPHY_INTRS); 4234 } 4235 return; 4236 } 4237 4238 if (sc->bge_flags & BGE_FLAG_TBI) { 4239 status = CSR_READ_4(sc, BGE_MAC_STS); 4240 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { 4241 if (!sc->bge_link) { 4242 sc->bge_link++; 4243 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) 4244 BGE_CLRBIT(sc, BGE_MAC_MODE, 4245 BGE_MACMODE_TBI_SEND_CFGS); 4246 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); 4247 if (bootverbose) 4248 if_printf(sc->bge_ifp, "link UP\n"); 4249 if_link_state_change(sc->bge_ifp, 4250 LINK_STATE_UP); 4251 } 4252 } else if (sc->bge_link) { 4253 sc->bge_link = 0; 4254 if (bootverbose) 4255 if_printf(sc->bge_ifp, "link DOWN\n"); 4256 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); 4257 } 4258 } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) { 4259 /* 4260 * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit 4261 * in status word always set. 
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			mii = device_get_softc(sc->bge_miibus);
			mii_pollstat(mii);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	} else {
		/*
		 * Discard link events for MII/GMII controllers
		 * if MI auto-polling is disabled.
		 */
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
	    BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
	    BGE_MACSTAT_LINK_CHANGED);
}

#define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
	SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \
	    sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \
	    desc)

static void
bge_add_sysctls(struct bge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *schildren;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->bge_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));

#ifdef BGE_REGISTER_DEBUG
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I",
	    "Debug Information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I",
	    "Register Read");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I",
	    "Memory Read");
#endif

	if (BGE_IS_5705_PLUS(sc))
		return;

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "BGE Statistics");
	schildren = children = SYSCTL_CHILDREN(tree);
	BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
	    children, COSFramesDroppedDueToFilters,
	    "FramesDroppedDueToFilters");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
	    children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
	    children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
	    children, nicNoMoreRxBDs, "NoMoreRxBDs");
	BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
	    children, ifInDiscards, "InputDiscards");
	BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
	    children, ifInErrors, "InputErrors");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
	    children, nicRecvThresholdHit, "RecvThresholdHit");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
	    children, nicDmaReadQueueFull, "DmaReadQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
	    children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
	    children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
	BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
"NIC Ring Set Send Producer Index", 4355 children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); 4356 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", 4357 children, nicRingStatusUpdate, "RingStatusUpdate"); 4358 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", 4359 children, nicInterrupts, "Interrupts"); 4360 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", 4361 children, nicAvoidedInterrupts, "AvoidedInterrupts"); 4362 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", 4363 children, nicSendThresholdHit, "SendThresholdHit"); 4364 4365 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, 4366 NULL, "BGE RX Statistics"); 4367 children = SYSCTL_CHILDREN(tree); 4368 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", 4369 children, rxstats.ifHCInOctets, "Octets"); 4370 BGE_SYSCTL_STAT(sc, ctx, "Fragments", 4371 children, rxstats.etherStatsFragments, "Fragments"); 4372 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", 4373 children, rxstats.ifHCInUcastPkts, "UcastPkts"); 4374 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", 4375 children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); 4376 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", 4377 children, rxstats.dot3StatsFCSErrors, "FCSErrors"); 4378 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", 4379 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); 4380 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", 4381 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); 4382 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", 4383 children, rxstats.xoffPauseFramesReceived, 4384 "xoffPauseFramesReceived"); 4385 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", 4386 children, rxstats.macControlFramesReceived, 4387 "ControlFramesReceived"); 4388 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", 4389 children, rxstats.xoffStateEntered, "xoffStateEntered"); 4390 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", 4391 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); 4392 BGE_SYSCTL_STAT(sc, ctx, "Jabbers", 4393 children, rxstats.etherStatsJabbers, "Jabbers"); 4394 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", 4395 children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); 4396 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", 4397 children, rxstats.inRangeLengthError, "inRangeLengthError"); 4398 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", 4399 children, rxstats.outRangeLengthError, "outRangeLengthError"); 4400 4401 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, 4402 NULL, "BGE TX Statistics"); 4403 children = SYSCTL_CHILDREN(tree); 4404 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", 4405 children, txstats.ifHCOutOctets, "Octets"); 4406 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", 4407 children, txstats.etherStatsCollisions, "Collisions"); 4408 BGE_SYSCTL_STAT(sc, ctx, "XON Sent", 4409 children, txstats.outXonSent, "XonSent"); 4410 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", 4411 children, txstats.outXoffSent, "XoffSent"); 4412 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", 4413 children, txstats.flowControlDone, "flowControlDone"); 4414 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", 4415 children, txstats.dot3StatsInternalMacTransmitErrors, 4416 "InternalMacTransmitErrors"); 4417 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", 4418 children, txstats.dot3StatsSingleCollisionFrames, 4419 "SingleCollisionFrames"); 4420 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", 4421 children, txstats.dot3StatsMultipleCollisionFrames, 4422 "MultipleCollisionFrames"); 4423 
BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", 4424 children, txstats.dot3StatsDeferredTransmissions, 4425 "DeferredTransmissions"); 4426 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", 4427 children, txstats.dot3StatsExcessiveCollisions, 4428 "ExcessiveCollisions"); 4429 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", 4430 children, txstats.dot3StatsLateCollisions, 4431 "LateCollisions"); 4432 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", 4433 children, txstats.ifHCOutUcastPkts, "UcastPkts"); 4434 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", 4435 children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); 4436 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", 4437 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); 4438 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", 4439 children, txstats.dot3StatsCarrierSenseErrors, 4440 "CarrierSenseErrors"); 4441 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", 4442 children, txstats.ifOutDiscards, "Discards"); 4443 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", 4444 children, txstats.ifOutErrors, "Errors"); 4445 } 4446 4447 static int 4448 bge_sysctl_stats(SYSCTL_HANDLER_ARGS) 4449 { 4450 struct bge_softc *sc; 4451 uint32_t result; 4452 int offset; 4453 4454 sc = (struct bge_softc *)arg1; 4455 offset = arg2; 4456 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + 4457 offsetof(bge_hostaddr, bge_addr_lo)); 4458 return (sysctl_handle_int(oidp, &result, 0, req)); 4459 } 4460 4461 #ifdef BGE_REGISTER_DEBUG 4462 static int 4463 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 4464 { 4465 struct bge_softc *sc; 4466 uint16_t *sbdata; 4467 int error; 4468 int result; 4469 int i, j; 4470 4471 result = -1; 4472 error = sysctl_handle_int(oidp, &result, 0, req); 4473 if (error || (req->newptr == NULL)) 4474 return (error); 4475 4476 if (result == 1) { 4477 sc = (struct bge_softc *)arg1; 4478 4479 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; 4480 printf("Status Block:\n"); 4481 for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) { 4482 printf("%06x:", i); 4483 for (j = 0; j < 8; j++) { 4484 printf(" %04x", sbdata[i]); 4485 i += 4; 4486 } 4487 printf("\n"); 4488 } 4489 4490 printf("Registers:\n"); 4491 for (i = 0x800; i < 0xA00; ) { 4492 printf("%06x:", i); 4493 for (j = 0; j < 8; j++) { 4494 printf(" %08x", CSR_READ_4(sc, i)); 4495 i += 4; 4496 } 4497 printf("\n"); 4498 } 4499 4500 printf("Hardware Flags:\n"); 4501 if (BGE_IS_575X_PLUS(sc)) 4502 printf(" - 575X Plus\n"); 4503 if (BGE_IS_5705_PLUS(sc)) 4504 printf(" - 5705 Plus\n"); 4505 if (BGE_IS_5714_FAMILY(sc)) 4506 printf(" - 5714 Family\n"); 4507 if (BGE_IS_5700_FAMILY(sc)) 4508 printf(" - 5700 Family\n"); 4509 if (sc->bge_flags & BGE_FLAG_JUMBO) 4510 printf(" - Supports Jumbo Frames\n"); 4511 if (sc->bge_flags & BGE_FLAG_PCIX) 4512 printf(" - PCI-X Bus\n"); 4513 if (sc->bge_flags & BGE_FLAG_PCIE) 4514 printf(" - PCI Express Bus\n"); 4515 if (sc->bge_flags & BGE_FLAG_NO_3LED) 4516 printf(" - No 3 LEDs\n"); 4517 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) 4518 printf(" - RX Alignment Bug\n"); 4519 } 4520 4521 return (error); 4522 } 4523 4524 static int 4525 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) 4526 { 4527 struct bge_softc *sc; 4528 int error; 4529 uint16_t result; 4530 uint32_t val; 4531 4532 result = -1; 4533 error = sysctl_handle_int(oidp, &result, 0, req); 4534 if (error || (req->newptr == NULL)) 4535 return (error); 4536 4537 if (result < 0x8000) { 4538 sc = (struct bge_softc *)arg1; 4539 val = CSR_READ_4(sc, result); 4540 printf("reg 0x%06X = 0x%08X\n", result, val); 
static int
bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	int result;	/* must be int-sized: sysctl_handle_int() copies an int */
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result >= 0 && result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = CSR_READ_4(sc, result);
		printf("reg 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}

static int
bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
{
	struct bge_softc *sc;
	int error;
	int result;	/* must be int-sized: sysctl_handle_int() copies an int */
	uint32_t val;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || (req->newptr == NULL))
		return (error);

	if (result >= 0 && result < 0x8000) {
		sc = (struct bge_softc *)arg1;
		val = bge_readmem_ind(sc, result);
		printf("mem 0x%06X = 0x%08X\n", result, val);
	}

	return (error);
}
#endif
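
/*
 * Example usage of the two read hooks above from userland (hypothetical
 * unit number; offsets are given in decimal since they pass through
 * sysctl_handle_int(), and must be below 0x8000):
 *
 *	sysctl dev.bge.0.reg_read=1024		# dump MAC register 0x400
 *	sysctl dev.bge.0.mem_read=8192		# dump NIC memory word 0x2000
 */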