/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miidevs.h"
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#define ETHER_MIN_NOPAD		(ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */

MODULE_DEPEND(bge, pci, 1, 1, 1);
MODULE_DEPEND(bge, ether, 1, 1, 1);
MODULE_DEPEND(bge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
static struct bge_type {
	uint16_t	bge_vid;
	uint16_t	bge_did;
} bge_devs[] = {
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5700 },
	{ ALTEON_VENDORID,	ALTEON_DEVICEID_BCM5701 },

	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1000 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC1002 },
	{ ALTIMA_VENDORID,	ALTIMA_DEVICE_AC9100 },

	{ APPLE_VENDORID,	APPLE_DEVICE_BCM5701 },

	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5700 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5701 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5702X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5703X },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5704S_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705K },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5705M_ALT },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714C },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5714S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5715S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5720 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5721 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5750M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5751M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5752M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753F },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5753M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5754M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5755M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5780S },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5781 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5782 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5786 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5787M },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5788 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5789 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5901A2 },
	{ BCOM_VENDORID,	BCOM_DEVICEID_BCM5903M },

	{ SK_VENDORID,		SK_DEVICEID_ALTIMA },

	{ TC_VENDORID,		TC_DEVICEID_3C996 },

	{ 0, 0 }
};

static const struct bge_vendor {
	uint16_t	v_id;
	const char	*v_name;
} bge_vendors[] = {
	{ ALTEON_VENDORID,	"Alteon" },
	{ ALTIMA_VENDORID,	"Altima" },
	{ APPLE_VENDORID,	"Apple" },
	{ BCOM_VENDORID,	"Broadcom" },
	{ SK_VENDORID,		"SysKonnect" },
	{ TC_VENDORID,		"3Com" },

	{ 0, NULL }
};

static const struct bge_revision {
	uint32_t	br_chipid;
	const char	*br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,	"BCM5700 A0" },
	{ BGE_CHIPID_BCM5700_A1,	"BCM5700 A1" },
	{ BGE_CHIPID_BCM5700_B0,	"BCM5700 B0" },
	{ BGE_CHIPID_BCM5700_B1,	"BCM5700 B1" },
	{ BGE_CHIPID_BCM5700_B2,	"BCM5700 B2" },
	{ BGE_CHIPID_BCM5700_B3,	"BCM5700 B3" },
	{ BGE_CHIPID_BCM5700_ALTIMA,	"BCM5700 Altima" },
	{ BGE_CHIPID_BCM5700_C0,	"BCM5700 C0" },
	{ BGE_CHIPID_BCM5701_A0,	"BCM5701 A0" },
	{ BGE_CHIPID_BCM5701_B0,	"BCM5701 B0" },
	{ BGE_CHIPID_BCM5701_B2,	"BCM5701 B2" },
	{ BGE_CHIPID_BCM5701_B5,	"BCM5701 B5" },
	{ BGE_CHIPID_BCM5703_A0,	"BCM5703 A0" },
	{ BGE_CHIPID_BCM5703_A1,	"BCM5703 A1" },
	{ BGE_CHIPID_BCM5703_A2,	"BCM5703 A2" },
	{ BGE_CHIPID_BCM5703_A3,	"BCM5703 A3" },
	{ BGE_CHIPID_BCM5703_B0,	"BCM5703 B0" },
	{ BGE_CHIPID_BCM5704_A0,	"BCM5704 A0" },
	{ BGE_CHIPID_BCM5704_A1,	"BCM5704 A1" },
	{ BGE_CHIPID_BCM5704_A2,	"BCM5704 A2" },
	{ BGE_CHIPID_BCM5704_A3,	"BCM5704 A3" },
	{ BGE_CHIPID_BCM5704_B0,	"BCM5704 B0" },
	{ BGE_CHIPID_BCM5705_A0,	"BCM5705 A0" },
	{ BGE_CHIPID_BCM5705_A1,	"BCM5705 A1" },
	{ BGE_CHIPID_BCM5705_A2,	"BCM5705 A2" },
	{ BGE_CHIPID_BCM5705_A3,	"BCM5705 A3" },
	{ BGE_CHIPID_BCM5750_A0,	"BCM5750 A0" },
	{ BGE_CHIPID_BCM5750_A1,	"BCM5750 A1" },
	{ BGE_CHIPID_BCM5750_A3,	"BCM5750 A3" },
	{ BGE_CHIPID_BCM5750_B0,	"BCM5750 B0" },
	{ BGE_CHIPID_BCM5750_B1,	"BCM5750 B1" },
	{ BGE_CHIPID_BCM5750_C0,	"BCM5750 C0" },
	{ BGE_CHIPID_BCM5750_C1,	"BCM5750 C1" },
	{ BGE_CHIPID_BCM5750_C2,	"BCM5750 C2" },
	{ BGE_CHIPID_BCM5714_A0,	"BCM5714 A0" },
	{ BGE_CHIPID_BCM5752_A0,	"BCM5752 A0" },
	{ BGE_CHIPID_BCM5752_A1,	"BCM5752 A1" },
	{ BGE_CHIPID_BCM5752_A2,	"BCM5752 A2" },
	{ BGE_CHIPID_BCM5714_B0,	"BCM5714 B0" },
	{ BGE_CHIPID_BCM5714_B3,	"BCM5714 B3" },
	{ BGE_CHIPID_BCM5715_A0,	"BCM5715 A0" },
	{ BGE_CHIPID_BCM5715_A1,	"BCM5715 A1" },

	{ 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,		"unknown BCM5700" },
	{ BGE_ASICREV_BCM5701,		"unknown BCM5701" },
	{ BGE_ASICREV_BCM5703,		"unknown BCM5703" },
	{ BGE_ASICREV_BCM5704,		"unknown BCM5704" },
	{ BGE_ASICREV_BCM5705,		"unknown BCM5705" },
	{ BGE_ASICREV_BCM5750,		"unknown BCM5750" },
	{ BGE_ASICREV_BCM5714_A0,	"unknown BCM5714" },
	{ BGE_ASICREV_BCM5752,		"unknown BCM5752" },
	{ BGE_ASICREV_BCM5780,		"unknown BCM5780" },
	{ BGE_ASICREV_BCM5714,		"unknown BCM5714" },
	{ BGE_ASICREV_BCM5755,		"unknown BCM5755" },
	{ BGE_ASICREV_BCM5787,		"unknown BCM5787" },

	{ 0, NULL }
};

#define BGE_IS_5705_OR_BEYOND(sc) \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5705 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define BGE_IS_575X_PLUS(sc) \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5750 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5714 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5752 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5755 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5787)

#define BGE_IS_5714_FAMILY(sc) \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5714_A0 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5780 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5714)

#define BGE_IS_JUMBO_CAPABLE(sc) \
	((sc)->bge_asicrev == BGE_ASICREV_BCM5700 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5701 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5703 || \
	(sc)->bge_asicrev == BGE_ASICREV_BCM5704)

const struct bge_revision * bge_lookup_rev(uint32_t);
const struct bge_vendor * bge_lookup_vendor(uint16_t);
static int bge_probe(device_t);
static int bge_attach(device_t);
static int bge_detach(device_t);
static int bge_suspend(device_t);
static int bge_resume(device_t);
static void bge_release_resources(struct bge_softc *);
static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int bge_dma_alloc(device_t);
static void bge_dma_free(struct bge_softc *);

static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_asf_driver_up (struct bge_softc *);
static void bge_tick_locked(struct bge_softc *);
static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static void bge_stats_update_regs(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);

static void bge_intr(void *);
static void bge_start_locked(struct ifnet *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static void bge_init_locked(struct bge_softc *);
static void bge_init(void *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct ifnet *);
static void bge_shutdown(device_t);
static int bge_ifmedia_upd_locked(struct ifnet *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

static void bge_setpromisc(struct bge_softc *);
static void bge_setmulti(struct bge_softc *);

static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);

static uint32_t bge_readmem_ind(struct bge_softc *, int);
static void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writereg_ind(struct bge_softc *, int, int);

static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
static void bge_miibus_statchg(device_t);
#ifdef DEVICE_POLLING
static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif

#define BGE_RESET_START 1
#define BGE_RESET_STOP  2
static void bge_sig_post_reset(struct bge_softc *, int);
static void bge_sig_legacy(struct bge_softc *, int);
static void bge_sig_pre_reset(struct bge_softc *, int);
static int bge_reset(struct bge_softc *);
static void bge_link_upd(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),
	DEVMETHOD(device_suspend,	bge_suspend),
	DEVMETHOD(device_resume,	bge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

static int bge_fake_autoneg = 0;
static int bge_allow_asf = 1;

TUNABLE_INT("hw.bge.fake_autoneg", &bge_fake_autoneg);
TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf);

SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters");
SYSCTL_INT(_hw_bge, OID_AUTO, fake_autoneg, CTLFLAG_RD, &bge_fake_autoneg, 0,
    "Enable fake autonegotiation for certain blade systems");
SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0,
    "Allow ASF mode if available");

static uint32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

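	/*
	 * Indirect access: point the PCI memory window at 'off',
	 * then write the value through the window data register.
	 */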
	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
}

#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
}

/*
 * Map a single buffer address.
 */

static void
bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct bge_dmamap_arg *ctx;

	if (error)
		return;

	ctx = arg;

	if (nseg > ctx->bge_maxsegs) {
		ctx->bge_maxsegs = 0;
		return;
	}

	ctx->bge_busaddr = segs->ds_addr;
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors. */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY read timed out\n");
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors. */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "PHY write timed out\n");
		return (0);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return (ENOMEM);
	}
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING|BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
		/* FALLTHROUGH */
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
		/* FALLTHROUGH */
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
		/* FALLTHROUGH */
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return (0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
	    sc->bge_cdata.bge_rx_jumbo_ring_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
	    BGE_RCB_FLAG_USE_EXT_RX_BD);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return (0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
		    sizeof(struct bge_extrx_bd));
	}
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i;

	if (sc->bge_ldata.bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return (0);
}

static void
bge_setpromisc(struct bge_softc *sc)
{
	struct ifnet *ifp;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	/*
	 * Enable or disable promiscuous mode as needed.
	 * Do not strip VLAN tag when promiscuous mode is enabled.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
		    BGE_RXMODE_RX_KEEP_VLAN_DIAG);
	else
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC |
		    BGE_RXMODE_RX_KEEP_VLAN_DIAG);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	BGE_LOCK_ASSERT(sc);

	ifp = sc->bge_ifp;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
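	/*
	 * The hash filter uses the low 7 bits of the CRC32 of each
	 * multicast address: bits 6-5 select one of the four 32-bit
	 * BGE_MAR registers and bits 4-0 select the bit within it.
	 */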
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

static void
bge_sig_pre_reset(struct bge_softc *sc, int type)
{

	/*
	 * Some chips don't like this so only do this if ASF is enabled.
	 */
	if (sc->bge_asf_mode)
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

static void
bge_sig_post_reset(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001);
			/* START DONE */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002);
			break;
		}
	}
}

static void
bge_sig_legacy(struct bge_softc *sc, int type)
{

	if (sc->bge_asf_mode) {
		switch (type) {
		case BGE_RESET_START:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */
			break;
		case BGE_RESET_STOP:
			bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */
			break;
		}
	}
}

void bge_stop_fw(struct bge_softc *);
void
bge_stop_fw(struct bge_softc *sc)
{
	int i;

	if (sc->bge_asf_mode) {
		bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE);
		CSR_WRITE_4(sc, BGE_CPU_EVENT,
		    CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14));

		for (i = 0; i < 100; i++) {
			if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14)))
				break;
			DELAY(10);
		}
	}
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	uint32_t dma_rw_ctl;
	int i;

	/* Set endianness before we access any non-PCI registers. */
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		device_printf(sc->bge_dev, "RX CPU self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_flags & BGE_FLAG_PCIE) {
		/* PCI Express bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
	} else if (sc->bge_flags & BGE_FLAG_PCIX) {
		/* PCI-X bus */
		if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (sc->bge_asicrev == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			else
				dma_rw_ctl |= (1 << 20) | (1 << 18) | (1 << 15);

		} else if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			/*
			 * The 5704 uses a different encoding of read/write
			 * watermarks.
			 */
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			uint32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	} else
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5705)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM);

	/*
	 * Tell the firmware the driver is running.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return (0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	struct bge_rcb *rcb;
	bus_size_t vrcb;
	bge_hostaddr taddr;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Note: the BCM5704 has a smaller mbuf space than other chips. */

	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		/* Configure mbuf memory pool */
		if (sc->bge_flags & BGE_FLAG_EXTRAM) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			device_printf(sc->bge_dev,
			    "buffer manager failed to start\n");
			return (ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev, "flow-through queue init failed\n");
		return (ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
	rcb->bge_hostaddr.bge_addr_lo =
	    BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
	rcb->bge_hostaddr.bge_addr_hi =
	    BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
	if (BGE_IS_5705_OR_BEYOND(sc))
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	else
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_flags & BGE_FLAG_EXTRAM)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);

	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;

		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD|BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_flags & BGE_FLAG_EXTRAM)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
1381 */ 1382 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1383 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { 1384 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1385 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); 1386 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1387 vrcb += sizeof(struct bge_rcb); 1388 } 1389 1390 /* Configure TX RCB 0 (we use only the first ring) */ 1391 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; 1392 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); 1393 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1394 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1395 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 1396 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); 1397 if (!(BGE_IS_5705_OR_BEYOND(sc))) 1398 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1399 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); 1400 1401 /* Disable all unused RX return rings */ 1402 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1403 for (i = 0; i < BGE_RX_RINGS_MAX; i++) { 1404 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); 1405 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); 1406 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1407 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 1408 BGE_RCB_FLAG_RING_DISABLED)); 1409 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); 1410 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + 1411 (i * (sizeof(uint64_t))), 0); 1412 vrcb += sizeof(struct bge_rcb); 1413 } 1414 1415 /* Initialize RX ring indexes */ 1416 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); 1417 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); 1418 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); 1419 1420 /* 1421 * Set up RX return ring 0 1422 * Note that the NIC address for RX return rings is 0x00000000. 1423 * The return rings live entirely within the host, so the 1424 * nicaddr field in the RCB isn't used. 1425 */ 1426 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; 1427 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); 1428 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); 1429 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); 1430 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); 1431 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, 1432 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); 1433 1434 /* Set random backoff seed for TX */ 1435 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, 1436 IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + 1437 IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + 1438 IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] + 1439 BGE_TX_BACKOFF_SEED_MASK); 1440 1441 /* Set inter-packet gap */ 1442 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); 1443 1444 /* 1445 * Specify which ring to use for packets that don't match 1446 * any RX rules. 1447 */ 1448 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); 1449 1450 /* 1451 * Configure number of RX lists. One interrupt distribution 1452 * list, sixteen active lists, one bad frames class. 1453 */ 1454 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); 1455 1456 /* Inialize RX list placement stats mask. */ 1457 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); 1458 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); 1459 1460 /* Disable host coalescing until we get it set up */ 1461 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); 1462 1463 /* Poll to make sure it's shut down. 
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "host coalescing engine failed to idle\n");
		return (ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
		    BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
		    BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
	}

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
	    BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    ((sc->bge_flags & BGE_FLAG_TBI) ?
	    BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_flags & BGE_FLAG_TBI) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
		    sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/*
	 * Clear any pending link state attention.
	 * Otherwise some link state change events may be lost until attention
	 * is cleared by bge_intr() -> bge_link_upd() sequence.
	 * It's not necessary on newer BCM chips - perhaps enabling link
	 * state change attentions implies clearing pending attention.
	 */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return (0);
}

const struct bge_revision *
bge_lookup_rev(uint32_t chipid)
{
	const struct bge_revision *br;

	for (br = bge_revisions; br->br_name != NULL; br++) {
		if (br->br_chipid == chipid)
			return (br);
	}

	for (br = bge_majorrevs; br->br_name != NULL; br++) {
		if (br->br_chipid == BGE_ASICREV(chipid))
			return (br);
	}

	return (NULL);
}

const struct bge_vendor *
bge_lookup_vendor(uint16_t vid)
{
	const struct bge_vendor *v;

	for (v = bge_vendors; v->v_name != NULL; v++)
		if (v->v_id == vid)
			return (v);

	panic("%s: unknown vendor %d", __func__, vid);
	return (NULL);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 *
 * Note that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name. Unfortunately, this
 * is possible only later in bge_attach(), when we have established
 * access to EEPROM.
 */
static int
bge_probe(device_t dev)
{
	struct bge_type *t = bge_devs;
	struct bge_softc *sc = device_get_softc(dev);

	bzero(sc, sizeof(struct bge_softc));
	sc->bge_dev = dev;

	while (t->bge_vid != 0) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
			char buf[64];
			const struct bge_revision *br;
			const struct bge_vendor *v;
			uint32_t id;

			id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
			    BGE_PCIMISCCTL_ASICREV;
			br = bge_lookup_rev(id);
			id >>= 16;
			v = bge_lookup_vendor(t->bge_vid);
			if (br == NULL)
				snprintf(buf, 64, "%s unknown ASIC (%#04x)",
				    v->v_name, id);
			else
				snprintf(buf, 64, "%s %s, ASIC rev. %#04x",
				    v->v_name, br->br_name, id);
			device_set_desc_copy(dev, buf);
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_flags |= BGE_FLAG_NO3LED;
			return (0);
		}
		t++;
	}

	return (ENXIO);
}

static void
bge_dma_free(struct bge_softc *sc)
{
	int i;

	/* Destroy DMA maps for RX buffers. */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Destroy DMA maps for jumbo RX buffers. */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}

	/* Destroy DMA maps for TX buffers. */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);

	/* Destroy standard RX ring. */
*/ 1730 if (sc->bge_cdata.bge_rx_std_ring_map) 1731 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, 1732 sc->bge_cdata.bge_rx_std_ring_map); 1733 if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) 1734 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, 1735 sc->bge_ldata.bge_rx_std_ring, 1736 sc->bge_cdata.bge_rx_std_ring_map); 1737 1738 if (sc->bge_cdata.bge_rx_std_ring_tag) 1739 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); 1740 1741 /* Destroy jumbo RX ring. */ 1742 if (sc->bge_cdata.bge_rx_jumbo_ring_map) 1743 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1744 sc->bge_cdata.bge_rx_jumbo_ring_map); 1745 1746 if (sc->bge_cdata.bge_rx_jumbo_ring_map && 1747 sc->bge_ldata.bge_rx_jumbo_ring) 1748 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1749 sc->bge_ldata.bge_rx_jumbo_ring, 1750 sc->bge_cdata.bge_rx_jumbo_ring_map); 1751 1752 if (sc->bge_cdata.bge_rx_jumbo_ring_tag) 1753 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); 1754 1755 /* Destroy RX return ring. */ 1756 if (sc->bge_cdata.bge_rx_return_ring_map) 1757 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, 1758 sc->bge_cdata.bge_rx_return_ring_map); 1759 1760 if (sc->bge_cdata.bge_rx_return_ring_map && 1761 sc->bge_ldata.bge_rx_return_ring) 1762 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, 1763 sc->bge_ldata.bge_rx_return_ring, 1764 sc->bge_cdata.bge_rx_return_ring_map); 1765 1766 if (sc->bge_cdata.bge_rx_return_ring_tag) 1767 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); 1768 1769 /* Destroy TX ring. */ 1770 if (sc->bge_cdata.bge_tx_ring_map) 1771 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, 1772 sc->bge_cdata.bge_tx_ring_map); 1773 1774 if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) 1775 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, 1776 sc->bge_ldata.bge_tx_ring, 1777 sc->bge_cdata.bge_tx_ring_map); 1778 1779 if (sc->bge_cdata.bge_tx_ring_tag) 1780 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); 1781 1782 /* Destroy status block. */ 1783 if (sc->bge_cdata.bge_status_map) 1784 bus_dmamap_unload(sc->bge_cdata.bge_status_tag, 1785 sc->bge_cdata.bge_status_map); 1786 1787 if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) 1788 bus_dmamem_free(sc->bge_cdata.bge_status_tag, 1789 sc->bge_ldata.bge_status_block, 1790 sc->bge_cdata.bge_status_map); 1791 1792 if (sc->bge_cdata.bge_status_tag) 1793 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); 1794 1795 /* Destroy statistics block. */ 1796 if (sc->bge_cdata.bge_stats_map) 1797 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, 1798 sc->bge_cdata.bge_stats_map); 1799 1800 if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) 1801 bus_dmamem_free(sc->bge_cdata.bge_stats_tag, 1802 sc->bge_ldata.bge_stats, 1803 sc->bge_cdata.bge_stats_map); 1804 1805 if (sc->bge_cdata.bge_stats_tag) 1806 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); 1807 1808 /* Destroy the parent tag. */ 1809 if (sc->bge_cdata.bge_parent_tag) 1810 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); 1811 } 1812 1813 static int 1814 bge_dma_alloc(device_t dev) 1815 { 1816 struct bge_dmamap_arg ctx; 1817 struct bge_softc *sc; 1818 int i, error; 1819 1820 sc = device_get_softc(dev); 1821 1822 /* 1823 * Allocate the parent bus DMA tag appropriate for PCI. 
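 * Every ring and block in this routine follows the same busdma sequence:
 * create a tag, allocate DMA'able memory, then load the map with a
 * callback that records the bus address in ctx.bge_busaddr.  Roughly,
 * such a single-segment load callback (sketched here under an assumed
 * name; bge_dma_map_addr() itself is expected to behave the same way)
 * looks like:
 *
 *	static void
 *	example_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct bge_dmamap_arg *ctx = arg;
 *
 *		if (error)
 *			return;
 *		ctx->bge_busaddr = segs[0].ds_addr;
 *	}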
1824 */ 1825 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),/* parent */ 1826 1, 0, /* alignment, boundary */ 1827 BUS_SPACE_MAXADDR, /* lowaddr */ 1828 BUS_SPACE_MAXADDR, /* highaddr */ 1829 NULL, NULL, /* filter, filterarg */ 1830 MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ 1831 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1832 0, /* flags */ 1833 NULL, NULL, /* lockfunc, lockarg */ 1834 &sc->bge_cdata.bge_parent_tag); 1835 1836 if (error != 0) { 1837 device_printf(sc->bge_dev, 1838 "could not allocate parent dma tag\n"); 1839 return (ENOMEM); 1840 } 1841 1842 /* 1843 * Create tag for RX mbufs. 1844 */ 1845 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 1846 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1847 NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, 1848 BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); 1849 1850 if (error) { 1851 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1852 return (ENOMEM); 1853 } 1854 1855 /* Create DMA maps for RX buffers. */ 1856 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { 1857 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1858 &sc->bge_cdata.bge_rx_std_dmamap[i]); 1859 if (error) { 1860 device_printf(sc->bge_dev, 1861 "can't create DMA map for RX\n"); 1862 return (ENOMEM); 1863 } 1864 } 1865 1866 /* Create DMA maps for TX buffers. */ 1867 for (i = 0; i < BGE_TX_RING_CNT; i++) { 1868 error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, 1869 &sc->bge_cdata.bge_tx_dmamap[i]); 1870 if (error) { 1871 device_printf(sc->bge_dev, 1872 "can't create DMA map for RX\n"); 1873 return (ENOMEM); 1874 } 1875 } 1876 1877 /* Create tag for standard RX ring. */ 1878 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1879 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1880 NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, 1881 NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); 1882 1883 if (error) { 1884 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1885 return (ENOMEM); 1886 } 1887 1888 /* Allocate DMA'able memory for standard RX ring. */ 1889 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, 1890 (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, 1891 &sc->bge_cdata.bge_rx_std_ring_map); 1892 if (error) 1893 return (ENOMEM); 1894 1895 bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); 1896 1897 /* Load the address of the standard RX ring. */ 1898 ctx.bge_maxsegs = 1; 1899 ctx.sc = sc; 1900 1901 error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, 1902 sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, 1903 BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1904 1905 if (error) 1906 return (ENOMEM); 1907 1908 sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; 1909 1910 /* Create tags for jumbo mbufs. */ 1911 if (BGE_IS_JUMBO_CAPABLE(sc)) { 1912 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1913 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1914 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 1915 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); 1916 if (error) { 1917 device_printf(sc->bge_dev, 1918 "could not allocate jumbo dma tag\n"); 1919 return (ENOMEM); 1920 } 1921 1922 /* Create tag for jumbo RX ring. 
*/ 1923 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1924 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1925 NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, 1926 NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); 1927 1928 if (error) { 1929 device_printf(sc->bge_dev, 1930 "could not allocate jumbo ring dma tag\n"); 1931 return (ENOMEM); 1932 } 1933 1934 /* Allocate DMA'able memory for jumbo RX ring. */ 1935 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1936 (void **)&sc->bge_ldata.bge_rx_jumbo_ring, 1937 BUS_DMA_NOWAIT | BUS_DMA_ZERO, 1938 &sc->bge_cdata.bge_rx_jumbo_ring_map); 1939 if (error) 1940 return (ENOMEM); 1941 1942 /* Load the address of the jumbo RX ring. */ 1943 ctx.bge_maxsegs = 1; 1944 ctx.sc = sc; 1945 1946 error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, 1947 sc->bge_cdata.bge_rx_jumbo_ring_map, 1948 sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, 1949 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1950 1951 if (error) 1952 return (ENOMEM); 1953 1954 sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; 1955 1956 /* Create DMA maps for jumbo RX buffers. */ 1957 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { 1958 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 1959 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); 1960 if (error) { 1961 device_printf(sc->bge_dev, 1962 "can't create DMA map for jumbo RX\n"); 1963 return (ENOMEM); 1964 } 1965 } 1966 1967 } 1968 1969 /* Create tag for RX return ring. */ 1970 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1971 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 1972 NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, 1973 NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); 1974 1975 if (error) { 1976 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 1977 return (ENOMEM); 1978 } 1979 1980 /* Allocate DMA'able memory for RX return ring. */ 1981 error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, 1982 (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, 1983 &sc->bge_cdata.bge_rx_return_ring_map); 1984 if (error) 1985 return (ENOMEM); 1986 1987 bzero((char *)sc->bge_ldata.bge_rx_return_ring, 1988 BGE_RX_RTN_RING_SZ(sc)); 1989 1990 /* Load the address of the RX return ring. */ 1991 ctx.bge_maxsegs = 1; 1992 ctx.sc = sc; 1993 1994 error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, 1995 sc->bge_cdata.bge_rx_return_ring_map, 1996 sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), 1997 bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 1998 1999 if (error) 2000 return (ENOMEM); 2001 2002 sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; 2003 2004 /* Create tag for TX ring. */ 2005 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2006 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2007 NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, 2008 &sc->bge_cdata.bge_tx_ring_tag); 2009 2010 if (error) { 2011 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2012 return (ENOMEM); 2013 } 2014 2015 /* Allocate DMA'able memory for TX ring. */ 2016 error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, 2017 (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, 2018 &sc->bge_cdata.bge_tx_ring_map); 2019 if (error) 2020 return (ENOMEM); 2021 2022 bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); 2023 2024 /* Load the address of the TX ring. 
*/ 2025 ctx.bge_maxsegs = 1; 2026 ctx.sc = sc; 2027 2028 error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, 2029 sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, 2030 BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2031 2032 if (error) 2033 return (ENOMEM); 2034 2035 sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; 2036 2037 /* Create tag for status block. */ 2038 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2039 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2040 NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, 2041 NULL, NULL, &sc->bge_cdata.bge_status_tag); 2042 2043 if (error) { 2044 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2045 return (ENOMEM); 2046 } 2047 2048 /* Allocate DMA'able memory for status block. */ 2049 error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, 2050 (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, 2051 &sc->bge_cdata.bge_status_map); 2052 if (error) 2053 return (ENOMEM); 2054 2055 bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); 2056 2057 /* Load the address of the status block. */ 2058 ctx.sc = sc; 2059 ctx.bge_maxsegs = 1; 2060 2061 error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, 2062 sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, 2063 BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2064 2065 if (error) 2066 return (ENOMEM); 2067 2068 sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; 2069 2070 /* Create tag for statistics block. */ 2071 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 2072 PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, 2073 NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, 2074 &sc->bge_cdata.bge_stats_tag); 2075 2076 if (error) { 2077 device_printf(sc->bge_dev, "could not allocate dma tag\n"); 2078 return (ENOMEM); 2079 } 2080 2081 /* Allocate DMA'able memory for statistics block. */ 2082 error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, 2083 (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, 2084 &sc->bge_cdata.bge_stats_map); 2085 if (error) 2086 return (ENOMEM); 2087 2088 bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); 2089 2090 /* Load the address of the statstics block. */ 2091 ctx.sc = sc; 2092 ctx.bge_maxsegs = 1; 2093 2094 error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, 2095 sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, 2096 BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); 2097 2098 if (error) 2099 return (ENOMEM); 2100 2101 sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; 2102 2103 return (0); 2104 } 2105 2106 static int 2107 bge_attach(device_t dev) 2108 { 2109 struct ifnet *ifp; 2110 struct bge_softc *sc; 2111 uint32_t hwcfg = 0; 2112 uint32_t mac_tmp = 0; 2113 u_char eaddr[6]; 2114 int error = 0, rid; 2115 int trys; 2116 2117 sc = device_get_softc(dev); 2118 sc->bge_dev = dev; 2119 2120 /* 2121 * Map control/status registers. 2122 */ 2123 pci_enable_busmaster(dev); 2124 2125 rid = BGE_PCI_BAR0; 2126 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 2127 RF_ACTIVE|PCI_RF_DENSE); 2128 2129 if (sc->bge_res == NULL) { 2130 device_printf (sc->bge_dev, "couldn't map memory\n"); 2131 error = ENXIO; 2132 goto fail; 2133 } 2134 2135 sc->bge_btag = rman_get_bustag(sc->bge_res); 2136 sc->bge_bhandle = rman_get_bushandle(sc->bge_res); 2137 2138 /* Allocate interrupt. 
*/ 2139 rid = 0; 2140 2141 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 2142 RF_SHAREABLE | RF_ACTIVE); 2143 2144 if (sc->bge_irq == NULL) { 2145 device_printf(sc->bge_dev, "couldn't map interrupt\n"); 2146 error = ENXIO; 2147 goto fail; 2148 } 2149 2150 BGE_LOCK_INIT(sc, device_get_nameunit(dev)); 2151 2152 /* Save ASIC rev. */ 2153 2154 sc->bge_chipid = 2155 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & 2156 BGE_PCIMISCCTL_ASICREV; 2157 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); 2158 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); 2159 2160 /* 2161 * XXX: Broadcom Linux driver. Not in specs or eratta. 2162 * PCI-Express? 2163 */ 2164 if (BGE_IS_5705_OR_BEYOND(sc)) { 2165 uint32_t v; 2166 2167 v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); 2168 if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) { 2169 v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); 2170 if ((v & 0xff) == BGE_PCIE_CAPID) 2171 sc->bge_flags |= BGE_FLAG_PCIE; 2172 } 2173 } 2174 2175 /* 2176 * PCI-X ? 2177 */ 2178 if ((pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & 2179 BGE_PCISTATE_PCI_BUSMODE) == 0) 2180 sc->bge_flags |= BGE_FLAG_PCIX; 2181 2182 /* Try to reset the chip. */ 2183 if (bge_reset(sc)) { 2184 device_printf(sc->bge_dev, "chip reset failed\n"); 2185 bge_release_resources(sc); 2186 error = ENXIO; 2187 goto fail; 2188 } 2189 2190 sc->bge_asf_mode = 0; 2191 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) 2192 == BGE_MAGIC_NUMBER)) { 2193 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) 2194 & BGE_HWCFG_ASF) { 2195 sc->bge_asf_mode |= ASF_ENABLE; 2196 sc->bge_asf_mode |= ASF_STACKUP; 2197 if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { 2198 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; 2199 } 2200 } 2201 } 2202 2203 /* Try to reset the chip again the nice way. */ 2204 bge_stop_fw(sc); 2205 bge_sig_pre_reset(sc, BGE_RESET_STOP); 2206 if (bge_reset(sc)) { 2207 device_printf(sc->bge_dev, "chip reset failed\n"); 2208 bge_release_resources(sc); 2209 error = ENXIO; 2210 goto fail; 2211 } 2212 2213 bge_sig_legacy(sc, BGE_RESET_STOP); 2214 bge_sig_post_reset(sc, BGE_RESET_STOP); 2215 2216 if (bge_chipinit(sc)) { 2217 device_printf(sc->bge_dev, "chip initialization failed\n"); 2218 bge_release_resources(sc); 2219 error = ENXIO; 2220 goto fail; 2221 } 2222 2223 /* 2224 * Get station address from the EEPROM. 2225 */ 2226 mac_tmp = bge_readmem_ind(sc, 0x0c14); 2227 if ((mac_tmp >> 16) == 0x484b) { 2228 eaddr[0] = (u_char)(mac_tmp >> 8); 2229 eaddr[1] = (u_char)mac_tmp; 2230 mac_tmp = bge_readmem_ind(sc, 0x0c18); 2231 eaddr[2] = (u_char)(mac_tmp >> 24); 2232 eaddr[3] = (u_char)(mac_tmp >> 16); 2233 eaddr[4] = (u_char)(mac_tmp >> 8); 2234 eaddr[5] = (u_char)mac_tmp; 2235 } else if (bge_read_eeprom(sc, eaddr, 2236 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 2237 device_printf(sc->bge_dev, "failed to read station address\n"); 2238 bge_release_resources(sc); 2239 error = ENXIO; 2240 goto fail; 2241 } 2242 2243 /* 5705 limits RX return ring to 512 entries. */ 2244 if (BGE_IS_5705_OR_BEYOND(sc)) 2245 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; 2246 else 2247 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; 2248 2249 if (bge_dma_alloc(dev)) { 2250 device_printf(sc->bge_dev, 2251 "failed to allocate DMA resources\n"); 2252 bge_release_resources(sc); 2253 error = ENXIO; 2254 goto fail; 2255 } 2256 2257 /* Set default tuneable values. 
*/ 2258 sc->bge_stat_ticks = BGE_TICKS_PER_SEC; 2259 sc->bge_rx_coal_ticks = 150; 2260 sc->bge_tx_coal_ticks = 150; 2261 sc->bge_rx_max_coal_bds = 64; 2262 sc->bge_tx_max_coal_bds = 128; 2263 2264 /* Set up ifnet structure */ 2265 ifp = sc->bge_ifp = if_alloc(IFT_ETHER); 2266 if (ifp == NULL) { 2267 device_printf(sc->bge_dev, "failed to if_alloc()\n"); 2268 bge_release_resources(sc); 2269 error = ENXIO; 2270 goto fail; 2271 } 2272 ifp->if_softc = sc; 2273 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2274 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2275 ifp->if_ioctl = bge_ioctl; 2276 ifp->if_start = bge_start; 2277 ifp->if_watchdog = bge_watchdog; 2278 ifp->if_init = bge_init; 2279 ifp->if_mtu = ETHERMTU; 2280 ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; 2281 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 2282 IFQ_SET_READY(&ifp->if_snd); 2283 ifp->if_hwassist = BGE_CSUM_FEATURES; 2284 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | 2285 IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; 2286 ifp->if_capenable = ifp->if_capabilities; 2287 #ifdef DEVICE_POLLING 2288 ifp->if_capabilities |= IFCAP_POLLING; 2289 #endif 2290 2291 /* 2292 * 5700 B0 chips do not support checksumming correctly due 2293 * to hardware bugs. 2294 */ 2295 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { 2296 ifp->if_capabilities &= ~IFCAP_HWCSUM; 2297 ifp->if_capenable &= ~IFCAP_HWCSUM; 2298 ifp->if_hwassist = 0; 2299 } 2300 2301 /* 2302 * Figure out what sort of media we have by checking the 2303 * hardware config word in the first 32k of NIC internal memory, 2304 * or fall back to examining the EEPROM if necessary. 2305 * Note: on some BCM5700 cards, this value appears to be unset. 2306 * If that's the case, we have to rely on identifying the NIC 2307 * by its PCI subsystem ID, as we do below for the SysKonnect 2308 * SK-9D41. 2309 */ 2310 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) 2311 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); 2312 else { 2313 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, 2314 sizeof(hwcfg))) { 2315 device_printf(sc->bge_dev, "failed to read EEPROM\n"); 2316 bge_release_resources(sc); 2317 error = ENXIO; 2318 goto fail; 2319 } 2320 hwcfg = ntohl(hwcfg); 2321 } 2322 2323 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) 2324 sc->bge_flags |= BGE_FLAG_TBI; 2325 2326 /* The SysKonnect SK-9D41 is a 1000baseSX card. */ 2327 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) 2328 sc->bge_flags |= BGE_FLAG_TBI; 2329 2330 if (sc->bge_flags & BGE_FLAG_TBI) { 2331 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, 2332 bge_ifmedia_upd, bge_ifmedia_sts); 2333 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2334 ifmedia_add(&sc->bge_ifmedia, 2335 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2336 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2337 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); 2338 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; 2339 } else { 2340 /* 2341 * Do transceiver setup and tell the firmware the 2342 * driver is down so we can try to get access to the 2343 * PHY during the probe in case ASF is running. Retry 2344 * a couple of times if we get a conflict with the ASF 2345 * firmware accessing the PHY.
2346 */ 2347 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2348 again: 2349 bge_asf_driver_up(sc); 2350 2351 trys = 0; 2352 if (mii_phy_probe(dev, &sc->bge_miibus, 2353 bge_ifmedia_upd, bge_ifmedia_sts)) { 2354 if (trys++ < 4) { 2355 device_printf(sc->bge_dev, "Try again\n"); 2356 bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, BMCR_RESET); 2357 goto again; 2358 } 2359 2360 device_printf(sc->bge_dev, "MII without any PHY!\n"); 2361 bge_release_resources(sc); 2362 error = ENXIO; 2363 goto fail; 2364 } 2365 2366 /* 2367 * Now tell the firmware we are going up after probing the PHY 2368 */ 2369 if (sc->bge_asf_mode & ASF_STACKUP) 2370 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2371 } 2372 2373 /* 2374 * When using the BCM5701 in PCI-X mode, data corruption has 2375 * been observed in the first few bytes of some received packets. 2376 * Aligning the packet buffer in memory eliminates the corruption. 2377 * Unfortunately, this misaligns the packet payloads. On platforms 2378 * which do not support unaligned accesses, we will realign the 2379 * payloads by copying the received packets. 2380 */ 2381 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && 2382 sc->bge_flags & BGE_FLAG_PCIX) 2383 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; 2384 2385 /* 2386 * Call MI attach routine. 2387 */ 2388 ether_ifattach(ifp, eaddr); 2389 callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE); 2390 2391 /* 2392 * Hookup IRQ last. 2393 */ 2394 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, 2395 bge_intr, sc, &sc->bge_intrhand); 2396 2397 if (error) { 2398 bge_detach(dev); 2399 device_printf(sc->bge_dev, "couldn't set up irq\n"); 2400 } 2401 2402 fail: 2403 return (error); 2404 } 2405 2406 static int 2407 bge_detach(device_t dev) 2408 { 2409 struct bge_softc *sc; 2410 struct ifnet *ifp; 2411 2412 sc = device_get_softc(dev); 2413 ifp = sc->bge_ifp; 2414 2415 #ifdef DEVICE_POLLING 2416 if (ifp->if_capenable & IFCAP_POLLING) 2417 ether_poll_deregister(ifp); 2418 #endif 2419 2420 BGE_LOCK(sc); 2421 bge_stop(sc); 2422 bge_reset(sc); 2423 BGE_UNLOCK(sc); 2424 2425 ether_ifdetach(ifp); 2426 2427 if (sc->bge_flags & BGE_FLAG_TBI) { 2428 ifmedia_removeall(&sc->bge_ifmedia); 2429 } else { 2430 bus_generic_detach(dev); 2431 device_delete_child(dev, sc->bge_miibus); 2432 } 2433 2434 bge_release_resources(sc); 2435 2436 return (0); 2437 } 2438 2439 static void 2440 bge_release_resources(struct bge_softc *sc) 2441 { 2442 device_t dev; 2443 2444 dev = sc->bge_dev; 2445 2446 if (sc->bge_vpd_prodname != NULL) 2447 free(sc->bge_vpd_prodname, M_DEVBUF); 2448 2449 if (sc->bge_vpd_readonly != NULL) 2450 free(sc->bge_vpd_readonly, M_DEVBUF); 2451 2452 if (sc->bge_intrhand != NULL) 2453 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); 2454 2455 if (sc->bge_irq != NULL) 2456 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq); 2457 2458 if (sc->bge_res != NULL) 2459 bus_release_resource(dev, SYS_RES_MEMORY, 2460 BGE_PCI_BAR0, sc->bge_res); 2461 2462 if (sc->bge_ifp != NULL) 2463 if_free(sc->bge_ifp); 2464 2465 bge_dma_free(sc); 2466 2467 if (mtx_initialized(&sc->bge_mtx)) /* XXX */ 2468 BGE_LOCK_DESTROY(sc); 2469 } 2470 2471 static int 2472 bge_reset(struct bge_softc *sc) 2473 { 2474 device_t dev; 2475 uint32_t cachesize, command, pcistate, reset; 2476 int i, val = 0; 2477 2478 dev = sc->bge_dev; 2479 2480 /* Save some important PCI state. 
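 * The cache line size and command registers saved here are written back
 * once the global reset below completes; PCISTATE is only compared
 * against afterwards to detect that the reset has finished.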
*/ 2481 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); 2482 command = pci_read_config(dev, BGE_PCI_CMD, 4); 2483 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); 2484 2485 pci_write_config(dev, BGE_PCI_MISC_CTL, 2486 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2487 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2488 2489 reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1); 2490 2491 /* XXX: Broadcom Linux driver. */ 2492 if (sc->bge_flags & BGE_FLAG_PCIE) { 2493 if (CSR_READ_4(sc, 0x7e2c) == 0x60) /* PCIE 1.0 */ 2494 CSR_WRITE_4(sc, 0x7e2c, 0x20); 2495 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2496 /* Prevent PCIE link training during global reset */ 2497 CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29)); 2498 reset |= (1<<29); 2499 } 2500 } 2501 2502 /* 2503 * Write the magic number to the firmware mailbox at 0xb50 2504 * so that the driver can synchronize with the firmware. 2505 */ 2506 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); 2507 2508 /* Issue global reset */ 2509 bge_writereg_ind(sc, BGE_MISC_CFG, reset); 2510 2511 DELAY(1000); 2512 2513 /* XXX: Broadcom Linux driver. */ 2514 if (sc->bge_flags & BGE_FLAG_PCIE) { 2515 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { 2516 uint32_t v; 2517 2518 DELAY(500000); /* wait for link training to complete */ 2519 v = pci_read_config(dev, 0xc4, 4); 2520 pci_write_config(dev, 0xc4, v | (1<<15), 4); 2521 } 2522 /* Set PCIE max payload size and clear error status. */ 2523 pci_write_config(dev, 0xd8, 0xf5000, 4); 2524 } 2525 2526 /* Reset some of the PCI state that got zapped by reset. */ 2527 pci_write_config(dev, BGE_PCI_MISC_CTL, 2528 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR| 2529 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW, 4); 2530 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); 2531 pci_write_config(dev, BGE_PCI_CMD, command, 4); 2532 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1)); 2533 2534 /* Enable memory arbiter. */ 2535 if (BGE_IS_5714_FAMILY(sc)) { 2536 uint32_t val; 2537 2538 val = CSR_READ_4(sc, BGE_MARB_MODE); 2539 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); 2540 } else 2541 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); 2542 2543 /* 2544 * Poll the value location we just wrote until 2545 * we see the 1's complement of the magic number. 2546 * This indicates that the firmware initialization 2547 * is complete. 2548 */ 2549 for (i = 0; i < BGE_TIMEOUT; i++) { 2550 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); 2551 if (val == ~BGE_MAGIC_NUMBER) 2552 break; 2553 DELAY(10); 2554 } 2555 2556 if (i == BGE_TIMEOUT) { 2557 device_printf(sc->bge_dev, "firmware handshake timed out\n"); 2558 return(0); 2559 } 2560 2561 /* 2562 * XXX Wait for the value of the PCISTATE register to 2563 * return to its original pre-reset state. This is a 2564 * fairly good indicator of reset completion. If we don't 2565 * wait for the reset to fully complete, trying to read 2566 * from the device's non-PCI registers may yield garbage 2567 * results. 2568 */ 2569 for (i = 0; i < BGE_TIMEOUT; i++) { 2570 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) 2571 break; 2572 DELAY(10); 2573 } 2574 2575 /* Fix up byte swapping. 
*/ 2576 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS| 2577 BGE_MODECTL_BYTESWAP_DATA); 2578 2579 /* Tell the ASF firmware we are up */ 2580 if (sc->bge_asf_mode & ASF_STACKUP) 2581 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 2582 2583 CSR_WRITE_4(sc, BGE_MAC_MODE, 0); 2584 2585 /* 2586 * The 5704 in TBI mode apparently needs some special 2587 * adjustment to insure the SERDES drive level is set 2588 * to 1.2V. 2589 */ 2590 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && 2591 sc->bge_flags & BGE_FLAG_TBI) { 2592 uint32_t serdescfg; 2593 2594 serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG); 2595 serdescfg = (serdescfg & ~0xFFF) | 0x880; 2596 CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg); 2597 } 2598 2599 /* XXX: Broadcom Linux driver. */ 2600 if (sc->bge_flags & BGE_FLAG_PCIE && 2601 sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { 2602 uint32_t v; 2603 2604 v = CSR_READ_4(sc, 0x7c00); 2605 CSR_WRITE_4(sc, 0x7c00, v | (1<<25)); 2606 } 2607 DELAY(10000); 2608 2609 return(0); 2610 } 2611 2612 /* 2613 * Frame reception handling. This is called if there's a frame 2614 * on the receive return list. 2615 * 2616 * Note: we have to be able to handle two possibilities here: 2617 * 1) the frame is from the jumbo receive ring 2618 * 2) the frame is from the standard receive ring 2619 */ 2620 2621 static void 2622 bge_rxeof(struct bge_softc *sc) 2623 { 2624 struct ifnet *ifp; 2625 int stdcnt = 0, jumbocnt = 0; 2626 2627 BGE_LOCK_ASSERT(sc); 2628 2629 /* Nothing to do. */ 2630 if (sc->bge_rx_saved_considx == 2631 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) 2632 return; 2633 2634 ifp = sc->bge_ifp; 2635 2636 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, 2637 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); 2638 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2639 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); 2640 if (BGE_IS_JUMBO_CAPABLE(sc)) 2641 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2642 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD); 2643 2644 while(sc->bge_rx_saved_considx != 2645 sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { 2646 struct bge_rx_bd *cur_rx; 2647 uint32_t rxidx; 2648 struct mbuf *m = NULL; 2649 uint16_t vlan_tag = 0; 2650 int have_tag = 0; 2651 2652 #ifdef DEVICE_POLLING 2653 if (ifp->if_capenable & IFCAP_POLLING) { 2654 if (sc->rxcycles <= 0) 2655 break; 2656 sc->rxcycles--; 2657 } 2658 #endif 2659 2660 cur_rx = 2661 &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; 2662 2663 rxidx = cur_rx->bge_idx; 2664 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); 2665 2666 if (!(ifp->if_flags & IFF_PROMISC) && 2667 (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)) { 2668 have_tag = 1; 2669 vlan_tag = cur_rx->bge_vlan_tag; 2670 } 2671 2672 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { 2673 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); 2674 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, 2675 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], 2676 BUS_DMASYNC_POSTREAD); 2677 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, 2678 sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); 2679 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; 2680 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; 2681 jumbocnt++; 2682 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2683 ifp->if_ierrors++; 2684 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2685 continue; 2686 } 2687 if (bge_newbuf_jumbo(sc, 2688 sc->bge_jumbo, NULL) == ENOBUFS) { 2689 ifp->if_ierrors++; 2690 bge_newbuf_jumbo(sc, sc->bge_jumbo, m); 2691 continue; 2692 } 2693 } else 
{ 2694 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); 2695 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2696 sc->bge_cdata.bge_rx_std_dmamap[rxidx], 2697 BUS_DMASYNC_POSTREAD); 2698 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2699 sc->bge_cdata.bge_rx_std_dmamap[rxidx]); 2700 m = sc->bge_cdata.bge_rx_std_chain[rxidx]; 2701 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; 2702 stdcnt++; 2703 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { 2704 ifp->if_ierrors++; 2705 bge_newbuf_std(sc, sc->bge_std, m); 2706 continue; 2707 } 2708 if (bge_newbuf_std(sc, sc->bge_std, 2709 NULL) == ENOBUFS) { 2710 ifp->if_ierrors++; 2711 bge_newbuf_std(sc, sc->bge_std, m); 2712 continue; 2713 } 2714 } 2715 2716 ifp->if_ipackets++; 2717 #ifndef __NO_STRICT_ALIGNMENT 2718 /* 2719 * For architectures with strict alignment we must make sure 2720 * the payload is aligned. 2721 */ 2722 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { 2723 bcopy(m->m_data, m->m_data + ETHER_ALIGN, 2724 cur_rx->bge_len); 2725 m->m_data += ETHER_ALIGN; 2726 } 2727 #endif 2728 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; 2729 m->m_pkthdr.rcvif = ifp; 2730 2731 if (ifp->if_capenable & IFCAP_RXCSUM) { 2732 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { 2733 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2734 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) 2735 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2736 } 2737 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && 2738 m->m_pkthdr.len >= ETHER_MIN_NOPAD) { 2739 m->m_pkthdr.csum_data = 2740 cur_rx->bge_tcp_udp_csum; 2741 m->m_pkthdr.csum_flags |= 2742 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2743 } 2744 } 2745 2746 /* 2747 * If we received a packet with a vlan tag, 2748 * attach that information to the packet. 2749 */ 2750 if (have_tag) { 2751 m->m_pkthdr.ether_vtag = vlan_tag; 2752 m->m_flags |= M_VLANTAG; 2753 } 2754 2755 BGE_UNLOCK(sc); 2756 (*ifp->if_input)(ifp, m); 2757 BGE_LOCK(sc); 2758 } 2759 2760 if (stdcnt > 0) 2761 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, 2762 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); 2763 2764 if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) 2765 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, 2766 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); 2767 2768 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); 2769 if (stdcnt) 2770 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); 2771 if (jumbocnt) 2772 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); 2773 } 2774 2775 static void 2776 bge_txeof(struct bge_softc *sc) 2777 { 2778 struct bge_tx_bd *cur_tx = NULL; 2779 struct ifnet *ifp; 2780 2781 BGE_LOCK_ASSERT(sc); 2782 2783 /* Nothing to do. */ 2784 if (sc->bge_tx_saved_considx == 2785 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) 2786 return; 2787 2788 ifp = sc->bge_ifp; 2789 2790 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, 2791 sc->bge_cdata.bge_tx_ring_map, 2792 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2793 /* 2794 * Go through our tx ring and free mbufs for those 2795 * frames that have been sent. 
2796 */ 2797 while (sc->bge_tx_saved_considx != 2798 sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { 2799 uint32_t idx = 0; 2800 2801 idx = sc->bge_tx_saved_considx; 2802 cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; 2803 if (cur_tx->bge_flags & BGE_TXBDFLAG_END) 2804 ifp->if_opackets++; 2805 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { 2806 bus_dmamap_sync(sc->bge_cdata.bge_mtag, 2807 sc->bge_cdata.bge_tx_dmamap[idx], 2808 BUS_DMASYNC_POSTWRITE); 2809 bus_dmamap_unload(sc->bge_cdata.bge_mtag, 2810 sc->bge_cdata.bge_tx_dmamap[idx]); 2811 m_freem(sc->bge_cdata.bge_tx_chain[idx]); 2812 sc->bge_cdata.bge_tx_chain[idx] = NULL; 2813 } 2814 sc->bge_txcnt--; 2815 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); 2816 ifp->if_timer = 0; 2817 } 2818 2819 if (cur_tx != NULL) 2820 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2821 } 2822 2823 #ifdef DEVICE_POLLING 2824 static void 2825 bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2826 { 2827 struct bge_softc *sc = ifp->if_softc; 2828 uint32_t statusword; 2829 2830 BGE_LOCK(sc); 2831 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2832 BGE_UNLOCK(sc); 2833 return; 2834 } 2835 2836 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2837 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 2838 2839 statusword = atomic_readandclear_32( 2840 &sc->bge_ldata.bge_status_block->bge_status); 2841 2842 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2843 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 2844 2845 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS cmd */ 2846 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) 2847 sc->bge_link_evt++; 2848 2849 if (cmd == POLL_AND_CHECK_STATUS) 2850 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2851 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 2852 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) 2853 bge_link_upd(sc); 2854 2855 sc->rxcycles = count; 2856 bge_rxeof(sc); 2857 bge_txeof(sc); 2858 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2859 bge_start_locked(ifp); 2860 2861 BGE_UNLOCK(sc); 2862 } 2863 #endif /* DEVICE_POLLING */ 2864 2865 static void 2866 bge_intr(void *xsc) 2867 { 2868 struct bge_softc *sc; 2869 struct ifnet *ifp; 2870 uint32_t statusword; 2871 2872 sc = xsc; 2873 2874 BGE_LOCK(sc); 2875 2876 ifp = sc->bge_ifp; 2877 2878 #ifdef DEVICE_POLLING 2879 if (ifp->if_capenable & IFCAP_POLLING) { 2880 BGE_UNLOCK(sc); 2881 return; 2882 } 2883 #endif 2884 2885 /* 2886 * Do the mandatory PCI flush as well as get the link status. 2887 */ 2888 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; 2889 2890 /* Ack interrupt and stop others from occuring. */ 2891 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 2892 2893 /* Make sure the descriptor ring indexes are coherent. */ 2894 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2895 sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); 2896 bus_dmamap_sync(sc->bge_cdata.bge_status_tag, 2897 sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); 2898 2899 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && 2900 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || 2901 statusword || sc->bge_link_evt) 2902 bge_link_upd(sc); 2903 2904 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2905 /* Check RX return ring producer/consumer. */ 2906 bge_rxeof(sc); 2907 2908 /* Check TX ring producer/consumer. */ 2909 bge_txeof(sc); 2910 } 2911 2912 /* Re-enable interrupts. 
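 * The IRQ0 mailbox also acts as the interrupt hold-off: bge_intr() writes
 * a non-zero value on entry to ack the interrupt and keep further ones
 * from being raised, and the zero written below re-arms delivery, i.e.
 *
 *	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);	(ack and hold off)
 *	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);	(re-arm)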
*/ 2913 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 2914 2915 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2916 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2917 bge_start_locked(ifp); 2918 2919 BGE_UNLOCK(sc); 2920 } 2921 2922 static void 2923 bge_asf_driver_up(struct bge_softc *sc) 2924 { 2925 if (sc->bge_asf_mode & ASF_STACKUP) { 2926 /* Send ASF heartbeat approx. every 2s */ 2927 if (sc->bge_asf_count) 2928 sc->bge_asf_count--; 2929 else { 2930 sc->bge_asf_count = 5; 2931 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, 2932 BGE_FW_DRV_ALIVE); 2933 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); 2934 bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); 2935 CSR_WRITE_4(sc, BGE_CPU_EVENT, 2936 CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); 2937 } 2938 } 2939 } 2940 2941 static void 2942 bge_tick_locked(struct bge_softc *sc) 2943 { 2944 struct mii_data *mii = NULL; 2945 2946 BGE_LOCK_ASSERT(sc); 2947 2948 if (BGE_IS_5705_OR_BEYOND(sc)) 2949 bge_stats_update_regs(sc); 2950 else 2951 bge_stats_update(sc); 2952 2953 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { 2954 mii = device_get_softc(sc->bge_miibus); 2955 /* Don't mess with the PHY in IPMI/ASF mode */ 2956 if (!((sc->bge_asf_mode & ASF_STACKUP) && (sc->bge_link))) 2957 mii_tick(mii); 2958 } else { 2959 /* 2960 * Since auto-polling can't be used in TBI mode, we poll link 2961 * status manually. Here we register a pending link event 2962 * and trigger an interrupt. 2963 */ 2964 #ifdef DEVICE_POLLING 2965 /* In polling mode we poll link state in bge_poll(). */ 2966 if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) 2967 #endif 2968 { 2969 sc->bge_link_evt++; 2970 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); 2971 } 2972 } 2973 2974 bge_asf_driver_up(sc); 2975 2976 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 2977 } 2978 2979 static void 2980 bge_tick(void *xsc) 2981 { 2982 struct bge_softc *sc; 2983 2984 sc = xsc; 2985 2986 BGE_LOCK(sc); 2987 bge_tick_locked(sc); 2988 BGE_UNLOCK(sc); 2989 } 2990 2991 static void 2992 bge_stats_update_regs(struct bge_softc *sc) 2993 { 2994 struct bge_mac_stats_regs stats; 2995 struct ifnet *ifp; 2996 uint32_t *s; 2997 u_long cnt; /* current register value */ 2998 int i; 2999 3000 ifp = sc->bge_ifp; 3001 3002 s = (uint32_t *)&stats; 3003 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { 3004 *s = CSR_READ_4(sc, BGE_RX_STATS + i); 3005 s++; 3006 } 3007 3008 cnt = stats.dot3StatsSingleCollisionFrames + 3009 stats.dot3StatsMultipleCollisionFrames + 3010 stats.dot3StatsExcessiveCollisions + 3011 stats.dot3StatsLateCollisions; 3012 ifp->if_collisions += cnt >= sc->bge_tx_collisions ? 3013 cnt - sc->bge_tx_collisions : cnt; 3014 sc->bge_tx_collisions = cnt; 3015 } 3016 3017 static void 3018 bge_stats_update(struct bge_softc *sc) 3019 { 3020 struct ifnet *ifp; 3021 bus_size_t stats; 3022 u_long cnt; /* current register value */ 3023 3024 ifp = sc->bge_ifp; 3025 3026 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; 3027 3028 #define READ_STAT(sc, stats, stat) \ 3029 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) 3030 3031 cnt = READ_STAT(sc, stats, 3032 txstats.dot3StatsSingleCollisionFrames.bge_addr_lo); 3033 cnt += READ_STAT(sc, stats, 3034 txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo); 3035 cnt += READ_STAT(sc, stats, 3036 txstats.dot3StatsExcessiveCollisions.bge_addr_lo); 3037 cnt += READ_STAT(sc, stats, 3038 txstats.dot3StatsLateCollisions.bge_addr_lo); 3039 ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
3040 cnt - sc->bge_tx_collisions : cnt; 3041 sc->bge_tx_collisions = cnt; 3042 3043 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); 3044 ifp->if_ierrors += cnt >= sc->bge_rx_discards ? 3045 cnt - sc->bge_rx_discards : cnt; 3046 sc->bge_rx_discards = cnt; 3047 3048 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); 3049 ifp->if_oerrors += cnt >= sc->bge_tx_discards ? 3050 cnt - sc->bge_tx_discards : cnt; 3051 sc->bge_tx_discards = cnt; 3052 3053 #undef READ_STAT 3054 } 3055 3056 /* 3057 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. 3058 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, 3059 * but when such padded frames employ the bge IP/TCP checksum offload, 3060 * the hardware checksum assist gives incorrect results (possibly 3061 * from incorporating its own padding into the UDP/TCP checksum; who knows). 3062 * If we pad such runts with zeros, the onboard checksum comes out correct. 3063 */ 3064 static __inline int 3065 bge_cksum_pad(struct mbuf *m) 3066 { 3067 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; 3068 struct mbuf *last; 3069 3070 /* If there's only the packet-header and we can pad there, use it. */ 3071 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && 3072 M_TRAILINGSPACE(m) >= padlen) { 3073 last = m; 3074 } else { 3075 /* 3076 * Walk packet chain to find last mbuf. We will either 3077 * pad there, or append a new mbuf and pad it. 3078 */ 3079 for (last = m; last->m_next != NULL; last = last->m_next); 3080 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { 3081 /* Allocate new empty mbuf, pad it. Compact later. */ 3082 struct mbuf *n; 3083 3084 MGET(n, M_DONTWAIT, MT_DATA); 3085 if (n == NULL) 3086 return (ENOBUFS); 3087 n->m_len = 0; 3088 last->m_next = n; 3089 last = n; 3090 } 3091 } 3092 3093 /* Now zero the pad area, to avoid the bge cksum-assist bug. */ 3094 memset(mtod(last, caddr_t) + last->m_len, 0, padlen); 3095 last->m_len += padlen; 3096 m->m_pkthdr.len += padlen; 3097 3098 return (0); 3099 } 3100 3101 /* 3102 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 3103 * pointers to descriptors. 
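 * Each DMA segment is turned into one bge_tx_bd, with the 64-bit bus
 * address split across the hi/lo words.  For example, assuming the usual
 * definitions of BGE_ADDR_HI() and BGE_ADDR_LO(), a segment at bus
 * address 0x0000000112345678 would be stored as
 *
 *	d->bge_addr.bge_addr_hi = 0x00000001;
 *	d->bge_addr.bge_addr_lo = 0x12345678;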
3104 */ 3105 static int 3106 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) 3107 { 3108 bus_dma_segment_t segs[BGE_NSEG_NEW]; 3109 bus_dmamap_t map; 3110 struct bge_tx_bd *d; 3111 struct mbuf *m = *m_head; 3112 uint32_t idx = *txidx; 3113 uint16_t csum_flags; 3114 int nsegs, i, error; 3115 3116 csum_flags = 0; 3117 if (m->m_pkthdr.csum_flags) { 3118 if (m->m_pkthdr.csum_flags & CSUM_IP) 3119 csum_flags |= BGE_TXBDFLAG_IP_CSUM; 3120 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { 3121 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; 3122 if (m->m_pkthdr.len < ETHER_MIN_NOPAD && 3123 (error = bge_cksum_pad(m)) != 0) { 3124 m_freem(m); 3125 *m_head = NULL; 3126 return (error); 3127 } 3128 } 3129 if (m->m_flags & M_LASTFRAG) 3130 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; 3131 else if (m->m_flags & M_FRAG) 3132 csum_flags |= BGE_TXBDFLAG_IP_FRAG; 3133 } 3134 3135 map = sc->bge_cdata.bge_tx_dmamap[idx]; 3136 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs, 3137 &nsegs, BUS_DMA_NOWAIT); 3138 if (error == EFBIG) { 3139 m = m_defrag(m, M_DONTWAIT); 3140 if (m == NULL) { 3141 m_freem(*m_head); 3142 *m_head = NULL; 3143 return (ENOBUFS); 3144 } 3145 *m_head = m; 3146 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, 3147 segs, &nsegs, BUS_DMA_NOWAIT); 3148 if (error) { 3149 m_freem(m); 3150 *m_head = NULL; 3151 return (error); 3152 } 3153 } else if (error != 0) 3154 return (error); 3155 3156 /* 3157 * Sanity check: avoid coming within 16 descriptors 3158 * of the end of the ring. 3159 */ 3160 if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { 3161 bus_dmamap_unload(sc->bge_cdata.bge_mtag, map); 3162 return (ENOBUFS); 3163 } 3164 3165 bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE); 3166 3167 for (i = 0; ; i++) { 3168 d = &sc->bge_ldata.bge_tx_ring[idx]; 3169 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); 3170 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); 3171 d->bge_len = segs[i].ds_len; 3172 d->bge_flags = csum_flags; 3173 if (i == nsegs - 1) 3174 break; 3175 BGE_INC(idx, BGE_TX_RING_CNT); 3176 } 3177 3178 /* Mark the last segment as end of packet... */ 3179 d->bge_flags |= BGE_TXBDFLAG_END; 3180 3181 /* ... and put VLAN tag into first segment. */ 3182 d = &sc->bge_ldata.bge_tx_ring[*txidx]; 3183 if (m->m_flags & M_VLANTAG) { 3184 d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; 3185 d->bge_vlan_tag = m->m_pkthdr.ether_vtag; 3186 } else 3187 d->bge_vlan_tag = 0; 3188 3189 /* 3190 * Insure that the map for this transmission 3191 * is placed at the array index of the last descriptor 3192 * in this chain. 3193 */ 3194 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; 3195 sc->bge_cdata.bge_tx_dmamap[idx] = map; 3196 sc->bge_cdata.bge_tx_chain[idx] = m; 3197 sc->bge_txcnt += nsegs; 3198 3199 BGE_INC(idx, BGE_TX_RING_CNT); 3200 *txidx = idx; 3201 3202 return (0); 3203 } 3204 3205 /* 3206 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3207 * to the mbuf data regions directly in the transmit descriptors. 
3208 */ 3209 static void 3210 bge_start_locked(struct ifnet *ifp) 3211 { 3212 struct bge_softc *sc; 3213 struct mbuf *m_head = NULL; 3214 uint32_t prodidx; 3215 int count = 0; 3216 3217 sc = ifp->if_softc; 3218 3219 if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3220 return; 3221 3222 prodidx = sc->bge_tx_prodidx; 3223 3224 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { 3225 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 3226 if (m_head == NULL) 3227 break; 3228 3229 /* 3230 * XXX 3231 * The code inside the if() block is never reached since we 3232 * must mark CSUM_IP_FRAGS in our if_hwassist to start getting 3233 * requests to checksum TCP/UDP in a fragmented packet. 3234 * 3235 * XXX 3236 * safety overkill. If this is a fragmented packet chain 3237 * with delayed TCP/UDP checksums, then only encapsulate 3238 * it if we have enough descriptors to handle the entire 3239 * chain at once. 3240 * (paranoia -- may not actually be needed) 3241 */ 3242 if (m_head->m_flags & M_FIRSTFRAG && 3243 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { 3244 if ((BGE_TX_RING_CNT - sc->bge_txcnt) < 3245 m_head->m_pkthdr.csum_data + 16) { 3246 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3247 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3248 break; 3249 } 3250 } 3251 3252 /* 3253 * Pack the data into the transmit ring. If we 3254 * don't have room, set the OACTIVE flag and wait 3255 * for the NIC to drain the ring. 3256 */ 3257 if (bge_encap(sc, &m_head, &prodidx)) { 3258 if (m_head == NULL) 3259 break; 3260 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 3261 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 3262 break; 3263 } 3264 ++count; 3265 3266 /* 3267 * If there's a BPF listener, bounce a copy of this frame 3268 * to him. 3269 */ 3270 BPF_MTAP(ifp, m_head); 3271 } 3272 3273 if (count == 0) 3274 /* No packets were dequeued. */ 3275 return; 3276 3277 /* Transmit. */ 3278 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3279 /* 5700 b2 errata */ 3280 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) 3281 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); 3282 3283 sc->bge_tx_prodidx = prodidx; 3284 3285 /* 3286 * Set a timeout in case the chip goes out to lunch. 3287 */ 3288 ifp->if_timer = 5; 3289 } 3290 3291 /* 3292 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 3293 * to the mbuf data regions directly in the transmit descriptors. 3294 */ 3295 static void 3296 bge_start(struct ifnet *ifp) 3297 { 3298 struct bge_softc *sc; 3299 3300 sc = ifp->if_softc; 3301 BGE_LOCK(sc); 3302 bge_start_locked(ifp); 3303 BGE_UNLOCK(sc); 3304 } 3305 3306 static void 3307 bge_init_locked(struct bge_softc *sc) 3308 { 3309 struct ifnet *ifp; 3310 uint16_t *m; 3311 3312 BGE_LOCK_ASSERT(sc); 3313 3314 ifp = sc->bge_ifp; 3315 3316 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3317 return; 3318 3319 /* Cancel pending I/O and flush buffers. */ 3320 bge_stop(sc); 3321 3322 bge_stop_fw(sc); 3323 bge_sig_pre_reset(sc, BGE_RESET_START); 3324 bge_reset(sc); 3325 bge_sig_legacy(sc, BGE_RESET_START); 3326 bge_sig_post_reset(sc, BGE_RESET_START); 3327 3328 bge_chipinit(sc); 3329 3330 /* 3331 * Init the various state machines, ring 3332 * control blocks and firmware. 3333 */ 3334 if (bge_blockinit(sc)) { 3335 device_printf(sc->bge_dev, "initialization failure\n"); 3336 return; 3337 } 3338 3339 ifp = sc->bge_ifp; 3340 3341 /* Specify MTU. */ 3342 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + 3343 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 3344 3345 /* Load our MAC address. 
*/ 3346 m = (uint16_t *)IF_LLADDR(sc->bge_ifp); 3347 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); 3348 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); 3349 3350 /* Program promiscuous mode. */ 3351 bge_setpromisc(sc); 3352 3353 /* Program multicast filter. */ 3354 bge_setmulti(sc); 3355 3356 /* Init RX ring. */ 3357 bge_init_rx_ring_std(sc); 3358 3359 /* 3360 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's 3361 * memory to insure that the chip has in fact read the first 3362 * entry of the ring. 3363 */ 3364 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { 3365 uint32_t v, i; 3366 for (i = 0; i < 10; i++) { 3367 DELAY(20); 3368 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); 3369 if (v == (MCLBYTES - ETHER_ALIGN)) 3370 break; 3371 } 3372 if (i == 10) 3373 device_printf (sc->bge_dev, 3374 "5705 A0 chip failed to load RX ring\n"); 3375 } 3376 3377 /* Init jumbo RX ring. */ 3378 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) 3379 bge_init_rx_ring_jumbo(sc); 3380 3381 /* Init our RX return ring index. */ 3382 sc->bge_rx_saved_considx = 0; 3383 3384 /* Init TX ring. */ 3385 bge_init_tx_ring(sc); 3386 3387 /* Turn on transmitter. */ 3388 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); 3389 3390 /* Turn on receiver. */ 3391 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); 3392 3393 /* Tell firmware we're alive. */ 3394 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); 3395 3396 #ifdef DEVICE_POLLING 3397 /* Disable interrupts if we are polling. */ 3398 if (ifp->if_capenable & IFCAP_POLLING) { 3399 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 3400 BGE_PCIMISCCTL_MASK_PCI_INTR); 3401 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3402 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 3403 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 3404 } else 3405 #endif 3406 3407 /* Enable host interrupts. */ 3408 { 3409 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); 3410 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); 3411 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3412 } 3413 3414 bge_ifmedia_upd_locked(ifp); 3415 3416 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3417 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3418 3419 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); 3420 } 3421 3422 static void 3423 bge_init(void *xsc) 3424 { 3425 struct bge_softc *sc = xsc; 3426 3427 BGE_LOCK(sc); 3428 bge_init_locked(sc); 3429 BGE_UNLOCK(sc); 3430 } 3431 3432 /* 3433 * Set media options. 3434 */ 3435 static int 3436 bge_ifmedia_upd(struct ifnet *ifp) 3437 { 3438 struct bge_softc *sc = ifp->if_softc; 3439 int res; 3440 3441 BGE_LOCK(sc); 3442 res = bge_ifmedia_upd_locked(ifp); 3443 BGE_UNLOCK(sc); 3444 3445 return (res); 3446 } 3447 3448 static int 3449 bge_ifmedia_upd_locked(struct ifnet *ifp) 3450 { 3451 struct bge_softc *sc = ifp->if_softc; 3452 struct mii_data *mii; 3453 struct ifmedia *ifm; 3454 3455 BGE_LOCK_ASSERT(sc); 3456 3457 ifm = &sc->bge_ifmedia; 3458 3459 /* If this is a 1000baseX NIC, enable the TBI port. */ 3460 if (sc->bge_flags & BGE_FLAG_TBI) { 3461 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 3462 return (EINVAL); 3463 switch(IFM_SUBTYPE(ifm->ifm_media)) { 3464 case IFM_AUTO: 3465 /* 3466 * The BCM5704 ASIC appears to have a special 3467 * mechanism for programming the autoneg 3468 * advertisement registers in TBI mode. 
3469 */ 3470 if (bge_fake_autoneg == 0 && 3471 sc->bge_asicrev == BGE_ASICREV_BCM5704) { 3472 uint32_t sgdig; 3473 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); 3474 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); 3475 sgdig |= BGE_SGDIGCFG_AUTO| 3476 BGE_SGDIGCFG_PAUSE_CAP| 3477 BGE_SGDIGCFG_ASYM_PAUSE; 3478 CSR_WRITE_4(sc, BGE_SGDIG_CFG, 3479 sgdig|BGE_SGDIGCFG_SEND); 3480 DELAY(5); 3481 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); 3482 } 3483 break; 3484 case IFM_1000_SX: 3485 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 3486 BGE_CLRBIT(sc, BGE_MAC_MODE, 3487 BGE_MACMODE_HALF_DUPLEX); 3488 } else { 3489 BGE_SETBIT(sc, BGE_MAC_MODE, 3490 BGE_MACMODE_HALF_DUPLEX); 3491 } 3492 break; 3493 default: 3494 return (EINVAL); 3495 } 3496 return (0); 3497 } 3498 3499 sc->bge_link_evt++; 3500 mii = device_get_softc(sc->bge_miibus); 3501 if (mii->mii_instance) { 3502 struct mii_softc *miisc; 3503 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 3504 miisc = LIST_NEXT(miisc, mii_list)) 3505 mii_phy_reset(miisc); 3506 } 3507 mii_mediachg(mii); 3508 3509 return (0); 3510 } 3511 3512 /* 3513 * Report current media status. 3514 */ 3515 static void 3516 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 3517 { 3518 struct bge_softc *sc = ifp->if_softc; 3519 struct mii_data *mii; 3520 3521 BGE_LOCK(sc); 3522 3523 if (sc->bge_flags & BGE_FLAG_TBI) { 3524 ifmr->ifm_status = IFM_AVALID; 3525 ifmr->ifm_active = IFM_ETHER; 3526 if (CSR_READ_4(sc, BGE_MAC_STS) & 3527 BGE_MACSTAT_TBI_PCS_SYNCHED) 3528 ifmr->ifm_status |= IFM_ACTIVE; 3529 else { 3530 ifmr->ifm_active |= IFM_NONE; 3531 BGE_UNLOCK(sc); 3532 return; 3533 } 3534 ifmr->ifm_active |= IFM_1000_SX; 3535 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) 3536 ifmr->ifm_active |= IFM_HDX; 3537 else 3538 ifmr->ifm_active |= IFM_FDX; 3539 BGE_UNLOCK(sc); 3540 return; 3541 } 3542 3543 mii = device_get_softc(sc->bge_miibus); 3544 mii_pollstat(mii); 3545 ifmr->ifm_active = mii->mii_media_active; 3546 ifmr->ifm_status = mii->mii_media_status; 3547 3548 BGE_UNLOCK(sc); 3549 } 3550 3551 static int 3552 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 3553 { 3554 struct bge_softc *sc = ifp->if_softc; 3555 struct ifreq *ifr = (struct ifreq *) data; 3556 struct mii_data *mii; 3557 int flags, mask, error = 0; 3558 3559 switch (command) { 3560 case SIOCSIFMTU: 3561 if (ifr->ifr_mtu < ETHERMIN || 3562 ((BGE_IS_JUMBO_CAPABLE(sc)) && 3563 ifr->ifr_mtu > BGE_JUMBO_MTU) || 3564 ((!BGE_IS_JUMBO_CAPABLE(sc)) && 3565 ifr->ifr_mtu > ETHERMTU)) 3566 error = EINVAL; 3567 else if (ifp->if_mtu != ifr->ifr_mtu) { 3568 ifp->if_mtu = ifr->ifr_mtu; 3569 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3570 bge_init(sc); 3571 } 3572 break; 3573 case SIOCSIFFLAGS: 3574 BGE_LOCK(sc); 3575 if (ifp->if_flags & IFF_UP) { 3576 /* 3577 * If only the state of the PROMISC flag changed, 3578 * then just use the 'set promisc mode' command 3579 * instead of reinitializing the entire NIC. Doing 3580 * a full re-init means reloading the firmware and 3581 * waiting for it to start up, which may take a 3582 * second or two. Similarly for ALLMULTI. 
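 * For example, if only promiscuous mode was toggled, the XOR of the old
 * and new interface flags below leaves just IFF_PROMISC set, so only
 * bge_setpromisc() runs and the chip keeps running undisturbed.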
3583 */ 3584 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3585 flags = ifp->if_flags ^ sc->bge_if_flags; 3586 if (flags & IFF_PROMISC) 3587 bge_setpromisc(sc); 3588 if (flags & IFF_ALLMULTI) 3589 bge_setmulti(sc); 3590 } else 3591 bge_init_locked(sc); 3592 } else { 3593 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3594 bge_stop(sc); 3595 } 3596 } 3597 sc->bge_if_flags = ifp->if_flags; 3598 BGE_UNLOCK(sc); 3599 error = 0; 3600 break; 3601 case SIOCADDMULTI: 3602 case SIOCDELMULTI: 3603 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3604 BGE_LOCK(sc); 3605 bge_setmulti(sc); 3606 BGE_UNLOCK(sc); 3607 error = 0; 3608 } 3609 break; 3610 case SIOCSIFMEDIA: 3611 case SIOCGIFMEDIA: 3612 if (sc->bge_flags & BGE_FLAG_TBI) { 3613 error = ifmedia_ioctl(ifp, ifr, 3614 &sc->bge_ifmedia, command); 3615 } else { 3616 mii = device_get_softc(sc->bge_miibus); 3617 error = ifmedia_ioctl(ifp, ifr, 3618 &mii->mii_media, command); 3619 } 3620 break; 3621 case SIOCSIFCAP: 3622 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3623 #ifdef DEVICE_POLLING 3624 if (mask & IFCAP_POLLING) { 3625 if (ifr->ifr_reqcap & IFCAP_POLLING) { 3626 error = ether_poll_register(bge_poll, ifp); 3627 if (error) 3628 return (error); 3629 BGE_LOCK(sc); 3630 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, 3631 BGE_PCIMISCCTL_MASK_PCI_INTR); 3632 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); 3633 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); 3634 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); 3635 ifp->if_capenable |= IFCAP_POLLING; 3636 BGE_UNLOCK(sc); 3637 } else { 3638 error = ether_poll_deregister(ifp); 3639 /* Enable interrupt even in error case */ 3640 BGE_LOCK(sc); 3641 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); 3642 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); 3643 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, 3644 BGE_PCIMISCCTL_MASK_PCI_INTR); 3645 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); 3646 ifp->if_capenable &= ~IFCAP_POLLING; 3647 BGE_UNLOCK(sc); 3648 } 3649 } 3650 #endif 3651 if (mask & IFCAP_HWCSUM) { 3652 ifp->if_capenable ^= IFCAP_HWCSUM; 3653 if (IFCAP_HWCSUM & ifp->if_capenable && 3654 IFCAP_HWCSUM & ifp->if_capabilities) 3655 ifp->if_hwassist = BGE_CSUM_FEATURES; 3656 else 3657 ifp->if_hwassist = 0; 3658 VLAN_CAPABILITIES(ifp); 3659 } 3660 break; 3661 default: 3662 error = ether_ioctl(ifp, command, data); 3663 break; 3664 } 3665 3666 return (error); 3667 } 3668 3669 static void 3670 bge_watchdog(struct ifnet *ifp) 3671 { 3672 struct bge_softc *sc; 3673 3674 sc = ifp->if_softc; 3675 3676 if_printf(ifp, "watchdog timeout -- resetting\n"); 3677 3678 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3679 bge_init(sc); 3680 3681 ifp->if_oerrors++; 3682 } 3683 3684 /* 3685 * Stop the adapter and free any mbufs allocated to the 3686 * RX and TX lists. 3687 */ 3688 static void 3689 bge_stop(struct bge_softc *sc) 3690 { 3691 struct ifnet *ifp; 3692 struct ifmedia_entry *ifm; 3693 struct mii_data *mii = NULL; 3694 int mtmp, itmp; 3695 3696 BGE_LOCK_ASSERT(sc); 3697 3698 ifp = sc->bge_ifp; 3699 3700 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) 3701 mii = device_get_softc(sc->bge_miibus); 3702 3703 callout_stop(&sc->bge_stat_ch); 3704 3705 /* 3706 * Disable all of the receiver blocks. 
	 */
	BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);

	/*
	 * Disable all of the transmit blocks.
	 */
	BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/*
	 * Shut down all of the memory managers and related
	 * state machines.
	 */
	BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
	BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
	if (!(BGE_IS_5705_OR_BEYOND(sc)))
		BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
	if (!(BGE_IS_5705_OR_BEYOND(sc))) {
		BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
		BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
	}

	/* Disable host interrupts. */
	BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
	CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);

	/*
	 * Tell firmware we're shutting down.
	 */
	bge_stop_fw(sc);
	bge_sig_pre_reset(sc, BGE_RESET_STOP);
	bge_reset(sc);
	bge_sig_legacy(sc, BGE_RESET_STOP);
	bge_sig_post_reset(sc, BGE_RESET_STOP);

	/*
	 * Keep the ASF firmware running if up.
	 */
	if (sc->bge_asf_mode & ASF_STACKUP)
		BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
	else
		BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);

	/* Free the RX lists. */
	bge_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	if (BGE_IS_JUMBO_CAPABLE(sc))
		bge_free_rx_ring_jumbo(sc);

	/* Free TX buffers. */
	bge_free_tx_ring(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 */
	if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		/*
		 * If we are called from bge_detach(), mii is already NULL.
		 */
		if (mii != NULL) {
			ifm = mii->mii_media.ifm_cur;
			mtmp = ifm->ifm_media;
			ifm->ifm_media = IFM_ETHER|IFM_NONE;
			mii_mediachg(mii);
			ifm->ifm_media = mtmp;
		}
		ifp->if_flags = itmp;
	}

	sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;

	/*
	 * We can't just call bge_link_upd() here because the chip is
	 * almost stopped; the bge_link_upd() -> bge_tick_locked() ->
	 * bge_stats_update() sequence could deadlock the hardware.
	 * Instead, just clear the MAC's notion of the link state
	 * (the PHY may still report link UP).
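	 * The cached state in sc->bge_link is simply reset here and will
	 * be re-learned by bge_link_upd() once the interface is running
	 * again.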
	 */
	if (bootverbose && sc->bge_link)
		if_printf(sc->bge_ifp, "link DOWN\n");
	sc->bge_link = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bge_shutdown(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);
}

static int
bge_suspend(device_t dev)
{
	struct bge_softc *sc;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	bge_stop(sc);
	BGE_UNLOCK(sc);

	return (0);
}

static int
bge_resume(device_t dev)
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	BGE_LOCK(sc);
	ifp = sc->bge_ifp;
	if (ifp->if_flags & IFF_UP) {
		bge_init_locked(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			bge_start_locked(ifp);
	}
	BGE_UNLOCK(sc);

	return (0);
}

static void
bge_link_upd(struct bge_softc *sc)
{
	struct mii_data *mii;
	uint32_t link, status;

	BGE_LOCK_ASSERT(sc);

	/* Clear 'pending link event' flag. */
	sc->bge_link_evt = 0;

	/*
	 * Process link state changes.
	 * Grrr. The link status word in the status block does
	 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
	 * to enable MII interrupts in order to properly obtain
	 * async link changes. Unfortunately, this also means that
	 * we have to read the MAC status register to detect link
	 * changes, thereby adding an additional register access to
	 * the interrupt handler.
	 *
	 * XXX: perhaps the link state detection procedure used for
	 * BGE_CHIPID_BCM5700_B2 could be used for other BCM5700
	 * revisions as well.
	 */

	if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
	    sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_MI_INTERRUPT) {
			callout_stop(&sc->bge_stat_ch);
			bge_tick_locked(sc);

			mii = device_get_softc(sc->bge_miibus);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}

			/*
			 * Clear the interrupt.
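			 * This is done by re-enabling the MI interrupt event
			 * in the MAC, reading BRGPHY_MII_ISR to acknowledge
			 * the PHY, and rewriting the PHY interrupt mask.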
			 */
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
			bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
			bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
			    BRGPHY_INTRS);
		}
		return;
	}

	if (sc->bge_flags & BGE_FLAG_TBI) {
		status = CSR_READ_4(sc, BGE_MAC_STS);
		if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
			if (!sc->bge_link) {
				sc->bge_link++;
				if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
					BGE_CLRBIT(sc, BGE_MAC_MODE,
					    BGE_MACMODE_TBI_SEND_CFGS);
				CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
				if_link_state_change(sc->bge_ifp,
				    LINK_STATE_UP);
			}
		} else if (sc->bge_link) {
			sc->bge_link = 0;
			if (bootverbose)
				if_printf(sc->bge_ifp, "link DOWN\n");
			if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
		}
	/* Discard link events for MII/GMII cards if MI auto-polling is disabled. */
	} else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
		/*
		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
		 * bit in the status word always set. Work around this bug by
		 * reading the PHY link status directly.
		 */
		link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;

		if (link != sc->bge_link ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5700) {
			callout_stop(&sc->bge_stat_ch);
			bge_tick_locked(sc);

			mii = device_get_softc(sc->bge_miibus);
			if (!sc->bge_link &&
			    mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->bge_link++;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link UP\n");
			} else if (sc->bge_link &&
			    (!(mii->mii_media_status & IFM_ACTIVE) ||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
				sc->bge_link = 0;
				if (bootverbose)
					if_printf(sc->bge_ifp, "link DOWN\n");
			}
		}
	}

	/* Clear the attention. */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
	    BGE_MACSTAT_LINK_CHANGED);
}