1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause
3 *
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 /*
38 * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver
39 *
40 * The Broadcom BCM5700 is based on technology originally developed by
41 * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet
42 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
43 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
44 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
45 * frames, highly configurable RX filtering, and 16 RX and TX queues
46 * (which, along with RX filter rules, can be used for QOS applications).
47 * Other features, such as TCP segmentation, may be available as part
48 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
49 * firmware images can be stored in hardware and need not be compiled
50 * into the driver.
51 *
52 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
53 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
54 *
55 * The BCM5701 is a single-chip solution incorporating both the BCM5700
56 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
57 * does not support external SSRAM.
58 *
59 * Broadcom also produces a variation of the BCM5700 under the "Altima"
60 * brand name, which is functionally similar but lacks PCI-X support.
61 *
62 * Without external SSRAM, you can have at most 4 TX rings,
63 * and the use of the mini RX ring is disabled. This seems to imply
64 * that these features are simply not available on the BCM5701. As a
65 * result, this driver does not implement any support for the mini RX
66 * ring.
67 */
68
69 #ifdef HAVE_KERNEL_OPTION_HEADERS
70 #include "opt_device_polling.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/endian.h>
75 #include <sys/systm.h>
76 #include <sys/sockio.h>
77 #include <sys/mbuf.h>
78 #include <sys/malloc.h>
79 #include <sys/kernel.h>
80 #include <sys/module.h>
81 #include <sys/socket.h>
82 #include <sys/sysctl.h>
83 #include <sys/taskqueue.h>
84
85 #include <net/debugnet.h>
86 #include <net/if.h>
87 #include <net/if_var.h>
88 #include <net/if_arp.h>
89 #include <net/ethernet.h>
90 #include <net/if_dl.h>
91 #include <net/if_media.h>
92
93 #include <net/bpf.h>
94
95 #include <net/if_types.h>
96 #include <net/if_vlan_var.h>
97
98 #include <netinet/in_systm.h>
99 #include <netinet/in.h>
100 #include <netinet/ip.h>
101 #include <netinet/tcp.h>
102
103 #include <machine/bus.h>
104 #include <machine/resource.h>
105 #include <sys/bus.h>
106 #include <sys/rman.h>
107
108 #include <dev/mii/mii.h>
109 #include <dev/mii/miivar.h>
110 #include "miidevs.h"
111 #include <dev/mii/brgphyreg.h>
112
113 #include <dev/pci/pcireg.h>
114 #include <dev/pci/pcivar.h>
115
116 #include <dev/bge/if_bgereg.h>
117
118 #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP)
119 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
120
121 MODULE_DEPEND(bge, pci, 1, 1, 1);
122 MODULE_DEPEND(bge, ether, 1, 1, 1);
123 MODULE_DEPEND(bge, miibus, 1, 1, 1);
124
125 /* "device miibus" required. See GENERIC if you get errors here. */
126 #include "miibus_if.h"
127
128 /*
129 * Various supported device vendors/types and their names. Note: the
130 * spec seems to indicate that the hardware still has Alteon's vendor
131 * ID burned into it, though it will always be overridden by the vendor
132 * ID in the EEPROM. Just to be safe, we cover all possibilities.
133 */
134 static const struct bge_type {
135 uint16_t bge_vid;
136 uint16_t bge_did;
137 } bge_devs[] = {
138 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 },
139 { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 },
140
141 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 },
142 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 },
143 { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 },
144
145 { APPLE_VENDORID, APPLE_DEVICE_BCM5701 },
146
147 { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 },
148 { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 },
149 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 },
150 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT },
151 { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X },
152 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 },
153 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT },
154 { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X },
155 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C },
156 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S },
157 { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT },
158 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 },
159 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F },
160 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K },
161 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M },
162 { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT },
163 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C },
164 { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S },
165 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 },
166 { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S },
167 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 },
168 { BCOM_VENDORID, BCOM_DEVICEID_BCM5717C },
169 { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 },
170 { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 },
171 { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 },
172 { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 },
173 { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 },
174 { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 },
175 { BCOM_VENDORID, BCOM_DEVICEID_BCM5725 },
176 { BCOM_VENDORID, BCOM_DEVICEID_BCM5727 },
177 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 },
178 { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M },
179 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 },
180 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F },
181 { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M },
182 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 },
183 { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M },
184 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 },
185 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F },
186 { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M },
187 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 },
188 { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M },
189 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 },
190 { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M },
191 { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 },
192 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 },
193 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E },
194 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S },
195 { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE },
196 { BCOM_VENDORID, BCOM_DEVICEID_BCM5762 },
197 { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 },
198 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 },
199 { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S },
200 { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 },
201 { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 },
202 { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 },
203 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F },
204 { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G },
205 { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 },
206 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 },
207 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F },
208 { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M },
209 { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 },
210 { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 },
211 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 },
212 { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 },
213 { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M },
214 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 },
215 { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M },
216 { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 },
217 { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 },
218 { BCOM_VENDORID, BCOM_DEVICEID_BCM57762 },
219 { BCOM_VENDORID, BCOM_DEVICEID_BCM57764 },
220 { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 },
221 { BCOM_VENDORID, BCOM_DEVICEID_BCM57766 },
222 { BCOM_VENDORID, BCOM_DEVICEID_BCM57767 },
223 { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 },
224 { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 },
225 { BCOM_VENDORID, BCOM_DEVICEID_BCM57782 },
226 { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 },
227 { BCOM_VENDORID, BCOM_DEVICEID_BCM57786 },
228 { BCOM_VENDORID, BCOM_DEVICEID_BCM57787 },
229 { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 },
230 { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 },
231 { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 },
232 { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 },
233
234 { SK_VENDORID, SK_DEVICEID_ALTIMA },
235
236 { TC_VENDORID, TC_DEVICEID_3C996 },
237
238 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 },
239 { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 },
240 { 0, 0 }
241 };
242
243 static const struct bge_vendor {
244 uint16_t v_id;
245 const char *v_name;
246 } bge_vendors[] = {
247 { ALTEON_VENDORID, "Alteon" },
248 { ALTIMA_VENDORID, "Altima" },
249 { APPLE_VENDORID, "Apple" },
250 { BCOM_VENDORID, "Broadcom" },
251 { SK_VENDORID, "SysKonnect" },
252 { TC_VENDORID, "3Com" },
253 { FJTSU_VENDORID, "Fujitsu" },
254 { 0, NULL }
255 };
256
257 static const struct bge_revision {
258 uint32_t br_chipid;
259 const char *br_name;
260 } bge_revisions[] = {
261 { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
262 { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
263 { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
264 { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
265 { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
266 { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
267 { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
268 { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
269 { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
270 { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
271 { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
272 { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
273 { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
274 { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
275 { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
276 { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
277 { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
278 { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
279 { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
280 { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
281 { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
282 { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
283 { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
284 { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
285 { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
286 { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
287 { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
288 { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
289 { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
290 { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
291 { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
292 { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
293 { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
294 { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
295 { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
296 { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
297 { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
298 { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
299 { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
300 { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
301 { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
302 { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
303 { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
304 { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" },
305 { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" },
306 { BGE_CHIPID_BCM5717_C0, "BCM5717 C0" },
307 { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" },
308 { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" },
309 { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
310 { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
311 { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
312 { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" },
313 { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" },
314 { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" },
315 { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" },
316 { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" },
317 { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" },
318 /* 5754 and 5787 share the same ASIC ID */
319 { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
320 { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
321 { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
322 { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
323 { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" },
324 { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" },
325 { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" },
326 { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" },
327 { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" },
328 { 0, NULL }
329 };
330
331 /*
332 * Some defaults for major revisions, so that newer steppings
333 * that we don't know about have a shot at working.
334 */
335 static const struct bge_revision bge_majorrevs[] = {
336 { BGE_ASICREV_BCM5700, "unknown BCM5700" },
337 { BGE_ASICREV_BCM5701, "unknown BCM5701" },
338 { BGE_ASICREV_BCM5703, "unknown BCM5703" },
339 { BGE_ASICREV_BCM5704, "unknown BCM5704" },
340 { BGE_ASICREV_BCM5705, "unknown BCM5705" },
341 { BGE_ASICREV_BCM5750, "unknown BCM5750" },
342 { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
343 { BGE_ASICREV_BCM5752, "unknown BCM5752" },
344 { BGE_ASICREV_BCM5780, "unknown BCM5780" },
345 { BGE_ASICREV_BCM5714, "unknown BCM5714" },
346 { BGE_ASICREV_BCM5755, "unknown BCM5755" },
347 { BGE_ASICREV_BCM5761, "unknown BCM5761" },
348 { BGE_ASICREV_BCM5784, "unknown BCM5784" },
349 { BGE_ASICREV_BCM5785, "unknown BCM5785" },
350 /* 5754 and 5787 share the same ASIC ID */
351 { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
352 { BGE_ASICREV_BCM5906, "unknown BCM5906" },
353 { BGE_ASICREV_BCM57765, "unknown BCM57765" },
354 { BGE_ASICREV_BCM57766, "unknown BCM57766" },
355 { BGE_ASICREV_BCM57780, "unknown BCM57780" },
356 { BGE_ASICREV_BCM5717, "unknown BCM5717" },
357 { BGE_ASICREV_BCM5719, "unknown BCM5719" },
358 { BGE_ASICREV_BCM5720, "unknown BCM5720" },
359 { BGE_ASICREV_BCM5762, "unknown BCM5762" },
360 { 0, NULL }
361 };
362
363 #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO)
364 #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY)
365 #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS)
366 #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY)
367 #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS)
368 #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS)
369 #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS)
370 #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS)
371
372 static uint32_t bge_chipid(device_t);
373 static const struct bge_vendor * bge_lookup_vendor(uint16_t);
374 static const struct bge_revision * bge_lookup_rev(uint32_t);
375
376 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]);
377
378 static int bge_probe(device_t);
379 static int bge_attach(device_t);
380 static int bge_detach(device_t);
381 static int bge_suspend(device_t);
382 static int bge_resume(device_t);
383 static void bge_release_resources(struct bge_softc *);
384 static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
385 static int bge_dma_alloc(struct bge_softc *);
386 static void bge_dma_free(struct bge_softc *);
387 static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t,
388 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *);
389
390 static void bge_devinfo(struct bge_softc *);
391 static int bge_mbox_reorder(struct bge_softc *);
392
393 static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]);
394 static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]);
395 static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]);
396 static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]);
397 static int bge_get_eaddr(struct bge_softc *, uint8_t[]);
398
399 static void bge_txeof(struct bge_softc *, uint16_t);
400 static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *);
401 static int bge_rxeof(struct bge_softc *, uint16_t, int);
402
403 static void bge_asf_driver_up (struct bge_softc *);
404 static void bge_tick(void *);
405 static void bge_stats_clear_regs(struct bge_softc *);
406 static void bge_stats_update(struct bge_softc *);
407 static void bge_stats_update_regs(struct bge_softc *);
408 static struct mbuf *bge_check_short_dma(struct mbuf *);
409 static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *,
410 uint16_t *, uint16_t *);
411 static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
412
413 static void bge_intr(void *);
414 static int bge_msi_intr(void *);
415 static void bge_intr_task(void *, int);
416 static void bge_start(if_t);
417 static void bge_start_locked(if_t);
418 static void bge_start_tx(struct bge_softc *, uint32_t);
419 static int bge_ioctl(if_t, u_long, caddr_t);
420 static void bge_init_locked(struct bge_softc *);
421 static void bge_init(void *);
422 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
423 static void bge_stop(struct bge_softc *);
424 static void bge_watchdog(struct bge_softc *);
425 static int bge_shutdown(device_t);
426 static int bge_ifmedia_upd_locked(if_t);
427 static int bge_ifmedia_upd(if_t);
428 static void bge_ifmedia_sts(if_t, struct ifmediareq *);
429 static uint64_t bge_get_counter(if_t, ift_counter);
430
431 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
432 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
433
434 static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *);
435 static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
436
437 static void bge_setpromisc(struct bge_softc *);
438 static void bge_setmulti(struct bge_softc *);
439 static void bge_setvlan(struct bge_softc *);
440
441 static __inline void bge_rxreuse_std(struct bge_softc *, int);
442 static __inline void bge_rxreuse_jumbo(struct bge_softc *, int);
443 static int bge_newbuf_std(struct bge_softc *, int);
444 static int bge_newbuf_jumbo(struct bge_softc *, int);
445 static int bge_init_rx_ring_std(struct bge_softc *);
446 static void bge_free_rx_ring_std(struct bge_softc *);
447 static int bge_init_rx_ring_jumbo(struct bge_softc *);
448 static void bge_free_rx_ring_jumbo(struct bge_softc *);
449 static void bge_free_tx_ring(struct bge_softc *);
450 static int bge_init_tx_ring(struct bge_softc *);
451
452 static int bge_chipinit(struct bge_softc *);
453 static int bge_blockinit(struct bge_softc *);
454 static uint32_t bge_dma_swap_options(struct bge_softc *);
455
456 static int bge_has_eaddr(struct bge_softc *);
457 static uint32_t bge_readmem_ind(struct bge_softc *, int);
458 static void bge_writemem_ind(struct bge_softc *, int, int);
459 static void bge_writembx(struct bge_softc *, int, int);
460 #ifdef notdef
461 static uint32_t bge_readreg_ind(struct bge_softc *, int);
462 #endif
463 static void bge_writemem_direct(struct bge_softc *, int, int);
464 static void bge_writereg_ind(struct bge_softc *, int, int);
465
466 static int bge_miibus_readreg(device_t, int, int);
467 static int bge_miibus_writereg(device_t, int, int, int);
468 static void bge_miibus_statchg(device_t);
469 #ifdef DEVICE_POLLING
470 static int bge_poll(if_t ifp, enum poll_cmd cmd, int count);
471 #endif
472
473 #define BGE_RESET_SHUTDOWN 0
474 #define BGE_RESET_START 1
475 #define BGE_RESET_SUSPEND 2
476 static void bge_sig_post_reset(struct bge_softc *, int);
477 static void bge_sig_legacy(struct bge_softc *, int);
478 static void bge_sig_pre_reset(struct bge_softc *, int);
479 static void bge_stop_fw(struct bge_softc *);
480 static int bge_reset(struct bge_softc *);
481 static void bge_link_upd(struct bge_softc *);
482
483 static void bge_ape_lock_init(struct bge_softc *);
484 static void bge_ape_read_fw_ver(struct bge_softc *);
485 static int bge_ape_lock(struct bge_softc *, int);
486 static void bge_ape_unlock(struct bge_softc *, int);
487 static void bge_ape_send_event(struct bge_softc *, uint32_t);
488 static void bge_ape_driver_state_change(struct bge_softc *, int);
489
490 /*
491 * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may
492 * leak information to untrusted users. It is also known to cause alignment
493 * traps on certain architectures.
494 */
495 #ifdef BGE_REGISTER_DEBUG
496 static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
497 static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS);
498 static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS);
499 static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS);
500 #endif
501 static void bge_add_sysctls(struct bge_softc *);
502 static void bge_add_sysctl_stats_regs(struct bge_softc *,
503 struct sysctl_ctx_list *, struct sysctl_oid_list *);
504 static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *,
505 struct sysctl_oid_list *);
506 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS);
507
508 DEBUGNET_DEFINE(bge);
509
510 static device_method_t bge_methods[] = {
511 /* Device interface */
512 DEVMETHOD(device_probe, bge_probe),
513 DEVMETHOD(device_attach, bge_attach),
514 DEVMETHOD(device_detach, bge_detach),
515 DEVMETHOD(device_shutdown, bge_shutdown),
516 DEVMETHOD(device_suspend, bge_suspend),
517 DEVMETHOD(device_resume, bge_resume),
518
519 /* MII interface */
520 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
521 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
522 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
523
524 DEVMETHOD_END
525 };
526
527 static driver_t bge_driver = {
528 "bge",
529 bge_methods,
530 sizeof(struct bge_softc)
531 };
532
533 DRIVER_MODULE(bge, pci, bge_driver, 0, 0);
534 MODULE_PNP_INFO("U16:vendor;U16:device", pci, bge, bge_devs,
535 nitems(bge_devs) - 1);
536 DRIVER_MODULE(miibus, bge, miibus_driver, 0, 0);
537
538 static int bge_allow_asf = 1;
539
540 static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
541 "BGE driver parameters");
542 SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0,
543 "Allow ASF mode if available");
544
545 static int
546 bge_has_eaddr(struct bge_softc *sc)
547 {
548 return (1);
549 }
550
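/*
 * Indirect access to NIC-local memory: program the PCI memory window
 * base register, transfer a 32-bit word through the window data
 * register and then restore the window. Accesses to the statistics/
 * send-ring region are skipped on the BCM5906.
 */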
551 static uint32_t
552 bge_readmem_ind(struct bge_softc *sc, int off)
553 {
554 device_t dev;
555 uint32_t val;
556
557 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
558 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
559 return (0);
560
561 dev = sc->bge_dev;
562
563 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
564 val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4);
565 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
566 return (val);
567 }
568
569 static void
570 bge_writemem_ind(struct bge_softc *sc, int off, int val)
571 {
572 device_t dev;
573
574 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
575 off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4)
576 return;
577
578 dev = sc->bge_dev;
579
580 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
581 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
582 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
583 }
584
585 #ifdef notdef
586 static uint32_t
587 bge_readreg_ind(struct bge_softc *sc, int off)
588 {
589 device_t dev;
590
591 dev = sc->bge_dev;
592
593 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
594 return (pci_read_config(dev, BGE_PCI_REG_DATA, 4));
595 }
596 #endif
597
598 static void
599 bge_writereg_ind(struct bge_softc *sc, int off, int val)
600 {
601 device_t dev;
602
603 dev = sc->bge_dev;
604
605 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
606 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
607 }
608
609 static void
610 bge_writemem_direct(struct bge_softc *sc, int off, int val)
611 {
612 CSR_WRITE_4(sc, off, val);
613 }
614
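/*
 * Write a mailbox register. The BCM5906 uses the low-priority mailbox
 * aliases; on hosts flagged with BGE_FLAG_MBOX_REORDER the write is
 * flushed with a read-back so it is not reordered by the bridge.
 */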
615 static void
616 bge_writembx(struct bge_softc *sc, int off, int val)
617 {
618 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
619 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
620
621 CSR_WRITE_4(sc, off, val);
622 if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0)
623 CSR_READ_4(sc, off);
624 }
625
626 /*
627 * Clear all stale locks and select the lock for this driver instance.
628 */
629 static void
630 bge_ape_lock_init(struct bge_softc *sc)
631 {
632 uint32_t bit, regbase;
633 int i;
634
635 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
636 regbase = BGE_APE_LOCK_GRANT;
637 else
638 regbase = BGE_APE_PER_LOCK_GRANT;
639
640 /* Clear any stale locks. */
641 for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) {
642 switch (i) {
643 case BGE_APE_LOCK_PHY0:
644 case BGE_APE_LOCK_PHY1:
645 case BGE_APE_LOCK_PHY2:
646 case BGE_APE_LOCK_PHY3:
647 bit = BGE_APE_LOCK_GRANT_DRIVER0;
648 break;
649 default:
650 if (sc->bge_func_addr == 0)
651 bit = BGE_APE_LOCK_GRANT_DRIVER0;
652 else
653 bit = (1 << sc->bge_func_addr);
654 }
655 APE_WRITE_4(sc, regbase + 4 * i, bit);
656 }
657
658 /* Select the PHY lock based on the device's function number. */
659 switch (sc->bge_func_addr) {
660 case 0:
661 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0;
662 break;
663 case 1:
664 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1;
665 break;
666 case 2:
667 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2;
668 break;
669 case 3:
670 sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3;
671 break;
672 default:
673 device_printf(sc->bge_dev,
674 "PHY lock not supported on this function\n");
675 }
676 }
677
678 /*
679 * Check for APE firmware, set flags, and print version info.
680 */
681 static void
682 bge_ape_read_fw_ver(struct bge_softc *sc)
683 {
684 const char *fwtype;
685 uint32_t apedata, features;
686
687 /* Check for a valid APE signature in shared memory. */
688 apedata = APE_READ_4(sc, BGE_APE_SEG_SIG);
689 if (apedata != BGE_APE_SEG_SIG_MAGIC) {
690 sc->bge_mfw_flags &= ~BGE_MFW_ON_APE;
691 return;
692 }
693
694 /* Check if APE firmware is running. */
695 apedata = APE_READ_4(sc, BGE_APE_FW_STATUS);
696 if ((apedata & BGE_APE_FW_STATUS_READY) == 0) {
697 device_printf(sc->bge_dev, "APE signature found "
698 "but FW status not ready! 0x%08x\n", apedata);
699 return;
700 }
701
702 sc->bge_mfw_flags |= BGE_MFW_ON_APE;
703
704 /* Fetch the APE firmware type and version. */
705 apedata = APE_READ_4(sc, BGE_APE_FW_VERSION);
706 features = APE_READ_4(sc, BGE_APE_FW_FEATURES);
707 if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) {
708 sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI;
709 fwtype = "NCSI";
710 } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) {
711 sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH;
712 fwtype = "DASH";
713 } else
714 fwtype = "UNKN";
715
716 /* Print the APE firmware version. */
717 device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n",
718 fwtype,
719 (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT,
720 (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT,
721 (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT,
722 (apedata & BGE_APE_FW_VERSION_BLDMSK));
723 }
724
725 static int
726 bge_ape_lock(struct bge_softc *sc, int locknum)
727 {
728 uint32_t bit, gnt, req, status;
729 int i, off;
730
731 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
732 return (0);
733
734 /* Lock request/grant registers have different bases. */
735 if (sc->bge_asicrev == BGE_ASICREV_BCM5761) {
736 req = BGE_APE_LOCK_REQ;
737 gnt = BGE_APE_LOCK_GRANT;
738 } else {
739 req = BGE_APE_PER_LOCK_REQ;
740 gnt = BGE_APE_PER_LOCK_GRANT;
741 }
742
743 off = 4 * locknum;
744
745 switch (locknum) {
746 case BGE_APE_LOCK_GPIO:
747 /* Lock required when using GPIO. */
748 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
749 return (0);
750 if (sc->bge_func_addr == 0)
751 bit = BGE_APE_LOCK_REQ_DRIVER0;
752 else
753 bit = (1 << sc->bge_func_addr);
754 break;
755 case BGE_APE_LOCK_GRC:
756 /* Lock required to reset the device. */
757 if (sc->bge_func_addr == 0)
758 bit = BGE_APE_LOCK_REQ_DRIVER0;
759 else
760 bit = (1 << sc->bge_func_addr);
761 break;
762 case BGE_APE_LOCK_MEM:
763 /* Lock required when accessing certain APE memory. */
764 if (sc->bge_func_addr == 0)
765 bit = BGE_APE_LOCK_REQ_DRIVER0;
766 else
767 bit = (1 << sc->bge_func_addr);
768 break;
769 case BGE_APE_LOCK_PHY0:
770 case BGE_APE_LOCK_PHY1:
771 case BGE_APE_LOCK_PHY2:
772 case BGE_APE_LOCK_PHY3:
773 /* Lock required when accessing PHYs. */
774 bit = BGE_APE_LOCK_REQ_DRIVER0;
775 break;
776 default:
777 return (EINVAL);
778 }
779
780 /* Request a lock. */
781 APE_WRITE_4(sc, req + off, bit);
782
783 /* Wait up to 1 second to acquire lock. */
784 for (i = 0; i < 20000; i++) {
785 status = APE_READ_4(sc, gnt + off);
786 if (status == bit)
787 break;
788 DELAY(50);
789 }
790
791 /* Handle any errors. */
792 if (status != bit) {
793 device_printf(sc->bge_dev, "APE lock %d request failed! "
794 "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n",
795 locknum, req + off, bit & 0xFFFF, gnt + off,
796 status & 0xFFFF);
797 /* Revoke the lock request. */
798 APE_WRITE_4(sc, gnt + off, bit);
799 return (EBUSY);
800 }
801
802 return (0);
803 }
804
805 static void
806 bge_ape_unlock(struct bge_softc *sc, int locknum)
807 {
808 uint32_t bit, gnt;
809 int off;
810
811 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
812 return;
813
814 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
815 gnt = BGE_APE_LOCK_GRANT;
816 else
817 gnt = BGE_APE_PER_LOCK_GRANT;
818
819 off = 4 * locknum;
820
821 switch (locknum) {
822 case BGE_APE_LOCK_GPIO:
823 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
824 return;
825 if (sc->bge_func_addr == 0)
826 bit = BGE_APE_LOCK_GRANT_DRIVER0;
827 else
828 bit = (1 << sc->bge_func_addr);
829 break;
830 case BGE_APE_LOCK_GRC:
831 if (sc->bge_func_addr == 0)
832 bit = BGE_APE_LOCK_GRANT_DRIVER0;
833 else
834 bit = (1 << sc->bge_func_addr);
835 break;
836 case BGE_APE_LOCK_MEM:
837 if (sc->bge_func_addr == 0)
838 bit = BGE_APE_LOCK_GRANT_DRIVER0;
839 else
840 bit = (1 << sc->bge_func_addr);
841 break;
842 case BGE_APE_LOCK_PHY0:
843 case BGE_APE_LOCK_PHY1:
844 case BGE_APE_LOCK_PHY2:
845 case BGE_APE_LOCK_PHY3:
846 bit = BGE_APE_LOCK_GRANT_DRIVER0;
847 break;
848 default:
849 return;
850 }
851
852 APE_WRITE_4(sc, gnt + off, bit);
853 }
854
855 /*
856 * Send an event to the APE firmware.
857 */
858 static void
859 bge_ape_send_event(struct bge_softc *sc, uint32_t event)
860 {
861 uint32_t apedata;
862 int i;
863
864 /* NCSI does not support APE events. */
865 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
866 return;
867
868 /* Wait up to 1ms for APE to service previous event. */
869 for (i = 10; i > 0; i--) {
870 if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0)
871 break;
872 apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS);
873 if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) {
874 APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event |
875 BGE_APE_EVENT_STATUS_EVENT_PENDING);
876 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
877 APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1);
878 break;
879 }
880 bge_ape_unlock(sc, BGE_APE_LOCK_MEM);
881 DELAY(100);
882 }
883 if (i == 0)
884 device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n",
885 event);
886 }
887
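/*
 * Publish a host driver state transition (start, suspend or unload) to
 * the APE firmware via its shared-memory scratchpad, then send the
 * corresponding state-change event.
 */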
888 static void
889 bge_ape_driver_state_change(struct bge_softc *sc, int kind)
890 {
891 uint32_t apedata, event;
892
893 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0)
894 return;
895
896 switch (kind) {
897 case BGE_RESET_START:
898 /* If this is the first load, clear the load counter. */
899 apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG);
900 if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC)
901 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0);
902 else {
903 apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT);
904 APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata);
905 }
906 APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG,
907 BGE_APE_HOST_SEG_SIG_MAGIC);
908 APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN,
909 BGE_APE_HOST_SEG_LEN_MAGIC);
910
911 /* Add some version info if bge(4) supports it. */
912 APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID,
913 BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0));
914 APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR,
915 BGE_APE_HOST_BEHAV_NO_PHYLOCK);
916 APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS,
917 BGE_APE_HOST_HEARTBEAT_INT_DISABLE);
918 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
919 BGE_APE_HOST_DRVR_STATE_START);
920 event = BGE_APE_EVENT_STATUS_STATE_START;
921 break;
922 case BGE_RESET_SHUTDOWN:
923 APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE,
924 BGE_APE_HOST_DRVR_STATE_UNLOAD);
925 event = BGE_APE_EVENT_STATUS_STATE_UNLOAD;
926 break;
927 case BGE_RESET_SUSPEND:
928 event = BGE_APE_EVENT_STATUS_STATE_SUSPEND;
929 break;
930 default:
931 return;
932 }
933
934 bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT |
935 BGE_APE_EVENT_STATUS_STATE_CHNGE);
936 }
937
938 /*
939 * Map a single buffer address.
940 */
941
942 static void
943 bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
944 {
945 struct bge_dmamap_arg *ctx;
946
947 if (error)
948 return;
949
950 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
951
952 ctx = arg;
953 ctx->bge_busaddr = segs->ds_addr;
954 }
955
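/*
 * Read one byte from NVRAM: grab the software arbitration lock, enable
 * NVRAM access, issue a word read and extract the requested byte, then
 * restore access control and drop the lock.
 */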
956 static uint8_t
957 bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
958 {
959 uint32_t access, byte = 0;
960 int i;
961
962 /* Lock. */
963 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
964 for (i = 0; i < 8000; i++) {
965 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
966 break;
967 DELAY(20);
968 }
969 if (i == 8000)
970 return (1);
971
972 /* Enable access. */
973 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
974 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
975
976 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
977 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
978 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
979 DELAY(10);
980 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
981 DELAY(10);
982 break;
983 }
984 }
985
986 if (i == BGE_TIMEOUT * 10) {
987 if_printf(sc->bge_ifp, "nvram read timed out\n");
988 return (1);
989 }
990
991 /* Get result. */
992 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
993
994 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
995
996 /* Disable access. */
997 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
998
999 /* Unlock. */
1000 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
1001 CSR_READ_4(sc, BGE_NVRAM_SWARB);
1002
1003 return (0);
1004 }
1005
1006 /*
1007 * Read a sequence of bytes from NVRAM.
1008 */
1009 static int
1010 bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
1011 {
1012 int err = 0, i;
1013 uint8_t byte = 0;
1014
1015 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
1016 return (1);
1017
1018 for (i = 0; i < cnt; i++) {
1019 err = bge_nvram_getbyte(sc, off + i, &byte);
1020 if (err)
1021 break;
1022 *(dest + i) = byte;
1023 }
1024
1025 return (err ? 1 : 0);
1026 }
1027
1028 /*
1029 * Read a byte of data stored in the EEPROM at address 'addr.' The
1030 * BCM570x supports both the traditional bitbang interface and an
1031 * auto access interface for reading the EEPROM. We use the auto
1032 * access method.
1033 */
1034 static uint8_t
1035 bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
1036 {
1037 int i;
1038 uint32_t byte = 0;
1039
1040 /*
1041 * Enable use of auto EEPROM access so we can avoid
1042 * having to use the bitbang method.
1043 */
1044 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
1045
1046 /* Reset the EEPROM, load the clock period. */
1047 CSR_WRITE_4(sc, BGE_EE_ADDR,
1048 BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
1049 DELAY(20);
1050
1051 /* Issue the read EEPROM command. */
1052 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
1053
1054 /* Wait for completion */
1055 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
1056 DELAY(10);
1057 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
1058 break;
1059 }
1060
1061 if (i == BGE_TIMEOUT * 10) {
1062 device_printf(sc->bge_dev, "EEPROM read timed out\n");
1063 return (1);
1064 }
1065
1066 /* Get result. */
1067 byte = CSR_READ_4(sc, BGE_EE_DATA);
1068
1069 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
1070
1071 return (0);
1072 }
1073
1074 /*
1075 * Read a sequence of bytes from the EEPROM.
1076 */
1077 static int
1078 bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
1079 {
1080 int i, error = 0;
1081 uint8_t byte = 0;
1082
1083 for (i = 0; i < cnt; i++) {
1084 error = bge_eeprom_getbyte(sc, off + i, &byte);
1085 if (error)
1086 break;
1087 *(dest + i) = byte;
1088 }
1089
1090 return (error ? 1 : 0);
1091 }
1092
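/*
 * MII bus read method. Autopolling is temporarily disabled (leaving it
 * on can trigger PCI errors), the read is issued through the MI
 * communication register and polled for completion, all under the PHY
 * APE lock on chips with APE firmware.
 */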
1093 static int
1094 bge_miibus_readreg(device_t dev, int phy, int reg)
1095 {
1096 struct bge_softc *sc;
1097 uint32_t val;
1098 int i;
1099
1100 sc = device_get_softc(dev);
1101
1102 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1103 return (0);
1104
1105 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
1106 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1107 CSR_WRITE_4(sc, BGE_MI_MODE,
1108 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
1109 DELAY(80);
1110 }
1111
1112 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
1113 BGE_MIPHY(phy) | BGE_MIREG(reg));
1114
1115 /* Poll for the PHY register access to complete. */
1116 for (i = 0; i < BGE_TIMEOUT; i++) {
1117 DELAY(10);
1118 val = CSR_READ_4(sc, BGE_MI_COMM);
1119 if ((val & BGE_MICOMM_BUSY) == 0) {
1120 DELAY(5);
1121 val = CSR_READ_4(sc, BGE_MI_COMM);
1122 break;
1123 }
1124 }
1125
1126 if (i == BGE_TIMEOUT) {
1127 device_printf(sc->bge_dev,
1128 "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
1129 phy, reg, val);
1130 val = 0;
1131 }
1132
1133 /* Restore the autopoll bit if necessary. */
1134 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1135 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1136 DELAY(80);
1137 }
1138
1139 bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1140
1141 if (val & BGE_MICOMM_READFAIL)
1142 return (0);
1143
1144 return (val & 0xFFFF);
1145 }
1146
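/*
 * MII bus write method. Writes to the 1000BASE-T control and auxiliary
 * control registers are not issued on the BCM5906; otherwise the write
 * follows the same autopoll/APE-lock sequence as the read path.
 */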
1147 static int
1148 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
1149 {
1150 struct bge_softc *sc;
1151 int i;
1152
1153 sc = device_get_softc(dev);
1154
1155 if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
1156 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
1157 return (0);
1158
1159 if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0)
1160 return (0);
1161
1162 /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */
1163 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1164 CSR_WRITE_4(sc, BGE_MI_MODE,
1165 sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL);
1166 DELAY(80);
1167 }
1168
1169 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
1170 BGE_MIPHY(phy) | BGE_MIREG(reg) | val);
1171
1172 for (i = 0; i < BGE_TIMEOUT; i++) {
1173 DELAY(10);
1174 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
1175 DELAY(5);
1176 CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
1177 break;
1178 }
1179 }
1180
1181 /* Restore the autopoll bit if necessary. */
1182 if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
1183 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
1184 DELAY(80);
1185 }
1186
1187 bge_ape_unlock(sc, sc->bge_phy_ape_lock);
1188
1189 if (i == BGE_TIMEOUT)
1190 device_printf(sc->bge_dev,
1191 "PHY write timed out (phy %d, reg %d, val 0x%04x)\n",
1192 phy, reg, val);
1193
1194 return (0);
1195 }
1196
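/*
 * MII status change callback: record the link state and reprogram the
 * MAC, TX and RX mode registers to match the negotiated speed, duplex
 * and flow-control settings.
 */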
1197 static void
1198 bge_miibus_statchg(device_t dev)
1199 {
1200 struct bge_softc *sc;
1201 struct mii_data *mii;
1202 uint32_t mac_mode, rx_mode, tx_mode;
1203
1204 sc = device_get_softc(dev);
1205 if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0)
1206 return;
1207 mii = device_get_softc(sc->bge_miibus);
1208
1209 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1210 (IFM_ACTIVE | IFM_AVALID)) {
1211 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1212 case IFM_10_T:
1213 case IFM_100_TX:
1214 sc->bge_link = 1;
1215 break;
1216 case IFM_1000_T:
1217 case IFM_1000_SX:
1218 case IFM_2500_SX:
1219 if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
1220 sc->bge_link = 1;
1221 else
1222 sc->bge_link = 0;
1223 break;
1224 default:
1225 sc->bge_link = 0;
1226 break;
1227 }
1228 } else
1229 sc->bge_link = 0;
1230 if (sc->bge_link == 0)
1231 return;
1232
1233 /*
1234 * APE firmware touches these registers to keep the MAC
1235 * connected to the outside world. Try to keep the
1236 * accesses atomic.
1237 */
1238
1239 /* Set the port mode (MII/GMII) to match the link speed. */
1240 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) &
1241 ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX);
1242 tx_mode = CSR_READ_4(sc, BGE_TX_MODE);
1243 rx_mode = CSR_READ_4(sc, BGE_RX_MODE);
1244
1245 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1246 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
1247 mac_mode |= BGE_PORTMODE_GMII;
1248 else
1249 mac_mode |= BGE_PORTMODE_MII;
1250
1251 /* Set MAC flow control behavior to match link flow control settings. */
1252 tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE;
1253 rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE;
1254 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1255 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1256 tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE;
1257 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1258 rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE;
1259 } else
1260 mac_mode |= BGE_MACMODE_HALF_DUPLEX;
1261
1262 CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode);
1263 DELAY(40);
1264 CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode);
1265 CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode);
1266 }
1267
1268 /*
1269 * Initialize a standard receive ring descriptor.
1270 */
1271 static int
1272 bge_newbuf_std(struct bge_softc *sc, int i)
1273 {
1274 struct mbuf *m;
1275 struct bge_rx_bd *r;
1276 bus_dma_segment_t segs[1];
1277 bus_dmamap_t map;
1278 int error, nsegs;
1279
1280 if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
1281 (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
1282 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
1283 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
1284 if (m == NULL)
1285 return (ENOBUFS);
1286 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1287 } else {
1288 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1289 if (m == NULL)
1290 return (ENOBUFS);
1291 m->m_len = m->m_pkthdr.len = MCLBYTES;
1292 }
1293 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1294 m_adj(m, ETHER_ALIGN);
1295
1296 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
1297 sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
1298 if (error != 0) {
1299 m_freem(m);
1300 return (error);
1301 }
1302 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1303 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1304 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD);
1305 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1306 sc->bge_cdata.bge_rx_std_dmamap[i]);
1307 }
1308 map = sc->bge_cdata.bge_rx_std_dmamap[i];
1309 sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap;
1310 sc->bge_cdata.bge_rx_std_sparemap = map;
1311 sc->bge_cdata.bge_rx_std_chain[i] = m;
1312 sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len;
1313 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
1314 r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1315 r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1316 r->bge_flags = BGE_RXBDFLAG_END;
1317 r->bge_len = segs[0].ds_len;
1318 r->bge_idx = i;
1319
1320 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1321 sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);
1322
1323 return (0);
1324 }
1325
1326 /*
1327 * Initialize a jumbo receive ring descriptor. This allocates
1328 * a 9KB jumbo mbuf cluster to back the extended RX buffer descriptor.
1329 */
1330 static int
1331 bge_newbuf_jumbo(struct bge_softc *sc, int i)
1332 {
1333 bus_dma_segment_t segs[BGE_NSEG_JUMBO];
1334 bus_dmamap_t map;
1335 struct bge_extrx_bd *r;
1336 struct mbuf *m;
1337 int error, nsegs;
1338
1339 MGETHDR(m, M_NOWAIT, MT_DATA);
1340 if (m == NULL)
1341 return (ENOBUFS);
1342
1343 if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) {
1344 m_freem(m);
1345 return (ENOBUFS);
1346 }
1347 m->m_len = m->m_pkthdr.len = MJUM9BYTES;
1348 if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
1349 m_adj(m, ETHER_ALIGN);
1350
1351 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
1352 sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0);
1353 if (error != 0) {
1354 m_freem(m);
1355 return (error);
1356 }
1357
1358 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1359 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1360 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD);
1361 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1362 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1363 }
1364 map = sc->bge_cdata.bge_rx_jumbo_dmamap[i];
1365 sc->bge_cdata.bge_rx_jumbo_dmamap[i] =
1366 sc->bge_cdata.bge_rx_jumbo_sparemap;
1367 sc->bge_cdata.bge_rx_jumbo_sparemap = map;
1368 sc->bge_cdata.bge_rx_jumbo_chain[i] = m;
1369 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0;
1370 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0;
1371 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0;
1372 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0;
1373
1374 /*
1375 * Fill in the extended RX buffer descriptor.
1376 */
1377 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
1378 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
1379 r->bge_idx = i;
1380 r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
1381 switch (nsegs) {
1382 case 4:
1383 r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
1384 r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
1385 r->bge_len3 = segs[3].ds_len;
1386 sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len;
1387 case 3:
1388 r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
1389 r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
1390 r->bge_len2 = segs[2].ds_len;
1391 sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len;
1392 case 2:
1393 r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
1394 r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
1395 r->bge_len1 = segs[1].ds_len;
1396 sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len;
1397 case 1:
1398 r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
1399 r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
1400 r->bge_len0 = segs[0].ds_len;
1401 sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len;
1402 break;
1403 default:
1404 panic("%s: %d segments\n", __func__, nsegs);
1405 }
1406
1407 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1408 sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD);
1409
1410 return (0);
1411 }
1412
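/*
 * Fill the standard receive producer ring with fresh mbufs and pass the
 * initial producer index to the chip.
 */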
1413 static int
1414 bge_init_rx_ring_std(struct bge_softc *sc)
1415 {
1416 int error, i;
1417
1418 bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ);
1419 sc->bge_std = 0;
1420 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1421 if ((error = bge_newbuf_std(sc, i)) != 0)
1422 return (error);
1423 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1424 }
1425
1426 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
1427 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
1428
1429 sc->bge_std = 0;
1430 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1);
1431
1432 return (0);
1433 }
1434
1435 static void
1436 bge_free_rx_ring_std(struct bge_softc *sc)
1437 {
1438 int i;
1439
1440 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1441 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1442 bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
1443 sc->bge_cdata.bge_rx_std_dmamap[i],
1444 BUS_DMASYNC_POSTREAD);
1445 bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
1446 sc->bge_cdata.bge_rx_std_dmamap[i]);
1447 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1448 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1449 }
1450 bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i],
1451 sizeof(struct bge_rx_bd));
1452 }
1453 }
1454
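/*
 * Fill the jumbo receive producer ring, enable the extended
 * (multi-fragment) RX buffer descriptor format and pass the initial
 * producer index to the chip.
 */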
1455 static int
1456 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1457 {
1458 struct bge_rcb *rcb;
1459 int error, i;
1460
1461 bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ);
1462 sc->bge_jumbo = 0;
1463 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1464 if ((error = bge_newbuf_jumbo(sc, i)) != 0)
1465 return (error);
1466 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1467 }
1468
1469 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
1470 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
1471
1472 sc->bge_jumbo = 0;
1473
1474 /* Enable the jumbo receive producer ring. */
1475 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
1476 rcb->bge_maxlen_flags =
1477 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD);
1478 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1479
1480 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1);
1481
1482 return (0);
1483 }
1484
1485 static void
1486 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1487 {
1488 int i;
1489
1490 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1491 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1492 bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
1493 sc->bge_cdata.bge_rx_jumbo_dmamap[i],
1494 BUS_DMASYNC_POSTREAD);
1495 bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo,
1496 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
1497 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1498 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1499 }
1500 bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i],
1501 sizeof(struct bge_extrx_bd));
1502 }
1503 }
1504
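/*
 * Release any mbufs still held by the transmit ring and clear the ring
 * descriptors.
 */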
1505 static void
1506 bge_free_tx_ring(struct bge_softc *sc)
1507 {
1508 int i;
1509
1510 if (sc->bge_ldata.bge_tx_ring == NULL)
1511 return;
1512
1513 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1514 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1515 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
1516 sc->bge_cdata.bge_tx_dmamap[i],
1517 BUS_DMASYNC_POSTWRITE);
1518 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
1519 sc->bge_cdata.bge_tx_dmamap[i]);
1520 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1521 sc->bge_cdata.bge_tx_chain[i] = NULL;
1522 }
1523 bzero((char *)&sc->bge_ldata.bge_tx_ring[i],
1524 sizeof(struct bge_tx_bd));
1525 }
1526 }
1527
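/*
 * Reset the transmit ring state and push a zero producer index to both
 * the host and NIC send-ring mailboxes (written twice on BCM5700 Bx
 * chips to work around the 5700 b2 errata).
 */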
1528 static int
1529 bge_init_tx_ring(struct bge_softc *sc)
1530 {
1531 sc->bge_txcnt = 0;
1532 sc->bge_tx_saved_considx = 0;
1533
1534 bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);
1535 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
1536 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1537
1538 /* Initialize transmit producer index for host-memory send ring. */
1539 sc->bge_tx_prodidx = 0;
1540 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1541
1542 /* 5700 b2 errata */
1543 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1544 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1545
1546 /* NIC-memory send ring not used; initialize to zero. */
1547 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1548 /* 5700 b2 errata */
1549 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
1550 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1551
1552 return (0);
1553 }
1554
1555 static void
1556 bge_setpromisc(struct bge_softc *sc)
1557 {
1558 if_t ifp;
1559
1560 BGE_LOCK_ASSERT(sc);
1561
1562 ifp = sc->bge_ifp;
1563
1564 /* Enable or disable promiscuous mode as needed. */
1565 if (if_getflags(ifp) & IFF_PROMISC)
1566 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1567 else
1568 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
1569 }
1570
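/*
 * if_foreach_llmaddr() callback used by bge_setmulti(): the low 7 bits
 * of the little-endian CRC32 of each multicast address select one bit
 * in the four-word (128-bit) hash filter.
 */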
1571 static u_int
1572 bge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1573 {
1574 uint32_t *hashes = arg;
1575 int h;
1576
1577 h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F;
1578 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1579
1580 return (1);
1581 }
1582
1583 static void
1584 bge_setmulti(struct bge_softc *sc)
1585 {
1586 if_t ifp;
1587 uint32_t hashes[4] = { 0, 0, 0, 0 };
1588 int i;
1589
1590 BGE_LOCK_ASSERT(sc);
1591
1592 ifp = sc->bge_ifp;
1593
1594 if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
1595 for (i = 0; i < 4; i++)
1596 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
1597 return;
1598 }
1599
1600 /* First, zot all the existing filters. */
1601 for (i = 0; i < 4; i++)
1602 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
1603
1604 if_foreach_llmaddr(ifp, bge_hash_maddr, hashes);
1605
1606 for (i = 0; i < 4; i++)
1607 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1608 }
1609
1610 static void
1611 bge_setvlan(struct bge_softc *sc)
1612 {
1613 if_t ifp;
1614
1615 BGE_LOCK_ASSERT(sc);
1616
1617 ifp = sc->bge_ifp;
1618
1619 /* Enable or disable VLAN tag stripping as needed. */
1620 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
1621 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1622 else
1623 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
1624 }
1625
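/*
 * ASF/IPMI firmware handshake performed before a chip reset: write the
 * "driver alive" magic to the firmware mailbox and, with the new
 * handshake protocol, post the pending driver state as well.
 */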
1626 static void
1627 bge_sig_pre_reset(struct bge_softc *sc, int type)
1628 {
1629
1630 /*
1631 * Some chips don't like this, so only do it if ASF is enabled.
1632 */
1633 if (sc->bge_asf_mode)
1634 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
1635
1636 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1637 switch (type) {
1638 case BGE_RESET_START:
1639 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1640 BGE_FW_DRV_STATE_START);
1641 break;
1642 case BGE_RESET_SHUTDOWN:
1643 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1644 BGE_FW_DRV_STATE_UNLOAD);
1645 break;
1646 case BGE_RESET_SUSPEND:
1647 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1648 BGE_FW_DRV_STATE_SUSPEND);
1649 break;
1650 }
1651 }
1652
1653 if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND)
1654 bge_ape_driver_state_change(sc, type);
1655 }
1656
1657 static void
1658 bge_sig_post_reset(struct bge_softc *sc, int type)
1659 {
1660
1661 if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) {
1662 switch (type) {
1663 case BGE_RESET_START:
1664 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1665 BGE_FW_DRV_STATE_START_DONE);
1666 /* START DONE */
1667 break;
1668 case BGE_RESET_SHUTDOWN:
1669 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1670 BGE_FW_DRV_STATE_UNLOAD_DONE);
1671 break;
1672 }
1673 }
1674 if (type == BGE_RESET_SHUTDOWN)
1675 bge_ape_driver_state_change(sc, type);
1676 }
1677
1678 static void
1679 bge_sig_legacy(struct bge_softc *sc, int type)
1680 {
1681
1682 if (sc->bge_asf_mode) {
1683 switch (type) {
1684 case BGE_RESET_START:
1685 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1686 BGE_FW_DRV_STATE_START);
1687 break;
1688 case BGE_RESET_SHUTDOWN:
1689 bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB,
1690 BGE_FW_DRV_STATE_UNLOAD);
1691 break;
1692 }
1693 }
1694 }
1695
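/*
 * Ask the ASF firmware to pause: post a PAUSE command in the firmware
 * command mailbox, raise the RX CPU driver event and briefly wait for
 * the firmware to acknowledge it.
 */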
1696 static void
1697 bge_stop_fw(struct bge_softc *sc)
1698 {
1699 int i;
1700
1701 if (sc->bge_asf_mode) {
1702 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE);
1703 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
1704 CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT);
1705
1706 for (i = 0; i < 100; i++ ) {
1707 if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) &
1708 BGE_RX_CPU_DRV_EVENT))
1709 break;
1710 DELAY(10);
1711 }
1712 }
1713 }
1714
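/*
 * Compute the byte/word swapping bits for BGE_MODE_CTL: non-frame data
 * is word swapped, frame data is byte and word swapped, and on
 * big-endian hosts non-frame data is byte swapped as well.
 */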
1715 static uint32_t
1716 bge_dma_swap_options(struct bge_softc *sc)
1717 {
1718 uint32_t dma_options;
1719
1720 dma_options = BGE_MODECTL_WORDSWAP_NONFRAME |
1721 BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA;
1722 #if BYTE_ORDER == BIG_ENDIAN
1723 dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME;
1724 #endif
1725 return (dma_options);
1726 }
1727
1728 /*
1729 * Do endian, PCI and DMA initialization.
1730 */
1731 static int
1732 bge_chipinit(struct bge_softc *sc)
1733 {
1734 uint32_t dma_rw_ctl, misc_ctl, mode_ctl;
1735 uint16_t val;
1736 int i;
1737
1738 /* Set endianness before we access any non-PCI registers. */
1739 misc_ctl = BGE_INIT;
1740 if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS)
1741 misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS;
1742 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4);
1743
1744 /*
1745 * Clear the MAC statistics block in the NIC's
1746 * internal memory.
1747 */
1748 for (i = BGE_STATS_BLOCK;
1749 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1750 BGE_MEMWIN_WRITE(sc, i, 0);
1751
1752 for (i = BGE_STATUS_BLOCK;
1753 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1754 BGE_MEMWIN_WRITE(sc, i, 0);
1755
1756 if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) {
1757 /*
1758 * Fix data corruption caused by non-qword write with WB.
1759 * Fix master abort in PCI mode.
1760 * Fix PCI latency timer.
1761 */
1762 val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2);
1763 val |= (1 << 10) | (1 << 12) | (1 << 13);
1764 pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2);
1765 }
1766
1767 if (sc->bge_asicrev == BGE_ASICREV_BCM57765 ||
1768 sc->bge_asicrev == BGE_ASICREV_BCM57766) {
1769 /*
1770 * For the 57766 and the non-Ax versions of the 57765, the bootcode
1771 * needs to set up the PCIe Fast Training Sequence (FTS)
1772 * value to prevent transmit hangs.
1773 */
1774 if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) {
1775 CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL,
1776 CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) |
1777 BGE_CPMU_PADRNG_CTL_RDIV2);
1778 }
1779 }
1780
1781 /*
1782 * Set up the PCI DMA control register.
1783 */
1784 dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) |
1785 BGE_PCIDMARWCTL_WR_CMD_SHIFT(7);
1786 if (sc->bge_flags & BGE_FLAG_PCIE) {
1787 if (sc->bge_mps >= 256)
1788 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1789 else
1790 dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1791 } else if (sc->bge_flags & BGE_FLAG_PCIX) {
1792 if (BGE_IS_5714_FAMILY(sc)) {
1793 /* 256 bytes for read and write. */
1794 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) |
1795 BGE_PCIDMARWCTL_WR_WAT_SHIFT(2);
1796 dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ?
1797 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL :
1798 BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL;
1799 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
1800 /*
1801 * In the BCM5703, the DMA read watermark should
1802 * be set to less than or equal to the maximum
1803 * memory read byte count of the PCI-X command
1804 * register.
1805 */
1806 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) |
1807 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1808 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1809 /* 1536 bytes for read, 384 bytes for write. */
1810 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1811 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3);
1812 } else {
1813 /* 384 bytes for read and write. */
1814 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) |
1815 BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) |
1816 0x0F;
1817 }
1818 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1819 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1820 uint32_t tmp;
1821
1822 /* Set ONE_DMA_AT_ONCE for hardware workaround. */
1823 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
1824 if (tmp == 6 || tmp == 7)
1825 dma_rw_ctl |=
1826 BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL;
1827
1828 /* Set PCI-X DMA write workaround. */
1829 dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE;
1830 }
1831 } else {
1832 /* Conventional PCI bus: 256 bytes for read and write. */
1833 dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) |
1834 BGE_PCIDMARWCTL_WR_WAT_SHIFT(7);
1835
1836 if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
1837 sc->bge_asicrev != BGE_ASICREV_BCM5750)
1838 dma_rw_ctl |= 0x0F;
1839 }
1840 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
1841 sc->bge_asicrev == BGE_ASICREV_BCM5701)
1842 dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM |
1843 BGE_PCIDMARWCTL_ASRT_ALL_BE;
1844 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1845 sc->bge_asicrev == BGE_ASICREV_BCM5704)
1846 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1847 if (BGE_IS_5717_PLUS(sc)) {
1848 dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT;
1849 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
1850 dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK;
1851 /*
1852 * Enable HW workaround for controllers that misinterpret
1853 * a status tag update and leave interrupts permanently
1854 * disabled.
1855 */
1856 if (!BGE_IS_57765_PLUS(sc) &&
1857 sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
1858 sc->bge_asicrev != BGE_ASICREV_BCM5762)
1859 dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA;
1860 }
1861 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1862
1863 /*
1864 * Set up general mode register.
1865 */
1866 mode_ctl = bge_dma_swap_options(sc);
1867 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
1868 sc->bge_asicrev == BGE_ASICREV_BCM5762) {
1869 /* Retain Host-2-BMC settings written by APE firmware. */
1870 mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) &
1871 (BGE_MODECTL_BYTESWAP_B2HRX_DATA |
1872 BGE_MODECTL_WORDSWAP_B2HRX_DATA |
1873 BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE);
1874 }
1875 mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS |
1876 BGE_MODECTL_TX_NO_PHDR_CSUM;
1877
1878 /*
1879 * The BCM5701 B5 has a bug causing data corruption when using
1880 * 64-bit DMA reads, which can be terminated early and then
1881 * completed later as 32-bit accesses, in combination with
1882 * certain bridges.
1883 */
1884 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
1885 sc->bge_chipid == BGE_CHIPID_BCM5701_B5)
1886 mode_ctl |= BGE_MODECTL_FORCE_PCI32;
1887
1888 /*
1889 * Tell the firmware the driver is running
1890 */
1891 if (sc->bge_asf_mode & ASF_STACKUP)
1892 mode_ctl |= BGE_MODECTL_STACKUP;
1893
1894 CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl);
1895
1896 /*
1897 * Disable memory write invalidate. Apparently it is not supported
1898 * properly by these devices.
1899 */
1900 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1901
1902 /* Set the timer prescaler (always 66 MHz). */
1903 CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ);
1904
1905 /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */
1906 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1907 DELAY(40); /* XXX */
1908
1909 /* Put PHY into ready state */
1910 BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ);
1911 CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */
1912 DELAY(40);
1913 }
1914
1915 return (0);
1916 }
1917
1918 static int
1919 bge_blockinit(struct bge_softc *sc)
1920 {
1921 struct bge_rcb *rcb;
1922 bus_size_t vrcb;
1923 caddr_t lladdr;
1924 bge_hostaddr taddr;
1925 uint32_t dmactl, rdmareg, val;
1926 int i, limit;
1927
1928 /*
1929 * Initialize the memory window pointer register so that
1930 * we can access the first 32K of internal NIC RAM. This will
1931 * allow us to set up the TX send ring RCBs and the RX return
1932 * ring RCBs, plus other things which live in NIC memory.
1933 */
1934 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1935
1936 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1937
1938 if (!(BGE_IS_5705_PLUS(sc))) {
1939 /* Configure mbuf memory pool */
1940 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
1941 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1942 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1943 else
1944 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1945
1946 /* Configure DMA resource pool */
1947 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1948 BGE_DMA_DESCRIPTORS);
1949 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1950 }
1951
1952 /* Configure mbuf pool watermarks */
1953 if (BGE_IS_5717_PLUS(sc)) {
1954 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1955 if (if_getmtu(sc->bge_ifp) > ETHERMTU) {
1956 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
1957 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
1958 } else {
1959 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a);
1960 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0);
1961 }
1962 } else if (!BGE_IS_5705_PLUS(sc)) {
1963 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1964 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1965 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1966 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
1967 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1968 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1969 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1970 } else {
1971 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1972 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1973 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1974 }
1975
1976 /* Configure DMA resource watermarks */
1977 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1978 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1979
1980 /* Enable buffer manager */
1981 val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN;
1982 /*
1983 * Change the arbitration algorithm of TXMBUF read request to
1984 * round-robin instead of priority-based for the BCM5719. When
1985 * the TXFIFO is almost empty, RDMA will hold its request until
1986 * the TXFIFO is no longer almost empty.
1987 */
1988 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
1989 val |= BGE_BMANMODE_NO_TX_UNDERRUN;
1990 CSR_WRITE_4(sc, BGE_BMAN_MODE, val);
1991
1992 /* Poll for buffer manager start indication */
1993 for (i = 0; i < BGE_TIMEOUT; i++) {
1994 DELAY(10);
1995 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1996 break;
1997 }
1998
1999 if (i == BGE_TIMEOUT) {
2000 device_printf(sc->bge_dev, "buffer manager failed to start\n");
2001 return (ENXIO);
2002 }
2003
2004 /* Enable flow-through queues */
2005 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2006 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2007
2008 /* Wait until queue initialization is complete */
2009 for (i = 0; i < BGE_TIMEOUT; i++) {
2010 DELAY(10);
2011 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
2012 break;
2013 }
2014
2015 if (i == BGE_TIMEOUT) {
2016 device_printf(sc->bge_dev, "flow-through queue init failed\n");
2017 return (ENXIO);
2018 }
2019
2020 /*
2021 * Summary of rings supported by the controller:
2022 *
2023 * Standard Receive Producer Ring
2024 * - This ring is used to feed receive buffers for "standard"
2025 * sized frames (typically 1536 bytes) to the controller.
2026 *
2027 * Jumbo Receive Producer Ring
2028 * - This ring is used to feed receive buffers for jumbo sized
2029 * frames (i.e. anything bigger than the "standard" frames)
2030 * to the controller.
2031 *
2032 * Mini Receive Producer Ring
2033 * - This ring is used to feed receive buffers for "mini"
2034 * sized frames to the controller.
2035 * - This feature required external memory for the controller
2036 * but was never used in a production system. Should always
2037 * be disabled.
2038 *
2039 * Receive Return Ring
2040 * - After the controller has placed an incoming frame into a
2041 * receive buffer that buffer is moved into a receive return
2042 * ring. The driver is then responsible for passing the
2043 * buffer up to the stack. Many versions of the controller
2044 * support multiple RR rings.
2045 *
2046 * Send Ring
2047 * - This ring is used for outgoing frames. Many versions of
2048 * the controller support multiple send rings.
2049 */
2050
2051 /* Initialize the standard receive producer ring control block. */
2052 rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb;
2053 rcb->bge_hostaddr.bge_addr_lo =
2054 BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr);
2055 rcb->bge_hostaddr.bge_addr_hi =
2056 BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr);
2057 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
2058 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD);
2059 if (BGE_IS_5717_PLUS(sc)) {
2060 /*
2061 * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32)
2062 * Bits 15-2 : Maximum RX frame size
2063 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2064 * Bit 0 : Reserved
2065 */
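/*
 * For example, BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2)
 * selects a 512-entry ring in bits 31-16 and places the maximum
 * frame size in bits 15-2 (hence the << 2), assuming the usual
 * (len << 16 | flags) packing of that macro.
 */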
2066 rcb->bge_maxlen_flags =
2067 BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2);
2068 } else if (BGE_IS_5705_PLUS(sc)) {
2069 /*
2070 * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32)
2071 * Bits 15-2 : Reserved (should be 0)
2072 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2073 * Bit 0 : Reserved
2074 */
2075 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
2076 } else {
2077 /*
2078 * Ring size is always XXX entries
2079 * Bits 31-16: Maximum RX frame size
2080 * Bits 15-2 : Reserved (should be 0)
2081 * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled
2082 * Bit 0 : Reserved
2083 */
2084 rcb->bge_maxlen_flags =
2085 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
2086 }
2087 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2088 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2089 sc->bge_asicrev == BGE_ASICREV_BCM5720)
2090 rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717;
2091 else
2092 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
2093 /* Write the standard receive producer ring control block. */
2094 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
2095 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
2096 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
2097 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
2098
2099 /* Reset the standard receive producer ring producer index. */
2100 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
2101
2102 /*
2103 * Initialize the jumbo RX producer ring control
2104 * block. We set the 'ring disabled' bit in the
2105 * flags field until we're actually ready to start
2106 * using this ring (i.e. once we set the MTU
2107 * high enough to require it).
2108 */
2109 if (BGE_IS_JUMBO_CAPABLE(sc)) {
2110 rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
2111 /* Get the jumbo receive producer ring RCB parameters. */
2112 rcb->bge_hostaddr.bge_addr_lo =
2113 BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
2114 rcb->bge_hostaddr.bge_addr_hi =
2115 BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
2116 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2117 sc->bge_cdata.bge_rx_jumbo_ring_map,
2118 BUS_DMASYNC_PREREAD);
2119 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
2120 BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
2121 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2122 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2123 sc->bge_asicrev == BGE_ASICREV_BCM5720)
2124 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
2125 else
2126 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
2127 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
2128 rcb->bge_hostaddr.bge_addr_hi);
2129 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
2130 rcb->bge_hostaddr.bge_addr_lo);
2131 /* Program the jumbo receive producer ring RCB parameters. */
2132 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
2133 rcb->bge_maxlen_flags);
2134 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
2135 /* Reset the jumbo receive producer ring producer index. */
2136 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
2137 }
2138
2139 /* Disable the mini receive producer ring RCB. */
2140 if (BGE_IS_5700_FAMILY(sc)) {
2141 rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
2142 rcb->bge_maxlen_flags =
2143 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
2144 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
2145 rcb->bge_maxlen_flags);
2146 /* Reset the mini receive producer ring producer index. */
2147 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
2148 }
2149
2150 /* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
2151 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
2152 if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
2153 sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
2154 sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
2155 CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
2156 (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
2157 }
2158 /*
2159 * The BD ring replenish thresholds control how often the
2160 * hardware fetches new BDs from the producer rings in host
2161 * memory. Setting the value too low on a busy system can
2162 * starve the hardware and reduce the throughput.
2163 *
2164 * Set the BD ring replenish thresholds. The recommended
2165 * values are 1/8th the number of descriptors allocated to
2166 * each ring.
2167 * XXX The 5754 requires a lower threshold, so it might be a
2168 * requirement of all 575x family chips. The Linux driver sets
2169 * the lower threshold for all 5705 family chips as well, but there
2170 * are reports that it might not need to be so strict.
2171 *
2172 * XXX Linux does some extra fiddling here for the 5906 parts as
2173 * well.
2174 */
2175 if (BGE_IS_5705_PLUS(sc))
2176 val = 8;
2177 else
2178 val = BGE_STD_RX_RING_CNT / 8;
2179 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val);
2180 if (BGE_IS_JUMBO_CAPABLE(sc))
2181 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH,
2182 BGE_JUMBO_RX_RING_CNT/8);
2183 if (BGE_IS_5717_PLUS(sc)) {
2184 CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32);
2185 CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16);
2186 }
2187
2188 /*
2189 * Disable all send rings by setting the 'ring disabled' bit
2190 * in the flags field of all the TX send ring control blocks,
2191 * located in NIC memory.
2192 */
2193 if (!BGE_IS_5705_PLUS(sc))
2194 /* 5700 to 5704 had 16 send rings. */
2195 limit = BGE_TX_RINGS_EXTSSRAM_MAX;
2196 else if (BGE_IS_57765_PLUS(sc) ||
2197 sc->bge_asicrev == BGE_ASICREV_BCM5762)
2198 limit = 2;
2199 else if (BGE_IS_5717_PLUS(sc))
2200 limit = 4;
2201 else
2202 limit = 1;
2203 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2204 for (i = 0; i < limit; i++) {
2205 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2206 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
2207 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2208 vrcb += sizeof(struct bge_rcb);
2209 }
2210
2211 /* Configure send ring RCB 0 (we use only the first ring) */
2212 vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
2213 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr);
2214 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2215 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2216 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2217 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2218 sc->bge_asicrev == BGE_ASICREV_BCM5720)
2219 RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717);
2220 else
2221 RCB_WRITE_4(sc, vrcb, bge_nicaddr,
2222 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
2223 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2224 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
2225
2226 /*
2227 * Disable all receive return rings by setting the
2228 * 'ring disabled' bit in the flags field of all the receive
2229 * return ring control blocks, located in NIC memory.
2230 */
2231 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
2232 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
2233 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2234 /* Should be 17, use 16 until we get an SRAM map. */
2235 limit = 16;
2236 } else if (!BGE_IS_5705_PLUS(sc))
2237 limit = BGE_RX_RINGS_MAX;
2238 else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
2239 sc->bge_asicrev == BGE_ASICREV_BCM5762 ||
2240 BGE_IS_57765_PLUS(sc))
2241 limit = 4;
2242 else
2243 limit = 1;
2244 /* Disable all receive return rings. */
2245 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2246 for (i = 0; i < limit; i++) {
2247 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0);
2248 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0);
2249 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2250 BGE_RCB_FLAG_RING_DISABLED);
2251 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2252 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
2253 (i * (sizeof(uint64_t))), 0);
2254 vrcb += sizeof(struct bge_rcb);
2255 }
2256
2257 /*
2258 * Set up receive return ring 0. Note that the NIC address
2259 * for RX return rings is 0x0. The return rings live entirely
2260 * within the host, so the nicaddr field in the RCB isn't used.
2261 */
2262 vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
2263 BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr);
2264 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
2265 RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
2266 RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0);
2267 RCB_WRITE_4(sc, vrcb, bge_maxlen_flags,
2268 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
2269
2270 lladdr = if_getlladdr(sc->bge_ifp);
2271 /* Set random backoff seed for TX */
2272 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
2273 (lladdr[0] + lladdr[1] +
2274 lladdr[2] + lladdr[3] +
2275 lladdr[4] + lladdr[5]) &
2276 BGE_TX_BACKOFF_SEED_MASK);
2277
2278 /* Set inter-packet gap */
2279 val = 0x2620;
2280 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
2281 sc->bge_asicrev == BGE_ASICREV_BCM5762)
2282 val |= CSR_READ_4(sc, BGE_TX_LENGTHS) &
2283 (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK);
2284 CSR_WRITE_4(sc, BGE_TX_LENGTHS, val);
2285
2286 /*
2287 * Specify which ring to use for packets that don't match
2288 * any RX rules.
2289 */
2290 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
2291
2292 /*
2293 * Configure number of RX lists. One interrupt distribution
2294 * list, sixteen active lists, one bad frames class.
2295 */
2296 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
2297
2298 /* Initialize RX list placement stats mask. */
2299 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
2300 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
2301
2302 /* Disable host coalescing until we get it set up */
2303 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
2304
2305 /* Poll to make sure it's shut down. */
2306 for (i = 0; i < BGE_TIMEOUT; i++) {
2307 DELAY(10);
2308 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
2309 break;
2310 }
2311
2312 if (i == BGE_TIMEOUT) {
2313 device_printf(sc->bge_dev,
2314 "host coalescing engine failed to idle\n");
2315 return (ENXIO);
2316 }
2317
2318 /* Set up host coalescing defaults */
2319 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
2320 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
2321 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
2322 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
2323 if (!(BGE_IS_5705_PLUS(sc))) {
2324 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
2325 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
2326 }
2327 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1);
2328 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1);
2329
2330 /* Set up address of statistics block */
2331 if (!(BGE_IS_5705_PLUS(sc))) {
2332 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI,
2333 BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr));
2334 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
2335 BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr));
2336 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
2337 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
2338 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
2339 }
2340
2341 /* Set up address of status block */
2342 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI,
2343 BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr));
2344 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
2345 BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr));
2346
2347 /* Set up status block size. */
2348 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2349 sc->bge_chipid != BGE_CHIPID_BCM5700_C0) {
2350 val = BGE_STATBLKSZ_FULL;
2351 bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);
2352 } else {
2353 val = BGE_STATBLKSZ_32BYTE;
2354 bzero(sc->bge_ldata.bge_status_block, 32);
2355 }
2356 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
2357 sc->bge_cdata.bge_status_map,
2358 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2359
2360 /* Turn on host coalescing state machine */
2361 CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE);
2362
2363 /* Turn on RX BD completion state machine and enable attentions */
2364 CSR_WRITE_4(sc, BGE_RBDC_MODE,
2365 BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN);
2366
2367 /* Turn on RX list placement state machine */
2368 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2369
2370 /* Turn on RX list selector state machine. */
2371 if (!(BGE_IS_5705_PLUS(sc)))
2372 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2373
2374 /* Turn on DMA, clear stats. */
2375 val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB |
2376 BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR |
2377 BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB |
2378 BGE_MACMODE_FRMHDR_DMA_ENB;
2379
2380 if (sc->bge_flags & BGE_FLAG_TBI)
2381 val |= BGE_PORTMODE_TBI;
2382 else if (sc->bge_flags & BGE_FLAG_MII_SERDES)
2383 val |= BGE_PORTMODE_GMII;
2384 else
2385 val |= BGE_PORTMODE_MII;
2386
2387 /* Allow APE to send/receive frames. */
2388 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
2389 val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
2390
2391 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
2392 DELAY(40);
2393
2394 /* Set misc. local control, enable interrupts on attentions */
2395 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
2396
2397 #ifdef notdef
2398 /* Assert GPIO pins for PHY reset */
2399 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 |
2400 BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2);
2401 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 |
2402 BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2);
2403 #endif
2404
2405 /* Turn on DMA completion state machine */
2406 if (!(BGE_IS_5705_PLUS(sc)))
2407 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2408
2409 val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS;
2410
2411 /* Enable host coalescing bug fix. */
2412 if (BGE_IS_5755_PLUS(sc))
2413 val |= BGE_WDMAMODE_STATUS_TAG_FIX;
2414
2415 /* Request larger DMA burst size to get better performance. */
2416 if (sc->bge_asicrev == BGE_ASICREV_BCM5785)
2417 val |= BGE_WDMAMODE_BURST_ALL_DATA;
2418
2419 /* Turn on write DMA state machine */
2420 CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
2421 DELAY(40);
2422
2423 /* Turn on read DMA state machine */
2424 val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
2425
2426 if (sc->bge_asicrev == BGE_ASICREV_BCM5717)
2427 val |= BGE_RDMAMODE_MULT_DMA_RD_DIS;
2428
2429 if (sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2430 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2431 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2432 val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN |
2433 BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN |
2434 BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN;
2435 if (sc->bge_flags & BGE_FLAG_PCIE)
2436 val |= BGE_RDMAMODE_FIFO_LONG_BURST;
2437 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
2438 val |= BGE_RDMAMODE_TSO4_ENABLE;
2439 if (sc->bge_flags & BGE_FLAG_TSO3 ||
2440 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2441 sc->bge_asicrev == BGE_ASICREV_BCM57780)
2442 val |= BGE_RDMAMODE_TSO6_ENABLE;
2443 }
2444
2445 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
2446 sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2447 val |= CSR_READ_4(sc, BGE_RDMA_MODE) &
2448 BGE_RDMAMODE_H2BNC_VLAN_DET;
2449 /*
2450 * Allow multiple outstanding read requests from
2451 * non-LSO read DMA engine.
2452 */
2453 val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS;
2454 }
2455
2456 if (sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
2457 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
2458 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
2459 sc->bge_asicrev == BGE_ASICREV_BCM57780 ||
2460 BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) {
2461 if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
2462 rdmareg = BGE_RDMA_RSRVCTRL_REG2;
2463 else
2464 rdmareg = BGE_RDMA_RSRVCTRL;
2465 dmactl = CSR_READ_4(sc, rdmareg);
2466 /*
2467 * Adjust tx margin to prevent TX data corruption and
2468 * fix internal FIFO overflow.
2469 */
2470 if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 ||
2471 sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2472 dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2473 BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK |
2474 BGE_RDMA_RSRVCTRL_TXMRGN_MASK);
2475 dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2476 BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K |
2477 BGE_RDMA_RSRVCTRL_TXMRGN_320B;
2478 }
2479 /*
2480 * Enable fix for read DMA FIFO overruns.
2481 * The fix is to limit the number of RX BDs
2482 * the hardware would fetch at a time.
2483 */
2484 CSR_WRITE_4(sc, rdmareg, dmactl |
2485 BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2486 }
2487
2488 if (sc->bge_asicrev == BGE_ASICREV_BCM5719) {
2489 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2490 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2491 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2492 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2493 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) {
2494 /*
2495 * Allow 4KB burst length reads for non-LSO frames.
2496 * Enable 512B burst length reads for buffer descriptors.
2497 */
2498 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL,
2499 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) |
2500 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 |
2501 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2502 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) {
2503 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2,
2504 CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) |
2505 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K |
2506 BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K);
2507 }
2508
2509 CSR_WRITE_4(sc, BGE_RDMA_MODE, val);
2510 DELAY(40);
2511
2512 if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
2513 for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) {
2514 val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4);
2515 if ((val & 0xFFFF) > BGE_FRAMELEN)
2516 break;
2517 if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN)
2518 break;
2519 }
2520 if (i != BGE_NUM_RDMA_CHANNELS / 2) {
2521 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
2522 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
2523 val |= BGE_RDMA_TX_LENGTH_WA_5719;
2524 else
2525 val |= BGE_RDMA_TX_LENGTH_WA_5720;
2526 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
2527 }
2528 }
2529
2530 /* Turn on RX data completion state machine */
2531 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2532
2533 /* Turn on RX BD initiator state machine */
2534 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2535
2536 /* Turn on RX data and RX BD initiator state machine */
2537 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
2538
2539 /* Turn on Mbuf cluster free state machine */
2540 if (!(BGE_IS_5705_PLUS(sc)))
2541 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2542
2543 /* Turn on send BD completion state machine */
2544 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2545
2546 /* Turn on send data completion state machine */
2547 val = BGE_SDCMODE_ENABLE;
2548 if (sc->bge_asicrev == BGE_ASICREV_BCM5761)
2549 val |= BGE_SDCMODE_CDELAY;
2550 CSR_WRITE_4(sc, BGE_SDC_MODE, val);
2551
2552 /* Turn on send data initiator state machine */
2553 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3))
2554 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE |
2555 BGE_SDIMODE_HW_LSO_PRE_DMA);
2556 else
2557 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2558
2559 /* Turn on send BD initiator state machine */
2560 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2561
2562 /* Turn on send BD selector state machine */
2563 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2564
2565 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
2566 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
2567 BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER);
2568
2569 /* ack/clear link change events */
2570 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2571 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2572 BGE_MACSTAT_LINK_CHANGED);
2573 CSR_WRITE_4(sc, BGE_MI_STS, 0);
2574
2575 /*
2576 * Enable attention when the link has changed state for
2577 * devices that use auto polling.
2578 */
2579 if (sc->bge_flags & BGE_FLAG_TBI) {
2580 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
2581 } else {
2582 if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) {
2583 CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode);
2584 DELAY(80);
2585 }
2586 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2587 sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
2588 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2589 BGE_EVTENB_MI_INTERRUPT);
2590 }
2591
2592 /*
2593 * Clear any pending link state attention.
2594 * Otherwise some link state change events may be lost until attention
2595 * is cleared by bge_intr() -> bge_link_upd() sequence.
2596 * It's not necessary on newer BCM chips - perhaps enabling link
2597 * state change attentions implies clearing pending attention.
2598 */
2599 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
2600 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
2601 BGE_MACSTAT_LINK_CHANGED);
2602
2603 /* Enable link state change attentions. */
2604 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
2605
2606 return (0);
2607 }
2608
2609 static const struct bge_revision *
2610 bge_lookup_rev(uint32_t chipid)
2611 {
2612 const struct bge_revision *br;
2613
2614 for (br = bge_revisions; br->br_name != NULL; br++) {
2615 if (br->br_chipid == chipid)
2616 return (br);
2617 }
2618
2619 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2620 if (br->br_chipid == BGE_ASICREV(chipid))
2621 return (br);
2622 }
2623
2624 return (NULL);
2625 }
2626
2627 static const struct bge_vendor *
2628 bge_lookup_vendor(uint16_t vid)
2629 {
2630 const struct bge_vendor *v;
2631
2632 for (v = bge_vendors; v->v_name != NULL; v++)
2633 if (v->v_id == vid)
2634 return (v);
2635
2636 return (NULL);
2637 }
2638
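/*
 * Determine the chip id.  Older devices report it in the upper bits
 * of BGE_PCI_MISC_CTL; newer ones set BGE_ASICREV_USE_PRODID_REG
 * there and keep the real id in a product-id register whose location
 * depends on the PCI device id.
 */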
2639 static uint32_t
2640 bge_chipid(device_t dev)
2641 {
2642 uint32_t id;
2643
2644 id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
2645 BGE_PCIMISCCTL_ASICREV_SHIFT;
2646 if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) {
2647 /*
2648 * Find the ASIC revision. Different chips use different
2649 * registers.
2650 */
2651 switch (pci_get_device(dev)) {
2652 case BCOM_DEVICEID_BCM5717C:
2653 /* The 5717 C0 seems to belong to the 5720 line. */
2654 id = BGE_CHIPID_BCM5720_A0;
2655 break;
2656 case BCOM_DEVICEID_BCM5717:
2657 case BCOM_DEVICEID_BCM5718:
2658 case BCOM_DEVICEID_BCM5719:
2659 case BCOM_DEVICEID_BCM5720:
2660 case BCOM_DEVICEID_BCM5725:
2661 case BCOM_DEVICEID_BCM5727:
2662 case BCOM_DEVICEID_BCM5762:
2663 case BCOM_DEVICEID_BCM57764:
2664 case BCOM_DEVICEID_BCM57767:
2665 case BCOM_DEVICEID_BCM57787:
2666 id = pci_read_config(dev,
2667 BGE_PCI_GEN2_PRODID_ASICREV, 4);
2668 break;
2669 case BCOM_DEVICEID_BCM57761:
2670 case BCOM_DEVICEID_BCM57762:
2671 case BCOM_DEVICEID_BCM57765:
2672 case BCOM_DEVICEID_BCM57766:
2673 case BCOM_DEVICEID_BCM57781:
2674 case BCOM_DEVICEID_BCM57782:
2675 case BCOM_DEVICEID_BCM57785:
2676 case BCOM_DEVICEID_BCM57786:
2677 case BCOM_DEVICEID_BCM57791:
2678 case BCOM_DEVICEID_BCM57795:
2679 id = pci_read_config(dev,
2680 BGE_PCI_GEN15_PRODID_ASICREV, 4);
2681 break;
2682 default:
2683 id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
2684 }
2685 }
2686 return (id);
2687 }
2688
2689 /*
2690 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2691 * against our list and return its name if we find a match.
2692 *
2693 * Note that since the Broadcom controller contains VPD support, we
2694 * try to get the device name string from the controller itself instead
2695 * of the compiled-in string. It guarantees we'll always announce the
2696 * right product name. We fall back to the compiled-in string when
2697 * VPD is unavailable or corrupt.
2698 */
2699 static int
2700 bge_probe(device_t dev)
2701 {
2702 char model[64];
2703 const struct bge_revision *br;
2704 const char *pname;
2705 struct bge_softc *sc;
2706 const struct bge_type *t = bge_devs;
2707 const struct bge_vendor *v;
2708 uint32_t id;
2709 uint16_t did, vid;
2710
2711 sc = device_get_softc(dev);
2712 sc->bge_dev = dev;
2713 vid = pci_get_vendor(dev);
2714 did = pci_get_device(dev);
2715 while(t->bge_vid != 0) {
2716 if ((vid == t->bge_vid) && (did == t->bge_did)) {
2717 id = bge_chipid(dev);
2718 br = bge_lookup_rev(id);
2719 if (bge_has_eaddr(sc) &&
2720 pci_get_vpd_ident(dev, &pname) == 0)
2721 snprintf(model, sizeof(model), "%s", pname);
2722 else {
2723 v = bge_lookup_vendor(vid);
2724 snprintf(model, sizeof(model), "%s %s",
2725 v != NULL ? v->v_name : "Unknown",
2726 br != NULL ? br->br_name :
2727 "NetXtreme/NetLink Ethernet Controller");
2728 }
2729 device_set_descf(dev, "%s, %sASIC rev. %#08x",
2730 model, br != NULL ? "" : "unknown ", id);
2731 return (BUS_PROBE_DEFAULT);
2732 }
2733 t++;
2734 }
2735
2736 return (ENXIO);
2737 }
2738
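/*
 * Release every DMA map, ring and tag created by bge_dma_alloc(),
 * finishing with the buffer and parent tags.
 */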
2739 static void
2740 bge_dma_free(struct bge_softc *sc)
2741 {
2742 int i;
2743
2744 /* Destroy DMA maps for RX buffers. */
2745 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
2746 if (sc->bge_cdata.bge_rx_std_dmamap[i])
2747 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2748 sc->bge_cdata.bge_rx_std_dmamap[i]);
2749 }
2750 if (sc->bge_cdata.bge_rx_std_sparemap)
2751 bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
2752 sc->bge_cdata.bge_rx_std_sparemap);
2753
2754 /* Destroy DMA maps for jumbo RX buffers. */
2755 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
2756 if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
2757 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2758 sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
2759 }
2760 if (sc->bge_cdata.bge_rx_jumbo_sparemap)
2761 bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
2762 sc->bge_cdata.bge_rx_jumbo_sparemap);
2763
2764 /* Destroy DMA maps for TX buffers. */
2765 for (i = 0; i < BGE_TX_RING_CNT; i++) {
2766 if (sc->bge_cdata.bge_tx_dmamap[i])
2767 bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
2768 sc->bge_cdata.bge_tx_dmamap[i]);
2769 }
2770
2771 if (sc->bge_cdata.bge_rx_mtag)
2772 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
2773 if (sc->bge_cdata.bge_mtag_jumbo)
2774 bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo);
2775 if (sc->bge_cdata.bge_tx_mtag)
2776 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
2777
2778 /* Destroy standard RX ring. */
2779 if (sc->bge_ldata.bge_rx_std_ring_paddr)
2780 bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
2781 sc->bge_cdata.bge_rx_std_ring_map);
2782 if (sc->bge_ldata.bge_rx_std_ring)
2783 bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
2784 sc->bge_ldata.bge_rx_std_ring,
2785 sc->bge_cdata.bge_rx_std_ring_map);
2786
2787 if (sc->bge_cdata.bge_rx_std_ring_tag)
2788 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);
2789
2790 /* Destroy jumbo RX ring. */
2791 if (sc->bge_ldata.bge_rx_jumbo_ring_paddr)
2792 bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2793 sc->bge_cdata.bge_rx_jumbo_ring_map);
2794
2795 if (sc->bge_ldata.bge_rx_jumbo_ring)
2796 bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
2797 sc->bge_ldata.bge_rx_jumbo_ring,
2798 sc->bge_cdata.bge_rx_jumbo_ring_map);
2799
2800 if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
2801 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);
2802
2803 /* Destroy RX return ring. */
2804 if (sc->bge_ldata.bge_rx_return_ring_paddr)
2805 bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
2806 sc->bge_cdata.bge_rx_return_ring_map);
2807
2808 if (sc->bge_ldata.bge_rx_return_ring)
2809 bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
2810 sc->bge_ldata.bge_rx_return_ring,
2811 sc->bge_cdata.bge_rx_return_ring_map);
2812
2813 if (sc->bge_cdata.bge_rx_return_ring_tag)
2814 bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);
2815
2816 /* Destroy TX ring. */
2817 if (sc->bge_ldata.bge_tx_ring_paddr)
2818 bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
2819 sc->bge_cdata.bge_tx_ring_map);
2820
2821 if (sc->bge_ldata.bge_tx_ring)
2822 bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
2823 sc->bge_ldata.bge_tx_ring,
2824 sc->bge_cdata.bge_tx_ring_map);
2825
2826 if (sc->bge_cdata.bge_tx_ring_tag)
2827 bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);
2828
2829 /* Destroy status block. */
2830 if (sc->bge_ldata.bge_status_block_paddr)
2831 bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
2832 sc->bge_cdata.bge_status_map);
2833
2834 if (sc->bge_ldata.bge_status_block)
2835 bus_dmamem_free(sc->bge_cdata.bge_status_tag,
2836 sc->bge_ldata.bge_status_block,
2837 sc->bge_cdata.bge_status_map);
2838
2839 if (sc->bge_cdata.bge_status_tag)
2840 bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag);
2841
2842 /* Destroy statistics block. */
2843 if (sc->bge_ldata.bge_stats_paddr)
2844 bus_dmamap_unload(sc->bge_cdata.bge_stats_tag,
2845 sc->bge_cdata.bge_stats_map);
2846
2847 if (sc->bge_ldata.bge_stats)
2848 bus_dmamem_free(sc->bge_cdata.bge_stats_tag,
2849 sc->bge_ldata.bge_stats,
2850 sc->bge_cdata.bge_stats_map);
2851
2852 if (sc->bge_cdata.bge_stats_tag)
2853 bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag);
2854
2855 if (sc->bge_cdata.bge_buffer_tag)
2856 bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag);
2857
2858 /* Destroy the parent tag. */
2859 if (sc->bge_cdata.bge_parent_tag)
2860 bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag);
2861 }
2862
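/*
 * Create a DMA tag and allocate, zero and load coherent memory for one
 * descriptor ring.  On chips with the 4GB boundary bug the allocation
 * is redone with a 32-bit address limit if the ring would cross a 4GB
 * boundary.
 */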
2863 static int
2864 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment,
2865 bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map,
2866 bus_addr_t *paddr, const char *msg)
2867 {
2868 struct bge_dmamap_arg ctx;
2869 bus_addr_t lowaddr;
2870 bus_size_t ring_end;
2871 int error;
2872
2873 lowaddr = BUS_SPACE_MAXADDR;
2874 again:
2875 error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
2876 alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2877 NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag);
2878 if (error != 0) {
2879 device_printf(sc->bge_dev,
2880 "could not create %s dma tag\n", msg);
2881 return (ENOMEM);
2882 }
2883 /* Allocate DMA'able memory for ring. */
2884 error = bus_dmamem_alloc(*tag, (void **)ring,
2885 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
2886 if (error != 0) {
2887 device_printf(sc->bge_dev,
2888 "could not allocate DMA'able memory for %s\n", msg);
2889 return (ENOMEM);
2890 }
2891 /* Load the address of the ring. */
2892 ctx.bge_busaddr = 0;
2893 error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr,
2894 &ctx, BUS_DMA_NOWAIT);
2895 if (error != 0) {
2896 device_printf(sc->bge_dev,
2897 "could not load DMA'able memory for %s\n", msg);
2898 return (ENOMEM);
2899 }
2900 *paddr = ctx.bge_busaddr;
2901 ring_end = *paddr + maxsize;
2902 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 &&
2903 BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) {
2904 /*
2905 * 4GB boundary crossed. Limit maximum allowable DMA
2906 * address space to 32 bits and try again.
2907 */
2908 bus_dmamap_unload(*tag, *map);
2909 bus_dmamem_free(*tag, *ring, *map);
2910 bus_dma_tag_destroy(*tag);
2911 if (bootverbose)
2912 device_printf(sc->bge_dev, "4GB boundary crossed, "
2913 "limit DMA address space to 32bit for %s\n", msg);
2914 *ring = NULL;
2915 *tag = NULL;
2916 *map = NULL;
2917 lowaddr = BUS_SPACE_MAXADDR_32BIT;
2918 goto again;
2919 }
2920 return (0);
2921 }
2922
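/*
 * Allocate all DMA'able memory: the standard RX, RX return and TX
 * rings, the status and statistics blocks, the jumbo RX ring on
 * jumbo-capable chips, plus the tags and maps used for RX/TX mbufs.
 */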
2923 static int
2924 bge_dma_alloc(struct bge_softc *sc)
2925 {
2926 bus_addr_t lowaddr;
2927 bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz;
2928 int i, error;
2929
2930 lowaddr = BUS_SPACE_MAXADDR;
2931 if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0)
2932 lowaddr = BGE_DMA_MAXADDR;
2933 /*
2934 * Allocate the parent bus DMA tag appropriate for PCI.
2935 */
2936 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
2937 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL,
2938 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
2939 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag);
2940 if (error != 0) {
2941 device_printf(sc->bge_dev,
2942 "could not allocate parent dma tag\n");
2943 return (ENOMEM);
2944 }
2945
2946 /* Create tag for standard RX ring. */
2947 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ,
2948 &sc->bge_cdata.bge_rx_std_ring_tag,
2949 (uint8_t **)&sc->bge_ldata.bge_rx_std_ring,
2950 &sc->bge_cdata.bge_rx_std_ring_map,
2951 &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring");
2952 if (error)
2953 return (error);
2954
2955 /* Create tag for RX return ring. */
2956 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc),
2957 &sc->bge_cdata.bge_rx_return_ring_tag,
2958 (uint8_t **)&sc->bge_ldata.bge_rx_return_ring,
2959 &sc->bge_cdata.bge_rx_return_ring_map,
2960 &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring");
2961 if (error)
2962 return (error);
2963
2964 /* Create tag for TX ring. */
2965 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ,
2966 &sc->bge_cdata.bge_tx_ring_tag,
2967 (uint8_t **)&sc->bge_ldata.bge_tx_ring,
2968 &sc->bge_cdata.bge_tx_ring_map,
2969 &sc->bge_ldata.bge_tx_ring_paddr, "TX ring");
2970 if (error)
2971 return (error);
2972
2973 /*
2974 * Create tag for status block.
2975 * Because we use only a single TX/RX/RX return ring, use the
2976 * minimum status block size, except on BCM5700 AX/BX, which
2977 * seems to want the full status block size regardless of the
2978 * configured number of rings.
2979 */
2980 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
2981 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
2982 sbsz = BGE_STATUS_BLK_SZ;
2983 else
2984 sbsz = 32;
2985 error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz,
2986 &sc->bge_cdata.bge_status_tag,
2987 (uint8_t **)&sc->bge_ldata.bge_status_block,
2988 &sc->bge_cdata.bge_status_map,
2989 &sc->bge_ldata.bge_status_block_paddr, "status block");
2990 if (error)
2991 return (error);
2992
2993 /* Create tag for statistics block. */
2994 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ,
2995 &sc->bge_cdata.bge_stats_tag,
2996 (uint8_t **)&sc->bge_ldata.bge_stats,
2997 &sc->bge_cdata.bge_stats_map,
2998 &sc->bge_ldata.bge_stats_paddr, "statistics block");
2999 if (error)
3000 return (error);
3001
3002 /* Create tag for jumbo RX ring. */
3003 if (BGE_IS_JUMBO_CAPABLE(sc)) {
3004 error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ,
3005 &sc->bge_cdata.bge_rx_jumbo_ring_tag,
3006 (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring,
3007 &sc->bge_cdata.bge_rx_jumbo_ring_map,
3008 &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring");
3009 if (error)
3010 return (error);
3011 }
3012
3013 /* Create parent tag for buffers. */
3014 boundary = 0;
3015 if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) {
3016 boundary = BGE_DMA_BNDRY;
3017 /*
3018 * XXX
3019 * A watchdog timeout issue was observed on BCM5704 devices that
3020 * live behind a PCI-X bridge (e.g. the AMD 8131 PCI-X bridge).
3021 * Both limiting the DMA address space to 32 bits and flushing
3022 * mailbox writes seem to address the issue.
3023 */
3024 if (sc->bge_pcixcap != 0)
3025 lowaddr = BUS_SPACE_MAXADDR_32BIT;
3026 }
3027 error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev),
3028 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL,
3029 NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
3030 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag);
3031 if (error != 0) {
3032 device_printf(sc->bge_dev,
3033 "could not allocate buffer dma tag\n");
3034 return (ENOMEM);
3035 }
3036 /* Create tag for Tx mbufs. */
3037 if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) {
3038 txsegsz = BGE_TSOSEG_SZ;
3039 txmaxsegsz = 65535 + sizeof(struct ether_vlan_header);
3040 } else {
3041 txsegsz = MCLBYTES;
3042 txmaxsegsz = MCLBYTES * BGE_NSEG_NEW;
3043 }
3044 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1,
3045 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
3046 txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL,
3047 &sc->bge_cdata.bge_tx_mtag);
3048
3049 if (error) {
3050 device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
3051 return (ENOMEM);
3052 }
3053
3054 /* Create tag for Rx mbufs. */
3055 if (sc->bge_flags & BGE_FLAG_JUMBO_STD)
3056 rxmaxsegsz = MJUM9BYTES;
3057 else
3058 rxmaxsegsz = MCLBYTES;
3059 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0,
3060 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1,
3061 rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
3062
3063 if (error) {
3064 device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
3065 return (ENOMEM);
3066 }
3067
3068 /* Create DMA maps for RX buffers. */
3069 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
3070 &sc->bge_cdata.bge_rx_std_sparemap);
3071 if (error) {
3072 device_printf(sc->bge_dev,
3073 "can't create spare DMA map for RX\n");
3074 return (ENOMEM);
3075 }
3076 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
3077 error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
3078 &sc->bge_cdata.bge_rx_std_dmamap[i]);
3079 if (error) {
3080 device_printf(sc->bge_dev,
3081 "can't create DMA map for RX\n");
3082 return (ENOMEM);
3083 }
3084 }
3085
3086 /* Create DMA maps for TX buffers. */
3087 for (i = 0; i < BGE_TX_RING_CNT; i++) {
3088 error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
3089 &sc->bge_cdata.bge_tx_dmamap[i]);
3090 if (error) {
3091 device_printf(sc->bge_dev,
3092 "can't create DMA map for TX\n");
3093 return (ENOMEM);
3094 }
3095 }
3096
3097 /* Create tags for jumbo RX buffers. */
3098 if (BGE_IS_JUMBO_CAPABLE(sc)) {
3099 error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag,
3100 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
3101 NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE,
3102 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo);
3103 if (error) {
3104 device_printf(sc->bge_dev,
3105 "could not allocate jumbo dma tag\n");
3106 return (ENOMEM);
3107 }
3108 /* Create DMA maps for jumbo RX buffers. */
3109 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
3110 0, &sc->bge_cdata.bge_rx_jumbo_sparemap);
3111 if (error) {
3112 device_printf(sc->bge_dev,
3113 "can't create spare DMA map for jumbo RX\n");
3114 return (ENOMEM);
3115 }
3116 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
3117 error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
3118 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
3119 if (error) {
3120 device_printf(sc->bge_dev,
3121 "can't create DMA map for jumbo RX\n");
3122 return (ENOMEM);
3123 }
3124 }
3125 }
3126
3127 return (0);
3128 }
3129
3130 /*
3131 * Return true if this device has more than one port.
3132 */
3133 static int
3134 bge_has_multiple_ports(struct bge_softc *sc)
3135 {
3136 device_t dev = sc->bge_dev;
3137 u_int b, d, f, fscan, s;
3138
3139 d = pci_get_domain(dev);
3140 b = pci_get_bus(dev);
3141 s = pci_get_slot(dev);
3142 f = pci_get_function(dev);
3143 for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++)
3144 if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL)
3145 return (1);
3146 return (0);
3147 }
3148
3149 /*
3150 * Return true if MSI can be used with this device.
3151 */
3152 static int
3153 bge_can_use_msi(struct bge_softc *sc)
3154 {
3155 int can_use_msi = 0;
3156
3157 if (sc->bge_msi == 0)
3158 return (0);
3159
3160 /* Disable MSI for polling(4). */
3161 #ifdef DEVICE_POLLING
3162 return (0);
3163 #endif
3164 switch (sc->bge_asicrev) {
3165 case BGE_ASICREV_BCM5714_A0:
3166 case BGE_ASICREV_BCM5714:
3167 /*
3168 * Apparently, MSI doesn't work when these chips are
3169 * configured in single-port mode.
3170 */
3171 if (bge_has_multiple_ports(sc))
3172 can_use_msi = 1;
3173 break;
3174 case BGE_ASICREV_BCM5750:
3175 if (sc->bge_chiprev != BGE_CHIPREV_5750_AX &&
3176 sc->bge_chiprev != BGE_CHIPREV_5750_BX)
3177 can_use_msi = 1;
3178 break;
3179 case BGE_ASICREV_BCM5784:
3180 /*
3181 * Prevent infinite "watchdog timeout" errors
3182 * in some MacBook Pro models and make them work out of the box.
3183 */
3184 if (sc->bge_chiprev == BGE_CHIPREV_5784_AX)
3185 break;
3186 /* FALLTHROUGH */
3187 default:
3188 if (BGE_IS_575X_PLUS(sc))
3189 can_use_msi = 1;
3190 }
3191 return (can_use_msi);
3192 }
3193
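/*
 * Return 1 if the device sits behind a PCI bridge known to reorder
 * mailbox writes, in which case the mailbox write workaround is
 * enabled.
 */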
3194 static int
3195 bge_mbox_reorder(struct bge_softc *sc)
3196 {
3197 /* Lists of PCI bridges that are known to reorder mailbox writes. */
3198 static const struct mbox_reorder {
3199 const uint16_t vendor;
3200 const uint16_t device;
3201 const char *desc;
3202 } mbox_reorder_lists[] = {
3203 { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" },
3204 };
3205 devclass_t pci, pcib;
3206 device_t bus, dev;
3207 int i;
3208
3209 pci = devclass_find("pci");
3210 pcib = devclass_find("pcib");
3211 dev = sc->bge_dev;
3212 bus = device_get_parent(dev);
3213 for (;;) {
3214 dev = device_get_parent(bus);
3215 bus = device_get_parent(dev);
3216 if (device_get_devclass(dev) != pcib)
3217 break;
3218 if (device_get_devclass(bus) != pci)
3219 break;
3220 for (i = 0; i < nitems(mbox_reorder_lists); i++) {
3221 if (pci_get_vendor(dev) ==
3222 mbox_reorder_lists[i].vendor &&
3223 pci_get_device(dev) ==
3224 mbox_reorder_lists[i].device) {
3225 device_printf(sc->bge_dev,
3226 "enabling MBOX workaround for %s\n",
3227 mbox_reorder_lists[i].desc);
3228 return (1);
3229 }
3230 }
3231 }
3232 return (0);
3233 }
3234
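/* Print a one-line summary of the chip id and the bus type and speed. */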
3235 static void
3236 bge_devinfo(struct bge_softc *sc)
3237 {
3238 uint32_t cfg, clk;
3239
3240 device_printf(sc->bge_dev,
3241 "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ",
3242 sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev);
3243 if (sc->bge_flags & BGE_FLAG_PCIE)
3244 printf("PCI-E\n");
3245 else if (sc->bge_flags & BGE_FLAG_PCIX) {
3246 printf("PCI-X ");
3247 cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3248 if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE)
3249 clk = 133;
3250 else {
3251 clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F;
3252 switch (clk) {
3253 case 0:
3254 clk = 33;
3255 break;
3256 case 2:
3257 clk = 50;
3258 break;
3259 case 4:
3260 clk = 66;
3261 break;
3262 case 6:
3263 clk = 100;
3264 break;
3265 case 7:
3266 clk = 133;
3267 break;
3268 }
3269 }
3270 printf("%u MHz\n", clk);
3271 } else {
3272 if (sc->bge_pcixcap != 0)
3273 printf("PCI on PCI-X ");
3274 else
3275 printf("PCI ");
3276 cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4);
3277 if (cfg & BGE_PCISTATE_PCI_BUSSPEED)
3278 clk = 66;
3279 else
3280 clk = 33;
3281 if (cfg & BGE_PCISTATE_32BIT_BUS)
3282 printf("%u MHz; 32bit\n", clk);
3283 else
3284 printf("%u MHz; 64bit\n", clk);
3285 }
3286 }
3287
3288 static int
3289 bge_attach(device_t dev)
3290 {
3291 if_t ifp;
3292 struct bge_softc *sc;
3293 uint32_t hwcfg = 0, misccfg, pcistate;
3294 u_char eaddr[ETHER_ADDR_LEN];
3295 int capmask, error, reg, rid, trys;
3296
3297 sc = device_get_softc(dev);
3298 sc->bge_dev = dev;
3299
3300 BGE_LOCK_INIT(sc, device_get_nameunit(dev));
3301 NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc);
3302 callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0);
3303
3304 pci_enable_busmaster(dev);
3305
3306 /*
3307 * Allocate control/status registers.
3308 */
3309 rid = PCIR_BAR(0);
3310 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3311 RF_ACTIVE);
3312
3313 if (sc->bge_res == NULL) {
3314 device_printf (sc->bge_dev, "couldn't map BAR0 memory\n");
3315 error = ENXIO;
3316 goto fail;
3317 }
3318
3319 /* Save various chip information. */
3320 sc->bge_func_addr = pci_get_function(dev);
3321 sc->bge_chipid = bge_chipid(dev);
3322 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
3323 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
3324
3325 /* Set default PHY address. */
3326 sc->bge_phy_addr = 1;
3327 /*
3328 * PHY address mapping for various devices.
3329 *
3330 * | F0 Cu | F0 Sr | F1 Cu | F1 Sr |
3331 * ---------+-------+-------+-------+-------+
3332 * BCM57XX | 1 | X | X | X |
3333 * BCM5704 | 1 | X | 1 | X |
3334 * BCM5717 | 1 | 8 | 2 | 9 |
3335 * BCM5719 | 1 | 8 | 2 | 9 |
3336 * BCM5720 | 1 | 8 | 2 | 9 |
3337 *
3338 * | F2 Cu | F2 Sr | F3 Cu | F3 Sr |
3339 * ---------+-------+-------+-------+-------+
3340 * BCM57XX | X | X | X | X |
3341 * BCM5704 | X | X | X | X |
3342 * BCM5717 | X | X | X | X |
3343 * BCM5719 | 3 | 10 | 4 | 11 |
3344 * BCM5720 | X | X | X | X |
3345 *
3346 * Other addresses may respond but they are not
3347 * IEEE compliant PHYs and should be ignored.
3348 */
3349 if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
3350 sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3351 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
3352 if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) {
3353 if (CSR_READ_4(sc, BGE_SGDIG_STS) &
3354 BGE_SGDIGSTS_IS_SERDES)
3355 sc->bge_phy_addr = sc->bge_func_addr + 8;
3356 else
3357 sc->bge_phy_addr = sc->bge_func_addr + 1;
3358 } else {
3359 if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) &
3360 BGE_CPMU_PHY_STRAP_IS_SERDES)
3361 sc->bge_phy_addr = sc->bge_func_addr + 8;
3362 else
3363 sc->bge_phy_addr = sc->bge_func_addr + 1;
3364 }
3365 }
3366
3367 if (bge_has_eaddr(sc))
3368 sc->bge_flags |= BGE_FLAG_EADDR;
3369
3370 /* Save chipset family. */
3371 switch (sc->bge_asicrev) {
3372 case BGE_ASICREV_BCM5762:
3373 case BGE_ASICREV_BCM57765:
3374 case BGE_ASICREV_BCM57766:
3375 sc->bge_flags |= BGE_FLAG_57765_PLUS;
3376 /* FALLTHROUGH */
3377 case BGE_ASICREV_BCM5717:
3378 case BGE_ASICREV_BCM5719:
3379 case BGE_ASICREV_BCM5720:
3380 sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS |
3381 BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO |
3382 BGE_FLAG_JUMBO_FRAME;
3383 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3384 sc->bge_asicrev == BGE_ASICREV_BCM5720) {
3385 /*
3386 * Enable the workaround for the DMA engine's
3387 * miscalculation of available TXMBUF space.
3388 */
3389 sc->bge_flags |= BGE_FLAG_RDMA_BUG;
3390 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3391 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3392 /* Jumbo frame on BCM5719 A0 does not work. */
3393 sc->bge_flags &= ~BGE_FLAG_JUMBO;
3394 }
3395 }
3396 break;
3397 case BGE_ASICREV_BCM5755:
3398 case BGE_ASICREV_BCM5761:
3399 case BGE_ASICREV_BCM5784:
3400 case BGE_ASICREV_BCM5785:
3401 case BGE_ASICREV_BCM5787:
3402 case BGE_ASICREV_BCM57780:
3403 sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS |
3404 BGE_FLAG_5705_PLUS;
3405 break;
3406 case BGE_ASICREV_BCM5700:
3407 case BGE_ASICREV_BCM5701:
3408 case BGE_ASICREV_BCM5703:
3409 case BGE_ASICREV_BCM5704:
3410 sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO;
3411 break;
3412 case BGE_ASICREV_BCM5714_A0:
3413 case BGE_ASICREV_BCM5780:
3414 case BGE_ASICREV_BCM5714:
3415 sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD;
3416 /* FALLTHROUGH */
3417 case BGE_ASICREV_BCM5750:
3418 case BGE_ASICREV_BCM5752:
3419 case BGE_ASICREV_BCM5906:
3420 sc->bge_flags |= BGE_FLAG_575X_PLUS;
3421 /* FALLTHROUGH */
3422 case BGE_ASICREV_BCM5705:
3423 sc->bge_flags |= BGE_FLAG_5705_PLUS;
3424 break;
3425 }
3426
3427 /* Identify chips with APE processor. */
3428 switch (sc->bge_asicrev) {
3429 case BGE_ASICREV_BCM5717:
3430 case BGE_ASICREV_BCM5719:
3431 case BGE_ASICREV_BCM5720:
3432 case BGE_ASICREV_BCM5761:
3433 case BGE_ASICREV_BCM5762:
3434 sc->bge_flags |= BGE_FLAG_APE;
3435 break;
3436 }
3437
3438 /* Chips with APE need BAR2 access for APE registers/memory. */
3439 if ((sc->bge_flags & BGE_FLAG_APE) != 0) {
3440 rid = PCIR_BAR(2);
3441 sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
3442 RF_ACTIVE);
3443 if (sc->bge_res2 == NULL) {
3444 device_printf (sc->bge_dev,
3445 "couldn't map BAR2 memory\n");
3446 error = ENXIO;
3447 goto fail;
3448 }
3449
3450 /* Enable APE register/memory access by host driver. */
3451 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
3452 pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
3453 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
3454 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
3455 pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4);
3456
3457 bge_ape_lock_init(sc);
3458 bge_ape_read_fw_ver(sc);
3459 }
3460
3461 /* Add SYSCTLs, requires the chipset family to be set. */
3462 bge_add_sysctls(sc);
3463
3464 /* Identify the chips that use a CPMU. */
3465 if (BGE_IS_5717_PLUS(sc) ||
3466 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3467 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3468 sc->bge_asicrev == BGE_ASICREV_BCM5785 ||
3469 sc->bge_asicrev == BGE_ASICREV_BCM57780)
3470 sc->bge_flags |= BGE_FLAG_CPMU_PRESENT;
3471 if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0)
3472 sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST;
3473 else
3474 sc->bge_mi_mode = BGE_MIMODE_BASE;
3475 /* Enable auto polling for BCM570[0-5]. */
3476 if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705)
3477 sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL;
3478
3479 /*
3480 * All Broadcom controllers have the 4GB boundary DMA bug.
3481 * Whenever an address crosses a multiple of the 4GB boundary
3482 * (4GB, 8GB, 12GB, etc.) and makes the transition
3483 * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000, an internal DMA
3484 * state machine will lock up and cause the device to hang.
3485 */
3486 sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG;
3487
3488 /* BCM5755 or higher and BCM5906 have the short DMA bug. */
3489 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
3490 sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG;
3491
3492 /*
3493 * BCM5719 cannot handle DMA requests for DMA segments that
3494 * are larger than 4KB. However, the maximum DMA segment
3495 * size created in the DMA tag is 4KB for TSO, so we
3496 * shouldn't encounter the issue here.
3497 */
3498 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
3499 sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG;
3500
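/* The BCM5788/5788M are BCM5705 variants identified by board ID. */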
3501 misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK;
3502 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
3503 if (misccfg == BGE_MISCCFG_BOARD_ID_5788 ||
3504 misccfg == BGE_MISCCFG_BOARD_ID_5788M)
3505 sc->bge_flags |= BGE_FLAG_5788;
3506 }
3507
3508 capmask = BMSR_DEFCAPMASK;
3509 if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 &&
3510 (misccfg == 0x4000 || misccfg == 0x8000)) ||
3511 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3512 pci_get_vendor(dev) == BCOM_VENDORID &&
3513 (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 ||
3514 pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 ||
3515 pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) ||
3516 (pci_get_vendor(dev) == BCOM_VENDORID &&
3517 (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F ||
3518 pci_get_device(dev) == BCOM_DEVICEID_BCM5753F ||
3519 pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) ||
3520 pci_get_device(dev) == BCOM_DEVICEID_BCM57790 ||
3521 pci_get_device(dev) == BCOM_DEVICEID_BCM57791 ||
3522 pci_get_device(dev) == BCOM_DEVICEID_BCM57795 ||
3523 sc->bge_asicrev == BGE_ASICREV_BCM5906) {
3524 /* These chips are 10/100 only. */
3525 capmask &= ~BMSR_EXTSTAT;
3526 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3527 }
3528
3529 /*
3530 * Some controllers seem to require special firmware to use
3531 * TSO. But that firmware is not available to FreeBSD, and Linux
3532 * claims that the TSO performed by the firmware is slower than
3533 * hardware-based TSO. Moreover, the firmware-based TSO has a
3534 * known bug: it can't handle TSO if the Ethernet header plus
3535 * IP/TCP header is greater than 80 bytes. A workaround for the
3536 * TSO bug exists, but it costs more than simply not using
3537 * TSO at all. Some hardware also has the TSO bug, so limit
3538 * TSO to the controllers that are not affected by the TSO
3539 * issues (e.g. 5755 or higher).
3540 */
3541 if (BGE_IS_5717_PLUS(sc)) {
3542 /* BCM5717 requires a different TSO configuration. */
3543 sc->bge_flags |= BGE_FLAG_TSO3;
3544 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 &&
3545 sc->bge_chipid == BGE_CHIPID_BCM5719_A0) {
3546 /* TSO on BCM5719 A0 does not work. */
3547 sc->bge_flags &= ~BGE_FLAG_TSO3;
3548 }
3549 } else if (BGE_IS_5755_PLUS(sc)) {
3550 /*
3551 * BCM5754 and BCM5787 share the same ASIC id, so an
3552 * explicit device id check is required.
3553 * For unknown reasons, TSO does not work on BCM5755M.
3554 */
3555 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 &&
3556 pci_get_device(dev) != BCOM_DEVICEID_BCM5754M &&
3557 pci_get_device(dev) != BCOM_DEVICEID_BCM5755M)
3558 sc->bge_flags |= BGE_FLAG_TSO;
3559 }
3560
3561 /*
3562 * Check if this is a PCI-X or PCI Express device.
3563 */
3564 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
3565 /*
3566 * Found a PCI Express capabilities register; this
3567 * must be a PCI Express device.
3568 */
3569 sc->bge_flags |= BGE_FLAG_PCIE;
3570 sc->bge_expcap = reg;
3571 /* Extract supported maximum payload size. */
3572 sc->bge_mps = pci_read_config(dev, sc->bge_expcap +
3573 PCIER_DEVICE_CAP, 2);
3574 sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD);
3575 if (sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
3576 sc->bge_asicrev == BGE_ASICREV_BCM5720)
3577 sc->bge_expmrq = 2048;
3578 else
3579 sc->bge_expmrq = 4096;
3580 pci_set_max_read_req(dev, sc->bge_expmrq);
3581 } else {
3582 /*
3583 * Check if the device is in PCI-X Mode.
3584 * (This bit is not valid on PCI Express controllers.)
3585 */
3586 if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0)
3587 sc->bge_pcixcap = reg;
3588 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
3589 BGE_PCISTATE_PCI_BUSMODE) == 0)
3590 sc->bge_flags |= BGE_FLAG_PCIX;
3591 }
3592
3593 /*
3594 * The 40bit DMA bug applies to the 5714/5715 controllers and is
3595 * not actually a MAC controller bug but an issue with the embedded
3596 * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround.
3597 */
3598 if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX))
3599 sc->bge_flags |= BGE_FLAG_40BIT_BUG;
3600 /*
3601 * Some PCI-X bridges are known to trigger write reordering to
3602 * the mailbox registers. The typical symptom is watchdog timeouts
3603 * caused by out-of-order TX completions. Enable the workaround for
3604 * PCI-X devices that live behind these bridges.
3605 * Note: PCI-X controllers can run in PCI mode, so we can't use
3606 * the BGE_FLAG_PCIX flag to detect PCI-X controllers.
3607 */
3608 if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0)
3609 sc->bge_flags |= BGE_FLAG_MBOX_REORDER;
3610 /*
3611 * Allocate the interrupt, using MSI if possible. These devices
3612 * support 8 MSI messages, but only the first one is used in
3613 * normal operation.
3614 */
3615 rid = 0;
3616 if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) {
3617 sc->bge_msicap = reg;
3618 reg = 1;
3619 if (bge_can_use_msi(sc) && pci_alloc_msi(dev, &reg) == 0) {
3620 rid = 1;
3621 sc->bge_flags |= BGE_FLAG_MSI;
3622 }
3623 }
3624
3625 /*
3626 * All controllers except the BCM5700 support tagged status, but
3627 * we use tagged status only for the MSI case on BCM5717. Otherwise
3628 * MSI on BCM5717 does not work.
3629 */
3630 #ifndef DEVICE_POLLING
3631 if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc))
3632 sc->bge_flags |= BGE_FLAG_TAGGED_STATUS;
3633 #endif
3634
3635 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
3636 RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
3637
3638 if (sc->bge_irq == NULL) {
3639 device_printf(sc->bge_dev, "couldn't map interrupt\n");
3640 error = ENXIO;
3641 goto fail;
3642 }
3643
3644 bge_devinfo(sc);
3645
3646 sc->bge_asf_mode = 0;
3647 /* No ASF if APE present. */
3648 if ((sc->bge_flags & BGE_FLAG_APE) == 0) {
3649 if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) ==
3650 BGE_SRAM_DATA_SIG_MAGIC)) {
3651 if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) &
3652 BGE_HWCFG_ASF) {
3653 sc->bge_asf_mode |= ASF_ENABLE;
3654 sc->bge_asf_mode |= ASF_STACKUP;
3655 if (BGE_IS_575X_PLUS(sc))
3656 sc->bge_asf_mode |= ASF_NEW_HANDSHAKE;
3657 }
3658 }
3659 }
3660
3661 bge_stop_fw(sc);
3662 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
3663 if (bge_reset(sc)) {
3664 device_printf(sc->bge_dev, "chip reset failed\n");
3665 error = ENXIO;
3666 goto fail;
3667 }
3668
3669 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
3670 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
3671
3672 if (bge_chipinit(sc)) {
3673 device_printf(sc->bge_dev, "chip initialization failed\n");
3674 error = ENXIO;
3675 goto fail;
3676 }
3677
3678 error = bge_get_eaddr(sc, eaddr);
3679 if (error) {
3680 device_printf(sc->bge_dev,
3681 "failed to read station address\n");
3682 error = ENXIO;
3683 goto fail;
3684 }
3685
3686 /* 5705 limits RX return ring to 512 entries. */
3687 if (BGE_IS_5717_PLUS(sc))
3688 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3689 else if (BGE_IS_5705_PLUS(sc))
3690 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
3691 else
3692 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
3693
3694 if (bge_dma_alloc(sc)) {
3695 device_printf(sc->bge_dev,
3696 "failed to allocate DMA resources\n");
3697 error = ENXIO;
3698 goto fail;
3699 }
3700
3701 /* Set default tuneable values. */
3702 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
3703 sc->bge_rx_coal_ticks = 150;
3704 sc->bge_tx_coal_ticks = 150;
3705 sc->bge_rx_max_coal_bds = 10;
3706 sc->bge_tx_max_coal_bds = 10;
3707
3708 /* Initialize checksum features to use. */
3709 sc->bge_csum_features = BGE_CSUM_FEATURES;
3710 if (sc->bge_forced_udpcsum != 0)
3711 sc->bge_csum_features |= CSUM_UDP;
3712
3713 /* Set up ifnet structure */
3714 ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
3715 if_setsoftc(ifp, sc);
3716 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3717 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
3718 if_setioctlfn(ifp, bge_ioctl);
3719 if_setstartfn(ifp, bge_start);
3720 if_setinitfn(ifp, bge_init);
3721 if_setgetcounterfn(ifp, bge_get_counter);
3722 if_setsendqlen(ifp, BGE_TX_RING_CNT - 1);
3723 if_setsendqready(ifp);
3724 if_sethwassist(ifp, sc->bge_csum_features);
3725 if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
3726 IFCAP_VLAN_MTU);
3727 if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
3728 if_sethwassistbits(ifp, CSUM_TSO, 0);
3729 if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
3730 }
3731 #ifdef IFCAP_VLAN_HWCSUM
3732 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
3733 #endif
3734 if_setcapenable(ifp, if_getcapabilities(ifp));
3735 #ifdef DEVICE_POLLING
3736 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
3737 #endif
3738
3739 /*
3740 * 5700 B0 chips do not support checksumming correctly due
3741 * to hardware bugs.
3742 */
3743 if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
3744 if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
3745 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
3746 if_sethwassist(ifp, 0);
3747 }
3748
3749 /*
3750 * Figure out what sort of media we have by checking the
3751 * hardware config word in the first 32k of NIC internal memory,
3752 * or fall back to examining the EEPROM if necessary.
3753 * Note: on some BCM5700 cards, this value appears to be unset.
3754 * If that's the case, we have to rely on identifying the NIC
3755 * by its PCI subsystem ID, as we do below for the SysKonnect
3756 * SK-9D41.
3757 */
3758 if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)
3759 hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG);
3760 else if ((sc->bge_flags & BGE_FLAG_EADDR) &&
3761 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
3762 if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
3763 sizeof(hwcfg))) {
3764 device_printf(sc->bge_dev, "failed to read EEPROM\n");
3765 error = ENXIO;
3766 goto fail;
3767 }
3768 hwcfg = ntohl(hwcfg);
3769 }
3770
3771 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
3772 if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) ==
3773 SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) {
3774 if (BGE_IS_5705_PLUS(sc)) {
3775 sc->bge_flags |= BGE_FLAG_MII_SERDES;
3776 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3777 } else
3778 sc->bge_flags |= BGE_FLAG_TBI;
3779 }
3780
3781 /* Set various PHY bug flags. */
3782 if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 ||
3783 sc->bge_chipid == BGE_CHIPID_BCM5701_B0)
3784 sc->bge_phy_flags |= BGE_PHY_CRC_BUG;
3785 if (sc->bge_chiprev == BGE_CHIPREV_5703_AX ||
3786 sc->bge_chiprev == BGE_CHIPREV_5704_AX)
3787 sc->bge_phy_flags |= BGE_PHY_ADC_BUG;
3788 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
3789 sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG;
3790 if (pci_get_subvendor(dev) == DELL_VENDORID)
3791 sc->bge_phy_flags |= BGE_PHY_NO_3LED;
3792 if ((BGE_IS_5705_PLUS(sc)) &&
3793 sc->bge_asicrev != BGE_ASICREV_BCM5906 &&
3794 sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
3795 sc->bge_asicrev != BGE_ASICREV_BCM57780 &&
3796 !BGE_IS_5717_PLUS(sc)) {
3797 if (sc->bge_asicrev == BGE_ASICREV_BCM5755 ||
3798 sc->bge_asicrev == BGE_ASICREV_BCM5761 ||
3799 sc->bge_asicrev == BGE_ASICREV_BCM5784 ||
3800 sc->bge_asicrev == BGE_ASICREV_BCM5787) {
3801 if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 &&
3802 pci_get_device(dev) != BCOM_DEVICEID_BCM5756)
3803 sc->bge_phy_flags |= BGE_PHY_JITTER_BUG;
3804 if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M)
3805 sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM;
3806 } else
3807 sc->bge_phy_flags |= BGE_PHY_BER_BUG;
3808 }
3809
3810 /*
3811 * Don't enable Ethernet@WireSpeed for the 5700 or the
3812 * 5705 A0 and A1 chips.
3813 */
3814 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
3815 (sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
3816 (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
3817 sc->bge_chipid != BGE_CHIPID_BCM5705_A1)))
3818 sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
3819
3820 if (sc->bge_flags & BGE_FLAG_TBI) {
3821 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
3822 bge_ifmedia_sts);
3823 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
3824 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
3825 0, NULL);
3826 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
3827 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO);
3828 sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
3829 } else {
3830 /*
3831 * Do transceiver setup and tell the firmware the
3832 * driver is down so we can try to get access to
3833 * probe the PHY if ASF is running. Retry a couple of
3834 * times if we get a conflict with the ASF firmware
3835 * accessing the PHY.
3836 */
3837 trys = 0;
3838 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3839 again:
3840 bge_asf_driver_up(sc);
3841
3842 error = mii_attach(dev, &sc->bge_miibus, ifp,
3843 (ifm_change_cb_t)bge_ifmedia_upd,
3844 (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
3845 MII_OFFSET_ANY, MIIF_DOPAUSE);
3846 if (error != 0) {
3847 if (trys++ < 4) {
3848 device_printf(sc->bge_dev, "Try again\n");
3849 bge_miibus_writereg(sc->bge_dev,
3850 sc->bge_phy_addr, MII_BMCR, BMCR_RESET);
3851 goto again;
3852 }
3853 device_printf(sc->bge_dev, "attaching PHYs failed\n");
3854 goto fail;
3855 }
3856
3857 /*
3858 * Now tell the firmware we are going up after probing the PHY
3859 */
3860 if (sc->bge_asf_mode & ASF_STACKUP)
3861 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3862 }
3863
3864 /*
3865 * When using the BCM5701 in PCI-X mode, data corruption has
3866 * been observed in the first few bytes of some received packets.
3867 * Aligning the packet buffer in memory eliminates the corruption.
3868 * Unfortunately, this misaligns the packet payloads. On platforms
3869 * which do not support unaligned accesses, we will realign the
3870 * payloads by copying the received packets.
3871 */
3872 if (sc->bge_asicrev == BGE_ASICREV_BCM5701 &&
3873 sc->bge_flags & BGE_FLAG_PCIX)
3874 sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG;
3875
3876 /*
3877 * Call MI attach routine.
3878 */
3879 ether_ifattach(ifp, eaddr);
3880
3881 /* Tell upper layer we support long frames. */
3882 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
3883
3884 /*
3885 * Hookup IRQ last.
3886 */
3887 if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) {
3888 /* Take advantage of single-shot MSI. */
3889 CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) &
3890 ~BGE_MSIMODE_ONE_SHOT_DISABLE);
3891 sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
3892 taskqueue_thread_enqueue, &sc->bge_tq);
3893 error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
3894 "%s taskq", device_get_nameunit(sc->bge_dev));
3895 if (error != 0) {
3896 device_printf(dev, "could not start threads.\n");
3897 ether_ifdetach(ifp);
3898 goto fail;
3899 }
3900 error = bus_setup_intr(dev, sc->bge_irq,
3901 INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc,
3902 &sc->bge_intrhand);
3903 } else
3904 error = bus_setup_intr(dev, sc->bge_irq,
3905 INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc,
3906 &sc->bge_intrhand);
3907
3908 if (error) {
3909 ether_ifdetach(ifp);
3910 device_printf(sc->bge_dev, "couldn't set up irq\n");
3911 goto fail;
3912 }
3913
3914 /* Attach driver debugnet methods. */
3915 DEBUGNET_SET(ifp, bge);
3916
3917 fail:
3918 if (error)
3919 bge_detach(dev);
3920 return (error);
3921 }
3922
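/*
 * Detach routine: stop the controller, detach the ifnet and the PHY,
 * then release all resources.
 */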
3923 static int
3924 bge_detach(device_t dev)
3925 {
3926 struct bge_softc *sc;
3927 if_t ifp;
3928
3929 sc = device_get_softc(dev);
3930 ifp = sc->bge_ifp;
3931
3932 #ifdef DEVICE_POLLING
3933 if (if_getcapenable(ifp) & IFCAP_POLLING)
3934 ether_poll_deregister(ifp);
3935 #endif
3936
3937 if (device_is_attached(dev)) {
3938 ether_ifdetach(ifp);
3939 BGE_LOCK(sc);
3940 bge_stop(sc);
3941 BGE_UNLOCK(sc);
3942 callout_drain(&sc->bge_stat_ch);
3943 }
3944
3945 if (sc->bge_tq)
3946 taskqueue_drain(sc->bge_tq, &sc->bge_intr_task);
3947
3948 if (sc->bge_flags & BGE_FLAG_TBI)
3949 ifmedia_removeall(&sc->bge_ifmedia);
3950 else if (sc->bge_miibus != NULL) {
3951 bus_generic_detach(dev);
3952 device_delete_child(dev, sc->bge_miibus);
3953 }
3954
3955 bge_release_resources(sc);
3956
3957 return (0);
3958 }
3959
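/*
 * Release the bus resources (taskqueue, interrupt, BARs), free the ifnet
 * and DMA memory, and destroy the driver mutex.
 */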
3960 static void
3961 bge_release_resources(struct bge_softc *sc)
3962 {
3963 device_t dev;
3964
3965 dev = sc->bge_dev;
3966
3967 if (sc->bge_tq != NULL)
3968 taskqueue_free(sc->bge_tq);
3969
3970 if (sc->bge_intrhand != NULL)
3971 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
3972
3973 if (sc->bge_irq != NULL) {
3974 bus_release_resource(dev, SYS_RES_IRQ,
3975 rman_get_rid(sc->bge_irq), sc->bge_irq);
3976 pci_release_msi(dev);
3977 }
3978
3979 if (sc->bge_res != NULL)
3980 bus_release_resource(dev, SYS_RES_MEMORY,
3981 rman_get_rid(sc->bge_res), sc->bge_res);
3982
3983 if (sc->bge_res2 != NULL)
3984 bus_release_resource(dev, SYS_RES_MEMORY,
3985 rman_get_rid(sc->bge_res2), sc->bge_res2);
3986
3987 if (sc->bge_ifp != NULL)
3988 if_free(sc->bge_ifp);
3989
3990 bge_dma_free(sc);
3991
3992 if (mtx_initialized(&sc->bge_mtx)) /* XXX */
3993 BGE_LOCK_DESTROY(sc);
3994 }
3995
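/*
 * Issue a global reset of the controller, restore the PCI and MAC state
 * that the reset clobbers, then wait for the on-chip firmware to signal
 * that it has finished initializing.
 */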
3996 static int
3997 bge_reset(struct bge_softc *sc)
3998 {
3999 device_t dev;
4000 uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val;
4001 void (*write_op)(struct bge_softc *, int, int);
4002 uint16_t devctl;
4003 int i;
4004
4005 dev = sc->bge_dev;
4006
4007 mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE;
4008 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4009 mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN;
4010 mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask;
4011
4012 if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) &&
4013 (sc->bge_asicrev != BGE_ASICREV_BCM5906)) {
4014 if (sc->bge_flags & BGE_FLAG_PCIE)
4015 write_op = bge_writemem_direct;
4016 else
4017 write_op = bge_writemem_ind;
4018 } else
4019 write_op = bge_writereg_ind;
4020
4021 if (sc->bge_asicrev != BGE_ASICREV_BCM5700 &&
4022 sc->bge_asicrev != BGE_ASICREV_BCM5701) {
4023 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
4024 for (i = 0; i < 8000; i++) {
4025 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) &
4026 BGE_NVRAMSWARB_GNT1)
4027 break;
4028 DELAY(20);
4029 }
4030 if (i == 8000) {
4031 if (bootverbose)
4032 device_printf(dev, "NVRAM lock timed out!\n");
4033 }
4034 }
4035 /* Take APE lock when performing reset. */
4036 bge_ape_lock(sc, BGE_APE_LOCK_GRC);
4037
4038 /* Save some important PCI state. */
4039 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
4040 command = pci_read_config(dev, BGE_PCI_CMD, 4);
4041
4042 pci_write_config(dev, BGE_PCI_MISC_CTL,
4043 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4044 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
4045
4046 /* Disable fastboot on controllers that support it. */
4047 if (sc->bge_asicrev == BGE_ASICREV_BCM5752 ||
4048 BGE_IS_5755_PLUS(sc)) {
4049 if (bootverbose)
4050 device_printf(dev, "Disabling fastboot\n");
4051 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0);
4052 }
4053
4054 /*
4055 * Write the magic number to SRAM at offset 0xB50.
4056 * When firmware finishes its initialization it will
4057 * write ~BGE_SRAM_FW_MB_MAGIC to the same location.
4058 */
4059 bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC);
4060
4061 reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ;
4062
4063 /* XXX: Broadcom Linux driver. */
4064 if (sc->bge_flags & BGE_FLAG_PCIE) {
4065 if (sc->bge_asicrev != BGE_ASICREV_BCM5785 &&
4066 (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) {
4067 if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */
4068 CSR_WRITE_4(sc, 0x7E2C, 0x20);
4069 }
4070 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
4071 /* Prevent PCIE link training during global reset */
4072 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
4073 reset |= 1 << 29;
4074 }
4075 }
4076
4077 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
4078 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
4079 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
4080 val | BGE_VCPU_STATUS_DRV_RESET);
4081 val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
4082 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
4083 val & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
4084 }
4085
4086 /*
4087 * Set GPHY Power Down Override to leave GPHY
4088 * powered up in D0 uninitialized.
4089 */
4090 if (BGE_IS_5705_PLUS(sc) &&
4091 (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0)
4092 reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE;
4093
4094 /* Issue global reset */
4095 write_op(sc, BGE_MISC_CFG, reset);
4096
4097 if (sc->bge_flags & BGE_FLAG_PCIE)
4098 DELAY(100 * 1000);
4099 else
4100 DELAY(1000);
4101
4102 /* XXX: Broadcom Linux driver. */
4103 if (sc->bge_flags & BGE_FLAG_PCIE) {
4104 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
4105 DELAY(500000); /* wait for link training to complete */
4106 val = pci_read_config(dev, 0xC4, 4);
4107 pci_write_config(dev, 0xC4, val | (1 << 15), 4);
4108 }
4109 devctl = pci_read_config(dev,
4110 sc->bge_expcap + PCIER_DEVICE_CTL, 2);
4111 /* Clear the 'enable no snoop' bit and disable relaxed ordering. */
4112 devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE |
4113 PCIEM_CTL_NOSNOOP_ENABLE);
4114 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL,
4115 devctl, 2);
4116 pci_set_max_read_req(dev, sc->bge_expmrq);
4117 /* Clear error status. */
4118 pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA,
4119 PCIEM_STA_CORRECTABLE_ERROR |
4120 PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
4121 PCIEM_STA_UNSUPPORTED_REQ, 2);
4122 }
4123
4124 /* Reset some of the PCI state that got zapped by reset. */
4125 pci_write_config(dev, BGE_PCI_MISC_CTL,
4126 BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR |
4127 BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4);
4128 val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE;
4129 if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 &&
4130 (sc->bge_flags & BGE_FLAG_PCIX) != 0)
4131 val |= BGE_PCISTATE_RETRY_SAME_DMA;
4132 if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0)
4133 val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR |
4134 BGE_PCISTATE_ALLOW_APE_SHMEM_WR |
4135 BGE_PCISTATE_ALLOW_APE_PSPACE_WR;
4136 pci_write_config(dev, BGE_PCI_PCISTATE, val, 4);
4137 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
4138 pci_write_config(dev, BGE_PCI_CMD, command, 4);
4139 /*
4140 * Disable PCI-X relaxed ordering to ensure the status block
4141 * update arrives before packet buffer DMA. Otherwise the driver
4142 * may read a stale status block.
4143 */
4144 if (sc->bge_flags & BGE_FLAG_PCIX) {
4145 devctl = pci_read_config(dev,
4146 sc->bge_pcixcap + PCIXR_COMMAND, 2);
4147 devctl &= ~PCIXM_COMMAND_ERO;
4148 if (sc->bge_asicrev == BGE_ASICREV_BCM5703) {
4149 devctl &= ~PCIXM_COMMAND_MAX_READ;
4150 devctl |= PCIXM_COMMAND_MAX_READ_2048;
4151 } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
4152 devctl &= ~(PCIXM_COMMAND_MAX_SPLITS |
4153 PCIXM_COMMAND_MAX_READ);
4154 devctl |= PCIXM_COMMAND_MAX_READ_2048;
4155 }
4156 pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND,
4157 devctl, 2);
4158 }
4159 /* Re-enable MSI, if necessary, and enable the memory arbiter. */
4160 if (BGE_IS_5714_FAMILY(sc)) {
4161 /* This chip disables MSI on reset. */
4162 if (sc->bge_flags & BGE_FLAG_MSI) {
4163 val = pci_read_config(dev,
4164 sc->bge_msicap + PCIR_MSI_CTRL, 2);
4165 pci_write_config(dev,
4166 sc->bge_msicap + PCIR_MSI_CTRL,
4167 val | PCIM_MSICTRL_MSI_ENABLE, 2);
4168 val = CSR_READ_4(sc, BGE_MSI_MODE);
4169 CSR_WRITE_4(sc, BGE_MSI_MODE,
4170 val | BGE_MSIMODE_ENABLE);
4171 }
4172 val = CSR_READ_4(sc, BGE_MARB_MODE);
4173 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
4174 } else
4175 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4176
4177 /* Fix up byte swapping. */
4178 CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc));
4179
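/* Restore the MAC mode bits (port mode and APE enables) saved earlier. */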
4180 val = CSR_READ_4(sc, BGE_MAC_MODE);
4181 val = (val & ~mac_mode_mask) | mac_mode;
4182 CSR_WRITE_4(sc, BGE_MAC_MODE, val);
4183 DELAY(40);
4184
4185 bge_ape_unlock(sc, BGE_APE_LOCK_GRC);
4186
4187 if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
4188 for (i = 0; i < BGE_TIMEOUT; i++) {
4189 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
4190 if (val & BGE_VCPU_STATUS_INIT_DONE)
4191 break;
4192 DELAY(100);
4193 }
4194 if (i == BGE_TIMEOUT) {
4195 device_printf(dev, "reset timed out\n");
4196 return (1);
4197 }
4198 } else {
4199 /*
4200 * Poll until we see the 1's complement of the magic number.
4201 * This indicates that the firmware initialization is complete.
4202 * We expect this to fail if no chip containing the Ethernet
4203 * address is fitted though.
4204 */
4205 for (i = 0; i < BGE_TIMEOUT; i++) {
4206 DELAY(10);
4207 val = bge_readmem_ind(sc, BGE_SRAM_FW_MB);
4208 if (val == ~BGE_SRAM_FW_MB_MAGIC)
4209 break;
4210 }
4211
4212 if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT)
4213 device_printf(dev,
4214 "firmware handshake timed out, found 0x%08x\n",
4215 val);
4216 /* BCM57765 A0 needs additional time before accessing. */
4217 if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0)
4218 DELAY(10 * 1000); /* XXX */
4219 }
4220
4221 /*
4222 * The 5704 in TBI mode apparently needs some special
4223 * adjustment to ensure the SERDES drive level is set
4224 * to 1.2V.
4225 */
4226 if (sc->bge_asicrev == BGE_ASICREV_BCM5704 &&
4227 sc->bge_flags & BGE_FLAG_TBI) {
4228 val = CSR_READ_4(sc, BGE_SERDES_CFG);
4229 val = (val & ~0xFFF) | 0x880;
4230 CSR_WRITE_4(sc, BGE_SERDES_CFG, val);
4231 }
4232
4233 /* XXX: Broadcom Linux driver. */
4234 if (sc->bge_flags & BGE_FLAG_PCIE &&
4235 !BGE_IS_5717_PLUS(sc) &&
4236 sc->bge_chipid != BGE_CHIPID_BCM5750_A0 &&
4237 sc->bge_asicrev != BGE_ASICREV_BCM5785) {
4238 /* Enable Data FIFO protection. */
4239 val = CSR_READ_4(sc, 0x7C00);
4240 CSR_WRITE_4(sc, 0x7C00, val | (1 << 25));
4241 }
4242
4243 if (sc->bge_asicrev == BGE_ASICREV_BCM5720)
4244 BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE,
4245 CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
4246
4247 return (0);
4248 }
4249
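/*
 * Re-queue the descriptor for standard RX slot i so the existing mbuf
 * is reused instead of being handed up the stack.
 */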
4250 static __inline void
4251 bge_rxreuse_std(struct bge_softc *sc, int i)
4252 {
4253 struct bge_rx_bd *r;
4254
4255 r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std];
4256 r->bge_flags = BGE_RXBDFLAG_END;
4257 r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i];
4258 r->bge_idx = i;
4259 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4260 }
4261
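/*
 * Re-queue the descriptor for jumbo RX slot i so the existing mbuf is
 * reused instead of being handed up the stack.
 */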
4262 static __inline void
4263 bge_rxreuse_jumbo(struct bge_softc *sc, int i)
4264 {
4265 struct bge_extrx_bd *r;
4266
4267 r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo];
4268 r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
4269 r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0];
4270 r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1];
4271 r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2];
4272 r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3];
4273 r->bge_idx = i;
4274 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4275 }
4276
4277 /*
4278 * Frame reception handling. This is called if there's a frame
4279 * on the receive return list.
4280 *
4281 * Note: we have to be able to handle two possibilities here:
4282 * 1) the frame is from the jumbo receive ring
4283 * 2) the frame is from the standard receive ring
4284 */
4285
4286 static int
4287 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
4288 {
4289 if_t ifp;
4290 int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
4291 uint16_t rx_cons;
4292
4293 rx_cons = sc->bge_rx_saved_considx;
4294
4295 /* Nothing to do. */
4296 if (rx_cons == rx_prod)
4297 return (rx_npkts);
4298
4299 ifp = sc->bge_ifp;
4300
4301 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
4302 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD);
4303 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
4304 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
4305 if (BGE_IS_JUMBO_CAPABLE(sc) &&
4306 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
4307 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
4308 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
4309 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
4310
4311 while (rx_cons != rx_prod) {
4312 struct bge_rx_bd *cur_rx;
4313 uint32_t rxidx;
4314 struct mbuf *m = NULL;
4315 uint16_t vlan_tag = 0;
4316 int have_tag = 0;
4317
4318 #ifdef DEVICE_POLLING
4319 if (if_getcapenable(ifp) & IFCAP_POLLING) {
4320 if (sc->rxcycles <= 0)
4321 break;
4322 sc->rxcycles--;
4323 }
4324 #endif
4325
4326 cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons];
4327
4328 rxidx = cur_rx->bge_idx;
4329 BGE_INC(rx_cons, sc->bge_return_ring_cnt);
4330
4331 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
4332 cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
4333 have_tag = 1;
4334 vlan_tag = cur_rx->bge_vlan_tag;
4335 }
4336
4337 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
4338 jumbocnt++;
4339 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
4340 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4341 bge_rxreuse_jumbo(sc, rxidx);
4342 continue;
4343 }
4344 if (bge_newbuf_jumbo(sc, rxidx) != 0) {
4345 bge_rxreuse_jumbo(sc, rxidx);
4346 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
4347 continue;
4348 }
4349 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
4350 } else {
4351 stdcnt++;
4352 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
4353 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
4354 bge_rxreuse_std(sc, rxidx);
4355 continue;
4356 }
4357 if (bge_newbuf_std(sc, rxidx) != 0) {
4358 bge_rxreuse_std(sc, rxidx);
4359 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
4360 continue;
4361 }
4362 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
4363 }
4364
4365 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
4366 #ifndef __NO_STRICT_ALIGNMENT
4367 /*
4368 * For architectures with strict alignment we must make sure
4369 * the payload is aligned.
4370 */
4371 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) {
4372 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
4373 cur_rx->bge_len);
4374 m->m_data += ETHER_ALIGN;
4375 }
4376 #endif
4377 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
4378 m->m_pkthdr.rcvif = ifp;
4379
4380 if (if_getcapenable(ifp) & IFCAP_RXCSUM)
4381 bge_rxcsum(sc, cur_rx, m);
4382
4383 /*
4384 * If we received a packet with a vlan tag,
4385 * attach that information to the packet.
4386 */
4387 if (have_tag) {
4388 m->m_pkthdr.ether_vtag = vlan_tag;
4389 m->m_flags |= M_VLANTAG;
4390 }
4391
4392 if (holdlck != 0) {
4393 BGE_UNLOCK(sc);
4394 if_input(ifp, m);
4395 BGE_LOCK(sc);
4396 } else
4397 if_input(ifp, m);
4398 rx_npkts++;
4399
4400 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
4401 return (rx_npkts);
4402 }
4403
4404 bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag,
4405 sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD);
4406 if (stdcnt > 0)
4407 bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
4408 sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE);
4409
4410 if (jumbocnt > 0)
4411 bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
4412 sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE);
4413
4414 sc->bge_rx_saved_considx = rx_cons;
4415 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
4416 if (stdcnt)
4417 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std +
4418 BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT);
4419 if (jumbocnt)
4420 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo +
4421 BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT);
4422 #ifdef notyet
4423 /*
4424 * This register wraps very quickly under heavy packet drops.
4425 * If you need correct statistics, you can enable this check.
4426 */
4427 if (BGE_IS_5705_PLUS(sc))
4428 if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
4429 #endif
4430 return (rx_npkts);
4431 }
4432
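/*
 * Translate the hardware IP/TCP/UDP checksum results in the RX
 * descriptor into mbuf checksum flags.
 */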
4433 static void
4434 bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m)
4435 {
4436
4437 if (BGE_IS_5717_PLUS(sc)) {
4438 if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) {
4439 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4440 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4441 if ((cur_rx->bge_error_flag &
4442 BGE_RXERRFLAG_IP_CSUM_NOK) == 0)
4443 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4444 }
4445 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
4446 m->m_pkthdr.csum_data =
4447 cur_rx->bge_tcp_udp_csum;
4448 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4449 CSUM_PSEUDO_HDR;
4450 }
4451 }
4452 } else {
4453 if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) {
4454 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4455 if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0)
4456 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4457 }
4458 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
4459 m->m_pkthdr.len >= ETHER_MIN_NOPAD) {
4460 m->m_pkthdr.csum_data =
4461 cur_rx->bge_tcp_udp_csum;
4462 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
4463 CSUM_PSEUDO_HDR;
4464 }
4465 }
4466 }
4467
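/*
 * Transmit completion handling: reclaim descriptors and free mbufs for
 * frames the hardware has finished sending.
 */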
4468 static void
4469 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
4470 {
4471 struct bge_tx_bd *cur_tx;
4472 if_t ifp;
4473
4474 BGE_LOCK_ASSERT(sc);
4475
4476 /* Nothing to do. */
4477 if (sc->bge_tx_saved_considx == tx_cons)
4478 return;
4479
4480 ifp = sc->bge_ifp;
4481
4482 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
4483 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE);
4484 /*
4485 * Go through our tx ring and free mbufs for those
4486 * frames that have been sent.
4487 */
4488 while (sc->bge_tx_saved_considx != tx_cons) {
4489 uint32_t idx;
4490
4491 idx = sc->bge_tx_saved_considx;
4492 cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
4493 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
4494 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
4495 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
4496 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
4497 sc->bge_cdata.bge_tx_dmamap[idx],
4498 BUS_DMASYNC_POSTWRITE);
4499 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
4500 sc->bge_cdata.bge_tx_dmamap[idx]);
4501 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
4502 sc->bge_cdata.bge_tx_chain[idx] = NULL;
4503 }
4504 sc->bge_txcnt--;
4505 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
4506 }
4507
4508 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
4509 if (sc->bge_txcnt == 0)
4510 sc->bge_timer = 0;
4511 }
4512
4513 #ifdef DEVICE_POLLING
4514 static int
4515 bge_poll(if_t ifp, enum poll_cmd cmd, int count)
4516 {
4517 struct bge_softc *sc = if_getsoftc(ifp);
4518 uint16_t rx_prod, tx_cons;
4519 uint32_t statusword;
4520 int rx_npkts = 0;
4521
4522 BGE_LOCK(sc);
4523 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
4524 BGE_UNLOCK(sc);
4525 return (rx_npkts);
4526 }
4527
4528 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4529 sc->bge_cdata.bge_status_map,
4530 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4531 /* Fetch updates from the status block. */
4532 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4533 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4534
4535 statusword = sc->bge_ldata.bge_status_block->bge_status;
4536 /* Clear the status so the next pass only sees the changes. */
4537 sc->bge_ldata.bge_status_block->bge_status = 0;
4538
4539 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4540 sc->bge_cdata.bge_status_map,
4541 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4542
4543 /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */
4544 if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED)
4545 sc->bge_link_evt++;
4546
4547 if (cmd == POLL_AND_CHECK_STATUS)
4548 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4549 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4550 sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI))
4551 bge_link_upd(sc);
4552
4553 sc->rxcycles = count;
4554 rx_npkts = bge_rxeof(sc, rx_prod, 1);
4555 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
4556 BGE_UNLOCK(sc);
4557 return (rx_npkts);
4558 }
4559 bge_txeof(sc, tx_cons);
4560 if (!if_sendq_empty(ifp))
4561 bge_start_locked(ifp);
4562
4563 BGE_UNLOCK(sc);
4564 return (rx_npkts);
4565 }
4566 #endif /* DEVICE_POLLING */
4567
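/*
 * MSI interrupt filter: defer all work to the interrupt taskqueue.
 */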
4568 static int
4569 bge_msi_intr(void *arg)
4570 {
4571 struct bge_softc *sc;
4572
4573 sc = (struct bge_softc *)arg;
4574 /*
4575 * This interrupt is not shared and the controller has
4576 * already disabled further interrupts.
4577 */
4578 taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task);
4579 return (FILTER_HANDLED);
4580 }
4581
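/*
 * Interrupt taskqueue handler: process the status block, handle link
 * changes, and service the RX return and TX rings.
 */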
4582 static void
4583 bge_intr_task(void *arg, int pending)
4584 {
4585 struct bge_softc *sc;
4586 if_t ifp;
4587 uint32_t status, status_tag;
4588 uint16_t rx_prod, tx_cons;
4589
4590 sc = (struct bge_softc *)arg;
4591 ifp = sc->bge_ifp;
4592
4593 BGE_LOCK(sc);
4594 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
4595 BGE_UNLOCK(sc);
4596 return;
4597 }
4598
4599 /* Get updated status block. */
4600 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4601 sc->bge_cdata.bge_status_map,
4602 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4603
4604 /* Save producer/consumer indices. */
4605 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4606 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4607 status = sc->bge_ldata.bge_status_block->bge_status;
4608 status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24;
4609 /* Dirty the status flag. */
4610 sc->bge_ldata.bge_status_block->bge_status = 0;
4611 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4612 sc->bge_cdata.bge_status_map,
4613 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4614 if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0)
4615 status_tag = 0;
4616
4617 if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0)
4618 bge_link_upd(sc);
4619
4620 /* Let controller work. */
4621 bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
4622
4623 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
4624 sc->bge_rx_saved_considx != rx_prod) {
4625 /* Check RX return ring producer/consumer. */
4626 BGE_UNLOCK(sc);
4627 bge_rxeof(sc, rx_prod, 0);
4628 BGE_LOCK(sc);
4629 }
4630 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4631 /* Check TX ring producer/consumer. */
4632 bge_txeof(sc, tx_cons);
4633 if (!if_sendq_empty(ifp))
4634 bge_start_locked(ifp);
4635 }
4636 BGE_UNLOCK(sc);
4637 }
4638
4639 static void
4640 bge_intr(void *xsc)
4641 {
4642 struct bge_softc *sc;
4643 if_t ifp;
4644 uint32_t statusword;
4645 uint16_t rx_prod, tx_cons;
4646
4647 sc = xsc;
4648
4649 BGE_LOCK(sc);
4650
4651 ifp = sc->bge_ifp;
4652
4653 #ifdef DEVICE_POLLING
4654 if (if_getcapenable(ifp) & IFCAP_POLLING) {
4655 BGE_UNLOCK(sc);
4656 return;
4657 }
4658 #endif
4659
4660 /*
4661 * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't
4662 * disable interrupts by writing nonzero like we used to, since with
4663 * our current organization this just gives complications and
4664 * pessimizations for re-enabling interrupts. We used to have races
4665 * instead of the necessary complications. Disabling interrupts
4666 * would just reduce the chance of a status update while we are
4667 * running (by switching to the interrupt-mode coalescence
4668 * parameters), but this chance is already very low so it is more
4669 * efficient to get another interrupt than prevent it.
4670 *
4671 * We do the ack first to ensure another interrupt if there is a
4672 * status update after the ack. We don't check for the status
4673 * changing later because it is more efficient to get another
4674 * interrupt than prevent it, not quite as above (not checking is
4675 * a smaller optimization than not toggling the interrupt enable,
4676 * since checking doesn't involve PCI accesses and toggling requires
4677 * the status check). So toggling would probably be a pessimization
4678 * even with MSI. It would only be needed for using a task queue.
4679 */
4680 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4681
4682 /*
4683 * Do the mandatory PCI flush as well as get the link status.
4684 */
4685 statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED;
4686
4687 /* Make sure the descriptor ring indexes are coherent. */
4688 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4689 sc->bge_cdata.bge_status_map,
4690 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4691 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
4692 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
4693 sc->bge_ldata.bge_status_block->bge_status = 0;
4694 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
4695 sc->bge_cdata.bge_status_map,
4696 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4697
4698 if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
4699 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
4700 statusword || sc->bge_link_evt)
4701 bge_link_upd(sc);
4702
4703 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4704 /* Check RX return ring producer/consumer. */
4705 bge_rxeof(sc, rx_prod, 1);
4706 }
4707
4708 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
4709 /* Check TX ring producer/consumer. */
4710 bge_txeof(sc, tx_cons);
4711 }
4712
4713 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
4714 !if_sendq_empty(ifp))
4715 bge_start_locked(ifp);
4716
4717 BGE_UNLOCK(sc);
4718 }
4719
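/*
 * Periodically notify the ASF/IPMI firmware that the host driver is
 * still alive.
 */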
4720 static void
4721 bge_asf_driver_up(struct bge_softc *sc)
4722 {
4723 if (sc->bge_asf_mode & ASF_STACKUP) {
4724 /* Send an ASF heartbeat approximately every 2 seconds. */
4725 if (sc->bge_asf_count)
4726 sc->bge_asf_count--;
4727 else {
4728 sc->bge_asf_count = 2;
4729 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB,
4730 BGE_FW_CMD_DRV_ALIVE);
4731 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4);
4732 bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB,
4733 BGE_FW_HB_TIMEOUT_SEC);
4734 CSR_WRITE_4(sc, BGE_RX_CPU_EVENT,
4735 CSR_READ_4(sc, BGE_RX_CPU_EVENT) |
4736 BGE_RX_CPU_DRV_EVENT);
4737 }
4738 }
4739 }
4740
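/*
 * Once-a-second timer: update statistics, poll link state when needed,
 * send the ASF heartbeat and run the watchdog.
 */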
4741 static void
4742 bge_tick(void *xsc)
4743 {
4744 struct bge_softc *sc = xsc;
4745 struct mii_data *mii = NULL;
4746
4747 BGE_LOCK_ASSERT(sc);
4748
4749 /* Synchronize with possible callout reset/stop. */
4750 if (callout_pending(&sc->bge_stat_ch) ||
4751 !callout_active(&sc->bge_stat_ch))
4752 return;
4753
4754 if (BGE_IS_5705_PLUS(sc))
4755 bge_stats_update_regs(sc);
4756 else
4757 bge_stats_update(sc);
4758
4759 /* XXX Add APE heartbeat check here? */
4760
4761 if ((sc->bge_flags & BGE_FLAG_TBI) == 0) {
4762 mii = device_get_softc(sc->bge_miibus);
4763 /*
4764 * Do not touch the PHY if we have link up. This could break
4765 * IPMI/ASF mode or produce extra input errors
4766 * (extra errors were reported for bcm5701 & bcm5704).
4767 */
4768 if (!sc->bge_link)
4769 mii_tick(mii);
4770 } else {
4771 /*
4772 * Since auto-polling can't be used in TBI mode, we have to poll
4773 * the link status manually. Here we register a pending link
4774 * event and trigger an interrupt.
4775 */
4776 #ifdef DEVICE_POLLING
4777 /* In polling mode we poll link state in bge_poll(). */
4778 if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
4779 #endif
4780 {
4781 sc->bge_link_evt++;
4782 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
4783 sc->bge_flags & BGE_FLAG_5788)
4784 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
4785 else
4786 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
4787 }
4788 }
4789
4790 bge_asf_driver_up(sc);
4791 bge_watchdog(sc);
4792
4793 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
4794 }
4795
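/*
 * Update the driver's MAC statistics from the hardware statistics
 * registers (BCM5705 and newer controllers).
 */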
4796 static void
4797 bge_stats_update_regs(struct bge_softc *sc)
4798 {
4799 struct bge_mac_stats *stats;
4800 uint32_t val;
4801
4802 stats = &sc->bge_mac_stats;
4803
4804 stats->ifHCOutOctets +=
4805 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4806 stats->etherStatsCollisions +=
4807 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4808 stats->outXonSent +=
4809 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4810 stats->outXoffSent +=
4811 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4812 stats->dot3StatsInternalMacTransmitErrors +=
4813 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4814 stats->dot3StatsSingleCollisionFrames +=
4815 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4816 stats->dot3StatsMultipleCollisionFrames +=
4817 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4818 stats->dot3StatsDeferredTransmissions +=
4819 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4820 stats->dot3StatsExcessiveCollisions +=
4821 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4822 stats->dot3StatsLateCollisions +=
4823 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4824 stats->ifHCOutUcastPkts +=
4825 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4826 stats->ifHCOutMulticastPkts +=
4827 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4828 stats->ifHCOutBroadcastPkts +=
4829 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4830
4831 stats->ifHCInOctets +=
4832 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4833 stats->etherStatsFragments +=
4834 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4835 stats->ifHCInUcastPkts +=
4836 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4837 stats->ifHCInMulticastPkts +=
4838 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4839 stats->ifHCInBroadcastPkts +=
4840 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4841 stats->dot3StatsFCSErrors +=
4842 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4843 stats->dot3StatsAlignmentErrors +=
4844 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4845 stats->xonPauseFramesReceived +=
4846 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4847 stats->xoffPauseFramesReceived +=
4848 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4849 stats->macControlFramesReceived +=
4850 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4851 stats->xoffStateEntered +=
4852 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4853 stats->dot3StatsFramesTooLong +=
4854 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4855 stats->etherStatsJabbers +=
4856 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4857 stats->etherStatsUndersizePkts +=
4858 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4859
4860 stats->FramesDroppedDueToFilters +=
4861 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4862 stats->DmaWriteQueueFull +=
4863 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4864 stats->DmaWriteHighPriQueueFull +=
4865 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4866 stats->NoMoreRxBDs +=
4867 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4868 /*
4869 * XXX
4870 * Unlike other controllers, the BGE_RXLP_LOCSTAT_IFIN_DROPS
4871 * counter of the BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0
4872 * includes the number of unwanted multicast frames. This comes
4873 * from a silicon bug, and the known workaround to get a rough
4874 * (not exact) counter is to enable an interrupt on MBUF low
4875 * water attention. This can be accomplished by setting the
4876 * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, the
4877 * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and the
4878 * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL.
4879 * However, that change would generate more interrupts and
4880 * there is still a possibility of losing multiple frames
4881 * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling.
4882 * Given that the workaround still would not yield a correct
4883 * counter, it doesn't seem worth implementing. So
4884 * ignore reading the counter on controllers that have the
4885 * silicon bug.
4886 */
4887 if (sc->bge_asicrev != BGE_ASICREV_BCM5717 &&
4888 sc->bge_chipid != BGE_CHIPID_BCM5719_A0 &&
4889 sc->bge_chipid != BGE_CHIPID_BCM5720_A0)
4890 stats->InputDiscards +=
4891 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4892 stats->InputErrors +=
4893 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4894 stats->RecvThresholdHit +=
4895 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4896
4897 if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
4898 /*
4899 * If the controller has transmitted more than BGE_NUM_RDMA_CHANNELS
4900 * frames, it's safe to disable the workaround for the DMA engine's
4901 * miscalculation of TXMBUF space.
4902 */
4903 if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts +
4904 stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) {
4905 val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL);
4906 if (sc->bge_asicrev == BGE_ASICREV_BCM5719)
4907 val &= ~BGE_RDMA_TX_LENGTH_WA_5719;
4908 else
4909 val &= ~BGE_RDMA_TX_LENGTH_WA_5720;
4910 CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val);
4911 sc->bge_flags &= ~BGE_FLAG_RDMA_BUG;
4912 }
4913 }
4914 }
4915
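/*
 * Clear the hardware statistics by reading every statistics register;
 * the counters are reset by the read.
 */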
4916 static void
4917 bge_stats_clear_regs(struct bge_softc *sc)
4918 {
4919
4920 CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS);
4921 CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS);
4922 CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT);
4923 CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT);
4924 CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS);
4925 CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL);
4926 CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL);
4927 CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED);
4928 CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL);
4929 CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL);
4930 CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST);
4931 CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST);
4932 CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST);
4933
4934 CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS);
4935 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS);
4936 CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST);
4937 CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST);
4938 CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST);
4939 CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS);
4940 CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS);
4941 CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD);
4942 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD);
4943 CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD);
4944 CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED);
4945 CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG);
4946 CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS);
4947 CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE);
4948
4949 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP);
4950 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL);
4951 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL);
4952 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS);
4953 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
4954 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS);
4955 CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
4956 }
4957
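/*
 * Update interface counters from the statistics block that older
 * controllers maintain in NIC memory.
 */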
4958 static void
4959 bge_stats_update(struct bge_softc *sc)
4960 {
4961 if_t ifp;
4962 bus_size_t stats;
4963 uint32_t cnt; /* current register value */
4964
4965 ifp = sc->bge_ifp;
4966
4967 stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
4968
4969 #define READ_STAT(sc, stats, stat) \
4970 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
4971
4972 cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
4973 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions);
4974 sc->bge_tx_collisions = cnt;
4975
4976 cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
4977 if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds);
4978 sc->bge_rx_nobds = cnt;
4979 cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
4980 if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs);
4981 sc->bge_rx_inerrs = cnt;
4982 cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
4983 if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards);
4984 sc->bge_rx_discards = cnt;
4985
4986 cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
4987 if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards);
4988 sc->bge_tx_discards = cnt;
4989
4990 #undef READ_STAT
4991 }
4992
4993 /*
4994 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
4995 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
4996 * but when such padded frames employ the bge IP/TCP checksum offload,
4997 * the hardware checksum assist gives incorrect results (possibly
4998 * from incorporating its own padding into the UDP/TCP checksum; who knows).
4999 * If we pad such runts with zeros, the onboard checksum comes out correct.
5000 */
5001 static __inline int
5002 bge_cksum_pad(struct mbuf *m)
5003 {
5004 int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len;
5005 struct mbuf *last;
5006
5007 /* If there's only the packet-header and we can pad there, use it. */
5008 if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) &&
5009 M_TRAILINGSPACE(m) >= padlen) {
5010 last = m;
5011 } else {
5012 /*
5013 * Walk packet chain to find last mbuf. We will either
5014 * pad there, or append a new mbuf and pad it.
5015 */
5016 for (last = m; last->m_next != NULL; last = last->m_next);
5017 if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) {
5018 /* Allocate new empty mbuf, pad it. Compact later. */
5019 struct mbuf *n;
5020
5021 MGET(n, M_NOWAIT, MT_DATA);
5022 if (n == NULL)
5023 return (ENOBUFS);
5024 n->m_len = 0;
5025 last->m_next = n;
5026 last = n;
5027 }
5028 }
5029
5030 /* Now zero the pad area, to avoid the bge cksum-assist bug. */
5031 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
5032 last->m_len += padlen;
5033 m->m_pkthdr.len += padlen;
5034
5035 return (0);
5036 }
5037
5038 static struct mbuf *
5039 bge_check_short_dma(struct mbuf *m)
5040 {
5041 struct mbuf *n;
5042 int found;
5043
5044 	/*
5045 	 * If the device receives two back-to-back send BDs with less than
5046 	 * or equal to 8 total bytes then the device may hang. The two
5047 	 * back-to-back send BDs must be in the same frame for this failure
5048 	 * to occur. Scan the mbuf chain and check whether two back-to-back
5049 	 * send BDs would be generated. If so, allocate a new mbuf and
5050 	 * copy the frame into it to work around the silicon bug.
5051 	 */
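	/*
	 * For instance, a chain laid out as 4 + 3 + 64 byte mbufs trips the
	 * bug (two consecutive short BDs), while 4 + 64 + 3 does not, so
	 * only the former is defragmented below.
	 */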
5052 for (n = m, found = 0; n != NULL; n = n->m_next) {
5053 if (n->m_len < 8) {
5054 found++;
5055 if (found > 1)
5056 break;
5057 continue;
5058 }
5059 found = 0;
5060 }
5061
5062 if (found > 1) {
5063 n = m_defrag(m, M_NOWAIT);
5064 if (n == NULL)
5065 m_freem(m);
5066 } else
5067 n = m;
5068 return (n);
5069 }
5070
5071 static struct mbuf *
5072 bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
5073 uint16_t *flags)
5074 {
5075 struct ip *ip;
5076 struct tcphdr *tcp;
5077 struct mbuf *n;
5078 uint16_t hlen;
5079 uint32_t poff;
5080
5081 if (M_WRITABLE(m) == 0) {
5082 /* Get a writable copy. */
5083 n = m_dup(m, M_NOWAIT);
5084 m_freem(m);
5085 if (n == NULL)
5086 return (NULL);
5087 m = n;
5088 }
5089 m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip));
5090 if (m == NULL)
5091 return (NULL);
5092 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
5093 poff = sizeof(struct ether_header) + (ip->ip_hl << 2);
5094 m = m_pullup(m, poff + sizeof(struct tcphdr));
5095 if (m == NULL)
5096 return (NULL);
5097 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
5098 m = m_pullup(m, poff + (tcp->th_off << 2));
5099 if (m == NULL)
5100 return (NULL);
5101 	/*
5102 	 * It seems the controller doesn't modify the IP length or the TCP
5103 	 * pseudo checksum, so the checksums from the upper stack must be 0.
5104 	 */
5105 *mss = m->m_pkthdr.tso_segsz;
5106 ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header));
5107 ip->ip_sum = 0;
5108 ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2));
5109 /* Clear pseudo checksum computed by TCP stack. */
5110 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
5111 tcp->th_sum = 0;
5112 	/*
5113 	 * Broadcom controllers use different descriptor formats for
5114 	 * TSO depending on the ASIC revision. Because of the licensing
5115 	 * issue with TSO-capable firmware and the lower performance of
5116 	 * firmware based TSO, only hardware based TSO is supported.
5117 	 */
5118 /* Calculate header length, incl. TCP/IP options, in 32 bit units. */
5119 hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2;
5120 if (sc->bge_flags & BGE_FLAG_TSO3) {
5121 /*
5122 * For BCM5717 and newer controllers, hardware based TSO
5123 * uses the 14 lower bits of the bge_mss field to store the
5124 * MSS and the upper 2 bits to store the lowest 2 bits of
5125 * the IP/TCP header length. The upper 6 bits of the header
5126 * length are stored in the bge_flags[14:10,4] field. Jumbo
5127 * frames are supported.
5128 */
5129 *mss |= ((hlen & 0x3) << 14);
5130 *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2);
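		/*
		 * Worked example for this encoding: with a 20 byte IP header
		 * and a 32 byte TCP header (options included), hlen is
		 * (20 + 32) / 4 = 13 (binary 01101); the low two bits (0x1)
		 * land in bge_mss[15:14] and the remaining bits are folded
		 * into the send BD flags as shown above.
		 */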
5131 } else {
5132 /*
5133 * For BCM5755 and newer controllers, hardware based TSO uses
5134 * the lower 11 bits to store the MSS and the upper 5 bits to
5135 * store the IP/TCP header length. Jumbo frames are not
5136 * supported.
5137 */
5138 *mss |= (hlen << 11);
5139 }
5140 return (m);
5141 }
5142
5143 /*
5144 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
5145 * pointers to descriptors.
5146 */
5147 static int
5148 bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
5149 {
5150 bus_dma_segment_t segs[BGE_NSEG_NEW];
5151 bus_dmamap_t map;
5152 struct bge_tx_bd *d;
5153 struct mbuf *m = *m_head;
5154 uint32_t idx = *txidx;
5155 uint16_t csum_flags, mss, vlan_tag;
5156 int nsegs, i, error;
5157
5158 csum_flags = 0;
5159 mss = 0;
5160 vlan_tag = 0;
5161 if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 &&
5162 m->m_next != NULL) {
5163 *m_head = bge_check_short_dma(m);
5164 if (*m_head == NULL)
5165 return (ENOBUFS);
5166 m = *m_head;
5167 }
5168 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
5169 *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags);
5170 if (*m_head == NULL)
5171 return (ENOBUFS);
5172 csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA |
5173 BGE_TXBDFLAG_CPU_POST_DMA;
5174 } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) {
5175 if (m->m_pkthdr.csum_flags & CSUM_IP)
5176 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
5177 if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) {
5178 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
5179 if (m->m_pkthdr.len < ETHER_MIN_NOPAD &&
5180 (error = bge_cksum_pad(m)) != 0) {
5181 m_freem(m);
5182 *m_head = NULL;
5183 return (error);
5184 }
5185 }
5186 }
5187
5188 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
5189 if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME &&
5190 m->m_pkthdr.len > ETHER_MAX_LEN)
5191 csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME;
5192 if (sc->bge_forced_collapse > 0 &&
5193 (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) {
5194 			/*
5195 			 * Forcibly collapse mbuf chains to overcome a hardware
5196 			 * limitation which only supports a single outstanding
5197 			 * DMA read operation.
5198 			 */
5199 if (sc->bge_forced_collapse == 1)
5200 m = m_defrag(m, M_NOWAIT);
5201 else
5202 m = m_collapse(m, M_NOWAIT,
5203 sc->bge_forced_collapse);
5204 if (m == NULL)
5205 m = *m_head;
5206 *m_head = m;
5207 }
5208 }
5209
5210 map = sc->bge_cdata.bge_tx_dmamap[idx];
5211 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
5212 &nsegs, BUS_DMA_NOWAIT);
5213 if (error == EFBIG) {
5214 m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
5215 if (m == NULL) {
5216 m_freem(*m_head);
5217 *m_head = NULL;
5218 return (ENOBUFS);
5219 }
5220 *m_head = m;
5221 error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
5222 m, segs, &nsegs, BUS_DMA_NOWAIT);
5223 if (error) {
5224 m_freem(m);
5225 *m_head = NULL;
5226 return (error);
5227 }
5228 } else if (error != 0)
5229 return (error);
5230
5231 /* Check if we have enough free send BDs. */
5232 if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) {
5233 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
5234 return (ENOBUFS);
5235 }
5236
5237 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
5238
5239 if (m->m_flags & M_VLANTAG) {
5240 csum_flags |= BGE_TXBDFLAG_VLAN_TAG;
5241 vlan_tag = m->m_pkthdr.ether_vtag;
5242 }
5243
5244 if (sc->bge_asicrev == BGE_ASICREV_BCM5762 &&
5245 (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
5246 /*
5247 * 5725 family of devices corrupts TSO packets when TSO DMA
5248 * buffers cross into regions which are within MSS bytes of
5249 * a 4GB boundary. If we encounter the condition, drop the
5250 * packet.
5251 */
5252 for (i = 0; ; i++) {
5253 d = &sc->bge_ldata.bge_tx_ring[idx];
5254 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
5255 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
5256 d->bge_len = segs[i].ds_len;
5257 if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss <
5258 d->bge_addr.bge_addr_lo)
5259 break;
5260 d->bge_flags = csum_flags;
5261 d->bge_vlan_tag = vlan_tag;
5262 d->bge_mss = mss;
5263 if (i == nsegs - 1)
5264 break;
5265 BGE_INC(idx, BGE_TX_RING_CNT);
5266 }
5267 if (i != nsegs - 1) {
5268 bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map,
5269 BUS_DMASYNC_POSTWRITE);
5270 bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
5271 m_freem(*m_head);
5272 *m_head = NULL;
5273 return (EIO);
5274 }
5275 } else {
5276 for (i = 0; ; i++) {
5277 d = &sc->bge_ldata.bge_tx_ring[idx];
5278 d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr);
5279 d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr);
5280 d->bge_len = segs[i].ds_len;
5281 d->bge_flags = csum_flags;
5282 d->bge_vlan_tag = vlan_tag;
5283 d->bge_mss = mss;
5284 if (i == nsegs - 1)
5285 break;
5286 BGE_INC(idx, BGE_TX_RING_CNT);
5287 }
5288 }
5289
5290 /* Mark the last segment as end of packet... */
5291 d->bge_flags |= BGE_TXBDFLAG_END;
5292
5293 /*
5294 	 * Ensure that the map for this transmission
5295 * is placed at the array index of the last descriptor
5296 * in this chain.
5297 */
5298 sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx];
5299 sc->bge_cdata.bge_tx_dmamap[idx] = map;
5300 sc->bge_cdata.bge_tx_chain[idx] = m;
5301 sc->bge_txcnt += nsegs;
5302
5303 BGE_INC(idx, BGE_TX_RING_CNT);
5304 *txidx = idx;
5305
5306 return (0);
5307 }
5308
5309 /*
5310 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5311 * to the mbuf data regions directly in the transmit descriptors.
5312 */
5313 static void
5314 bge_start_locked(if_t ifp)
5315 {
5316 struct bge_softc *sc;
5317 struct mbuf *m_head;
5318 uint32_t prodidx;
5319 int count;
5320
5321 sc = if_getsoftc(ifp);
5322 BGE_LOCK_ASSERT(sc);
5323
5324 if (!sc->bge_link ||
5325 (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
5326 IFF_DRV_RUNNING)
5327 return;
5328
5329 prodidx = sc->bge_tx_prodidx;
5330
5331 for (count = 0; !if_sendq_empty(ifp);) {
5332 if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
5333 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5334 break;
5335 }
5336 m_head = if_dequeue(ifp);
5337 if (m_head == NULL)
5338 break;
5339
5340 /*
5341 * Pack the data into the transmit ring. If we
5342 * don't have room, set the OACTIVE flag and wait
5343 * for the NIC to drain the ring.
5344 */
5345 if (bge_encap(sc, &m_head, &prodidx)) {
5346 if (m_head == NULL)
5347 break;
5348 if_sendq_prepend(ifp, m_head);
5349 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
5350 break;
5351 }
5352 ++count;
5353
5354 /*
5355 * If there's a BPF listener, bounce a copy of this frame
5356 * to him.
5357 */
5358 bpf_mtap_if(ifp, m_head);
5359 }
5360
5361 if (count > 0)
5362 bge_start_tx(sc, prodidx);
5363 }
5364
5365 static void
5366 bge_start_tx(struct bge_softc *sc, uint32_t prodidx)
5367 {
5368
5369 bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag,
5370 sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE);
5371 /* Transmit. */
5372 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5373 	/* 5700 b2 errata: repeat the producer index mailbox write. */
5374 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
5375 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
5376
5377 sc->bge_tx_prodidx = prodidx;
5378
5379 /* Set a timeout in case the chip goes out to lunch. */
5380 sc->bge_timer = BGE_TX_TIMEOUT;
5381 }
5382
5383 /*
5384 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
5385 * to the mbuf data regions directly in the transmit descriptors.
5386 */
5387 static void
5388 bge_start(if_t ifp)
5389 {
5390 struct bge_softc *sc;
5391
5392 sc = if_getsoftc(ifp);
5393 BGE_LOCK(sc);
5394 bge_start_locked(ifp);
5395 BGE_UNLOCK(sc);
5396 }
5397
5398 static void
5399 bge_init_locked(struct bge_softc *sc)
5400 {
5401 if_t ifp;
5402 uint16_t *m;
5403 uint32_t mode;
5404
5405 BGE_LOCK_ASSERT(sc);
5406
5407 ifp = sc->bge_ifp;
5408
5409 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
5410 return;
5411
5412 /* Cancel pending I/O and flush buffers. */
5413 bge_stop(sc);
5414
5415 bge_stop_fw(sc);
5416 bge_sig_pre_reset(sc, BGE_RESET_START);
5417 bge_reset(sc);
5418 bge_sig_legacy(sc, BGE_RESET_START);
5419 bge_sig_post_reset(sc, BGE_RESET_START);
5420
5421 bge_chipinit(sc);
5422
5423 /*
5424 * Init the various state machines, ring
5425 * control blocks and firmware.
5426 */
5427 if (bge_blockinit(sc)) {
5428 device_printf(sc->bge_dev, "initialization failure\n");
5429 return;
5430 }
5431
5432 ifp = sc->bge_ifp;
5433
5434 /* Specify MTU. */
5435 CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) +
5436 ETHER_HDR_LEN + ETHER_CRC_LEN +
5437 (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
5438
5439 /* Load our MAC address. */
5440 m = (uint16_t *)if_getlladdr(sc->bge_ifp);
5441 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
5442 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
5443
5444 /* Program promiscuous mode. */
5445 bge_setpromisc(sc);
5446
5447 /* Program multicast filter. */
5448 bge_setmulti(sc);
5449
5450 /* Program VLAN tag stripping. */
5451 bge_setvlan(sc);
5452
5453 /* Override UDP checksum offloading. */
5454 if (sc->bge_forced_udpcsum == 0)
5455 sc->bge_csum_features &= ~CSUM_UDP;
5456 else
5457 sc->bge_csum_features |= CSUM_UDP;
5458 if (if_getcapabilities(ifp) & IFCAP_TXCSUM &&
5459 if_getcapenable(ifp) & IFCAP_TXCSUM) {
5460 if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP));
5461 if_sethwassistbits(ifp, sc->bge_csum_features, 0);
5462 }
5463
5464 /* Init RX ring. */
5465 if (bge_init_rx_ring_std(sc) != 0) {
5466 device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
5467 bge_stop(sc);
5468 return;
5469 }
5470
5471 /*
5472 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
5473 	 * memory to ensure that the chip has in fact read the first
5474 * entry of the ring.
5475 */
5476 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
5477 uint32_t v, i;
5478 for (i = 0; i < 10; i++) {
5479 DELAY(20);
5480 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
5481 if (v == (MCLBYTES - ETHER_ALIGN))
5482 break;
5483 }
5484 if (i == 10)
5485 device_printf (sc->bge_dev,
5486 "5705 A0 chip failed to load RX ring\n");
5487 }
5488
5489 /* Init jumbo RX ring. */
5490 if (BGE_IS_JUMBO_CAPABLE(sc) &&
5491 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
5492 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
5493 if (bge_init_rx_ring_jumbo(sc) != 0) {
5494 device_printf(sc->bge_dev,
5495 "no memory for jumbo Rx buffers.\n");
5496 bge_stop(sc);
5497 return;
5498 }
5499 }
5500
5501 /* Init our RX return ring index. */
5502 sc->bge_rx_saved_considx = 0;
5503
5504 /* Init our RX/TX stat counters. */
5505 sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0;
5506
5507 /* Init TX ring. */
5508 bge_init_tx_ring(sc);
5509
5510 /* Enable TX MAC state machine lockup fix. */
5511 mode = CSR_READ_4(sc, BGE_TX_MODE);
5512 if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906)
5513 mode |= BGE_TXMODE_MBUF_LOCKUP_FIX;
5514 if (sc->bge_asicrev == BGE_ASICREV_BCM5720 ||
5515 sc->bge_asicrev == BGE_ASICREV_BCM5762) {
5516 mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5517 mode |= CSR_READ_4(sc, BGE_TX_MODE) &
5518 (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE);
5519 }
5520 /* Turn on transmitter. */
5521 CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE);
5522 DELAY(100);
5523
5524 /* Turn on receiver. */
5525 mode = CSR_READ_4(sc, BGE_RX_MODE);
5526 if (BGE_IS_5755_PLUS(sc))
5527 mode |= BGE_RXMODE_IPV6_ENABLE;
5528 if (sc->bge_asicrev == BGE_ASICREV_BCM5762)
5529 mode |= BGE_RXMODE_IPV4_FRAG_FIX;
5530 CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE);
5531 DELAY(10);
5532
5533 /*
5534 * Set the number of good frames to receive after RX MBUF
5535 * Low Watermark has been reached. After the RX MAC receives
5536 * this number of frames, it will drop subsequent incoming
5537 * frames until the MBUF High Watermark is reached.
5538 */
5539 if (BGE_IS_57765_PLUS(sc))
5540 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1);
5541 else
5542 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
5543
5544 /* Clear MAC statistics. */
5545 if (BGE_IS_5705_PLUS(sc))
5546 bge_stats_clear_regs(sc);
5547
5548 /* Tell firmware we're alive. */
5549 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
5550
5551 #ifdef DEVICE_POLLING
5552 /* Disable interrupts if we are polling. */
5553 if (if_getcapenable(ifp) & IFCAP_POLLING) {
5554 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5555 BGE_PCIMISCCTL_MASK_PCI_INTR);
5556 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5557 } else
5558 #endif
5559
5560 /* Enable host interrupts. */
5561 {
5562 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
5563 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5564 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5565 }
5566
5567 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
5568 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
5569
5570 bge_ifmedia_upd_locked(ifp);
5571
5572 callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc);
5573 }
5574
5575 static void
5576 bge_init(void *xsc)
5577 {
5578 struct bge_softc *sc = xsc;
5579
5580 BGE_LOCK(sc);
5581 bge_init_locked(sc);
5582 BGE_UNLOCK(sc);
5583 }
5584
5585 /*
5586 * Set media options.
5587 */
5588 static int
5589 bge_ifmedia_upd(if_t ifp)
5590 {
5591 struct bge_softc *sc = if_getsoftc(ifp);
5592 int res;
5593
5594 BGE_LOCK(sc);
5595 res = bge_ifmedia_upd_locked(ifp);
5596 BGE_UNLOCK(sc);
5597
5598 return (res);
5599 }
5600
5601 static int
5602 bge_ifmedia_upd_locked(if_t ifp)
5603 {
5604 struct bge_softc *sc = if_getsoftc(ifp);
5605 struct mii_data *mii;
5606 struct mii_softc *miisc;
5607 struct ifmedia *ifm;
5608
5609 BGE_LOCK_ASSERT(sc);
5610
5611 ifm = &sc->bge_ifmedia;
5612
5613 /* If this is a 1000baseX NIC, enable the TBI port. */
5614 if (sc->bge_flags & BGE_FLAG_TBI) {
5615 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
5616 return (EINVAL);
5617 switch(IFM_SUBTYPE(ifm->ifm_media)) {
5618 case IFM_AUTO:
5619 /*
5620 * The BCM5704 ASIC appears to have a special
5621 * mechanism for programming the autoneg
5622 * advertisement registers in TBI mode.
5623 */
5624 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
5625 uint32_t sgdig;
5626 sgdig = CSR_READ_4(sc, BGE_SGDIG_STS);
5627 if (sgdig & BGE_SGDIGSTS_DONE) {
5628 CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
5629 sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
5630 sgdig |= BGE_SGDIGCFG_AUTO |
5631 BGE_SGDIGCFG_PAUSE_CAP |
5632 BGE_SGDIGCFG_ASYM_PAUSE;
5633 CSR_WRITE_4(sc, BGE_SGDIG_CFG,
5634 sgdig | BGE_SGDIGCFG_SEND);
5635 DELAY(5);
5636 CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
5637 }
5638 }
5639 break;
5640 case IFM_1000_SX:
5641 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
5642 BGE_CLRBIT(sc, BGE_MAC_MODE,
5643 BGE_MACMODE_HALF_DUPLEX);
5644 } else {
5645 BGE_SETBIT(sc, BGE_MAC_MODE,
5646 BGE_MACMODE_HALF_DUPLEX);
5647 }
5648 DELAY(40);
5649 break;
5650 default:
5651 return (EINVAL);
5652 }
5653 return (0);
5654 }
5655
5656 sc->bge_link_evt++;
5657 mii = device_get_softc(sc->bge_miibus);
5658 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
5659 PHY_RESET(miisc);
5660 mii_mediachg(mii);
5661
5662 	/*
5663 	 * Force an interrupt so that we will call bge_link_upd
5664 	 * if needed and clear any pending link state attention.
5665 	 * Without this we would not get any further interrupts
5666 	 * for link state changes, so the link would never come up
5667 	 * and we would be unable to send in bge_start_locked. The
5668 	 * only other way observed to get things working was to
5669 	 * receive a packet and take an RX interrupt.
5670 	 * bge_tick should help for fiber cards, and we might not
5671 	 * need to do this here if BGE_FLAG_TBI is set, but since
5672 	 * we poll for fiber anyway it should not hurt.
5673 	 */
5674 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 ||
5675 sc->bge_flags & BGE_FLAG_5788)
5676 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
5677 else
5678 BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW);
5679
5680 return (0);
5681 }
5682
5683 /*
5684 * Report current media status.
5685 */
5686 static void
5687 bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
5688 {
5689 struct bge_softc *sc = if_getsoftc(ifp);
5690 struct mii_data *mii;
5691
5692 BGE_LOCK(sc);
5693
5694 if ((if_getflags(ifp) & IFF_UP) == 0) {
5695 BGE_UNLOCK(sc);
5696 return;
5697 }
5698 if (sc->bge_flags & BGE_FLAG_TBI) {
5699 ifmr->ifm_status = IFM_AVALID;
5700 ifmr->ifm_active = IFM_ETHER;
5701 if (CSR_READ_4(sc, BGE_MAC_STS) &
5702 BGE_MACSTAT_TBI_PCS_SYNCHED)
5703 ifmr->ifm_status |= IFM_ACTIVE;
5704 else {
5705 ifmr->ifm_active |= IFM_NONE;
5706 BGE_UNLOCK(sc);
5707 return;
5708 }
5709 ifmr->ifm_active |= IFM_1000_SX;
5710 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
5711 ifmr->ifm_active |= IFM_HDX;
5712 else
5713 ifmr->ifm_active |= IFM_FDX;
5714 BGE_UNLOCK(sc);
5715 return;
5716 }
5717
5718 mii = device_get_softc(sc->bge_miibus);
5719 mii_pollstat(mii);
5720 ifmr->ifm_active = mii->mii_media_active;
5721 ifmr->ifm_status = mii->mii_media_status;
5722
5723 BGE_UNLOCK(sc);
5724 }
5725
5726 static int
5727 bge_ioctl(if_t ifp, u_long command, caddr_t data)
5728 {
5729 struct bge_softc *sc = if_getsoftc(ifp);
5730 struct ifreq *ifr = (struct ifreq *) data;
5731 struct mii_data *mii;
5732 int flags, mask, error = 0;
5733
5734 switch (command) {
5735 case SIOCSIFMTU:
5736 if (BGE_IS_JUMBO_CAPABLE(sc) ||
5737 (sc->bge_flags & BGE_FLAG_JUMBO_STD)) {
5738 if (ifr->ifr_mtu < ETHERMIN ||
5739 ifr->ifr_mtu > BGE_JUMBO_MTU) {
5740 error = EINVAL;
5741 break;
5742 }
5743 } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) {
5744 error = EINVAL;
5745 break;
5746 }
5747 BGE_LOCK(sc);
5748 if (if_getmtu(ifp) != ifr->ifr_mtu) {
5749 if_setmtu(ifp, ifr->ifr_mtu);
5750 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5751 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
5752 bge_init_locked(sc);
5753 }
5754 }
5755 BGE_UNLOCK(sc);
5756 break;
5757 case SIOCSIFFLAGS:
5758 BGE_LOCK(sc);
5759 if (if_getflags(ifp) & IFF_UP) {
5760 /*
5761 * If only the state of the PROMISC flag changed,
5762 * then just use the 'set promisc mode' command
5763 * instead of reinitializing the entire NIC. Doing
5764 * a full re-init means reloading the firmware and
5765 * waiting for it to start up, which may take a
5766 * second or two. Similarly for ALLMULTI.
5767 */
5768 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5769 flags = if_getflags(ifp) ^ sc->bge_if_flags;
5770 if (flags & IFF_PROMISC)
5771 bge_setpromisc(sc);
5772 if (flags & IFF_ALLMULTI)
5773 bge_setmulti(sc);
5774 } else
5775 bge_init_locked(sc);
5776 } else {
5777 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5778 bge_stop(sc);
5779 }
5780 }
5781 sc->bge_if_flags = if_getflags(ifp);
5782 BGE_UNLOCK(sc);
5783 error = 0;
5784 break;
5785 case SIOCADDMULTI:
5786 case SIOCDELMULTI:
5787 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
5788 BGE_LOCK(sc);
5789 bge_setmulti(sc);
5790 BGE_UNLOCK(sc);
5791 error = 0;
5792 }
5793 break;
5794 case SIOCSIFMEDIA:
5795 case SIOCGIFMEDIA:
5796 if (sc->bge_flags & BGE_FLAG_TBI) {
5797 error = ifmedia_ioctl(ifp, ifr,
5798 &sc->bge_ifmedia, command);
5799 } else {
5800 mii = device_get_softc(sc->bge_miibus);
5801 error = ifmedia_ioctl(ifp, ifr,
5802 &mii->mii_media, command);
5803 }
5804 break;
5805 case SIOCSIFCAP:
5806 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
5807 #ifdef DEVICE_POLLING
5808 if (mask & IFCAP_POLLING) {
5809 if (ifr->ifr_reqcap & IFCAP_POLLING) {
5810 error = ether_poll_register(bge_poll, ifp);
5811 if (error)
5812 return (error);
5813 BGE_LOCK(sc);
5814 BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
5815 BGE_PCIMISCCTL_MASK_PCI_INTR);
5816 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5817 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
5818 BGE_UNLOCK(sc);
5819 } else {
5820 error = ether_poll_deregister(ifp);
5821 /* Enable interrupt even in error case */
5822 BGE_LOCK(sc);
5823 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
5824 BGE_PCIMISCCTL_MASK_PCI_INTR);
5825 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
5826 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
5827 BGE_UNLOCK(sc);
5828 }
5829 }
5830 #endif
5831 if ((mask & IFCAP_TXCSUM) != 0 &&
5832 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
5833 if_togglecapenable(ifp, IFCAP_TXCSUM);
5834 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
5835 if_sethwassistbits(ifp,
5836 sc->bge_csum_features, 0);
5837 else
5838 if_sethwassistbits(ifp, 0,
5839 sc->bge_csum_features);
5840 }
5841
5842 if ((mask & IFCAP_RXCSUM) != 0 &&
5843 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
5844 if_togglecapenable(ifp, IFCAP_RXCSUM);
5845
5846 if ((mask & IFCAP_TSO4) != 0 &&
5847 (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
5848 if_togglecapenable(ifp, IFCAP_TSO4);
5849 if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
5850 if_sethwassistbits(ifp, CSUM_TSO, 0);
5851 else
5852 if_sethwassistbits(ifp, 0, CSUM_TSO);
5853 }
5854
5855 if (mask & IFCAP_VLAN_MTU) {
5856 if_togglecapenable(ifp, IFCAP_VLAN_MTU);
5857 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
5858 bge_init(sc);
5859 }
5860
5861 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
5862 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
5863 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
5864 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
5865 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
5866 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
5867 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
5868 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
5869 BGE_LOCK(sc);
5870 bge_setvlan(sc);
5871 BGE_UNLOCK(sc);
5872 }
5873 #ifdef VLAN_CAPABILITIES
5874 if_vlancap(ifp);
5875 #endif
5876 break;
5877 default:
5878 error = ether_ioctl(ifp, command, data);
5879 break;
5880 }
5881
5882 return (error);
5883 }
5884
5885 static void
5886 bge_watchdog(struct bge_softc *sc)
5887 {
5888 if_t ifp;
5889 uint32_t status;
5890
5891 BGE_LOCK_ASSERT(sc);
5892
5893 if (sc->bge_timer == 0 || --sc->bge_timer)
5894 return;
5895
5896 /* If pause frames are active then don't reset the hardware. */
5897 if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) {
5898 status = CSR_READ_4(sc, BGE_RX_STS);
5899 if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) {
5900 /*
5901 * If link partner has us in XOFF state then wait for
5902 * the condition to clear.
5903 */
5904 CSR_WRITE_4(sc, BGE_RX_STS, status);
5905 sc->bge_timer = BGE_TX_TIMEOUT;
5906 return;
5907 } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 &&
5908 (status & BGE_RXSTAT_RCVD_XON) != 0) {
5909 /*
5910 * If link partner has us in XOFF state then wait for
5911 * the condition to clear.
5912 */
5913 CSR_WRITE_4(sc, BGE_RX_STS, status);
5914 sc->bge_timer = BGE_TX_TIMEOUT;
5915 return;
5916 }
5917 /*
5918 * Any other condition is unexpected and the controller
5919 * should be reset.
5920 */
5921 }
5922
5923 ifp = sc->bge_ifp;
5924
5925 if_printf(ifp, "watchdog timeout -- resetting\n");
5926
5927 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
5928 bge_init_locked(sc);
5929
5930 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
5931 }
5932
5933 static void
5934 bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
5935 {
5936 int i;
5937
5938 BGE_CLRBIT(sc, reg, bit);
5939
5940 for (i = 0; i < BGE_TIMEOUT; i++) {
5941 if ((CSR_READ_4(sc, reg) & bit) == 0)
5942 return;
5943 DELAY(100);
5944 }
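	/*
	 * If the block never reports itself halted, give up silently after
	 * BGE_TIMEOUT polls (100us apart) and let the caller carry on with
	 * the shutdown.
	 */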
5945 }
5946
5947 /*
5948 * Stop the adapter and free any mbufs allocated to the
5949 * RX and TX lists.
5950 */
5951 static void
5952 bge_stop(struct bge_softc *sc)
5953 {
5954 if_t ifp;
5955
5956 BGE_LOCK_ASSERT(sc);
5957
5958 ifp = sc->bge_ifp;
5959
5960 callout_stop(&sc->bge_stat_ch);
5961
5962 /* Disable host interrupts. */
5963 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
5964 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
5965
5966 /*
5967 * Tell firmware we're shutting down.
5968 */
5969 bge_stop_fw(sc);
5970 bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN);
5971
5972 /*
5973 * Disable all of the receiver blocks.
5974 */
5975 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
5976 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
5977 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
5978 if (BGE_IS_5700_FAMILY(sc))
5979 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
5980 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
5981 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
5982 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
5983
5984 /*
5985 * Disable all of the transmit blocks.
5986 */
5987 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
5988 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
5989 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
5990 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
5991 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
5992 if (BGE_IS_5700_FAMILY(sc))
5993 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
5994 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
5995
5996 /*
5997 * Shut down all of the memory managers and related
5998 * state machines.
5999 */
6000 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
6001 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
6002 if (BGE_IS_5700_FAMILY(sc))
6003 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
6004
6005 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
6006 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
6007 if (!(BGE_IS_5705_PLUS(sc))) {
6008 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
6009 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
6010 }
6011 /* Update MAC statistics. */
6012 if (BGE_IS_5705_PLUS(sc))
6013 bge_stats_update_regs(sc);
6014
6015 bge_reset(sc);
6016 bge_sig_legacy(sc, BGE_RESET_SHUTDOWN);
6017 bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN);
6018
6019 /*
6020 * Keep the ASF firmware running if up.
6021 */
6022 if (sc->bge_asf_mode & ASF_STACKUP)
6023 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6024 else
6025 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
6026
6027 /* Free the RX lists. */
6028 bge_free_rx_ring_std(sc);
6029
6030 /* Free jumbo RX list. */
6031 if (BGE_IS_JUMBO_CAPABLE(sc))
6032 bge_free_rx_ring_jumbo(sc);
6033
6034 /* Free TX buffers. */
6035 bge_free_tx_ring(sc);
6036
6037 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
6038
6039 /* Clear MAC's link state (PHY may still have link UP). */
6040 if (bootverbose && sc->bge_link)
6041 if_printf(sc->bge_ifp, "link DOWN\n");
6042 sc->bge_link = 0;
6043
6044 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
6045 }
6046
6047 /*
6048 * Stop all chip I/O so that the kernel's probe routines don't
6049 * get confused by errant DMAs when rebooting.
6050 */
6051 static int
6052 bge_shutdown(device_t dev)
6053 {
6054 struct bge_softc *sc;
6055
6056 sc = device_get_softc(dev);
6057 BGE_LOCK(sc);
6058 bge_stop(sc);
6059 BGE_UNLOCK(sc);
6060
6061 return (0);
6062 }
6063
6064 static int
6065 bge_suspend(device_t dev)
6066 {
6067 struct bge_softc *sc;
6068
6069 sc = device_get_softc(dev);
6070 BGE_LOCK(sc);
6071 bge_stop(sc);
6072 BGE_UNLOCK(sc);
6073
6074 return (0);
6075 }
6076
6077 static int
6078 bge_resume(device_t dev)
6079 {
6080 struct bge_softc *sc;
6081 if_t ifp;
6082
6083 sc = device_get_softc(dev);
6084 BGE_LOCK(sc);
6085 ifp = sc->bge_ifp;
6086 if (if_getflags(ifp) & IFF_UP) {
6087 bge_init_locked(sc);
6088 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
6089 bge_start_locked(ifp);
6090 }
6091 BGE_UNLOCK(sc);
6092
6093 return (0);
6094 }
6095
6096 static void
6097 bge_link_upd(struct bge_softc *sc)
6098 {
6099 struct mii_data *mii;
6100 uint32_t link, status;
6101
6102 BGE_LOCK_ASSERT(sc);
6103
6104 /* Clear 'pending link event' flag. */
6105 sc->bge_link_evt = 0;
6106
6107 /*
6108 * Process link state changes.
6109 * Grrr. The link status word in the status block does
6110 * not work correctly on the BCM5700 rev AX and BX chips,
6111 * according to all available information. Hence, we have
6112 * to enable MII interrupts in order to properly obtain
6113 * async link changes. Unfortunately, this also means that
6114 * we have to read the MAC status register to detect link
6115 * changes, thereby adding an additional register access to
6116 * the interrupt handler.
6117 *
6118 	 * XXX: perhaps the link state detection procedure used for
6119 	 * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
6120 */
6121
6122 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6123 sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
6124 status = CSR_READ_4(sc, BGE_MAC_STS);
6125 if (status & BGE_MACSTAT_MI_INTERRUPT) {
6126 mii = device_get_softc(sc->bge_miibus);
6127 mii_pollstat(mii);
6128 if (!sc->bge_link &&
6129 mii->mii_media_status & IFM_ACTIVE &&
6130 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6131 sc->bge_link++;
6132 if (bootverbose)
6133 if_printf(sc->bge_ifp, "link UP\n");
6134 } else if (sc->bge_link &&
6135 (!(mii->mii_media_status & IFM_ACTIVE) ||
6136 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
6137 sc->bge_link = 0;
6138 if (bootverbose)
6139 if_printf(sc->bge_ifp, "link DOWN\n");
6140 }
6141
6142 /* Clear the interrupt. */
6143 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
6144 BGE_EVTENB_MI_INTERRUPT);
6145 bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr,
6146 BRGPHY_MII_ISR);
6147 bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr,
6148 BRGPHY_MII_IMR, BRGPHY_INTRS);
6149 }
6150 return;
6151 }
6152
6153 if (sc->bge_flags & BGE_FLAG_TBI) {
6154 status = CSR_READ_4(sc, BGE_MAC_STS);
6155 if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
6156 if (!sc->bge_link) {
6157 sc->bge_link++;
6158 if (sc->bge_asicrev == BGE_ASICREV_BCM5704) {
6159 BGE_CLRBIT(sc, BGE_MAC_MODE,
6160 BGE_MACMODE_TBI_SEND_CFGS);
6161 DELAY(40);
6162 }
6163 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
6164 if (bootverbose)
6165 if_printf(sc->bge_ifp, "link UP\n");
6166 if_link_state_change(sc->bge_ifp,
6167 LINK_STATE_UP);
6168 }
6169 } else if (sc->bge_link) {
6170 sc->bge_link = 0;
6171 if (bootverbose)
6172 if_printf(sc->bge_ifp, "link DOWN\n");
6173 if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN);
6174 }
6175 } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) {
6176 /*
6177 		 * Some broken BCM chips have the BGE_STATFLAG_LINKSTATE_CHANGED
6178 		 * bit in the status word always set. Work around this bug by
6179 		 * reading the PHY link status directly.
6180 */
6181 link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
6182
6183 if (link != sc->bge_link ||
6184 sc->bge_asicrev == BGE_ASICREV_BCM5700) {
6185 mii = device_get_softc(sc->bge_miibus);
6186 mii_pollstat(mii);
6187 if (!sc->bge_link &&
6188 mii->mii_media_status & IFM_ACTIVE &&
6189 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6190 sc->bge_link++;
6191 if (bootverbose)
6192 if_printf(sc->bge_ifp, "link UP\n");
6193 } else if (sc->bge_link &&
6194 (!(mii->mii_media_status & IFM_ACTIVE) ||
6195 IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
6196 sc->bge_link = 0;
6197 if (bootverbose)
6198 if_printf(sc->bge_ifp, "link DOWN\n");
6199 }
6200 }
6201 } else {
6202 /*
6203 * For controllers that call mii_tick, we have to poll
6204 * link status.
6205 */
6206 mii = device_get_softc(sc->bge_miibus);
6207 mii_pollstat(mii);
6208 bge_miibus_statchg(sc->bge_dev);
6209 }
6210
6211 /* Disable MAC attention when link is up. */
6212 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED |
6213 BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE |
6214 BGE_MACSTAT_LINK_CHANGED);
6215 }
6216
6217 static void
6218 bge_add_sysctls(struct bge_softc *sc)
6219 {
6220 struct sysctl_ctx_list *ctx;
6221 struct sysctl_oid_list *children;
6222
6223 ctx = device_get_sysctl_ctx(sc->bge_dev);
6224 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev));
6225
6226 #ifdef BGE_REGISTER_DEBUG
6227 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info",
6228 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6229 bge_sysctl_debug_info, "I", "Debug Information");
6230
6231 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read",
6232 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6233 bge_sysctl_reg_read, "I", "MAC Register Read");
6234
6235 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read",
6236 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6237 bge_sysctl_ape_read, "I", "APE Register Read");
6238
6239 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read",
6240 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
6241 bge_sysctl_mem_read, "I", "Memory Read");
6242
6243 #endif
6244
6245 /*
6246 * A common design characteristic for many Broadcom client controllers
6247 * is that they only support a single outstanding DMA read operation
6248 * on the PCIe bus. This means that it will take twice as long to fetch
6249 * a TX frame that is split into header and payload buffers as it does
6250 * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For
6251 * these controllers, coalescing buffers to reduce the number of memory
6252 	 * reads is an effective way to get maximum performance (about 940Mbps).
6253 	 * Without collapsing TX buffers the maximum TCP bulk transfer
6254 	 * performance is about 850Mbps. However, forcibly coalescing mbufs
6255 	 * consumes a lot of CPU cycles, so leave it off by default.
6256 */
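	/*
	 * For example, setting the loader tunable or sysctl
	 * dev.bge.0.forced_collapse=1 defragments every multi-mbuf frame
	 * into a single contiguous buffer before transmission, while a
	 * value N > 1 collapses chains down to at most N mbufs.
	 */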
6257 sc->bge_forced_collapse = 0;
6258 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse",
6259 CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0,
6260 "Number of fragmented TX buffers of a frame allowed before "
6261 "forced collapsing");
6262
6263 sc->bge_msi = 1;
6264 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi",
6265 CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI");
6266
6267 /*
6268 * It seems all Broadcom controllers have a bug that can generate UDP
6269 * datagrams with checksum value 0 when TX UDP checksum offloading is
6270 	 * enabled. Generating a UDP checksum value of 0 is a violation of
6271 	 * RFC 768. Even though the probability of generating such UDP
6272 	 * datagrams is low, I don't want FreeBSD boxes injecting such
6273 	 * datagrams into the network, so disable UDP checksum offloading
6274 	 * by default. Users can still override this behavior by setting
6275 	 * the sysctl variable dev.bge.0.forced_udpcsum.
6276 */
6277 sc->bge_forced_udpcsum = 0;
6278 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum",
6279 CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0,
6280 "Enable UDP checksum offloading even if controller can "
6281 "generate UDP checksum value 0");
6282
6283 if (BGE_IS_5705_PLUS(sc))
6284 bge_add_sysctl_stats_regs(sc, ctx, children);
6285 else
6286 bge_add_sysctl_stats(sc, ctx, children);
6287 }
6288
6289 #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \
6290 SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, \
6291 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, \
6292 offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", desc)
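/*
 * BGE_SYSCTL_STAT() exports a single counter from the NIC-resident
 * statistics block as a read-only sysctl; the offset of the field within
 * struct bge_stats is passed as arg2 and resolved at read time by
 * bge_sysctl_stats() below.
 */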
6293
6294 static void
6295 bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
6296 struct sysctl_oid_list *parent)
6297 {
6298 struct sysctl_oid *tree;
6299 struct sysctl_oid_list *children, *schildren;
6300
6301 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
6302 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics");
6303 schildren = children = SYSCTL_CHILDREN(tree);
6304 BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters",
6305 children, COSFramesDroppedDueToFilters,
6306 "FramesDroppedDueToFilters");
6307 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full",
6308 children, nicDmaWriteQueueFull, "DmaWriteQueueFull");
6309 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full",
6310 children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull");
6311 BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors",
6312 children, nicNoMoreRxBDs, "NoMoreRxBDs");
6313 BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames",
6314 children, ifInDiscards, "InputDiscards");
6315 BGE_SYSCTL_STAT(sc, ctx, "Input Errors",
6316 children, ifInErrors, "InputErrors");
6317 BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit",
6318 children, nicRecvThresholdHit, "RecvThresholdHit");
6319 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full",
6320 children, nicDmaReadQueueFull, "DmaReadQueueFull");
6321 BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full",
6322 children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull");
6323 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full",
6324 children, nicSendDataCompQueueFull, "SendDataCompQueueFull");
6325 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index",
6326 children, nicRingSetSendProdIndex, "RingSetSendProdIndex");
6327 BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update",
6328 children, nicRingStatusUpdate, "RingStatusUpdate");
6329 BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts",
6330 children, nicInterrupts, "Interrupts");
6331 BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts",
6332 children, nicAvoidedInterrupts, "AvoidedInterrupts");
6333 BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit",
6334 children, nicSendThresholdHit, "SendThresholdHit");
6335
6336 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx",
6337 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
6338 children = SYSCTL_CHILDREN(tree);
6339 BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets",
6340 children, rxstats.ifHCInOctets, "ifHCInOctets");
6341 BGE_SYSCTL_STAT(sc, ctx, "Fragments",
6342 children, rxstats.etherStatsFragments, "Fragments");
6343 BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets",
6344 children, rxstats.ifHCInUcastPkts, "UnicastPkts");
6345 BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets",
6346 children, rxstats.ifHCInMulticastPkts, "MulticastPkts");
6347 BGE_SYSCTL_STAT(sc, ctx, "FCS Errors",
6348 children, rxstats.dot3StatsFCSErrors, "FCSErrors");
6349 BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors",
6350 children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors");
6351 BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received",
6352 children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived");
6353 BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received",
6354 children, rxstats.xoffPauseFramesReceived,
6355 "xoffPauseFramesReceived");
6356 BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received",
6357 children, rxstats.macControlFramesReceived,
6358 "ControlFramesReceived");
6359 BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered",
6360 children, rxstats.xoffStateEntered, "xoffStateEntered");
6361 BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long",
6362 children, rxstats.dot3StatsFramesTooLong, "FramesTooLong");
6363 BGE_SYSCTL_STAT(sc, ctx, "Jabbers",
6364 children, rxstats.etherStatsJabbers, "Jabbers");
6365 BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets",
6366 children, rxstats.etherStatsUndersizePkts, "UndersizePkts");
6367 BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors",
6368 children, rxstats.inRangeLengthError, "inRangeLengthError");
6369 BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors",
6370 children, rxstats.outRangeLengthError, "outRangeLengthError");
6371
6372 tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx",
6373 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics");
6374 children = SYSCTL_CHILDREN(tree);
6375 BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets",
6376 children, txstats.ifHCOutOctets, "ifHCOutOctets");
6377 BGE_SYSCTL_STAT(sc, ctx, "TX Collisions",
6378 children, txstats.etherStatsCollisions, "Collisions");
6379 BGE_SYSCTL_STAT(sc, ctx, "XON Sent",
6380 children, txstats.outXonSent, "XonSent");
6381 BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent",
6382 children, txstats.outXoffSent, "XoffSent");
6383 BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done",
6384 children, txstats.flowControlDone, "flowControlDone");
6385 BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors",
6386 children, txstats.dot3StatsInternalMacTransmitErrors,
6387 "InternalMacTransmitErrors");
6388 BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames",
6389 children, txstats.dot3StatsSingleCollisionFrames,
6390 "SingleCollisionFrames");
6391 BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames",
6392 children, txstats.dot3StatsMultipleCollisionFrames,
6393 "MultipleCollisionFrames");
6394 BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions",
6395 children, txstats.dot3StatsDeferredTransmissions,
6396 "DeferredTransmissions");
6397 BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions",
6398 children, txstats.dot3StatsExcessiveCollisions,
6399 "ExcessiveCollisions");
6400 BGE_SYSCTL_STAT(sc, ctx, "Late Collisions",
6401 children, txstats.dot3StatsLateCollisions,
6402 "LateCollisions");
6403 BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets",
6404 children, txstats.ifHCOutUcastPkts, "UnicastPkts");
6405 BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets",
6406 children, txstats.ifHCOutMulticastPkts, "MulticastPkts");
6407 BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets",
6408 children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts");
6409 BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors",
6410 children, txstats.dot3StatsCarrierSenseErrors,
6411 "CarrierSenseErrors");
6412 BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards",
6413 children, txstats.ifOutDiscards, "Discards");
6414 BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors",
6415 children, txstats.ifOutErrors, "Errors");
6416 }
6417
6418 #undef BGE_SYSCTL_STAT
6419
6420 #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
6421 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
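/*
 * Unlike BGE_SYSCTL_STAT() above, which reads the NIC statistics block on
 * demand, this variant publishes a 64-bit counter kept in the softc's
 * bge_mac_stats, which the driver updates from the hardware registers.
 */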
6422
6423 static void
6424 bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx,
6425 struct sysctl_oid_list *parent)
6426 {
6427 struct sysctl_oid *tree;
6428 struct sysctl_oid_list *child, *schild;
6429 struct bge_mac_stats *stats;
6430
6431 stats = &sc->bge_mac_stats;
6432 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats",
6433 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE Statistics");
6434 schild = child = SYSCTL_CHILDREN(tree);
6435 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters",
6436 &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters");
6437 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull",
6438 &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full");
6439 BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull",
6440 &stats->DmaWriteHighPriQueueFull,
6441 "NIC DMA Write High Priority Queue Full");
6442 BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs",
6443 &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors");
6444 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards",
6445 &stats->InputDiscards, "Discarded Input Frames");
6446 BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors",
6447 &stats->InputErrors, "Input Errors");
6448 BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit",
6449 &stats->RecvThresholdHit, "NIC Recv Threshold Hit");
6450
6451 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx",
6452 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE RX Statistics");
6453 child = SYSCTL_CHILDREN(tree);
6454 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets",
6455 &stats->ifHCInOctets, "Inbound Octets");
6456 BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments",
6457 &stats->etherStatsFragments, "Fragments");
6458 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
6459 &stats->ifHCInUcastPkts, "Inbound Unicast Packets");
6460 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
6461 &stats->ifHCInMulticastPkts, "Inbound Multicast Packets");
6462 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
6463 &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets");
6464 BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors",
6465 &stats->dot3StatsFCSErrors, "FCS Errors");
6466 BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors",
6467 &stats->dot3StatsAlignmentErrors, "Alignment Errors");
6468 BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived",
6469 &stats->xonPauseFramesReceived, "XON Pause Frames Received");
6470 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived",
6471 &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received");
6472 BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived",
6473 &stats->macControlFramesReceived, "MAC Control Frames Received");
6474 BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered",
6475 &stats->xoffStateEntered, "XOFF State Entered");
6476 BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong",
6477 &stats->dot3StatsFramesTooLong, "Frames Too Long");
6478 BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers",
6479 &stats->etherStatsJabbers, "Jabbers");
6480 BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts",
6481 &stats->etherStatsUndersizePkts, "Undersized Packets");
6482
6483 tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx",
6484 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "BGE TX Statistics");
6485 child = SYSCTL_CHILDREN(tree);
6486 BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets",
6487 &stats->ifHCOutOctets, "Outbound Octets");
6488 BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions",
6489 &stats->etherStatsCollisions, "TX Collisions");
6490 BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent",
6491 &stats->outXonSent, "XON Sent");
6492 BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent",
6493 &stats->outXoffSent, "XOFF Sent");
6494 BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors",
6495 &stats->dot3StatsInternalMacTransmitErrors,
6496 "Internal MAC TX Errors");
6497 BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames",
6498 &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames");
6499 BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames",
6500 &stats->dot3StatsMultipleCollisionFrames,
6501 "Multiple Collision Frames");
6502 BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions",
6503 &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions");
6504 BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions",
6505 &stats->dot3StatsExcessiveCollisions, "Excessive Collisions");
6506 BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions",
6507 &stats->dot3StatsLateCollisions, "Late Collisions");
6508 BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts",
6509 &stats->ifHCOutUcastPkts, "Outbound Unicast Packets");
6510 BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts",
6511 &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets");
6512 BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts",
6513 &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets");
6514 }
6515
6516 #undef BGE_SYSCTL_STAT_ADD64
6517
6518 static int
6519 bge_sysctl_stats(SYSCTL_HANDLER_ARGS)
6520 {
6521 struct bge_softc *sc;
6522 uint32_t result;
6523 int offset;
6524
6525 sc = (struct bge_softc *)arg1;
6526 offset = arg2;
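	/*
	 * arg2 carries the offset of the requested counter within struct
	 * bge_stats; read the low 32 bits of that counter through the
	 * memory window into the NIC statistics block.
	 */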
6527 result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset +
6528 offsetof(bge_hostaddr, bge_addr_lo));
6529 return (sysctl_handle_int(oidp, &result, 0, req));
6530 }
6531
6532 #ifdef BGE_REGISTER_DEBUG
6533 static int
6534 bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
6535 {
6536 struct bge_softc *sc;
6537 uint16_t *sbdata;
6538 int error, result, sbsz;
6539 int i, j;
6540
6541 result = -1;
6542 error = sysctl_handle_int(oidp, &result, 0, req);
6543 if (error || (req->newptr == NULL))
6544 return (error);
6545
6546 if (result == 1) {
6547 sc = (struct bge_softc *)arg1;
6548
6549 if (sc->bge_asicrev == BGE_ASICREV_BCM5700 &&
6550 sc->bge_chipid != BGE_CHIPID_BCM5700_C0)
6551 sbsz = BGE_STATUS_BLK_SZ;
6552 else
6553 sbsz = 32;
6554 sbdata = (uint16_t *)sc->bge_ldata.bge_status_block;
6555 printf("Status Block:\n");
6556 BGE_LOCK(sc);
6557 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6558 sc->bge_cdata.bge_status_map,
6559 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6560 for (i = 0x0; i < sbsz / sizeof(uint16_t); ) {
6561 printf("%06x:", i);
6562 for (j = 0; j < 8; j++)
6563 printf(" %04x", sbdata[i++]);
6564 printf("\n");
6565 }
6566
6567 printf("Registers:\n");
6568 for (i = 0x800; i < 0xA00; ) {
6569 printf("%06x:", i);
6570 for (j = 0; j < 8; j++) {
6571 printf(" %08x", CSR_READ_4(sc, i));
6572 i += 4;
6573 }
6574 printf("\n");
6575 }
6576 BGE_UNLOCK(sc);
6577
6578 printf("Hardware Flags:\n");
6579 if (BGE_IS_5717_PLUS(sc))
6580 printf(" - 5717 Plus\n");
6581 if (BGE_IS_5755_PLUS(sc))
6582 printf(" - 5755 Plus\n");
6583 if (BGE_IS_575X_PLUS(sc))
6584 printf(" - 575X Plus\n");
6585 if (BGE_IS_5705_PLUS(sc))
6586 printf(" - 5705 Plus\n");
6587 if (BGE_IS_5714_FAMILY(sc))
6588 printf(" - 5714 Family\n");
6589 if (BGE_IS_5700_FAMILY(sc))
6590 printf(" - 5700 Family\n");
6591 if (sc->bge_flags & BGE_FLAG_JUMBO)
6592 printf(" - Supports Jumbo Frames\n");
6593 if (sc->bge_flags & BGE_FLAG_PCIX)
6594 printf(" - PCI-X Bus\n");
6595 if (sc->bge_flags & BGE_FLAG_PCIE)
6596 printf(" - PCI Express Bus\n");
6597 if (sc->bge_phy_flags & BGE_PHY_NO_3LED)
6598 printf(" - No 3 LEDs\n");
6599 if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG)
6600 printf(" - RX Alignment Bug\n");
6601 }
6602
6603 return (error);
6604 }
6605
6606 static int
6607 bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS)
6608 {
6609 struct bge_softc *sc;
6610 int error;
6611 uint16_t result;
6612 uint32_t val;
6613
6614 result = -1;
6615 error = sysctl_handle_int(oidp, &result, 0, req);
6616 if (error || (req->newptr == NULL))
6617 return (error);
6618
6619 if (result < 0x8000) {
6620 sc = (struct bge_softc *)arg1;
6621 val = CSR_READ_4(sc, result);
6622 printf("reg 0x%06X = 0x%08X\n", result, val);
6623 }
6624
6625 return (error);
6626 }
6627
6628 static int
6629 bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS)
6630 {
6631 struct bge_softc *sc;
6632 int error;
6633 uint16_t result;
6634 uint32_t val;
6635
6636 result = -1;
6637 error = sysctl_handle_int(oidp, &result, 0, req);
6638 if (error || (req->newptr == NULL))
6639 return (error);
6640
6641 if (result < 0x8000) {
6642 sc = (struct bge_softc *)arg1;
6643 val = APE_READ_4(sc, result);
6644 printf("reg 0x%06X = 0x%08X\n", result, val);
6645 }
6646
6647 return (error);
6648 }
6649
6650 static int
6651 bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS)
6652 {
6653 struct bge_softc *sc;
6654 int error;
6655 uint16_t result;
6656 uint32_t val;
6657
6658 result = -1;
6659 error = sysctl_handle_int(oidp, &result, 0, req);
6660 if (error || (req->newptr == NULL))
6661 return (error);
6662
6663 if (result < 0x8000) {
6664 sc = (struct bge_softc *)arg1;
6665 val = bge_readmem_ind(sc, result);
6666 printf("mem 0x%06X = 0x%08X\n", result, val);
6667 }
6668
6669 return (error);
6670 }
6671 #endif
6672
6673 static int
6674 bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[])
6675 {
6676 return (1);
6677 }
6678
6679 static int
6680 bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[])
6681 {
6682 uint32_t mac_addr;
6683
6684 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB);
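	/*
	 * An ASCII "HK" signature (0x484b) in the upper 16 bits marks a MAC
	 * address that firmware has stored in SRAM; the address bytes below
	 * are only trusted when that signature is present.
	 */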
6685 if ((mac_addr >> 16) == 0x484b) {
6686 ether_addr[0] = (uint8_t)(mac_addr >> 8);
6687 ether_addr[1] = (uint8_t)mac_addr;
6688 mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB);
6689 ether_addr[2] = (uint8_t)(mac_addr >> 24);
6690 ether_addr[3] = (uint8_t)(mac_addr >> 16);
6691 ether_addr[4] = (uint8_t)(mac_addr >> 8);
6692 ether_addr[5] = (uint8_t)mac_addr;
6693 return (0);
6694 }
6695 return (1);
6696 }
6697
6698 static int
6699 bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[])
6700 {
6701 int mac_offset = BGE_EE_MAC_OFFSET;
6702
6703 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6704 mac_offset = BGE_EE_MAC_OFFSET_5906;
6705
6706 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
6707 ETHER_ADDR_LEN));
6708 }
6709
6710 static int
6711 bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[])
6712 {
6713
6714 if (sc->bge_asicrev == BGE_ASICREV_BCM5906)
6715 return (1);
6716
6717 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
6718 ETHER_ADDR_LEN));
6719 }
6720
6721 static int
6722 bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[])
6723 {
6724 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
6725 /* NOTE: Order is critical */
6726 bge_get_eaddr_fw,
6727 bge_get_eaddr_mem,
6728 bge_get_eaddr_nvram,
6729 bge_get_eaddr_eeprom,
6730 NULL
6731 };
6732 const bge_eaddr_fcn_t *func;
6733
6734 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
6735 if ((*func)(sc, eaddr) == 0)
6736 break;
6737 }
6738 return (*func == NULL ? ENXIO : 0);
6739 }
6740
6741 static uint64_t
6742 bge_get_counter(if_t ifp, ift_counter cnt)
6743 {
6744 struct bge_softc *sc;
6745 struct bge_mac_stats *stats;
6746
6747 sc = if_getsoftc(ifp);
6748 if (!BGE_IS_5705_PLUS(sc))
6749 return (if_get_counter_default(ifp, cnt));
6750 stats = &sc->bge_mac_stats;
6751
6752 switch (cnt) {
6753 case IFCOUNTER_IERRORS:
6754 return (stats->NoMoreRxBDs + stats->InputDiscards +
6755 stats->InputErrors);
6756 case IFCOUNTER_COLLISIONS:
6757 return (stats->etherStatsCollisions);
6758 default:
6759 return (if_get_counter_default(ifp, cnt));
6760 }
6761 }
6762
6763 #ifdef DEBUGNET
6764 static void
6765 bge_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize)
6766 {
6767 struct bge_softc *sc;
6768
6769 sc = if_getsoftc(ifp);
6770 BGE_LOCK(sc);
6771 /*
6772 * There is only one logical receive ring, but it is backed
6773 * by two actual rings, for cluster- and jumbo-sized mbufs.
6774 * Debugnet expects only one size, so if jumbo is in use,
6775 * this says we have two rings of jumbo mbufs, but that's
6776 * only a little wasteful.
6777 */
6778 *nrxr = 2;
6779 *ncl = DEBUGNET_MAX_IN_FLIGHT;
6780 if ((sc->bge_flags & BGE_FLAG_JUMBO_STD) != 0 &&
6781 (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
6782 ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)))
6783 *clsize = MJUM9BYTES;
6784 else
6785 *clsize = MCLBYTES;
6786 BGE_UNLOCK(sc);
6787 }
6788
6789 static void
6790 bge_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused)
6791 {
6792 }
6793
6794 static int
6795 bge_debugnet_transmit(if_t ifp, struct mbuf *m)
6796 {
6797 struct bge_softc *sc;
6798 uint32_t prodidx;
6799 int error;
6800
6801 sc = if_getsoftc(ifp);
6802 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6803 IFF_DRV_RUNNING)
6804 return (1);
6805
6806 prodidx = sc->bge_tx_prodidx;
6807 error = bge_encap(sc, &m, &prodidx);
6808 if (error == 0)
6809 bge_start_tx(sc, prodidx);
6810 return (error);
6811 }
6812
6813 static int
6814 bge_debugnet_poll(if_t ifp, int count)
6815 {
6816 struct bge_softc *sc;
6817 uint32_t rx_prod, tx_cons;
6818
6819 sc = if_getsoftc(ifp);
6820 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
6821 IFF_DRV_RUNNING)
6822 return (1);
6823
6824 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6825 sc->bge_cdata.bge_status_map,
6826 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6827
6828 rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx;
6829 tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx;
6830
6831 bus_dmamap_sync(sc->bge_cdata.bge_status_tag,
6832 sc->bge_cdata.bge_status_map,
6833 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6834
6835 (void)bge_rxeof(sc, rx_prod, 0);
6836 bge_txeof(sc, tx_cons);
6837 return (0);
6838 }
6839 #endif /* DEBUGNET */
6840