1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
5 * Copyright (C) 2009-2015 Semihalf
6 * Copyright (C) 2015 Stormshield
7 * All rights reserved.
8 *
9 * Developed by Semihalf.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of MARVELL nor the names of contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #ifdef HAVE_KERNEL_OPTION_HEADERS
37 #include "opt_device_polling.h"
38 #endif
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/endian.h>
43 #include <sys/mbuf.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/socket.h>
49 #include <sys/sysctl.h>
50
51 #include <net/ethernet.h>
52 #include <net/bpf.h>
53 #include <net/if.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/if_vlan_var.h>
59
60 #include <netinet/in_systm.h>
61 #include <netinet/in.h>
62 #include <netinet/ip.h>
63
64 #include <sys/sockio.h>
65 #include <sys/bus.h>
66 #include <machine/bus.h>
67 #include <sys/rman.h>
68 #include <machine/resource.h>
69
70 #include <dev/mii/mii.h>
71 #include <dev/mii/miivar.h>
72
73 #include <dev/fdt/fdt_common.h>
74 #include <dev/ofw/ofw_bus.h>
75 #include <dev/ofw/ofw_bus_subr.h>
76 #include <dev/mdio/mdio.h>
77
78 #include <dev/mge/if_mgevar.h>
79 #include <arm/mv/mvreg.h>
80 #include <arm/mv/mvvar.h>
81
82 #include "miibus_if.h"
83 #include "mdio_if.h"
84
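/*
 * Note: MGE_DELAY() sleeps via pause(9) instead of busy-waiting, so it may
 * only be used from a sleepable context; it is intended to be called while
 * holding the sx-based SMI interlock (sx_smi), not under the driver mutexes.
 */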
85 #define MGE_DELAY(x) pause("SMI access sleep", (x) / tick_sbt)
86
87 static int mge_probe(device_t dev);
88 static int mge_attach(device_t dev);
89 static int mge_detach(device_t dev);
90 static int mge_shutdown(device_t dev);
91 static int mge_suspend(device_t dev);
92 static int mge_resume(device_t dev);
93
94 static int mge_miibus_readreg(device_t dev, int phy, int reg);
95 static int mge_miibus_writereg(device_t dev, int phy, int reg, int value);
96
97 static int mge_mdio_readreg(device_t dev, int phy, int reg);
98 static int mge_mdio_writereg(device_t dev, int phy, int reg, int value);
99
100 static int mge_ifmedia_upd(if_t ifp);
101 static void mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
102
103 static void mge_init(void *arg);
104 static void mge_init_locked(void *arg);
105 static void mge_start(if_t ifp);
106 static void mge_start_locked(if_t ifp);
107 static void mge_watchdog(struct mge_softc *sc);
108 static int mge_ioctl(if_t ifp, u_long command, caddr_t data);
109
110 static uint32_t mge_tfut_ipg(uint32_t val, int ver);
111 static uint32_t mge_rx_ipg(uint32_t val, int ver);
112 static void mge_ver_params(struct mge_softc *sc);
113
114 static void mge_intrs_ctrl(struct mge_softc *sc, int enable);
115 static void mge_intr_rxtx(void *arg);
116 static void mge_intr_rx(void *arg);
117 static void mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
118 uint32_t int_cause_ext);
119 static int mge_intr_rx_locked(struct mge_softc *sc, int count);
120 static void mge_intr_tx(void *arg);
121 static void mge_intr_tx_locked(struct mge_softc *sc);
122 static void mge_intr_misc(void *arg);
123 static void mge_intr_sum(void *arg);
124 static void mge_intr_err(void *arg);
125 static void mge_stop(struct mge_softc *sc);
126 static void mge_tick(void *msc);
127 static uint32_t mge_set_port_serial_control(uint32_t media);
128 static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr);
129 static void mge_set_mac_address(struct mge_softc *sc);
130 static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte,
131 uint8_t queue);
132 static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
133 static int mge_allocate_dma(struct mge_softc *sc);
134 static int mge_alloc_desc_dma(struct mge_softc *sc,
135 struct mge_desc_wrapper* desc_tab, uint32_t size,
136 bus_dma_tag_t *buffer_tag);
137 static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
138 struct mbuf **mbufp, bus_addr_t *paddr);
139 static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg,
140 int error);
141 static void mge_free_dma(struct mge_softc *sc);
142 static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
143 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs);
144 static void mge_offload_process_frame(if_t ifp, struct mbuf *frame,
145 uint32_t status, uint16_t bufsize);
146 static void mge_offload_setup_descriptor(struct mge_softc *sc,
147 struct mge_desc_wrapper *dw);
148 static uint8_t mge_crc8(uint8_t *data, int size);
149 static void mge_setup_multicast(struct mge_softc *sc);
150 static void mge_set_rxic(struct mge_softc *sc);
151 static void mge_set_txic(struct mge_softc *sc);
152 static void mge_add_sysctls(struct mge_softc *sc);
153 static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS);
154
155 static device_method_t mge_methods[] = {
156 /* Device interface */
157 DEVMETHOD(device_probe, mge_probe),
158 DEVMETHOD(device_attach, mge_attach),
159 DEVMETHOD(device_detach, mge_detach),
160 DEVMETHOD(device_shutdown, mge_shutdown),
161 DEVMETHOD(device_suspend, mge_suspend),
162 DEVMETHOD(device_resume, mge_resume),
163 /* MII interface */
164 DEVMETHOD(miibus_readreg, mge_miibus_readreg),
165 DEVMETHOD(miibus_writereg, mge_miibus_writereg),
166 /* MDIO interface */
167 DEVMETHOD(mdio_readreg, mge_mdio_readreg),
168 DEVMETHOD(mdio_writereg, mge_mdio_writereg),
169 { 0, 0 }
170 };
171
172 DEFINE_CLASS_0(mge, mge_driver, mge_methods, sizeof(struct mge_softc));
173
174 static int switch_attached = 0;
175
176 DRIVER_MODULE(mge, simplebus, mge_driver, 0, 0);
177 DRIVER_MODULE(miibus, mge, miibus_driver, 0, 0);
178 DRIVER_MODULE(mdio, mge, mdio_driver, 0, 0);
179 MODULE_DEPEND(mge, ether, 1, 1, 1);
180 MODULE_DEPEND(mge, miibus, 1, 1, 1);
181 MODULE_DEPEND(mge, mdio, 1, 1, 1);
182
183 static struct resource_spec res_spec[] = {
184 { SYS_RES_MEMORY, 0, RF_ACTIVE },
185 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
186 { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE },
187 { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE },
188 { -1, 0 }
189 };
190
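/*
 * Interrupt handlers and their descriptions.  Entry 0 (the aggregated RX/TX
 * handler) is used when the controller exposes a single interrupt line
 * (sc->mge_intr_cnt == 1); otherwise entries 1..mge_intr_cnt are attached to
 * the individual interrupt resources.
 */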
191 static struct {
192 driver_intr_t *handler;
193 char * description;
194 } mge_intrs[MGE_INTR_COUNT + 1] = {
195 { mge_intr_rxtx,"GbE aggregated interrupt" },
196 { mge_intr_rx, "GbE receive interrupt" },
197 { mge_intr_tx, "GbE transmit interrupt" },
198 { mge_intr_misc,"GbE misc interrupt" },
199 { mge_intr_sum, "GbE summary interrupt" },
200 { mge_intr_err, "GbE error interrupt" },
201 };
202
203 /* SMI access interlock */
204 static struct sx sx_smi;
205
206 static uint32_t
207 mv_read_ge_smi(device_t dev, int phy, int reg)
208 {
209 uint32_t timeout;
210 uint32_t ret;
211 struct mge_softc *sc;
212
213 sc = device_get_softc(dev);
214 KASSERT(sc != NULL, ("NULL softc ptr!"));
215 timeout = MGE_SMI_WRITE_RETRIES;
216
217 MGE_SMI_LOCK();
218 while (--timeout &&
219 (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
220 MGE_DELAY(MGE_SMI_WRITE_DELAY);
221
222 if (timeout == 0) {
223 device_printf(dev, "SMI write timeout.\n");
224 ret = ~0U;
225 goto out;
226 }
227
228 MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
229 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
230
231 /* Wait till finished. */
232 timeout = MGE_SMI_WRITE_RETRIES;
233 while (--timeout &&
234 !((MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_READVALID)))
235 MGE_DELAY(MGE_SMI_WRITE_DELAY);
236
237 if (timeout == 0) {
238 device_printf(dev, "SMI write validation timeout.\n");
239 ret = ~0U;
240 goto out;
241 }
242
243 /* Wait for the data to update in the SMI register */
244 MGE_DELAY(MGE_SMI_DELAY);
245 ret = MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
246
247 out:
248 MGE_SMI_UNLOCK();
249 return (ret);
250
251 }
252
253 static void
254 mv_write_ge_smi(device_t dev, int phy, int reg, uint32_t value)
255 {
256 uint32_t timeout;
257 struct mge_softc *sc;
258
259 sc = device_get_softc(dev);
260 KASSERT(sc != NULL, ("NULL softc ptr!"));
261
262 MGE_SMI_LOCK();
263 timeout = MGE_SMI_READ_RETRIES;
264 while (--timeout &&
265 (MGE_READ(sc, MGE_REG_SMI) & MGE_SMI_BUSY))
266 MGE_DELAY(MGE_SMI_READ_DELAY);
267
268 if (timeout == 0) {
269 device_printf(dev, "SMI read timeout.\n");
270 goto out;
271 }
272
273 MGE_WRITE(sc, MGE_REG_SMI, MGE_SMI_MASK &
274 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
275 (value & MGE_SMI_DATA_MASK)));
276
277 out:
278 MGE_SMI_UNLOCK();
279 }
280
281 static int
282 mv_read_ext_phy(device_t dev, int phy, int reg)
283 {
284 uint32_t retries;
285 struct mge_softc *sc;
286 uint32_t ret;
287
288 sc = device_get_softc(dev);
289
290 MGE_SMI_LOCK();
291 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
292 (MGE_SMI_READ | (reg << 21) | (phy << 16)));
293
294 retries = MGE_SMI_READ_RETRIES;
295 while (--retries &&
296 !(MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_READVALID))
297 DELAY(MGE_SMI_READ_DELAY);
298
299 if (retries == 0)
300 device_printf(dev, "Timeout while reading from PHY\n");
301
302 ret = MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_DATA_MASK;
303 MGE_SMI_UNLOCK();
304
305 return (ret);
306 }
307
308 static void
309 mv_write_ext_phy(device_t dev, int phy, int reg, int value)
310 {
311 uint32_t retries;
312 struct mge_softc *sc;
313
314 sc = device_get_softc(dev);
315
316 MGE_SMI_LOCK();
317 MGE_WRITE(sc->phy_sc, MGE_REG_SMI, MGE_SMI_MASK &
318 (MGE_SMI_WRITE | (reg << 21) | (phy << 16) |
319 (value & MGE_SMI_DATA_MASK)));
320
321 retries = MGE_SMI_WRITE_RETRIES;
322 while (--retries && MGE_READ(sc->phy_sc, MGE_REG_SMI) & MGE_SMI_BUSY)
323 DELAY(MGE_SMI_WRITE_DELAY);
324
325 if (retries == 0)
326 device_printf(dev, "Timeout while writing to PHY\n");
327 MGE_SMI_UNLOCK();
328 }
329
330 static void
331 mge_get_mac_address(struct mge_softc *sc, uint8_t *addr)
332 {
333 uint32_t mac_l, mac_h;
334 uint8_t lmac[6];
335 int i, valid;
336
337 /*
338 * Retrieve hw address from the device tree.
339 */
340 i = OF_getprop(sc->node, "local-mac-address", (void *)lmac, 6);
341 if (i == 6) {
342 valid = 0;
343 for (i = 0; i < 6; i++)
344 if (lmac[i] != 0) {
345 valid = 1;
346 break;
347 }
348
349 if (valid) {
350 bcopy(lmac, addr, 6);
351 return;
352 }
353 }
354
355 /*
356 * Fall back -- use the currently programmed address.
357 */
358 mac_l = MGE_READ(sc, MGE_MAC_ADDR_L);
359 mac_h = MGE_READ(sc, MGE_MAC_ADDR_H);
360
361 addr[0] = (mac_h & 0xff000000) >> 24;
362 addr[1] = (mac_h & 0x00ff0000) >> 16;
363 addr[2] = (mac_h & 0x0000ff00) >> 8;
364 addr[3] = (mac_h & 0x000000ff);
365 addr[4] = (mac_l & 0x0000ff00) >> 8;
366 addr[5] = (mac_l & 0x000000ff);
367 }
368
369 static uint32_t
370 mge_tfut_ipg(uint32_t val, int ver)
371 {
372
373 switch (ver) {
374 case 1:
375 return ((val & 0x3fff) << 4);
376 case 2:
377 default:
378 return ((val & 0xffff) << 4);
379 }
380 }
381
382 static uint32_t
383 mge_rx_ipg(uint32_t val, int ver)
384 {
385
386 switch (ver) {
387 case 1:
388 return ((val & 0x3fff) << 8);
389 case 2:
390 default:
391 return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7));
392 }
393 }
394
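/*
 * Select controller-version-dependent parameters (register offsets and
 * limits, number of interrupt lines, hardware checksum capability) based on
 * the SoC ID reported by soc_id().
 */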
395 static void
396 mge_ver_params(struct mge_softc *sc)
397 {
398 uint32_t d, r;
399
400 soc_id(&d, &r);
401 if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
402 d == MV_DEV_88F6282 ||
403 d == MV_DEV_MV78100 ||
404 d == MV_DEV_MV78100_Z0 ||
405 (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
406 sc->mge_ver = 2;
407 sc->mge_mtu = 0x4e8;
408 sc->mge_tfut_ipg_max = 0xFFFF;
409 sc->mge_rx_ipg_max = 0xFFFF;
410 sc->mge_tx_arb_cfg = 0xFC0000FF;
411 sc->mge_tx_tok_cfg = 0xFFFF7FFF;
412 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
413 } else {
414 sc->mge_ver = 1;
415 sc->mge_mtu = 0x458;
416 sc->mge_tfut_ipg_max = 0x3FFF;
417 sc->mge_rx_ipg_max = 0x3FFF;
418 sc->mge_tx_arb_cfg = 0x000000FF;
419 sc->mge_tx_tok_cfg = 0x3FFFFFFF;
420 sc->mge_tx_tok_cnt = 0x3FFFFFFF;
421 }
422 if (d == MV_DEV_88RC8180)
423 sc->mge_intr_cnt = 1;
424 else
425 sc->mge_intr_cnt = 2;
426
427 if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
428 sc->mge_hw_csum = 0;
429 else
430 sc->mge_hw_csum = 1;
431 }
432
433 static void
434 mge_set_mac_address(struct mge_softc *sc)
435 {
436 char *if_mac;
437 uint32_t mac_l, mac_h;
438
439 MGE_GLOBAL_LOCK_ASSERT(sc);
440
441 if_mac = (char *)if_getlladdr(sc->ifp);
442
443 mac_l = (if_mac[4] << 8) | (if_mac[5]);
444 mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) |
445 (if_mac[2] << 8) | (if_mac[3] << 0);
446
447 MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l);
448 MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h);
449
450 mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE);
451 }
452
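/*
 * Program the unicast DA filter table.  The low nibble of the last MAC
 * address byte selects one of 16 filter entries; each 32-bit
 * MGE_DA_FILTER_UCAST register holds four 8-bit entries, where bit 0 enables
 * reception and the following bits select the target RX queue.
 */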
453 static void
454 mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
455 {
456 uint32_t reg_idx, reg_off, reg_val, i;
457
458 last_byte &= 0xf;
459 reg_idx = last_byte / MGE_UCAST_REG_NUMBER;
460 reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8;
461 reg_val = (1 | (queue << 1)) << reg_off;
462
463 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) {
464 if (i == reg_idx)
465 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
466 else
467 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0);
468 }
469 }
470
471 static void
472 mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
473 {
474 uint32_t port_config;
475 uint32_t reg_val, i;
476
477 /* Enable or disable promiscuous mode as needed */
478 if (if_getflags(sc->ifp) & IFF_PROMISC) {
479 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
480 port_config |= PORT_CONFIG_UPM;
481 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
482
483 reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
484 (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
485
486 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
487 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val);
488 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val);
489 }
490
491 for (i = 0; i < MGE_UCAST_REG_NUMBER; i++)
492 MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val);
493
494 } else {
495 port_config = MGE_READ(sc, MGE_PORT_CONFIG);
496 port_config &= ~PORT_CONFIG_UPM;
497 MGE_WRITE(sc, MGE_PORT_CONFIG, port_config);
498
499 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
500 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0);
501 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0);
502 }
503
504 mge_set_mac_address(sc);
505 }
506 }
507
508 static void
509 mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
510 {
511 u_int32_t *paddr;
512
513 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
514 paddr = arg;
515
516 *paddr = segs->ds_addr;
517 }
518
519 static int
520 mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
521 bus_addr_t *paddr)
522 {
523 struct mbuf *new_mbuf;
524 bus_dma_segment_t seg[1];
525 int error;
526 int nsegs;
527
528 KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
529
530 new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
531 if (new_mbuf == NULL)
532 return (ENOBUFS);
533 new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
534
535 if (*mbufp) {
536 bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
537 bus_dmamap_unload(tag, map);
538 }
539
540 error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
541 BUS_DMA_NOWAIT);
542 KASSERT(nsegs == 1, ("Too many segments returned!"));
543 if (nsegs != 1 || error)
544 panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
545
546 bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
547
548 (*mbufp) = new_mbuf;
549 (*paddr) = seg->ds_addr;
550 return (0);
551 }
552
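/*
 * Allocate and wire up a ring of DMA descriptors.  Descriptors are allocated
 * from the last entry down to the first so that each one can be chained to
 * the previously allocated (next higher index) descriptor; the assignment
 * after the loop links the last descriptor back to the first, closing the
 * ring.
 */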
553 static int
554 mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab,
555 uint32_t size, bus_dma_tag_t *buffer_tag)
556 {
557 struct mge_desc_wrapper *dw;
558 bus_addr_t desc_paddr;
559 int i, error;
560
561 desc_paddr = 0;
562 for (i = size - 1; i >= 0; i--) {
563 dw = &(tab[i]);
564 error = bus_dmamem_alloc(sc->mge_desc_dtag,
565 (void**)&(dw->mge_desc),
566 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
567 &(dw->desc_dmap));
568
569 if (error) {
570 if_printf(sc->ifp, "failed to allocate DMA memory\n");
571 dw->mge_desc = NULL;
572 return (ENXIO);
573 }
574
575 error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap,
576 dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr,
577 &(dw->mge_desc_paddr), BUS_DMA_NOWAIT);
578
579 if (error) {
580 if_printf(sc->ifp, "can't load descriptor\n");
581 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
582 dw->desc_dmap);
583 dw->mge_desc = NULL;
584 return (ENXIO);
585 }
586
587 /* Chain descriptors */
588 dw->mge_desc->next_desc = desc_paddr;
589 desc_paddr = dw->mge_desc_paddr;
590 }
591 tab[size - 1].mge_desc->next_desc = desc_paddr;
592
593 /* Allocate a busdma tag for mbufs. */
594 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
595 1, 0, /* alignment, boundary */
596 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
597 BUS_SPACE_MAXADDR, /* highaddr */
598 NULL, NULL, /* filtfunc, filtfuncarg */
599 MCLBYTES, 1, /* maxsize, nsegments */
600 MCLBYTES, 0, /* maxsegsz, flags */
601 NULL, NULL, /* lockfunc, lockfuncarg */
602 buffer_tag); /* dmat */
603 if (error) {
604 if_printf(sc->ifp, "failed to create busdma tag for mbufs\n");
605 return (ENXIO);
606 }
607
608 /* Create TX busdma maps */
609 for (i = 0; i < size; i++) {
610 dw = &(tab[i]);
611 error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap);
612 if (error) {
613 if_printf(sc->ifp, "failed to create map for mbuf\n");
614 return (ENXIO);
615 }
616
617 dw->buffer = (struct mbuf*)NULL;
618 dw->mge_desc->buffer = (bus_addr_t)NULL;
619 }
620
621 return (0);
622 }
623
624 static int
625 mge_allocate_dma(struct mge_softc *sc)
626 {
627 struct mge_desc_wrapper *dw;
628 int i;
629
630 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
631 bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
632 16, 0, /* alignment, boundary */
633 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
634 BUS_SPACE_MAXADDR, /* highaddr */
635 NULL, NULL, /* filtfunc, filtfuncarg */
636 sizeof(struct mge_desc), 1, /* maxsize, nsegments */
637 sizeof(struct mge_desc), 0, /* maxsegsz, flags */
638 NULL, NULL, /* lockfunc, lockfuncarg */
639 &sc->mge_desc_dtag); /* dmat */
640
641
642 mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM,
643 &sc->mge_tx_dtag);
644 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
645 &sc->mge_rx_dtag);
646
647 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
648 dw = &(sc->mge_rx_desc[i]);
649 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
650 &dw->mge_desc->buffer);
651 }
652
653 sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr;
654 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
655
656 return (0);
657 }
658
659 static void
660 mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab,
661 uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs)
662 {
663 struct mge_desc_wrapper *dw;
664 int i;
665
666 for (i = 0; i < size; i++) {
667 /* Free RX mbuf */
668 dw = &(tab[i]);
669
670 if (dw->buffer_dmap) {
671 if (free_mbufs) {
672 bus_dmamap_sync(buffer_tag, dw->buffer_dmap,
673 BUS_DMASYNC_POSTREAD);
674 bus_dmamap_unload(buffer_tag, dw->buffer_dmap);
675 }
676 bus_dmamap_destroy(buffer_tag, dw->buffer_dmap);
677 if (free_mbufs)
678 m_freem(dw->buffer);
679 }
680 /* Free RX descriptors */
681 if (dw->desc_dmap) {
682 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
683 BUS_DMASYNC_POSTREAD);
684 bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap);
685 bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc,
686 dw->desc_dmap);
687 }
688 }
689 }
690
691 static void
692 mge_free_dma(struct mge_softc *sc)
693 {
694
695 /* Free descriptors and mbufs */
696 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
697 mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0);
698
699 /* Destroy mbuf dma tag */
700 bus_dma_tag_destroy(sc->mge_tx_dtag);
701 bus_dma_tag_destroy(sc->mge_rx_dtag);
702 /* Destroy descriptors tag */
703 bus_dma_tag_destroy(sc->mge_desc_dtag);
704 }
705
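/*
 * Rebuild the RX descriptor ring from scratch.  Called after the controller
 * reports an RX resource error (MGE_PORT_INT_RXERRQ0) to recover the
 * receive path.
 */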
706 static void
707 mge_reinit_rx(struct mge_softc *sc)
708 {
709 struct mge_desc_wrapper *dw;
710 int i;
711
712 MGE_RECEIVE_LOCK_ASSERT(sc);
713
714 mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1);
715
716 mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM,
717 &sc->mge_rx_dtag);
718
719 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
720 dw = &(sc->mge_rx_desc[i]);
721 mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer,
722 &dw->mge_desc->buffer);
723 }
724
725 sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr;
726 sc->rx_desc_curr = 0;
727
728 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
729 sc->rx_desc_start);
730
731 /* Enable RX queue */
732 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
733 }
734
735 #ifdef DEVICE_POLLING
736 static poll_handler_t mge_poll;
737
738 static int
739 mge_poll(if_t ifp, enum poll_cmd cmd, int count)
740 {
741 struct mge_softc *sc = if_getsoftc(ifp);
742 uint32_t int_cause, int_cause_ext;
743 int rx_npkts = 0;
744
745 MGE_RECEIVE_LOCK(sc);
746
747 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
748 MGE_RECEIVE_UNLOCK(sc);
749 return (rx_npkts);
750 }
751
752 if (cmd == POLL_AND_CHECK_STATUS) {
753 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
754 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
755
756 /* Check for resource error */
757 if (int_cause & MGE_PORT_INT_RXERRQ0)
758 mge_reinit_rx(sc);
759
760 if (int_cause || int_cause_ext) {
761 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
762 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
763 }
764 }
765
766
767 rx_npkts = mge_intr_rx_locked(sc, count);
768
769 MGE_RECEIVE_UNLOCK(sc);
770 MGE_TRANSMIT_LOCK(sc);
771 mge_intr_tx_locked(sc);
772 MGE_TRANSMIT_UNLOCK(sc);
773 return (rx_npkts);
774 }
775 #endif /* DEVICE_POLLING */
776
777 static int
778 mge_attach(device_t dev)
779 {
780 struct mge_softc *sc;
781 struct mii_softc *miisc;
782 if_t ifp;
783 uint8_t hwaddr[ETHER_ADDR_LEN];
784 int i, error, phy;
785
786 sc = device_get_softc(dev);
787 sc->dev = dev;
788 sc->node = ofw_bus_get_node(dev);
789 phy = 0;
790
791 if (fdt_get_phyaddr(sc->node, sc->dev, &phy, (void **)&sc->phy_sc) == 0) {
792 device_printf(dev, "PHY%i attached, phy_sc points to %s\n", phy,
793 device_get_nameunit(sc->phy_sc->dev));
794 sc->phy_attached = 1;
795 } else {
796 device_printf(dev, "PHY not attached.\n");
797 sc->phy_attached = 0;
798 sc->phy_sc = sc;
799 }
800
801 if (fdt_find_compatible(sc->node, "mrvl,sw", 1) != 0) {
802 device_printf(dev, "Switch attached.\n");
803 sc->switch_attached = 1;
804 /* Driver-global flag, shared across all mge instances */
805 switch_attached = 1;
806 } else {
807 sc->switch_attached = 0;
808 }
809
810 if (device_get_unit(dev) == 0) {
811 sx_init(&sx_smi, "mge_tick() SMI access threads interlock");
812 }
813
814 /* Set chip version-dependent parameters */
815 mge_ver_params(sc);
816
817 /* Initialize mutexes */
818 mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock",
819 MTX_DEF);
820 mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock",
821 MTX_DEF);
822
823 /* Allocate IO and IRQ resources */
824 error = bus_alloc_resources(dev, res_spec, sc->res);
825 if (error) {
826 device_printf(dev, "could not allocate resources\n");
827 mge_detach(dev);
828 return (ENXIO);
829 }
830
831 /* Allocate DMA, buffers, buffer descriptors */
832 error = mge_allocate_dma(sc);
833 if (error) {
834 mge_detach(dev);
835 return (ENXIO);
836 }
837
838 sc->tx_desc_curr = 0;
839 sc->rx_desc_curr = 0;
840 sc->tx_desc_used_idx = 0;
841 sc->tx_desc_used_count = 0;
842
843 /* Configure defaults for interrupt coalescing */
844 sc->rx_ic_time = 768;
845 sc->tx_ic_time = 768;
846 mge_add_sysctls(sc);
847
848 /* Allocate network interface */
849 ifp = sc->ifp = if_alloc(IFT_ETHER);
850 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
851 if_setsoftc(ifp, sc);
852 if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
853 if_setcapabilities(ifp, IFCAP_VLAN_MTU);
854 if (sc->mge_hw_csum) {
855 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
856 if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
857 }
858 if_setcapenable(ifp, if_getcapabilities(ifp));
859
860 #ifdef DEVICE_POLLING
861 /* Advertise that polling is supported */
862 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
863 #endif
864
865 if_setinitfn(ifp, mge_init);
866 if_setstartfn(ifp, mge_start);
867 if_setioctlfn(ifp, mge_ioctl);
868
869 if_setsendqlen(ifp, MGE_TX_DESC_NUM - 1);
870 if_setsendqready(ifp);
871
872 mge_get_mac_address(sc, hwaddr);
873 ether_ifattach(ifp, hwaddr);
874 callout_init(&sc->wd_callout, 1);
875
876 /* Attach PHY(s) */
877 if (sc->phy_attached) {
878 error = mii_attach(dev, &sc->miibus, ifp, mge_ifmedia_upd,
879 mge_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
880 if (error) {
881 device_printf(dev, "MII failed to find PHY\n");
882 if_free(ifp);
883 sc->ifp = NULL;
884 mge_detach(dev);
885 return (error);
886 }
887 sc->mii = device_get_softc(sc->miibus);
888
889 /* Tell the MAC where to find the PHY so autoneg works */
890 miisc = LIST_FIRST(&sc->mii->mii_phys);
891 MGE_WRITE(sc, MGE_REG_PHYDEV, miisc->mii_phy);
892 } else {
893 /* no PHY, so use hard-coded values */
894 ifmedia_init(&sc->mge_ifmedia, 0,
895 mge_ifmedia_upd,
896 mge_ifmedia_sts);
897 ifmedia_add(&sc->mge_ifmedia,
898 IFM_ETHER | IFM_1000_T | IFM_FDX,
899 0, NULL);
900 ifmedia_set(&sc->mge_ifmedia,
901 IFM_ETHER | IFM_1000_T | IFM_FDX);
902 }
903
904 /* Attach interrupt handlers */
905 /* TODO: review flags; in particular, consider marking RX as INTR_ENTROPY */
906 for (i = 1; i <= sc->mge_intr_cnt; ++i) {
907 error = bus_setup_intr(dev, sc->res[i],
908 INTR_TYPE_NET | INTR_MPSAFE,
909 NULL, *mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].handler,
910 sc, &sc->ih_cookie[i - 1]);
911 if (error) {
912 device_printf(dev, "could not setup %s\n",
913 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i)].description);
914 mge_detach(dev);
915 return (error);
916 }
917 }
918
919 if (sc->switch_attached) {
920 MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
921 device_add_child(dev, "mdio", DEVICE_UNIT_ANY);
922 bus_attach_children(dev);
923 }
924
925 return (0);
926 }
927
928 static int
929 mge_detach(device_t dev)
930 {
931 struct mge_softc *sc;
932 int error, i;
933
934 sc = device_get_softc(dev);
935
936 /* Stop controller and free TX queue */
937 if (sc->ifp)
938 mge_shutdown(dev);
939
940 /* Wait for the tick callout to finish */
941 callout_drain(&sc->wd_callout);
942
943 /* Stop and release all interrupts */
944 for (i = 0; i < sc->mge_intr_cnt; ++i) {
945 if (!sc->ih_cookie[i])
946 continue;
947
948 error = bus_teardown_intr(dev, sc->res[1 + i],
949 sc->ih_cookie[i]);
950 if (error)
951 device_printf(dev, "could not release %s\n",
952 mge_intrs[(sc->mge_intr_cnt == 1 ? 0 : i + 1)].description);
953 }
954
955 /* Detach network interface */
956 if (sc->ifp) {
957 ether_ifdetach(sc->ifp);
958 if_free(sc->ifp);
959 }
960
961 /* Free DMA resources */
962 mge_free_dma(sc);
963
964 /* Free IO memory handler */
965 bus_release_resources(dev, res_spec, sc->res);
966
967 /* Destroy mutexes */
968 mtx_destroy(&sc->receive_lock);
969 mtx_destroy(&sc->transmit_lock);
970
971 if (device_get_unit(dev) == 0)
972 sx_destroy(&sx_smi);
973
974 return (0);
975 }
976
977 static void
978 mge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
979 {
980 struct mge_softc *sc;
981 struct mii_data *mii;
982
983 sc = if_getsoftc(ifp);
984 MGE_GLOBAL_LOCK(sc);
985
986 if (!sc->phy_attached) {
987 ifmr->ifm_active = IFM_1000_T | IFM_FDX | IFM_ETHER;
988 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
989 goto out_unlock;
990 }
991
992 mii = sc->mii;
993 mii_pollstat(mii);
994
995 ifmr->ifm_active = mii->mii_media_active;
996 ifmr->ifm_status = mii->mii_media_status;
997
998 out_unlock:
999 MGE_GLOBAL_UNLOCK(sc);
1000 }
1001
1002 static uint32_t
1003 mge_set_port_serial_control(uint32_t media)
1004 {
1005 uint32_t port_config;
1006
1007 port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL |
1008 PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552);
1009
1010 if (IFM_TYPE(media) == IFM_ETHER) {
1011 switch(IFM_SUBTYPE(media)) {
1012 case IFM_AUTO:
1013 break;
1014 case IFM_1000_T:
1015 port_config |= (PORT_SERIAL_GMII_SPEED_1000 |
1016 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1017 | PORT_SERIAL_SPEED_AUTONEG);
1018 break;
1019 case IFM_100_TX:
1020 port_config |= (PORT_SERIAL_MII_SPEED_100 |
1021 PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC
1022 | PORT_SERIAL_SPEED_AUTONEG);
1023 break;
1024 case IFM_10_T:
1025 port_config |= (PORT_SERIAL_AUTONEG |
1026 PORT_SERIAL_AUTONEG_FC |
1027 PORT_SERIAL_SPEED_AUTONEG);
1028 break;
1029 }
1030 if (media & IFM_FDX)
1031 port_config |= PORT_SERIAL_FULL_DUPLEX;
1032 }
1033 return (port_config);
1034 }
1035
1036 static int
1037 mge_ifmedia_upd(if_t ifp)
1038 {
1039 struct mge_softc *sc = if_getsoftc(ifp);
1040
1041 /*
1042 * Do nothing when a switch is attached: the link between the MGE MAC
1043 * and the switch MAC is fixed by the board (PCB) wiring, so changing
1044 * the media settings here would break it.
1045 */
1046 if (sc->phy_attached) {
1047 MGE_GLOBAL_LOCK(sc);
1048 if (if_getflags(ifp) & IFF_UP) {
1049 sc->mge_media_status = sc->mii->mii_media.ifm_media;
1050 mii_mediachg(sc->mii);
1051
1052 /* MGE MAC needs to be reinitialized. */
1053 mge_init_locked(sc);
1054
1055 }
1056 MGE_GLOBAL_UNLOCK(sc);
1057 }
1058
1059 return (0);
1060 }
1061
1062 static void
1063 mge_init(void *arg)
1064 {
1065 struct mge_softc *sc;
1066
1067 sc = arg;
1068 MGE_GLOBAL_LOCK(sc);
1069
1070 mge_init_locked(arg);
1071
1072 MGE_GLOBAL_UNLOCK(sc);
1073 }
1074
1075 static void
1076 mge_init_locked(void *arg)
1077 {
1078 struct mge_softc *sc = arg;
1079 struct mge_desc_wrapper *dw;
1080 volatile uint32_t reg_val;
1081 int i, count;
1082 uint32_t media_status;
1083
1084
1085 MGE_GLOBAL_LOCK_ASSERT(sc);
1086
1087 /* Stop interface */
1088 mge_stop(sc);
1089
1090 /* Disable interrupts */
1091 mge_intrs_ctrl(sc, 0);
1092
1093 /* Set MAC address */
1094 mge_set_mac_address(sc);
1095
1096 /* Setup multicast filters */
1097 mge_setup_multicast(sc);
1098
1099 if (sc->mge_ver == 2) {
1100 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN);
1101 MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0));
1102 }
1103
1104 /* Initialize TX queue configuration registers */
1105 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt);
1106 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg);
1107 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg);
1108
1109 /* Clear TX queue configuration registers for unused queues */
1110 for (i = 1; i < 7; i++) {
1111 MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0);
1112 MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0);
1113 MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0);
1114 }
1115
1116 /* Set default MTU (sc->mge_mtu holds the version-dependent MTU register offset) */
1117 MGE_WRITE(sc, sc->mge_mtu, 0);
1118
1119 /* Port configuration */
1120 MGE_WRITE(sc, MGE_PORT_CONFIG,
1121 PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) |
1122 PORT_CONFIG_ARO_RXQ(0));
1123 MGE_WRITE(sc, MGE_PORT_EXT_CONFIG, 0x0);
1124
1125 /* Configure promisc mode */
1126 mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE);
1127
1128 media_status = sc->mge_media_status;
1129 if (sc->switch_attached) {
1130 media_status &= ~IFM_TMASK;
1131 media_status |= IFM_1000_T;
1132 }
1133
1134 /* Setup port configuration */
1135 reg_val = mge_set_port_serial_control(media_status);
1136 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1137
1138 /* Setup SDMA configuration */
1139 MGE_WRITE(sc, MGE_SDMA_CONFIG, MGE_SDMA_RX_BYTE_SWAP |
1140 MGE_SDMA_TX_BYTE_SWAP |
1141 MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) |
1142 MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD));
1143
1144 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0);
1145
1146 MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start);
1147 MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE),
1148 sc->rx_desc_start);
1149
1150 /* Reset descriptor indexes */
1151 sc->tx_desc_curr = 0;
1152 sc->rx_desc_curr = 0;
1153 sc->tx_desc_used_idx = 0;
1154 sc->tx_desc_used_count = 0;
1155
1156 /* Enable RX descriptors */
1157 for (i = 0; i < MGE_RX_DESC_NUM; i++) {
1158 dw = &sc->mge_rx_desc[i];
1159 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1160 dw->mge_desc->buff_size = MCLBYTES;
1161 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1162 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1163 }
1164
1165 /* Enable RX queue */
1166 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
1167
1168 /* Enable port */
1169 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1170 reg_val |= PORT_SERIAL_ENABLE;
1171 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1172 count = 0x100000;
1173 for (;;) {
1174 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1175 if (reg_val & MGE_STATUS_LINKUP)
1176 break;
1177 DELAY(100);
1178 if (--count == 0) {
1179 if_printf(sc->ifp, "Timeout on link-up\n");
1180 break;
1181 }
1182 }
1183
1184 /* Set up interrupt coalescing */
1185 mge_set_rxic(sc);
1186 mge_set_txic(sc);
1187
1188 /* Enable interrupts */
1189 #ifdef DEVICE_POLLING
1190 /*
1191 * ...only if polling is not turned on. Disable interrupts explicitly
1192 * if polling is enabled.
1193 */
1194 if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1195 mge_intrs_ctrl(sc, 0);
1196 else
1197 #endif /* DEVICE_POLLING */
1198 mge_intrs_ctrl(sc, 1);
1199
1200 /* Activate network interface */
1201 if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, 0);
1202 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE);
1203 sc->wd_timer = 0;
1204
1205 /* Schedule watchdog timeout */
1206 if (sc->phy_attached)
1207 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1208 }
1209
1210 static void
1211 mge_intr_rxtx(void *arg)
1212 {
1213 struct mge_softc *sc;
1214 uint32_t int_cause, int_cause_ext;
1215
1216 sc = arg;
1217 MGE_GLOBAL_LOCK(sc);
1218
1219 #ifdef DEVICE_POLLING
1220 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1221 MGE_GLOBAL_UNLOCK(sc);
1222 return;
1223 }
1224 #endif
1225
1226 /* Get interrupt cause */
1227 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1228 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1229
1230 /* Check for Transmit interrupt */
1231 if (int_cause_ext & (MGE_PORT_INT_EXT_TXBUF0 |
1232 MGE_PORT_INT_EXT_TXUR)) {
1233 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1234 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1235 mge_intr_tx_locked(sc);
1236 }
1237
1238 MGE_TRANSMIT_UNLOCK(sc);
1239
1240 /* Check for Receive interrupt */
1241 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1242
1243 MGE_RECEIVE_UNLOCK(sc);
1244 }
1245
1246 static void
1247 mge_intr_err(void *arg)
1248 {
1249 struct mge_softc *sc;
1250 if_t ifp;
1251
1252 sc = arg;
1253 ifp = sc->ifp;
1254 if_printf(ifp, "%s\n", __FUNCTION__);
1255 }
1256
1257 static void
1258 mge_intr_misc(void *arg)
1259 {
1260 struct mge_softc *sc;
1261 if_t ifp;
1262
1263 sc = arg;
1264 ifp = sc->ifp;
1265 if_printf(ifp, "%s\n", __FUNCTION__);
1266 }
1267
1268 static void
1269 mge_intr_rx(void *arg) {
1270 struct mge_softc *sc;
1271 uint32_t int_cause, int_cause_ext;
1272
1273 sc = arg;
1274 MGE_RECEIVE_LOCK(sc);
1275
1276 #ifdef DEVICE_POLLING
1277 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1278 MGE_RECEIVE_UNLOCK(sc);
1279 return;
1280 }
1281 #endif
1282
1283 /* Get interrupt cause */
1284 int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE);
1285 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1286
1287 mge_intr_rx_check(sc, int_cause, int_cause_ext);
1288
1289 MGE_RECEIVE_UNLOCK(sc);
1290 }
1291
1292 static void
1293 mge_intr_rx_check(struct mge_softc *sc, uint32_t int_cause,
1294 uint32_t int_cause_ext)
1295 {
1296 /* Check for resource error */
1297 if (int_cause & MGE_PORT_INT_RXERRQ0) {
1298 mge_reinit_rx(sc);
1299 MGE_WRITE(sc, MGE_PORT_INT_CAUSE,
1300 ~(int_cause & MGE_PORT_INT_RXERRQ0));
1301 }
1302
1303 int_cause &= MGE_PORT_INT_RXQ0;
1304 int_cause_ext &= MGE_PORT_INT_EXT_RXOR;
1305
1306 if (int_cause || int_cause_ext) {
1307 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause);
1308 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext);
1309 mge_intr_rx_locked(sc, -1);
1310 }
1311 }
1312
1313 static int
1314 mge_intr_rx_locked(struct mge_softc *sc, int count)
1315 {
1316 if_t ifp = sc->ifp;
1317 uint32_t status;
1318 uint16_t bufsize;
1319 struct mge_desc_wrapper* dw;
1320 struct mbuf *mb;
1321 int rx_npkts = 0;
1322
1323 MGE_RECEIVE_LOCK_ASSERT(sc);
1324
1325 while (count != 0) {
1326 dw = &sc->mge_rx_desc[sc->rx_desc_curr];
1327 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1328 BUS_DMASYNC_POSTREAD);
1329
1330 /* Get status */
1331 status = dw->mge_desc->cmd_status;
1332 bufsize = dw->mge_desc->buff_size;
1333 if ((status & MGE_DMA_OWNED) != 0)
1334 break;
1335
1336 if (dw->mge_desc->byte_count &&
1337 !(status & MGE_ERR_SUMMARY)) {
1338
1339 bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap,
1340 BUS_DMASYNC_POSTREAD);
1341
1342 mb = m_devget(dw->buffer->m_data,
1343 dw->mge_desc->byte_count - ETHER_CRC_LEN,
1344 0, ifp, NULL);
1345
1346 if (mb == NULL)
1347 /* Give up if no mbufs */
1348 break;
1349
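/*
 * The controller appears to prepend two bytes of padding to each received
 * frame (so that the IP header ends up 32-bit aligned); strip them before
 * handing the mbuf to the stack.
 */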
1350 mb->m_len -= 2;
1351 mb->m_pkthdr.len -= 2;
1352 mb->m_data += 2;
1353
1354 mb->m_pkthdr.rcvif = ifp;
1355
1356 mge_offload_process_frame(ifp, mb, status,
1357 bufsize);
1358
1359 MGE_RECEIVE_UNLOCK(sc);
1360 if_input(ifp, mb);
1361 MGE_RECEIVE_LOCK(sc);
1362 rx_npkts++;
1363 }
1364
1365 dw->mge_desc->byte_count = 0;
1366 dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED;
1367 sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM);
1368 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1369 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1370
1371 if (count > 0)
1372 count -= 1;
1373 }
1374
1375 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_npkts);
1376
1377 return (rx_npkts);
1378 }
1379
1380 static void
1381 mge_intr_sum(void *arg)
1382 {
1383 struct mge_softc *sc = arg;
1384 if_t ifp;
1385
1386 ifp = sc->ifp;
1387 if_printf(ifp, "%s\n", __FUNCTION__);
1388 }
1389
1390 static void
1391 mge_intr_tx(void *arg)
1392 {
1393 struct mge_softc *sc = arg;
1394 uint32_t int_cause_ext;
1395
1396 MGE_TRANSMIT_LOCK(sc);
1397
1398 #ifdef DEVICE_POLLING
1399 if (if_getcapenable(sc->ifp) & IFCAP_POLLING) {
1400 MGE_TRANSMIT_UNLOCK(sc);
1401 return;
1402 }
1403 #endif
1404
1405 /* Ack the interrupt */
1406 int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT);
1407 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~(int_cause_ext &
1408 (MGE_PORT_INT_EXT_TXBUF0 | MGE_PORT_INT_EXT_TXUR)));
1409
1410 mge_intr_tx_locked(sc);
1411
1412 MGE_TRANSMIT_UNLOCK(sc);
1413 }
1414
1415 static void
1416 mge_intr_tx_locked(struct mge_softc *sc)
1417 {
1418 if_t ifp = sc->ifp;
1419 struct mge_desc_wrapper *dw;
1420 struct mge_desc *desc;
1421 uint32_t status;
1422 int send = 0;
1423
1424 MGE_TRANSMIT_LOCK_ASSERT(sc);
1425
1426 /* Disable watchdog */
1427 sc->wd_timer = 0;
1428
1429 while (sc->tx_desc_used_count) {
1430 /* Get the descriptor */
1431 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1432 desc = dw->mge_desc;
1433 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1434 BUS_DMASYNC_POSTREAD);
1435
1436 /* Get descriptor status */
1437 status = desc->cmd_status;
1438
1439 if (status & MGE_DMA_OWNED)
1440 break;
1441
1442 sc->tx_desc_used_idx =
1443 (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM;
1444 sc->tx_desc_used_count--;
1445
1446 /* Update collision statistics */
1447 if (status & MGE_ERR_SUMMARY) {
1448 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC)
1449 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
1450 if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL)
1451 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
1452 }
1453
1454 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1455 BUS_DMASYNC_POSTWRITE);
1456 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1457 m_freem(dw->buffer);
1458 dw->buffer = (struct mbuf*)NULL;
1459 send++;
1460
1461 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1462 }
1463
1464 if (send) {
1465 /* Now send anything that was pending */
1466 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1467 mge_start_locked(ifp);
1468 }
1469 }
1470 static int
1471 mge_ioctl(if_t ifp, u_long command, caddr_t data)
1472 {
1473 struct mge_softc *sc = if_getsoftc(ifp);
1474 struct ifreq *ifr = (struct ifreq *)data;
1475 int mask, error;
1476 uint32_t flags;
1477
1478 error = 0;
1479
1480 switch (command) {
1481 case SIOCSIFFLAGS:
1482 MGE_GLOBAL_LOCK(sc);
1483
1484 if (if_getflags(ifp) & IFF_UP) {
1485 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1486 flags = if_getflags(ifp) ^ sc->mge_if_flags;
1487 if (flags & IFF_PROMISC)
1488 mge_set_prom_mode(sc,
1489 MGE_RX_DEFAULT_QUEUE);
1490
1491 if (flags & IFF_ALLMULTI)
1492 mge_setup_multicast(sc);
1493 } else
1494 mge_init_locked(sc);
1495 }
1496 else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
1497 mge_stop(sc);
1498
1499 sc->mge_if_flags = if_getflags(ifp);
1500 MGE_GLOBAL_UNLOCK(sc);
1501 break;
1502 case SIOCADDMULTI:
1503 case SIOCDELMULTI:
1504 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
1505 MGE_GLOBAL_LOCK(sc);
1506 mge_setup_multicast(sc);
1507 MGE_GLOBAL_UNLOCK(sc);
1508 }
1509 break;
1510 case SIOCSIFCAP:
1511 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
1512 if (mask & IFCAP_HWCSUM) {
1513 if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
1514 if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap, 0);
1515 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
1516 if_sethwassist(ifp, MGE_CHECKSUM_FEATURES);
1517 else
1518 if_sethwassist(ifp, 0);
1519 }
1520 #ifdef DEVICE_POLLING
1521 if (mask & IFCAP_POLLING) {
1522 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1523 error = ether_poll_register(mge_poll, ifp);
1524 if (error)
1525 return(error);
1526
1527 MGE_GLOBAL_LOCK(sc);
1528 mge_intrs_ctrl(sc, 0);
1529 if_setcapenablebit(ifp, IFCAP_POLLING, 0);
1530 MGE_GLOBAL_UNLOCK(sc);
1531 } else {
1532 error = ether_poll_deregister(ifp);
1533 MGE_GLOBAL_LOCK(sc);
1534 mge_intrs_ctrl(sc, 1);
1535 if_setcapenablebit(ifp, 0, IFCAP_POLLING);
1536 MGE_GLOBAL_UNLOCK(sc);
1537 }
1538 }
1539 #endif
1540 break;
1541 case SIOCGIFMEDIA: /* fall through */
1542 case SIOCSIFMEDIA:
1543 /*
1544 * Setting the media type via ioctl is *not* supported for a MAC
1545 * that is connected to a switch. Use etherswitchcfg instead.
1546 */
1547 if (!sc->phy_attached && (command == SIOCSIFMEDIA))
1548 return (0);
1549 else if (!sc->phy_attached) {
1550 error = ifmedia_ioctl(ifp, ifr, &sc->mge_ifmedia,
1551 command);
1552 break;
1553 }
1554
1555 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T
1556 && !(ifr->ifr_media & IFM_FDX)) {
1557 device_printf(sc->dev,
1558 "1000baseTX half-duplex unsupported\n");
1559 return (0);
1560 }
1561 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command);
1562 break;
1563 default:
1564 error = ether_ioctl(ifp, command, data);
1565 }
1566 return (error);
1567 }
1568
1569 static int
1570 mge_miibus_readreg(device_t dev, int phy, int reg)
1571 {
1572
1573 KASSERT(!switch_attached, ("miibus used with switch attached"));
1574
1575 return (mv_read_ext_phy(dev, phy, reg));
1576 }
1577
1578 static int
1579 mge_miibus_writereg(device_t dev, int phy, int reg, int value)
1580 {
1581
1582 KASSERT(!switch_attached, ("miibus used with switch attached"));
1583
1584 mv_write_ext_phy(dev, phy, reg, value);
1585
1586 return (0);
1587 }
1588
1589 static int
1590 mge_probe(device_t dev)
1591 {
1592
1593 if (!ofw_bus_status_okay(dev))
1594 return (ENXIO);
1595
1596 if (!ofw_bus_is_compatible(dev, "mrvl,ge"))
1597 return (ENXIO);
1598
1599 device_set_desc(dev, "Marvell Gigabit Ethernet controller");
1600 return (BUS_PROBE_DEFAULT);
1601 }
1602
1603 static int
1604 mge_resume(device_t dev)
1605 {
1606
1607 device_printf(dev, "%s\n", __FUNCTION__);
1608 return (0);
1609 }
1610
1611 static int
1612 mge_shutdown(device_t dev)
1613 {
1614 struct mge_softc *sc = device_get_softc(dev);
1615
1616 MGE_GLOBAL_LOCK(sc);
1617
1618 #ifdef DEVICE_POLLING
1619 if (if_getcapenable(sc->ifp) & IFCAP_POLLING)
1620 ether_poll_deregister(sc->ifp);
1621 #endif
1622
1623 mge_stop(sc);
1624
1625 MGE_GLOBAL_UNLOCK(sc);
1626
1627 return (0);
1628 }
1629
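/*
 * Map a frame for transmission and fill in its TX descriptor.  The TX path
 * handles only a single DMA segment per frame, which is why
 * mge_start_locked() defragments mbuf chains before calling this function.
 */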
1630 static int
1631 mge_encap(struct mge_softc *sc, struct mbuf *m0)
1632 {
1633 struct mge_desc_wrapper *dw = NULL;
1634 bus_dma_segment_t segs[MGE_TX_DESC_NUM];
1635 bus_dmamap_t mapp;
1636 int error;
1637 int seg, nsegs;
1638 int desc_no;
1639
1640 /* Fetch unused map */
1641 desc_no = sc->tx_desc_curr;
1642 dw = &sc->mge_tx_desc[desc_no];
1643 mapp = dw->buffer_dmap;
1644
1645 /* Create mapping in DMA memory */
1646 error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs,
1647 BUS_DMA_NOWAIT);
1648 if (error != 0) {
1649 m_freem(m0);
1650 return (error);
1651 }
1652
1653 /* Only one segment is supported. */
1654 if (nsegs != 1) {
1655 bus_dmamap_unload(sc->mge_tx_dtag, mapp);
1656 m_freem(m0);
1657 return (-1);
1658 }
1659
1660 bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE);
1661
1662 /* Everything is ok, now we can send buffers */
1663 for (seg = 0; seg < nsegs; seg++) {
1664 dw->mge_desc->byte_count = segs[seg].ds_len;
1665 dw->mge_desc->buffer = segs[seg].ds_addr;
1666 dw->buffer = m0;
1667 dw->mge_desc->cmd_status = 0;
1668 if (seg == 0)
1669 mge_offload_setup_descriptor(sc, dw);
1670 dw->mge_desc->cmd_status |= MGE_TX_LAST | MGE_TX_FIRST |
1671 MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING |
1672 MGE_DMA_OWNED;
1673 }
1674
1675 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1676 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1677
1678 sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM;
1679 sc->tx_desc_used_count++;
1680 return (0);
1681 }
1682
1683 static void
1684 mge_tick(void *msc)
1685 {
1686 struct mge_softc *sc = msc;
1687
1688 KASSERT(sc->phy_attached == 1, ("mge_tick while PHY not attached"));
1689
1690 MGE_GLOBAL_LOCK(sc);
1691
1692 /* Check for TX timeout */
1693 mge_watchdog(sc);
1694
1695 mii_tick(sc->mii);
1696
1697 /* Check for media type change */
1698 if(sc->mge_media_status != sc->mii->mii_media.ifm_media)
1699 mge_ifmedia_upd(sc->ifp);
1700
1701 MGE_GLOBAL_UNLOCK(sc);
1702
1703 /* Schedule another timeout one second from now */
1704 callout_reset(&sc->wd_callout, hz, mge_tick, sc);
1705
1706 return;
1707 }
1708
1709 static void
1710 mge_watchdog(struct mge_softc *sc)
1711 {
1712 if_t ifp;
1713
1714 ifp = sc->ifp;
1715
1716 if (sc->wd_timer == 0 || --sc->wd_timer) {
1717 return;
1718 }
1719
1720 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1721 if_printf(ifp, "watchdog timeout\n");
1722
1723 mge_stop(sc);
1724 mge_init_locked(sc);
1725 }
1726
1727 static void
1728 mge_start(if_t ifp)
1729 {
1730 struct mge_softc *sc = if_getsoftc(ifp);
1731
1732 MGE_TRANSMIT_LOCK(sc);
1733
1734 mge_start_locked(ifp);
1735
1736 MGE_TRANSMIT_UNLOCK(sc);
1737 }
1738
1739 static void
1740 mge_start_locked(if_t ifp)
1741 {
1742 struct mge_softc *sc;
1743 struct mbuf *m0, *mtmp;
1744 uint32_t reg_val, queued = 0;
1745
1746 sc = if_getsoftc(ifp);
1747
1748 MGE_TRANSMIT_LOCK_ASSERT(sc);
1749
1750 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1751 IFF_DRV_RUNNING)
1752 return;
1753
1754 for (;;) {
1755 /* Get packet from the queue */
1756 m0 = if_dequeue(ifp);
1757 if (m0 == NULL)
1758 break;
1759
1760 if (m0->m_pkthdr.csum_flags & (CSUM_IP|CSUM_TCP|CSUM_UDP) ||
1761 m0->m_flags & M_VLANTAG) {
1762 if (M_WRITABLE(m0) == 0) {
1763 mtmp = m_dup(m0, M_NOWAIT);
1764 m_freem(m0);
1765 if (mtmp == NULL)
1766 continue;
1767 m0 = mtmp;
1768 }
1769 }
1770 /* The driver supports only one DMA fragment per frame. */
1771 if (m0->m_next != NULL) {
1772 mtmp = m_defrag(m0, M_NOWAIT);
1773 if (mtmp != NULL)
1774 m0 = mtmp;
1775 }
1776
1777 /* Check for free descriptors */
1778 if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) {
1779 if_sendq_prepend(ifp, m0);
1780 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
1781 break;
1782 }
1783
1784 if (mge_encap(sc, m0) != 0)
1785 break;
1786
1787 queued++;
1788 BPF_MTAP(ifp, m0);
1789 }
1790
1791 if (queued) {
1792 /* Enable transmitter and watchdog timer */
1793 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1794 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ);
1795 sc->wd_timer = 5;
1796 }
1797 }
1798
1799 static void
1800 mge_stop(struct mge_softc *sc)
1801 {
1802 if_t ifp;
1803 volatile uint32_t reg_val, status;
1804 struct mge_desc_wrapper *dw;
1805 struct mge_desc *desc;
1806 int count;
1807
1808 ifp = sc->ifp;
1809
1810 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1811 return;
1812
1813 /* Stop tick engine */
1814 callout_stop(&sc->wd_callout);
1815
1816 /* Disable interface */
1817 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
1818 sc->wd_timer = 0;
1819
1820 /* Disable interrupts */
1821 mge_intrs_ctrl(sc, 0);
1822
1823 /* Disable Rx and Tx */
1824 reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD);
1825 MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ);
1826 MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL);
1827
1828 /* Remove pending data from TX queue */
1829 while (sc->tx_desc_used_idx != sc->tx_desc_curr &&
1830 sc->tx_desc_used_count) {
1831 /* Get the descriptor */
1832 dw = &sc->mge_tx_desc[sc->tx_desc_used_idx];
1833 desc = dw->mge_desc;
1834 bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap,
1835 BUS_DMASYNC_POSTREAD);
1836
1837 /* Get descriptor status */
1838 status = desc->cmd_status;
1839
1840 if (status & MGE_DMA_OWNED)
1841 break;
1842
1843 sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) %
1844 MGE_TX_DESC_NUM;
1845 sc->tx_desc_used_count--;
1846
1847 bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap,
1848 BUS_DMASYNC_POSTWRITE);
1849 bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap);
1850
1851 m_freem(dw->buffer);
1852 dw->buffer = (struct mbuf*)NULL;
1853 }
1854
1855 /* Wait for end of transmission */
1856 count = 0x100000;
1857 while (count--) {
1858 reg_val = MGE_READ(sc, MGE_PORT_STATUS);
1859 if (!(reg_val & MGE_STATUS_TX_IN_PROG) &&
1860 (reg_val & MGE_STATUS_TX_FIFO_EMPTY))
1861 break;
1862 DELAY(100);
1863 }
1864
1865 if (count == 0)
1866 if_printf(ifp,
1867 "%s: timeout while waiting for end of transmission\n",
1868 __FUNCTION__);
1869
1870 reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL);
1871 reg_val &= ~(PORT_SERIAL_ENABLE);
1872 MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val);
1873 }
1874
1875 static int
1876 mge_suspend(device_t dev)
1877 {
1878
1879 device_printf(dev, "%s\n", __FUNCTION__);
1880 return (0);
1881 }
1882
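/*
 * Translate RX descriptor status bits into mbuf checksum flags: mark the IP
 * header checksum as verified when the controller validated it, and mark the
 * L4 (TCP/UDP) checksum as good for non-fragmented frames whose hardware
 * checksum check passed.
 */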
1883 static void
1884 mge_offload_process_frame(if_t ifp, struct mbuf *frame,
1885 uint32_t status, uint16_t bufsize)
1886 {
1887 int csum_flags = 0;
1888
1889 if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
1890 if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK))
1891 csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
1892
1893 if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 &&
1894 (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) &&
1895 (status & MGE_RX_L4_CSUM_OK)) {
1896 csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1897 frame->m_pkthdr.csum_data = 0xFFFF;
1898 }
1899
1900 frame->m_pkthdr.csum_flags = csum_flags;
1901 }
1902 }
1903
1904 static void
1905 mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw)
1906 {
1907 struct mbuf *m0 = dw->buffer;
1908 struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *);
1909 int csum_flags = m0->m_pkthdr.csum_flags;
1910 int cmd_status = 0;
1911 struct ip *ip;
1912 int ehlen, etype;
1913
1914 if (csum_flags != 0) {
1915 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1916 etype = ntohs(eh->evl_proto);
1917 ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1918 csum_flags |= MGE_TX_VLAN_TAGGED;
1919 } else {
1920 etype = ntohs(eh->evl_encap_proto);
1921 ehlen = ETHER_HDR_LEN;
1922 }
1923
1924 if (etype != ETHERTYPE_IP) {
1925 if_printf(sc->ifp,
1926 "TCP/IP Offload enabled for unsupported "
1927 "protocol!\n");
1928 return;
1929 }
1930
1931 ip = (struct ip *)(m0->m_data + ehlen);
1932 cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl);
1933 cmd_status |= MGE_TX_NOT_FRAGMENT;
1934 }
1935
1936 if (csum_flags & CSUM_IP)
1937 cmd_status |= MGE_TX_GEN_IP_CSUM;
1938
1939 if (csum_flags & CSUM_TCP)
1940 cmd_status |= MGE_TX_GEN_L4_CSUM;
1941
1942 if (csum_flags & CSUM_UDP)
1943 cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP;
1944
1945 dw->mge_desc->cmd_status |= cmd_status;
1946 }
1947
1948 static void
1949 mge_intrs_ctrl(struct mge_softc *sc, int enable)
1950 {
1951
1952 if (enable) {
1953 MGE_WRITE(sc, MGE_PORT_INT_MASK, MGE_PORT_INT_RXQ0 |
1954 MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0);
1955 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, MGE_PORT_INT_EXT_TXERR0 |
1956 MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR |
1957 MGE_PORT_INT_EXT_TXBUF0);
1958 } else {
1959 MGE_WRITE(sc, MGE_INT_CAUSE, 0x0);
1960 MGE_WRITE(sc, MGE_INT_MASK, 0x0);
1961
1962 MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0);
1963 MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0);
1964
1965 MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0);
1966 MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0);
1967 }
1968 }
1969
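/*
 * Table-driven CRC-8 (polynomial x^8 + x^2 + x + 1, i.e. 0x07) over the MAC
 * address, used to index the "other multicast" hash table.
 */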
1970 static uint8_t
1971 mge_crc8(uint8_t *data, int size)
1972 {
1973 uint8_t crc = 0;
1974 static const uint8_t ct[256] = {
1975 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15,
1976 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D,
1977 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65,
1978 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D,
1979 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5,
1980 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD,
1981 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85,
1982 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD,
1983 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2,
1984 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA,
1985 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2,
1986 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A,
1987 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32,
1988 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A,
1989 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42,
1990 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A,
1991 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C,
1992 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4,
1993 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC,
1994 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4,
1995 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C,
1996 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44,
1997 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C,
1998 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34,
1999 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B,
2000 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63,
2001 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B,
2002 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13,
2003 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB,
2004 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83,
2005 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB,
2006 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3
2007 };
2008
2009 while(size--)
2010 crc = ct[crc ^ *(data++)];
2011
2012 return(crc);
2013 }
2014
2015 struct mge_hash_maddr_ctx {
2016 uint32_t smt[MGE_MCAST_REG_NUMBER];
2017 uint32_t omt[MGE_MCAST_REG_NUMBER];
2018 };
2019
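/*
 * Classify one link-level multicast address.  Addresses whose first five
 * bytes are 01:00:5E:00:00 go into the "special" multicast table, indexed
 * directly by the last address byte; all other addresses go into the "other"
 * multicast table, indexed by mge_crc8() of the full address.  Each entry
 * enables reception on the default RX queue.
 */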
2020 static u_int
2021 mge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2022 {
2023 static const uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 };
2024 struct mge_hash_maddr_ctx *ctx = arg;
2025 static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2026 uint8_t *mac;
2027 int i;
2028
2029 mac = LLADDR(sdl);
2030 if (memcmp(mac, special, sizeof(special)) == 0) {
2031 i = mac[5];
2032 ctx->smt[i >> 2] |= v << ((i & 0x03) << 3);
2033 } else {
2034 i = mge_crc8(mac, ETHER_ADDR_LEN);
2035 ctx->omt[i >> 2] |= v << ((i & 0x03) << 3);
2036 }
2037 return (1);
2038 }
2039
2040 static void
2041 mge_setup_multicast(struct mge_softc *sc)
2042 {
2043 struct mge_hash_maddr_ctx ctx;
2044 if_t ifp = sc->ifp;
2045 static const uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1;
2046 int i;
2047
2048 if (if_getflags(ifp) & IFF_ALLMULTI) {
2049 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++)
2050 ctx.smt[i] = ctx.omt[i] =
2051 (v << 24) | (v << 16) | (v << 8) | v;
2052 } else {
2053 memset(&ctx, 0, sizeof(ctx));
2054 if_foreach_llmaddr(ifp, mge_hash_maddr, &ctx);
2055 }
2056
2057 for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) {
2058 MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), ctx.smt[i]);
2059 MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), ctx.omt[i]);
2060 }
2061 }
2062
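/*
 * Program the RX interrupt coalescing time: clamp the requested value to the
 * version-dependent maximum and encode it into the SDMA configuration
 * register via mge_rx_ipg().  mge_set_txic() does the same for TX using the
 * TX FIFO urgent threshold register.
 */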
2063 static void
2064 mge_set_rxic(struct mge_softc *sc)
2065 {
2066 uint32_t reg;
2067
2068 if (sc->rx_ic_time > sc->mge_rx_ipg_max)
2069 sc->rx_ic_time = sc->mge_rx_ipg_max;
2070
2071 reg = MGE_READ(sc, MGE_SDMA_CONFIG);
2072 reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver);
2073 reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver);
2074 MGE_WRITE(sc, MGE_SDMA_CONFIG, reg);
2075 }
2076
2077 static void
2078 mge_set_txic(struct mge_softc *sc)
2079 {
2080 uint32_t reg;
2081
2082 if (sc->tx_ic_time > sc->mge_tfut_ipg_max)
2083 sc->tx_ic_time = sc->mge_tfut_ipg_max;
2084
2085 reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH);
2086 reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver);
2087 reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver);
2088 MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg);
2089 }
2090
2091 static int
2092 mge_sysctl_ic(SYSCTL_HANDLER_ARGS)
2093 {
2094 struct mge_softc *sc = (struct mge_softc *)arg1;
2095 uint32_t time;
2096 int error;
2097
2098 time = (arg2 == MGE_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time;
2099 error = sysctl_handle_int(oidp, &time, 0, req);
2100 if (error != 0)
2101 return(error);
2102
2103 MGE_GLOBAL_LOCK(sc);
2104 if (arg2 == MGE_IC_RX) {
2105 sc->rx_ic_time = time;
2106 mge_set_rxic(sc);
2107 } else {
2108 sc->tx_ic_time = time;
2109 mge_set_txic(sc);
2110 }
2111 MGE_GLOBAL_UNLOCK(sc);
2112
2113 return(0);
2114 }
2115
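/*
 * Expose the interrupt coalescing knobs as sysctls.  With the standard
 * per-device sysctl tree these should appear as
 * dev.mge.<unit>.int_coal.rx_time and dev.mge.<unit>.int_coal.tx_time, e.g.:
 *
 *	sysctl dev.mge.0.int_coal.rx_time=768
 */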
2116 static void
2117 mge_add_sysctls(struct mge_softc *sc)
2118 {
2119 struct sysctl_ctx_list *ctx;
2120 struct sysctl_oid_list *children;
2121 struct sysctl_oid *tree;
2122
2123 ctx = device_get_sysctl_ctx(sc->dev);
2124 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
2125 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
2126 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "MGE Interrupts coalescing");
2127 children = SYSCTL_CHILDREN(tree);
2128
2129 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
2130 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_RX,
2131 mge_sysctl_ic, "I", "IC RX time threshold");
2132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
2133 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, MGE_IC_TX,
2134 mge_sysctl_ic, "I", "IC TX time threshold");
2135 }
2136
2137 static int
2138 mge_mdio_writereg(device_t dev, int phy, int reg, int value)
2139 {
2140
2141 mv_write_ge_smi(dev, phy, reg, value);
2142
2143 return (0);
2144 }
2145
2146
2147 static int
2148 mge_mdio_readreg(device_t dev, int phy, int reg)
2149 {
2150 int ret;
2151
2152 ret = mv_read_ge_smi(dev, phy, reg);
2153
2154 return (ret);
2155 }
2156