1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2022 Soren Schmidt <sos@deepcore.dk>
5 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $
30 */
31
32 /*
33 * DesignWare Ethernet Quality-of-Service controller
34 */
35
36 #include "opt_platform.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/mutex.h>
42 #include <sys/rman.h>
43 #include <sys/endian.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/callout.h>
47 #include <sys/socket.h>
48 #include <sys/sockio.h>
49 #include <sys/mbuf.h>
51 #include <machine/bus.h>
52
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58 #include <net/bpf.h>
59
60 #include <dev/mii/mii.h>
61 #include <dev/mii/miivar.h>
62
63 #include "miibus_if.h"
64 #include "if_eqos_if.h"
65
66 #ifdef FDT
67 #include <dev/ofw/openfirm.h>
68 #include <dev/ofw/ofw_bus.h>
69 #include <dev/ofw/ofw_bus_subr.h>
70 #include <dev/clk/clk.h>
71 #endif
72
73 #include <dev/eqos/if_eqos_reg.h>
74 #include <dev/eqos/if_eqos_var.h>
75
76 #define DESC_BOUNDARY (1ULL << 32)
77 #define DESC_ALIGN sizeof(struct eqos_dma_desc)
78 #define DESC_OFFSET(n) ((n) * sizeof(struct eqos_dma_desc))
79
80 #define TX_DESC_COUNT EQOS_DMA_DESC_COUNT
81 #define TX_DESC_SIZE (TX_DESC_COUNT * DESC_ALIGN)
82 #define TX_MAX_SEGS (TX_DESC_COUNT / 2)
83 #define TX_NEXT(n)  (((n) + 1) % TX_DESC_COUNT)
84 #define TX_QUEUED(h, t) ((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)
85
86 #define RX_DESC_COUNT EQOS_DMA_DESC_COUNT
87 #define RX_DESC_SIZE (RX_DESC_COUNT * DESC_ALIGN)
88 #define RX_NEXT(n) (((n) + 1) % RX_DESC_COUNT)
89
90 #define MII_BUSY_RETRY 1000
91 #define WATCHDOG_TIMEOUT_SECS 3
92
93 #define EQOS_LOCK(sc) mtx_lock(&(sc)->lock)
94 #define EQOS_UNLOCK(sc) mtx_unlock(&(sc)->lock)
95 #define EQOS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED)
96
97 #define RD4(sc, o) bus_read_4(sc->res[EQOS_RES_MEM], (o))
98 #define WR4(sc, o, v) bus_write_4(sc->res[EQOS_RES_MEM], (o), (v))
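/*
 * Ring index arithmetic: TX_QUEUED(head, tail) is the number of TX
 * descriptors currently in use (handed to the DMA engine but not yet
 * reclaimed by eqos_txintr()), computed modulo the ring size so it also
 * works across wrap-around.  For example, assuming a 256-entry ring,
 * head = 5 and tail = 250 give (5 - 250 + 256) % 256 = 11 queued
 * descriptors.  TX_MAX_SEGS is capped at half the ring so a single
 * scatter-gathered packet can never exhaust the TX ring on its own.
 */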
99
100
101 static struct resource_spec eqos_spec[] = {
102 { SYS_RES_MEMORY, 0, RF_ACTIVE },
103 { SYS_RES_IRQ, 0, RF_ACTIVE },
104 { -1, 0 }
105 };
106
107 static void eqos_tick(void *softc);
108
109
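/*
 * MDIO access: a transaction starts by writing MAC_MDIO_ADDRESS with the
 * CSR clock range, the PHY address (PA), the register number (RDA), the
 * operation (GOC read or write) and the GMII Busy bit (GB).  The MAC
 * clears GB when the cycle completes; for reads the result is then in the
 * low 16 bits of MAC_MDIO_DATA.  The helpers below poll GB with a bounded
 * retry count rather than sleeping, since they may be called from
 * eqos_tick() with the driver lock held.
 */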
110 static int
111 eqos_miibus_readreg(device_t dev, int phy, int reg)
112 {
113 struct eqos_softc *sc = device_get_softc(dev);
114 uint32_t addr;
115 int retry, val;
116
117 addr = sc->csr_clock_range |
118 (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
119 (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
120 GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB;
121 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);
122
123 DELAY(100);
124
125 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
126 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
127 if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB)) {
128 val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;
129 break;
130 }
131 DELAY(10);
132 }
133 if (!retry) {
134 device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
135 phy, reg);
136 return (ETIMEDOUT);
137 }
138 return (val);
139 }
140
141 static int
142 eqos_miibus_writereg(device_t dev, int phy, int reg, int val)
143 {
144 struct eqos_softc *sc = device_get_softc(dev);
145 uint32_t addr;
146 int retry;
147
148 WR4(sc, GMAC_MAC_MDIO_DATA, val);
149
150 addr = sc->csr_clock_range |
151 (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
152 (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
153 GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB;
154 WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);
155
156 DELAY(100);
157
158 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
159 addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
160 if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB))
161 break;
162 DELAY(10);
163 }
164 if (!retry) {
165 device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
166 phy, reg);
167 return (ETIMEDOUT);
168 }
169 return (0);
170 }
171
172 static void
173 eqos_miibus_statchg(device_t dev)
174 {
175 struct eqos_softc *sc = device_get_softc(dev);
176 struct mii_data *mii = device_get_softc(sc->miibus);
177 uint32_t reg;
178
179 EQOS_ASSERT_LOCKED(sc);
180
181 if (mii->mii_media_status & IFM_ACTIVE)
182 sc->link_up = true;
183 else
184 sc->link_up = false;
185
186 reg = RD4(sc, GMAC_MAC_CONFIGURATION);
187
188 switch (IFM_SUBTYPE(mii->mii_media_active)) {
189 case IFM_10_T:
190 reg |= GMAC_MAC_CONFIGURATION_PS;
191 reg &= ~GMAC_MAC_CONFIGURATION_FES;
192 break;
193 case IFM_100_TX:
194 reg |= GMAC_MAC_CONFIGURATION_PS;
195 reg |= GMAC_MAC_CONFIGURATION_FES;
196 break;
197 case IFM_1000_T:
198 case IFM_1000_SX:
199 reg &= ~GMAC_MAC_CONFIGURATION_PS;
200 reg &= ~GMAC_MAC_CONFIGURATION_FES;
201 break;
202 case IFM_2500_T:
203 case IFM_2500_SX:
204 reg &= ~GMAC_MAC_CONFIGURATION_PS;
205 reg |= GMAC_MAC_CONFIGURATION_FES;
206 break;
207 default:
208 sc->link_up = false;
209 return;
210 }
211
212 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX))
213 reg |= GMAC_MAC_CONFIGURATION_DM;
214 else
215 reg &= ~GMAC_MAC_CONFIGURATION_DM;
216
217 WR4(sc, GMAC_MAC_CONFIGURATION, reg);
218
219 IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));
220
221 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
222 }
223
224 static void
225 eqos_media_status(if_t ifp, struct ifmediareq *ifmr)
226 {
227 struct eqos_softc *sc = if_getsoftc(ifp);
228 struct mii_data *mii = device_get_softc(sc->miibus);
229
230 EQOS_LOCK(sc);
231 mii_pollstat(mii);
232 ifmr->ifm_active = mii->mii_media_active;
233 ifmr->ifm_status = mii->mii_media_status;
234 EQOS_UNLOCK(sc);
235 }
236
237 static int
238 eqos_media_change(if_t ifp)
239 {
240 struct eqos_softc *sc = if_getsoftc(ifp);
241 int error;
242
243 EQOS_LOCK(sc);
244 error = mii_mediachg(device_get_softc(sc->miibus));
245 EQOS_UNLOCK(sc);
246 return (error);
247 }
248
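/*
 * Transmit descriptor layout (one descriptor is four 32-bit words):
 * des0/des1 hold the low/high halves of the buffer physical address,
 * des2 the buffer length (plus IOC to request an interrupt on the last
 * segment), and des3 the control bits -- FD/LD mark the first/last
 * segment of a packet, OWN hands the descriptor to the DMA engine --
 * together with the total frame length.
 */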
249 static void
250 eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags,
251 bus_addr_t paddr, u_int len, u_int total_len)
252 {
253 uint32_t tdes2, tdes3;
254
255 if (!paddr || !len) {
256 tdes2 = 0;
257 tdes3 = flags;
258 } else {
259 tdes2 = (flags & EQOS_TDES3_LD) ? EQOS_TDES2_IOC : 0;
260 tdes3 = flags;
261 }
262 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
263 sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr);
264 sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
265 sc->tx.desc_ring[index].des2 = htole32(tdes2 | len);
266 sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len);
267 }
268
269 static int
270 eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m)
271 {
272 bus_dma_segment_t segs[TX_MAX_SEGS];
273 int first = sc->tx.head;
274 int error, nsegs, idx;
275 uint32_t flags;
276
277 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
278 sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
279 if (error == EFBIG) {
280 struct mbuf *mb;
281
282 device_printf(sc->dev, "TX packet too big, trying defrag\n");
283 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
284 if (!(mb = m_defrag(m, M_NOWAIT)))
285 return (ENOMEM);
286 m = mb;
287 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
288 sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
289 }
290 if (error)
291 return (ENOMEM);
292
293 if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) {
294 bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
295 device_printf(sc->dev, "TX packet no more queue space\n");
296 return (ENOMEM);
297 }
298
299 bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map,
300 BUS_DMASYNC_PREWRITE);
301
302 sc->tx.buf_map[first].mbuf = m;
303
304 for (flags = EQOS_TDES3_FD, idx = 0; idx < nsegs; idx++) {
305 if (idx == (nsegs - 1))
306 flags |= EQOS_TDES3_LD;
307 eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
308 segs[idx].ds_len, m->m_pkthdr.len);
309 flags &= ~EQOS_TDES3_FD;
310 flags |= EQOS_TDES3_OWN;
311 sc->tx.head = TX_NEXT(sc->tx.head);
312 }
313
314 /*
315 * Defer setting OWN bit on the first descriptor
316 * until all descriptors have been updated
317 */
318 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
319 sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN);
320
321 return (0);
322 }
323
324 static void
325 eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
326 {
327
328 sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr);
329 sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
330 sc->rx.desc_ring[index].des2 = htole32(0);
331 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE);
332 sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC |
333 EQOS_RDES3_BUF1V);
334 }
335
336 static int
337 eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m)
338 {
339 struct bus_dma_segment seg;
340 int error, nsegs;
341
342 m_adj(m, ETHER_ALIGN);
343
344 error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
345 sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
346 if (error)
347 return (error);
348
349 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
350 BUS_DMASYNC_PREREAD);
351
352 sc->rx.buf_map[index].mbuf = m;
353 eqos_setup_rxdesc(sc, index, seg.ds_addr);
354
355 return (0);
356 }
357
358 static struct mbuf *
359 eqos_alloc_mbufcl(struct eqos_softc *sc)
360 {
361 struct mbuf *m;
362
363 if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)))
364 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
365 return (m);
366 }
367
368 static void
369 eqos_enable_intr(struct eqos_softc *sc)
370 {
371
372 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE,
373 GMAC_DMA_CHAN0_INTR_ENABLE_NIE | GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
374 GMAC_DMA_CHAN0_INTR_ENABLE_FBE | GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
375 GMAC_DMA_CHAN0_INTR_ENABLE_TIE);
376 }
377
378 static void
379 eqos_disable_intr(struct eqos_softc *sc)
380 {
381
382 WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0);
383 }
384
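/*
 * Multicast filtering uses the 64-bit hash table spread across
 * GMAC_MAC_HASH_TABLE_REG0/REG1.  eqos_hash_maddr() reduces the
 * little-endian CRC32 of each multicast address to a 6-bit table index
 * (complement and bit-reverse the low CRC bits, then keep the top six)
 * and sets the corresponding bit; eqos_bitrev32() provides the bit
 * reversal.
 */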
385 static uint32_t
386 eqos_bitrev32(uint32_t x)
387 {
388
389 x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
390 x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
391 x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
392 x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
393 return ((x >> 16) | (x << 16));
394 }
395
396 static u_int
397 eqos_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
398 {
399 uint32_t crc, *hash = arg;
400
401 crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
402 crc &= 0x7f;
403 crc = eqos_bitrev32(~crc) >> 26;
404 hash[crc >> 5] |= 1 << (crc & 0x1f);
405 return (1);
406 }
407
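/*
 * Receive filter setup: PR passes all frames (promiscuous), PM passes all
 * multicast frames, and HMC enables hash-based multicast matching using
 * the table built by eqos_hash_maddr().  The station address itself is
 * matched by the MAC_ADDRESS0 perfect filter programmed below.
 */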
408 static void
409 eqos_setup_rxfilter(struct eqos_softc *sc)
410 {
411 if_t ifp = sc->ifp;
412 uint32_t pfil, hash[2];
413 const uint8_t *eaddr;
414 uint32_t val;
415
416 EQOS_ASSERT_LOCKED(sc);
417
418 pfil = RD4(sc, GMAC_MAC_PACKET_FILTER);
419 pfil &= ~(GMAC_MAC_PACKET_FILTER_PR |
420 GMAC_MAC_PACKET_FILTER_PM |
421 GMAC_MAC_PACKET_FILTER_HMC |
422 GMAC_MAC_PACKET_FILTER_PCF_MASK);
423 hash[0] = hash[1] = 0xffffffff;
424
425 if ((if_getflags(ifp) & IFF_PROMISC)) {
426 pfil |= GMAC_MAC_PACKET_FILTER_PR |
427 GMAC_MAC_PACKET_FILTER_PCF_ALL;
428 } else if ((if_getflags(ifp) & IFF_ALLMULTI)) {
429 pfil |= GMAC_MAC_PACKET_FILTER_PM;
430 } else {
431 hash[0] = hash[1] = 0;
432 pfil |= GMAC_MAC_PACKET_FILTER_HMC;
433 if_foreach_llmaddr(ifp, eqos_hash_maddr, hash);
434 }
435
436 /* Write our unicast address */
437 eaddr = if_getlladdr(ifp);
438 val = eaddr[4] | (eaddr[5] << 8);
439 WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val);
440 val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
441 (eaddr[3] << 24);
442 WR4(sc, GMAC_MAC_ADDRESS0_LOW, val);
443
444 /* Multicast hash filters */
445 WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]);
446 WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]);
447
448 /* Packet filter config */
449 WR4(sc, GMAC_MAC_PACKET_FILTER, pfil);
450 }
451
452 static int
453 eqos_reset(struct eqos_softc *sc)
454 {
455 uint32_t val;
456 int retry;
457
458 WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
459 for (retry = 5000; retry > 0; retry--) {
460 DELAY(1000);
461 val = RD4(sc, GMAC_DMA_MODE);
462 if (!(val & GMAC_DMA_MODE_SWR))
463 return (0);
464 }
465 return (ETIMEDOUT);
466 }
467
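/*
 * Each DMA channel is programmed with the ring base address (split into
 * high and low words), the ring length minus one, and a tail pointer
 * (*_END_ADDR).  The tail pointer tells the channel how far into the ring
 * it may fetch descriptors; eqos_start_locked() and eqos_rxintr() advance
 * it as new descriptors are handed to the hardware.
 */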
468 static void
469 eqos_init_rings(struct eqos_softc *sc)
470 {
471
472 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI,
473 (uint32_t)(sc->tx.desc_ring_paddr >> 32));
474 WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR,
475 (uint32_t)sc->tx.desc_ring_paddr);
476 WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);
477
478 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI,
479 (uint32_t)(sc->rx.desc_ring_paddr >> 32));
480 WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR,
481 (uint32_t)sc->rx.desc_ring_paddr);
482 WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);
483
484 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
485 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT));
486 }
487
488 static void
489 eqos_init(void *if_softc)
490 {
491 struct eqos_softc *sc = if_softc;
492 if_t ifp = sc->ifp;
493 struct mii_data *mii = device_get_softc(sc->miibus);
494 uint32_t val, mtl_tx_val, mtl_rx_val;
495
496 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
497 return;
498
499 EQOS_LOCK(sc);
500
501 eqos_init_rings(sc);
502
503 eqos_setup_rxfilter(sc);
504
505 WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
506
507 /* Enable transmit and receive DMA */
508 val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
509 val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
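	/*
	 * DSL (descriptor skip length) is the gap between consecutive
	 * descriptors in bus-width words; with the 16-byte descriptors used
	 * here it works out to (16 - 16) / 8 = 0, i.e. a packed ring.
	 */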
510 val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
511 if (sc->pblx8)
512 val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
513 WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
514 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
515 if (sc->txpbl > 0)
516 val |= (sc->txpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT);
517 val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
518 val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
519 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
520 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
521 if (sc->rxpbl > 0)
522 val |= (sc->rxpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT);
523 val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK;
524 val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
525 val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
526 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);
527
528 /* Disable counters */
529 WR4(sc, GMAC_MMC_CONTROL,
530 GMAC_MMC_CONTROL_CNTFREEZ |
531 GMAC_MMC_CONTROL_CNTPRST |
532 GMAC_MMC_CONTROL_CNTPRSTLVL);
533
534 /* Configure operation modes */
535 if (sc->thresh_dma_mode) {
536 mtl_tx_val = sc->ttc;
537 mtl_rx_val = sc->rtc;
538 } else {
539 mtl_tx_val = GMAC_MTL_TXQ0_OPERATION_MODE_TSF;
540 mtl_rx_val = GMAC_MTL_RXQ0_OPERATION_MODE_RSF;
541 }
542
543 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
544 mtl_tx_val |
545 GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
546 WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
547 mtl_rx_val |
548 GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
549 GMAC_MTL_RXQ0_OPERATION_MODE_FUP);
550
551 /* Enable flow control */
552 val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL);
553 val |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
554 val |= GMAC_MAC_Q0_TX_FLOW_CTRL_TFE;
555 WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val);
556 val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL);
557 val |= GMAC_MAC_RX_FLOW_CTRL_RFE;
558 WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val);
559
560 /* set RX queue mode. must be in DCB mode. */
561 WR4(sc, GMAC_RXQ_CTRL0, (GMAC_RXQ_CTRL0_EN_MASK << 16) |
562 GMAC_RXQ_CTRL0_EN_DCB);
563
564 /* Enable transmitter and receiver */
565 val = RD4(sc, GMAC_MAC_CONFIGURATION);
566 val |= GMAC_MAC_CONFIGURATION_BE;
567 val |= GMAC_MAC_CONFIGURATION_JD;
568 val |= GMAC_MAC_CONFIGURATION_JE;
569 val |= GMAC_MAC_CONFIGURATION_DCRS;
570 val |= GMAC_MAC_CONFIGURATION_TE;
571 val |= GMAC_MAC_CONFIGURATION_RE;
572 WR4(sc, GMAC_MAC_CONFIGURATION, val);
573
574 eqos_enable_intr(sc);
575
576 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
577
578 mii_mediachg(mii);
579 callout_reset(&sc->callout, hz, eqos_tick, sc);
580
581 EQOS_UNLOCK(sc);
582 }
583
584 static void
585 eqos_start_locked(if_t ifp)
586 {
587 struct eqos_softc *sc = if_getsoftc(ifp);
588 struct mbuf *m;
589 int pending = 0;
590
591 if (!sc->link_up)
592 return;
593
594 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
595 IFF_DRV_RUNNING)
596 return;
597
598 while (true) {
599 if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
600 TX_DESC_COUNT - TX_MAX_SEGS) {
601 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
602 break;
603 }
604
605 if (!(m = if_dequeue(ifp)))
606 break;
607
608 if (eqos_setup_txbuf(sc, m)) {
609 if_sendq_prepend(ifp, m);
610 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
611 break;
612 }
613 bpf_mtap_if(ifp, m);
614 pending++;
615 }
616
617 if (pending) {
618 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
619 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
620
621 /* Start and run TX DMA */
622 WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR,
623 (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));
624 sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS;
625 }
626 }
627
628 static void
629 eqos_start(if_t ifp)
630 {
631 struct eqos_softc *sc = if_getsoftc(ifp);
632
633 EQOS_LOCK(sc);
634 eqos_start_locked(ifp);
635 EQOS_UNLOCK(sc);
636 }
637
638 static void
639 eqos_stop(struct eqos_softc *sc)
640 {
641 if_t ifp = sc->ifp;
642 uint32_t val;
643 int retry;
644
645 EQOS_LOCK(sc);
646
647 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
648
649 callout_stop(&sc->callout);
650
651 /* Disable receiver */
652 val = RD4(sc, GMAC_MAC_CONFIGURATION);
653 val &= ~GMAC_MAC_CONFIGURATION_RE;
654 WR4(sc, GMAC_MAC_CONFIGURATION, val);
655
656 /* Stop receive DMA */
657 val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
658 val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START;
659 WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);
660
661 /* Stop transmit DMA */
662 val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
663 val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START;
664 WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
665
666 /* Flush data in the TX FIFO */
667 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
668 val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;
669 WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
670 for (retry = 10000; retry > 0; retry--) {
671 val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
672 if (!(val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ))
673 break;
674 DELAY(10);
675 }
676 if (!retry)
677 device_printf(sc->dev, "timeout flushing TX queue\n");
678
679 /* Disable transmitter */
680 val = RD4(sc, GMAC_MAC_CONFIGURATION);
681 val &= ~GMAC_MAC_CONFIGURATION_TE;
682 WR4(sc, GMAC_MAC_CONFIGURATION, val);
683
684 eqos_disable_intr(sc);
685
686 EQOS_UNLOCK(sc);
687 }
688
689 static void
690 eqos_rxintr(struct eqos_softc *sc)
691 {
692 if_t ifp = sc->ifp;
693 struct mbuf *m;
694 uint32_t rdes3;
695 int error, length;
696
697 while (true) {
698 rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3);
699 if ((rdes3 & EQOS_RDES3_OWN))
700 break;
701
702 if (rdes3 & (EQOS_RDES3_OE | EQOS_RDES3_RE))
703 printf("Receive error rdes3=%08x\n", rdes3);
704
705 bus_dmamap_sync(sc->rx.buf_tag,
706 sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD);
707 bus_dmamap_unload(sc->rx.buf_tag,
708 sc->rx.buf_map[sc->rx.head].map);
709
710 length = rdes3 & EQOS_RDES3_LENGTH_MASK;
711 if (length) {
712 m = sc->rx.buf_map[sc->rx.head].mbuf;
713 m->m_pkthdr.rcvif = ifp;
714 m->m_pkthdr.len = length;
715 m->m_len = length;
716 m->m_nextpkt = NULL;
717
718 /* Remove trailing FCS */
719 m_adj(m, -ETHER_CRC_LEN);
720
721 EQOS_UNLOCK(sc);
722 if_input(ifp, m);
723 EQOS_LOCK(sc);
724 }
725
726 if ((m = eqos_alloc_mbufcl(sc))) {
727 if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m)))
728 printf("ERROR: Hole in RX ring!!\n");
729 }
730 else
731 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
732
733 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
734
735 WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
736 (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head));
737
738 sc->rx.head = RX_NEXT(sc->rx.head);
739 }
740 }
741
742 static void
743 eqos_txintr(struct eqos_softc *sc)
744 {
745 if_t ifp = sc->ifp;
746 struct eqos_bufmap *bmap;
747 uint32_t tdes3;
748
749 EQOS_ASSERT_LOCKED(sc);
750
751 while (sc->tx.tail != sc->tx.head) {
752 tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
753 if ((tdes3 & EQOS_TDES3_OWN))
754 break;
755
756 bmap = &sc->tx.buf_map[sc->tx.tail];
757 if (bmap->mbuf) {
758 bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
759 BUS_DMASYNC_POSTWRITE);
760 bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
761 m_freem(bmap->mbuf);
762 bmap->mbuf = NULL;
763 }
764
765 eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);
766
767 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
768
769 /* Last descriptor in a packet contains DMA status */
770 if ((tdes3 & EQOS_TDES3_LD)) {
771 if ((tdes3 & EQOS_TDES3_DE)) {
772 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
773 } else if ((tdes3 & EQOS_TDES3_ES)) {
774 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
775 } else {
776 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
777 }
778 }
779 sc->tx.tail = TX_NEXT(sc->tx.tail);
780 }
781 if (sc->tx.tail == sc->tx.head)
782 sc->tx_watchdog = 0;
783 eqos_start_locked(sc->ifp);
784 }
785
786 static void
787 eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status)
788 {
789 uint32_t mtl_istat = 0;
790
791 if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS)) {
792 uint32_t mtl_clear = 0;
793
794 mtl_istat = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS);
795 if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS)) {
796 mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS;
797 }
798 if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS)) {
799 mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS;
800 }
801 if (mtl_clear) {
802 mtl_clear |= (mtl_istat &
803 (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE |
804 GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE));
805 WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, mtl_clear);
806 }
807 }
808 if (bootverbose)
809 device_printf(sc->dev,
810 "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, "
811 "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n",
812 mtl_status, mtl_istat);
813 }
814
815 static void
816 eqos_tick(void *softc)
817 {
818 struct eqos_softc *sc = softc;
819 struct mii_data *mii = device_get_softc(sc->miibus);
820 bool link_status;
821
822 EQOS_ASSERT_LOCKED(sc);
823
824 if (sc->tx_watchdog > 0)
825 if (!--sc->tx_watchdog) {
826 device_printf(sc->dev, "watchdog timeout\n");
827 eqos_txintr(sc);
828 }
829
830 link_status = sc->link_up;
831 mii_tick(mii);
832 if (sc->link_up && !link_status)
833 eqos_start_locked(sc->ifp);
834
835 callout_reset(&sc->callout, hz, eqos_tick, sc);
836 }
837
838 static void
839 eqos_intr(void *arg)
840 {
841 struct eqos_softc *sc = arg;
842 uint32_t mac_status, mtl_status, dma_status, rx_tx_status;
843
844 mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS);
845 mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE);
846
847 if (mac_status)
848 device_printf(sc->dev, "MAC interrupt\n");
849
850 if ((mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS)))
851 eqos_intr_mtl(sc, mtl_status);
852
853 dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS);
854 dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE);
855
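	/*
	 * The DMA channel status bits are write-one-to-clear; acknowledge
	 * the events we are about to service by writing them back.
	 */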
856 if (dma_status)
857 WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status);
858
859 EQOS_LOCK(sc);
860
861 if (dma_status & GMAC_DMA_CHAN0_STATUS_RI)
862 eqos_rxintr(sc);
863
864 if (dma_status & GMAC_DMA_CHAN0_STATUS_TI)
865 eqos_txintr(sc);
866
867 EQOS_UNLOCK(sc);
868
869 if (!(mac_status | mtl_status | dma_status)) {
870 device_printf(sc->dev,
871 "spurious interrupt mac=%08x mtl=%08x dma=%08x\n",
872 RD4(sc, GMAC_MAC_INTERRUPT_STATUS),
873 RD4(sc, GMAC_MTL_INTERRUPT_STATUS),
874 RD4(sc, GMAC_DMA_CHAN0_STATUS));
875 }
876 if ((rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS)))
877 device_printf(sc->dev, "RX/TX status interrupt\n");
878 }
879
880 static int
881 eqos_ioctl(if_t ifp, u_long cmd, caddr_t data)
882 {
883 struct eqos_softc *sc = if_getsoftc(ifp);
884 struct ifreq *ifr = (struct ifreq *)data;
885 struct mii_data *mii;
886 int flags, mask;
887 int error = 0;
888
889 switch (cmd) {
890 case SIOCSIFFLAGS:
891 if (if_getflags(ifp) & IFF_UP) {
892 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
893 flags = if_getflags(ifp);
894 if ((flags & (IFF_PROMISC|IFF_ALLMULTI))) {
895 EQOS_LOCK(sc);
896 eqos_setup_rxfilter(sc);
897 EQOS_UNLOCK(sc);
898 }
899 }
900 else {
901 eqos_init(sc);
902 }
903 }
904 else {
905 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
906 eqos_stop(sc);
907 }
908 break;
909
910 case SIOCADDMULTI:
911 case SIOCDELMULTI:
912 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
913 EQOS_LOCK(sc);
914 eqos_setup_rxfilter(sc);
915 EQOS_UNLOCK(sc);
916 }
917 break;
918
919 case SIOCSIFMEDIA:
920 case SIOCGIFMEDIA:
921 mii = device_get_softc(sc->miibus);
922 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
923 break;
924
925 case SIOCSIFCAP:
926 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
927 if (mask & IFCAP_VLAN_MTU)
928 if_togglecapenable(ifp, IFCAP_VLAN_MTU);
929 if (mask & IFCAP_RXCSUM)
930 if_togglecapenable(ifp, IFCAP_RXCSUM);
931 if (mask & IFCAP_TXCSUM)
932 if_togglecapenable(ifp, IFCAP_TXCSUM);
933 if ((if_getcapenable(ifp) & IFCAP_TXCSUM))
934 if_sethwassistbits(ifp,
935 CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
936 else
937 if_sethwassistbits(ifp,
938 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
939 break;
940
941 default:
942 error = ether_ioctl(ifp, cmd, data);
943 break;
944 }
945
946 return (error);
947 }
948
949 static void
950 eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr)
951 {
952 uint32_t maclo, machi;
953
954 maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW));
955 machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF);
956
957 /* If no valid MAC address found, generate a random one */
958 if (maclo == 0xffffffff && machi == 0xffff) {
959 maclo = 0xf2 | (arc4random() & 0xffff0000);
960 machi = arc4random() & 0x0000ffff;
961 }
962 eaddr[0] = maclo & 0xff;
963 eaddr[1] = (maclo >> 8) & 0xff;
964 eaddr[2] = (maclo >> 16) & 0xff;
965 eaddr[3] = (maclo >> 24) & 0xff;
966 eaddr[4] = machi & 0xff;
967 eaddr[5] = (machi >> 8) & 0xff;
968 }
969
970 static void
971 eqos_axi_configure(struct eqos_softc *sc)
972 {
973 uint32_t val;
974
975 val = RD4(sc, GMAC_DMA_SYSBUS_MODE);
976
977 /* Max Write Outstanding Req Limit */
978 val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK;
979 val |= 0x03 << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT;
980
981 /* Max Read Outstanding Req Limit */
982 val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK;
983 val |= 0x07 << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT;
984
985 /* Allowed burst lengths */
986 val |= GMAC_DMA_SYSBUS_MODE_BLEN16;
987 val |= GMAC_DMA_SYSBUS_MODE_BLEN8;
988 val |= GMAC_DMA_SYSBUS_MODE_BLEN4;
989
990 /* Fixed Burst Length */
991 val |= GMAC_DMA_SYSBUS_MODE_MB;
992
993 WR4(sc, GMAC_DMA_SYSBUS_MODE, val);
994 }
995
996 static void
997 eqos_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
998 {
999
1000 if (!error)
1001 *(bus_addr_t *)arg = segs[0].ds_addr;
1002 }
1003
1004 static int
1005 eqos_setup_dma(struct eqos_softc *sc)
1006 {
1007 struct mbuf *m;
1008 int error, i;
1009
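	/*
	 * Both descriptor rings are carved out of single coherent DMA
	 * allocations, aligned to the descriptor size and bounded by
	 * DESC_BOUNDARY so a ring never crosses a 4 GB boundary: the high
	 * word of the ring base address is programmed only once per channel.
	 */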
1010 /* Set up TX descriptor ring, descriptors, and dma maps */
1011 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1012 DESC_ALIGN, DESC_BOUNDARY,
1013 BUS_SPACE_MAXADDR_32BIT,
1014 BUS_SPACE_MAXADDR, NULL, NULL,
1015 TX_DESC_SIZE, 1, TX_DESC_SIZE, 0,
1016 NULL, NULL, &sc->tx.desc_tag))) {
1017 device_printf(sc->dev, "could not create TX ring DMA tag\n");
1018 return (error);
1019 }
1020
1021 if ((error = bus_dmamem_alloc(sc->tx.desc_tag,
1022 (void**)&sc->tx.desc_ring,
1023 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
1024 &sc->tx.desc_map))) {
1025 device_printf(sc->dev,
1026 "could not allocate TX descriptor ring.\n");
1027 return (error);
1028 }
1029
1030 if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
1031 sc->tx.desc_ring,
1032 TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) {
1033 device_printf(sc->dev,
1034 "could not load TX descriptor ring map.\n");
1035 return (error);
1036 }
1037
1038 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1039 BUS_SPACE_MAXADDR_32BIT,
1040 BUS_SPACE_MAXADDR, NULL, NULL,
1041 MCLBYTES*TX_MAX_SEGS, TX_MAX_SEGS,
1042 MCLBYTES, 0, NULL, NULL,
1043 &sc->tx.buf_tag))) {
1044 device_printf(sc->dev, "could not create TX buffer DMA tag.\n");
1045 return (error);
1046 }
1047
1048 for (i = 0; i < TX_DESC_COUNT; i++) {
1049 if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT,
1050 &sc->tx.buf_map[i].map))) {
1051 device_printf(sc->dev, "cannot create TX buffer map\n");
1052 return (error);
1053 }
1054 eqos_setup_txdesc(sc, i, EQOS_TDES3_OWN, 0, 0, 0);
1055 }
1056
1057 /* Set up RX descriptor ring, descriptors, dma maps, and mbufs */
1058 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1059 DESC_ALIGN, DESC_BOUNDARY,
1060 BUS_SPACE_MAXADDR_32BIT,
1061 BUS_SPACE_MAXADDR, NULL, NULL,
1062 RX_DESC_SIZE, 1, RX_DESC_SIZE, 0,
1063 NULL, NULL, &sc->rx.desc_tag))) {
1064 device_printf(sc->dev, "could not create RX ring DMA tag.\n");
1065 return (error);
1066 }
1067
1068 if ((error = bus_dmamem_alloc(sc->rx.desc_tag,
1069 (void **)&sc->rx.desc_ring,
1070 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
1071 &sc->rx.desc_map))) {
1072 device_printf(sc->dev,
1073 "could not allocate RX descriptor ring.\n");
1074 return (error);
1075 }
1076
1077 if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
1078 sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr,
1079 &sc->rx.desc_ring_paddr, 0))) {
1080 device_printf(sc->dev,
1081 "could not load RX descriptor ring map.\n");
1082 return (error);
1083 }
1084
1085 if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
1086 BUS_SPACE_MAXADDR_32BIT,
1087 BUS_SPACE_MAXADDR, NULL, NULL,
1088 MCLBYTES, 1,
1089 MCLBYTES, 0, NULL, NULL,
1090 &sc->rx.buf_tag))) {
1091 device_printf(sc->dev, "could not create RX buf DMA tag.\n");
1092 return (error);
1093 }
1094
1095 for (i = 0; i < RX_DESC_COUNT; i++) {
1096 if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT,
1097 &sc->rx.buf_map[i].map))) {
1098 device_printf(sc->dev, "cannot create RX buffer map\n");
1099 return (error);
1100 }
1101 if (!(m = eqos_alloc_mbufcl(sc))) {
1102 device_printf(sc->dev, "cannot allocate RX mbuf\n");
1103 return (ENOMEM);
1104 }
1105 if ((error = eqos_setup_rxbuf(sc, i, m))) {
1106 device_printf(sc->dev, "cannot create RX buffer\n");
1107 return (error);
1108 }
1109 }
1110
1111 if (bootverbose)
1112 device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n",
1113 sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr);
1114 return (0);
1115 }
1116
1117 static int
1118 eqos_attach(device_t dev)
1119 {
1120 struct eqos_softc *sc = device_get_softc(dev);
1121 if_t ifp;
1122 uint32_t ver;
1123 uint8_t eaddr[ETHER_ADDR_LEN];
1124 u_int userver, snpsver;
1125 int error;
1126 int n;
1127
1128 /* default values */
1129 sc->thresh_dma_mode = false;
1130 sc->pblx8 = true;
1131 sc->txpbl = 0;
1132 sc->rxpbl = 0;
1133 sc->ttc = 0x10;
1134 sc->rtc = 0;
1135
1136 /* setup resources */
1137 if (bus_alloc_resources(dev, eqos_spec, sc->res)) {
1138 device_printf(dev, "Could not allocate resources\n");
1139 bus_release_resources(dev, eqos_spec, sc->res);
1140 return (ENXIO);
1141 }
1142
1143 if ((error = IF_EQOS_INIT(dev)))
1144 return (error);
1145
1146 sc->dev = dev;
1147 ver = RD4(sc, GMAC_MAC_VERSION);
1148 userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >>
1149 GMAC_MAC_VERSION_USERVER_SHIFT;
1150 snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;
1151
1152 if (snpsver != 0x51 && snpsver != 0x52) {
1153 device_printf(dev, "EQOS version 0x%02x not supported\n",
1154 snpsver);
1155 return (ENXIO);
1156 }
1157
1158 for (n = 0; n < 4; n++)
1159 sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));
1160
1161 if (bootverbose) {
1162 device_printf(dev, "DesignWare EQOS ver 0x%02x (0x%02x)\n",
1163 snpsver, userver);
1164 device_printf(dev, "hw features %08x %08x %08x %08x\n",
1165 sc->hw_feature[0], sc->hw_feature[1],
1166 sc->hw_feature[2], sc->hw_feature[3]);
1167 }
1168
1169 mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF);
1170 callout_init_mtx(&sc->callout, &sc->lock, 0);
1171
1172 eqos_get_eaddr(sc, eaddr);
1173 if (bootverbose)
1174 device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":");
1175
1176 /* Soft reset EMAC core */
1177 if ((error = eqos_reset(sc))) {
1178 device_printf(sc->dev, "reset timeout!\n");
1179 return (error);
1180 }
1181
1182 /* Configure AXI Bus mode parameters */
1183 eqos_axi_configure(sc);
1184
1185 /* Setup DMA descriptors */
1186 if (eqos_setup_dma(sc)) {
1187 device_printf(sc->dev, "failed to setup DMA descriptors\n");
1188 return (EINVAL);
1189 }
1190
1191 /* setup interrupt delivery */
1192 if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS,
1193 NULL, eqos_intr, sc, &sc->irq_handle))) {
1194 device_printf(dev, "unable to setup 1st interrupt\n");
1195 bus_release_resources(dev, eqos_spec, sc->res);
1196 return (ENXIO);
1197 }
1198
1199 /* Setup ethernet interface */
1200 ifp = sc->ifp = if_alloc(IFT_ETHER);
1201 if_setsoftc(ifp, sc);
1202 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
1203 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1204 if_setstartfn(ifp, eqos_start);
1205 if_setioctlfn(ifp, eqos_ioctl);
1206 if_setinitfn(ifp, eqos_init);
1207 if_setsendqlen(ifp, TX_DESC_COUNT - 1);
1208 if_setsendqready(ifp);
1209 if_setcapabilities(ifp, IFCAP_VLAN_MTU /*| IFCAP_HWCSUM*/);
1210 if_setcapenable(ifp, if_getcapabilities(ifp));
1211
1212 /* Attach MII driver */
1213 if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change,
1214 eqos_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
1215 MII_OFFSET_ANY, 0))) {
1216 device_printf(sc->dev, "PHY attach failed\n");
1217 return (ENXIO);
1218 }
1219
1220 /* Attach ethernet interface */
1221 ether_ifattach(ifp, eaddr);
1222
1223 return (0);
1224 }
1225
1226 static int
1227 eqos_detach(device_t dev)
1228 {
1229 struct eqos_softc *sc = device_get_softc(dev);
1230 int i;
1231
1232 if (device_is_attached(dev)) {
1233 EQOS_LOCK(sc);
1234 eqos_stop(sc);
1235 EQOS_UNLOCK(sc);
1236 if_setflagbits(sc->ifp, 0, IFF_UP);
1237 ether_ifdetach(sc->ifp);
1238 }
1239
1240 bus_generic_detach(dev);
1241
1242 if (sc->irq_handle)
1243 bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0],
1244 sc->irq_handle);
1245
1246 if (sc->ifp)
1247 if_free(sc->ifp);
1248
1249 bus_release_resources(dev, eqos_spec, sc->res);
1250
1251 if (sc->tx.desc_tag) {
1252 if (sc->tx.desc_map) {
1253 bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map);
1254 bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring,
1255 sc->tx.desc_map);
1256 }
1257 bus_dma_tag_destroy(sc->tx.desc_tag);
1258 }
1259 if (sc->tx.buf_tag) {
1260 for (i = 0; i < TX_DESC_COUNT; i++) {
1261 m_free(sc->tx.buf_map[i].mbuf);
1262 bus_dmamap_destroy(sc->tx.buf_tag,
1263 sc->tx.buf_map[i].map);
1264 }
1265 bus_dma_tag_destroy(sc->tx.buf_tag);
1266 }
1267
1268 if (sc->rx.desc_tag) {
1269 if (sc->rx.desc_map) {
1270 bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map);
1271 bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring,
1272 sc->rx.desc_map);
1273 }
1274 bus_dma_tag_destroy(sc->rx.desc_tag);
1275 }
1276 if (sc->rx.buf_tag) {
1277 for (i = 0; i < RX_DESC_COUNT; i++) {
1278 m_free(sc->rx.buf_map[i].mbuf);
1279 bus_dmamap_destroy(sc->rx.buf_tag,
1280 sc->rx.buf_map[i].map);
1281 }
1282 bus_dma_tag_destroy(sc->rx.buf_tag);
1283 }
1284
1285 mtx_destroy(&sc->lock);
1286
1287 return (0);
1288 }
1289
1290
1291 static device_method_t eqos_methods[] = {
1292 /* Device Interface */
1293 DEVMETHOD(device_attach, eqos_attach),
1294 DEVMETHOD(device_detach, eqos_detach),
1295
1296 /* MII Interface */
1297 DEVMETHOD(miibus_readreg, eqos_miibus_readreg),
1298 DEVMETHOD(miibus_writereg, eqos_miibus_writereg),
1299 DEVMETHOD(miibus_statchg, eqos_miibus_statchg),
1300
1301 DEVMETHOD_END
1302 };
1303
1304 driver_t eqos_driver = {
1305 "eqos",
1306 eqos_methods,
1307 sizeof(struct eqos_softc),
1308 };
1309
1310 DRIVER_MODULE(miibus, eqos, miibus_driver, 0, 0);
1311