/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Soren Schmidt <sos@deepcore.dk>
 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $
 */

/*
 * DesignWare Ethernet Quality-of-Service controller
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "miibus_if.h"
#include "if_eqos_if.h"

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/clk/clk.h>
#endif

#include <dev/eqos/if_eqos_reg.h>
#include <dev/eqos/if_eqos_var.h>

#define	DESC_BOUNDARY		(1ULL << 32)
#define	DESC_ALIGN		sizeof(struct eqos_dma_desc)
#define	DESC_OFFSET(n)		((n) * sizeof(struct eqos_dma_desc))

#define	TX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define	TX_DESC_SIZE		(TX_DESC_COUNT * DESC_ALIGN)
#define	TX_MAX_SEGS		(TX_DESC_COUNT / 2)
#define	TX_NEXT(n)		(((n) + 1) % TX_DESC_COUNT)
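/* Number of descriptors currently queued between tail (t) and head (h) */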
#define	TX_QUEUED(h, t)		((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)

#define	RX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define	RX_DESC_SIZE		(RX_DESC_COUNT * DESC_ALIGN)
#define	RX_NEXT(n)		(((n) + 1) % RX_DESC_COUNT)

#define	MII_BUSY_RETRY		1000
#define	WATCHDOG_TIMEOUT_SECS	3

#define	EQOS_LOCK(sc)		mtx_lock(&(sc)->lock)
#define	EQOS_UNLOCK(sc)		mtx_unlock(&(sc)->lock)
#define	EQOS_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->lock, MA_OWNED)

#define	RD4(sc, o)		bus_read_4(sc->res[EQOS_RES_MEM], (o))
#define	WR4(sc, o, v)		bus_write_4(sc->res[EQOS_RES_MEM], (o), (v))


static struct resource_spec eqos_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static void eqos_tick(void *softc);


static int
eqos_miibus_readreg(device_t dev, int phy, int reg)
{
	struct eqos_softc *sc = device_get_softc(dev);
	uint32_t addr;
	int retry, val;

	addr = sc->csr_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_READ |
	    GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	DELAY(100);

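	/* Poll until the GMII busy (GB) bit clears, then fetch the data */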
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB)) {
			val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;
			break;
		}
		DELAY(10);
	}
	if (!retry) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return (ETIMEDOUT);
	}
	return (val);
}

static int
eqos_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct eqos_softc *sc = device_get_softc(dev);
	uint32_t addr;
	int retry;

	WR4(sc, GMAC_MAC_MDIO_DATA, val);

	addr = sc->csr_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_WRITE |
	    GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	DELAY(100);

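	/* Poll until the GMII busy (GB) bit clears to confirm the write */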
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB))
			break;
		DELAY(10);
	}
	if (!retry) {
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);
		return (ETIMEDOUT);
	}
	return (0);
}

static void
eqos_miibus_statchg(device_t dev)
{
	struct eqos_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);
	uint32_t reg;

	EQOS_ASSERT_LOCKED(sc);

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_up = true;
	else
		sc->link_up = false;

	reg = RD4(sc, GMAC_MAC_CONFIGURATION);

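	/* PS selects 10/100 (MII) operation; FES picks the faster rate of each pair */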
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		reg |= GMAC_MAC_CONFIGURATION_PS;
		reg &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IFM_100_TX:
		reg |= GMAC_MAC_CONFIGURATION_PS;
		reg |= GMAC_MAC_CONFIGURATION_FES;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		reg &= ~GMAC_MAC_CONFIGURATION_PS;
		reg &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IFM_2500_T:
	case IFM_2500_SX:
		reg &= ~GMAC_MAC_CONFIGURATION_PS;
		reg |= GMAC_MAC_CONFIGURATION_FES;
		break;
	default:
		sc->link_up = false;
		return;
	}

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX))
		reg |= GMAC_MAC_CONFIGURATION_DM;
	else
		reg &= ~GMAC_MAC_CONFIGURATION_DM;

	WR4(sc, GMAC_MAC_CONFIGURATION, reg);

	IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));

	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
}

static void
eqos_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->miibus);

	EQOS_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	EQOS_UNLOCK(sc);
}

static int
eqos_media_change(if_t ifp)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	int error;

	EQOS_LOCK(sc);
	error = mii_mediachg(device_get_softc(sc->miibus));
	EQOS_UNLOCK(sc);
	return (error);
}

static void
eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len, u_int total_len)
{
	uint32_t tdes2, tdes3;

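	/*
	 * A zero paddr/len clears the descriptor (eqos_txintr reclaims
	 * with it); otherwise request a completion interrupt (IOC) only
	 * on the last descriptor of a packet.
	 */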
	if (!paddr || !len) {
		tdes2 = 0;
		tdes3 = flags;
	} else {
		tdes2 = (flags & EQOS_TDES3_LD) ? EQOS_TDES2_IOC : 0;
		tdes3 = flags;
	}
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
	sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr);
	sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
	sc->tx.desc_ring[index].des2 = htole32(tdes2 | len);
	sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len);
}

static int
eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int first = sc->tx.head;
	int error, nsegs, idx;
	uint32_t flags;

	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
	if (error == EFBIG) {
		struct mbuf *mb;

		device_printf(sc->dev, "TX packet too big, trying defrag\n");
		bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
		if (!(mb = m_defrag(m, M_NOWAIT)))
			return (ENOMEM);
		m = mb;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
	}
	if (error)
		return (ENOMEM);

	if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
		device_printf(sc->dev, "no more TX queue space\n");
		return (ENOMEM);
	}

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map,
	    BUS_DMASYNC_PREWRITE);

	sc->tx.buf_map[first].mbuf = m;

	for (flags = EQOS_TDES3_FD, idx = 0; idx < nsegs; idx++) {
		if (idx == (nsegs - 1))
			flags |= EQOS_TDES3_LD;
		eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
		    segs[idx].ds_len, m->m_pkthdr.len);
		flags &= ~EQOS_TDES3_FD;
		flags |= EQOS_TDES3_OWN;
		sc->tx.head = TX_NEXT(sc->tx.head);
	}

	/*
	 * Defer setting the OWN bit on the first descriptor
	 * until all descriptors have been updated.
	 */
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
	sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN);

	return (0);
}

static void
eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
{

	sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
	sc->rx.desc_ring[index].des2 = htole32(0);
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE);
	sc->rx.desc_ring[index].des3 =
	    htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC | EQOS_RDES3_BUF1V);
}

static int
eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
	if (error)
		return (error);

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	eqos_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}

static struct mbuf *
eqos_alloc_mbufcl(struct eqos_softc *sc)
{
	struct mbuf *m;

	if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)))
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	return (m);
}

static void
eqos_enable_intr(struct eqos_softc *sc)
{

	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE,
	    GMAC_DMA_CHAN0_INTR_ENABLE_NIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_FBE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_TIE);
}

static void
eqos_disable_intr(struct eqos_softc *sc)
{

	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0);
}

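/* Reverse the bit order of a 32-bit word */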
static uint32_t
eqos_bitrev32(uint32_t x)
{

	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
	return ((x >> 16) | (x << 16));
}

static u_int
eqos_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *hash = arg;

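	/*
	 * Derive a 6-bit bucket from the inverted, bit-reversed CRC32 of
	 * the address: the top bit picks one of the two 32-bit hash
	 * registers, the low five bits select the bit within it.
	 */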
	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	crc &= 0x7f;
	crc = eqos_bitrev32(~crc) >> 26;
	hash[crc >> 5] |= 1 << (crc & 0x1f);
	return (1);
}

static void
eqos_setup_rxfilter(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t pfil, hash[2];
	const uint8_t *eaddr;
	uint32_t val;

	EQOS_ASSERT_LOCKED(sc);

	pfil = RD4(sc, GMAC_MAC_PACKET_FILTER);
	pfil &= ~(GMAC_MAC_PACKET_FILTER_PR |
	    GMAC_MAC_PACKET_FILTER_PM |
	    GMAC_MAC_PACKET_FILTER_HMC |
	    GMAC_MAC_PACKET_FILTER_PCF_MASK);
	hash[0] = hash[1] = 0xffffffff;

	if ((if_getflags(ifp) & IFF_PROMISC)) {
		pfil |= GMAC_MAC_PACKET_FILTER_PR |
		    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else if ((if_getflags(ifp) & IFF_ALLMULTI)) {
		pfil |= GMAC_MAC_PACKET_FILTER_PM;
	} else {
		hash[0] = hash[1] = 0;
		pfil |= GMAC_MAC_PACKET_FILTER_HMC;
		if_foreach_llmaddr(ifp, eqos_hash_maddr, hash);
	}

	/* Write our unicast address */
	eaddr = if_getlladdr(ifp);
	val = eaddr[4] | (eaddr[5] << 8);
	WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val);
	val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | (eaddr[3] << 24);
	WR4(sc, GMAC_MAC_ADDRESS0_LOW, val);

	/* Multicast hash filters */
	WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]);
	WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]);

	/* Packet filter config */
	WR4(sc, GMAC_MAC_PACKET_FILTER, pfil);
}

static int
eqos_reset(struct eqos_softc *sc)
{
	uint32_t val;
	int retry;

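	/* Trigger a software reset and poll up to 5 seconds for it to self-clear */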
	WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
	for (retry = 5000; retry > 0; retry--) {
		DELAY(1000);
		val = RD4(sc, GMAC_DMA_MODE);
		if (!(val & GMAC_DMA_MODE_SWR))
			return (0);
	}
	return (ETIMEDOUT);
}

static void
eqos_init_rings(struct eqos_softc *sc)
{

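	/* Program the channel 0 descriptor ring base addresses and lengths */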
	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI,
	    (uint32_t)(sc->tx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR,
	    (uint32_t)sc->tx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);

	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI,
	    (uint32_t)(sc->rx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR,
	    (uint32_t)sc->rx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);

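	/* Point the RX tail at the end of the ring so every descriptor is usable */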
	WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
	    (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT));
}

static void
eqos_init(void *if_softc)
{
	struct eqos_softc *sc = if_softc;
	if_t ifp = sc->ifp;
	struct mii_data *mii = device_get_softc(sc->miibus);
	uint32_t val, mtl_tx_val, mtl_rx_val;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	EQOS_LOCK(sc);

	eqos_init_rings(sc);

	eqos_setup_rxfilter(sc);

	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);

	/* Enable transmit and receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
	val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
	val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
	if (sc->pblx8)
		val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
	WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	if (sc->txpbl > 0)
		val |= (sc->txpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT);
	val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
	val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	if (sc->rxpbl > 0)
		val |= (sc->rxpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK;
	val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
	val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Disable counters */
	WR4(sc, GMAC_MMC_CONTROL,
	    GMAC_MMC_CONTROL_CNTFREEZ |
	    GMAC_MMC_CONTROL_CNTPRST |
	    GMAC_MMC_CONTROL_CNTPRSTLVL);

	/* Configure operation modes */
	if (sc->thresh_dma_mode) {
		mtl_tx_val = sc->ttc;
		mtl_rx_val = sc->rtc;
	} else {
		mtl_tx_val = GMAC_MTL_TXQ0_OPERATION_MODE_TSF;
		mtl_rx_val = GMAC_MTL_RXQ0_OPERATION_MODE_RSF;
	}

	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
	    mtl_tx_val |
	    GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
	    mtl_rx_val |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Enable flow control */
	val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL);
	val |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
	val |= GMAC_MAC_Q0_TX_FLOW_CTRL_TFE;
	WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val);
	val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL);
	val |= GMAC_MAC_RX_FLOW_CTRL_RFE;
	WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val);

	/* Set RX queue mode; the queue must be in DCB mode */
	WR4(sc, GMAC_RXQ_CTRL0, (GMAC_RXQ_CTRL0_EN_MASK << 16) |
	    GMAC_RXQ_CTRL0_EN_DCB);

	/* Enable transmitter and receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val |= GMAC_MAC_CONFIGURATION_BE;
	val |= GMAC_MAC_CONFIGURATION_JD;
	val |= GMAC_MAC_CONFIGURATION_JE;
	val |= GMAC_MAC_CONFIGURATION_DCRS;
	val |= GMAC_MAC_CONFIGURATION_TE;
	val |= GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	eqos_enable_intr(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->callout, hz, eqos_tick, sc);

	EQOS_UNLOCK(sc);
}

static void
eqos_start_locked(if_t ifp)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int pending = 0;

	if (!sc->link_up)
		return;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
		    TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (!(m = if_dequeue(ifp)))
			break;

		if (eqos_setup_txbuf(sc, m)) {
			if_sendq_prepend(ifp, m);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(ifp, m);
		pending++;
	}

	if (pending) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR,
		    (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));
		sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS;
	}
}

static void
eqos_start(if_t ifp)
{
	struct eqos_softc *sc = if_getsoftc(ifp);

	EQOS_LOCK(sc);
	eqos_start_locked(ifp);
	EQOS_UNLOCK(sc);
}

static void
eqos_stop(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t val;
	int retry;

	EQOS_LOCK(sc);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->callout);

	/* Disable receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Stop receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);

	/* Flush data in the TX FIFO */
	val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
	val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
	for (retry = 10000; retry > 0; retry--) {
		val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
		if (!(val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ))
			break;
		DELAY(10);
	}
	if (!retry)
		device_printf(sc->dev, "timeout flushing TX queue\n");

	/* Disable transmitter */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_TE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	eqos_disable_intr(sc);

	EQOS_UNLOCK(sc);
}

static void
eqos_rxintr(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m;
	uint32_t rdes3;
	int error, length;

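	/* Process completed descriptors until one is still owned by the DMA */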
	while (true) {
		rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3);
		if ((rdes3 & EQOS_RDES3_OWN))
			break;

		if (rdes3 & (EQOS_RDES3_OE | EQOS_RDES3_RE))
			printf("Receive error rdes3=%08x\n", rdes3);

		bus_dmamap_sync(sc->rx.buf_tag,
		    sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag,
		    sc->rx.buf_map[sc->rx.head].map);

		length = rdes3 & EQOS_RDES3_LENGTH_MASK;
		if (length) {
			m = sc->rx.buf_map[sc->rx.head].mbuf;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = length;
			m->m_len = length;
			m->m_nextpkt = NULL;

			/* Remove trailing FCS */
			m_adj(m, -ETHER_CRC_LEN);

			EQOS_UNLOCK(sc);
			if_input(ifp, m);
			EQOS_LOCK(sc);
		}

		if ((m = eqos_alloc_mbufcl(sc))) {
			if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m)))
				printf("ERROR: Hole in RX ring!!\n");
		} else
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
		    (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head));

		sc->rx.head = RX_NEXT(sc->rx.head);
	}
}

static void
eqos_txintr(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	struct eqos_bufmap *bmap;
	uint32_t tdes3;

	EQOS_ASSERT_LOCKED(sc);

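	/* Reclaim descriptors from tail to head, stopping at the first one still owned by the DMA */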
	while (sc->tx.tail != sc->tx.head) {
		tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
		if ((tdes3 & EQOS_TDES3_OWN))
			break;

		bmap = &sc->tx.buf_map[sc->tx.tail];
		if (bmap->mbuf) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);

		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		/* Last descriptor in a packet contains DMA status */
		if ((tdes3 & EQOS_TDES3_LD)) {
			if ((tdes3 & EQOS_TDES3_DE)) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else if ((tdes3 & EQOS_TDES3_ES)) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else {
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			}
		}
		sc->tx.tail = TX_NEXT(sc->tx.tail);
	}
	if (sc->tx.tail == sc->tx.head)
		sc->tx_watchdog = 0;
	eqos_start_locked(sc->ifp);
}

static void
eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status)
{
	uint32_t mtl_istat = 0;

	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS)) {
		uint32_t mtl_clear = 0;

		mtl_istat = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS);
		if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS)) {
			mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS;
		}
		if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS)) {
			mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS;
		}
		if (mtl_clear) {
			mtl_clear |= (mtl_istat &
			    (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE |
			    GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE));
			WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, mtl_clear);
		}
	}
	if (bootverbose)
		device_printf(sc->dev,
		    "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, "
		    "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n",
		    mtl_status, mtl_istat);
}

static void
eqos_tick(void *softc)
{
	struct eqos_softc *sc = softc;
	struct mii_data *mii = device_get_softc(sc->miibus);
	bool link_status;

	EQOS_ASSERT_LOCKED(sc);

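	/* If the TX watchdog expires, assume a missed TX interrupt and reclaim */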
	if (sc->tx_watchdog > 0)
		if (!--sc->tx_watchdog) {
			device_printf(sc->dev, "watchdog timeout\n");
			eqos_txintr(sc);
		}

	link_status = sc->link_up;
	mii_tick(mii);
	if (sc->link_up && !link_status)
		eqos_start_locked(sc->ifp);

	callout_reset(&sc->callout, hz, eqos_tick, sc);
}

static void
eqos_intr(void *arg)
{
	struct eqos_softc *sc = arg;
	uint32_t mac_status, mtl_status, dma_status, rx_tx_status;

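	/* Gather enabled status bits; DMA channel status is write-one-to-clear */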
	mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS);
	mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE);

	if (mac_status)
		device_printf(sc->dev, "MAC interrupt\n");

	if ((mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS)))
		eqos_intr_mtl(sc, mtl_status);

	dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS);
	dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE);

	if (dma_status)
		WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status);

	EQOS_LOCK(sc);

	if (dma_status & GMAC_DMA_CHAN0_STATUS_RI)
		eqos_rxintr(sc);

	if (dma_status & GMAC_DMA_CHAN0_STATUS_TI)
		eqos_txintr(sc);

	EQOS_UNLOCK(sc);

	if (!(mac_status | mtl_status | dma_status)) {
		device_printf(sc->dev,
		    "spurious interrupt mac=%08x mtl=%08x dma=%08x\n",
		    RD4(sc, GMAC_MAC_INTERRUPT_STATUS),
		    RD4(sc, GMAC_MTL_INTERRUPT_STATUS),
		    RD4(sc, GMAC_DMA_CHAN0_STATUS));
	}
	if ((rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS)))
		device_printf(sc->dev, "RX/TX status interrupt\n");
}

static int
eqos_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int flags, mask;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp);
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI))) {
					EQOS_LOCK(sc);
					eqos_setup_rxfilter(sc);
					EQOS_UNLOCK(sc);
				}
			} else {
				eqos_init(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				eqos_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			EQOS_LOCK(sc);
			eqos_setup_rxfilter(sc);
			EQOS_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM))
			if_sethwassistbits(ifp,
			    CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp,
			    0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr)
{
	uint32_t maclo, machi;

	maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW));
	machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF);

	/* If we have no valid MAC address, generate a random one */
	if (maclo == 0xffffffff && machi == 0xffff) {
		maclo = 0xf2 | (arc4random() & 0xffff0000);
		machi = arc4random() & 0x0000ffff;
	}
	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

static void
eqos_axi_configure(struct eqos_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GMAC_DMA_SYSBUS_MODE);

	/* Max Write Outstanding Req Limit */
	val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK;
	val |= 0x03 << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT;

	/* Max Read Outstanding Req Limit */
	val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK;
	val |= 0x07 << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT;

	/* Allowed burst lengths */
	val |= GMAC_DMA_SYSBUS_MODE_BLEN16;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN8;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN4;

	/* Fixed Burst Length */
	val |= GMAC_DMA_SYSBUS_MODE_MB;

	WR4(sc, GMAC_DMA_SYSBUS_MODE, val);
}

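/* bus_dmamap_load(9) callback: record the address of the single segment */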
static void
eqos_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (!error)
		*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
eqos_setup_dma(struct eqos_softc *sc)
{
	struct mbuf *m;
	int error, i;

	/* Set up TX descriptor ring, descriptors, and dma maps */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag */
	    DESC_ALIGN, DESC_BOUNDARY,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filterfunc, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TX ring DMA tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->tx.desc_tag,
	    (void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->tx.desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE,
	    eqos_get1paddr, &sc->tx.desc_ring_paddr,
	    0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		return (error);
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filterfunc, filterarg */
	    MCLBYTES * TX_MAX_SEGS, TX_MAX_SEGS, /* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TX buffer DMA tag.\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT,
		    &sc->tx.buf_map[i].map))) {
			device_printf(sc->dev, "cannot create TX buffer map\n");
			return (error);
		}
		eqos_setup_txdesc(sc, i, EQOS_TDES3_OWN, 0, 0, 0);
	}

	/* Set up RX descriptor ring, descriptors, dma maps, and mbufs */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag */
	    DESC_ALIGN, DESC_BOUNDARY,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filterfunc, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX ring DMA tag.\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->rx.desc_tag,
	    (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rx.desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE,
	    eqos_get1paddr, &sc->rx.desc_ring_paddr,
	    0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		return (error);
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filterfunc, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RX buf DMA tag.\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT,
		    &sc->rx.buf_map[i].map))) {
			device_printf(sc->dev, "cannot create RX buffer map\n");
			return (error);
		}
		if (!(m = eqos_alloc_mbufcl(sc))) {
			device_printf(sc->dev, "cannot allocate RX mbuf\n");
			return (ENOMEM);
		}
		if ((error = eqos_setup_rxbuf(sc, i, m))) {
			device_printf(sc->dev, "cannot create RX buffer\n");
			return (error);
		}
	}

	if (bootverbose)
		device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n",
		    sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr);
	return (0);
}

static int
eqos_attach(device_t dev)
{
	struct eqos_softc *sc = device_get_softc(dev);
	if_t ifp;
	uint32_t ver;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int userver, snpsver;
	int error;
	int n;

	/* default values */
	sc->thresh_dma_mode = false;
	sc->pblx8 = true;
	sc->txpbl = 0;
	sc->rxpbl = 0;
	sc->ttc = 0x10;
	sc->rtc = 0;

	/* setup resources */
	if (bus_alloc_resources(dev, eqos_spec, sc->res)) {
		device_printf(dev, "Could not allocate resources\n");
		bus_release_resources(dev, eqos_spec, sc->res);
		return (ENXIO);
	}

	if ((error = IF_EQOS_INIT(dev)))
		return (error);

	sc->dev = dev;
	ver = RD4(sc, GMAC_MAC_VERSION);
	userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >>
	    GMAC_MAC_VERSION_USERVER_SHIFT;
	snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;

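	/* Only Synopsys core versions 5.1 and 5.2 are supported */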
	if (snpsver != 0x51 && snpsver != 0x52) {
		device_printf(dev, "EQOS version 0x%02x not supported\n",
		    snpsver);
		return (ENXIO);
	}

	for (n = 0; n < 4; n++)
		sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));

	if (bootverbose) {
		device_printf(dev, "DesignWare EQOS ver 0x%02x (0x%02x)\n",
		    snpsver, userver);
		device_printf(dev, "hw features %08x %08x %08x %08x\n",
		    sc->hw_feature[0], sc->hw_feature[1],
		    sc->hw_feature[2], sc->hw_feature[3]);
	}

	mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->callout, &sc->lock, 0);

	eqos_get_eaddr(sc, eaddr);
	if (bootverbose)
		device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":");

	/* Soft reset EMAC core */
	if ((error = eqos_reset(sc))) {
		device_printf(sc->dev, "reset timeout!\n");
		return (error);
	}

	/* Configure AXI Bus mode parameters */
	eqos_axi_configure(sc);

	/* Setup DMA descriptors */
	if (eqos_setup_dma(sc)) {
		device_printf(sc->dev, "failed to setup DMA descriptors\n");
		return (EINVAL);
	}

	/* setup interrupt delivery */
	if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS,
	    NULL, eqos_intr, sc, &sc->irq_handle))) {
		device_printf(dev, "unable to setup 1st interrupt\n");
		bus_release_resources(dev, eqos_spec, sc->res);
		return (ENXIO);
	}

	/* Setup ethernet interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, eqos_start);
	if_setioctlfn(ifp, eqos_ioctl);
	if_setinitfn(ifp, eqos_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU /*| IFCAP_HWCSUM*/);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Attach MII driver */
	if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change,
	    eqos_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0))) {
		device_printf(sc->dev, "PHY attach failed\n");
		return (ENXIO);
	}

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);

	return (0);
}

static int
eqos_detach(device_t dev)
{
	struct eqos_softc *sc = device_get_softc(dev);
	int i;

	if (device_is_attached(dev)) {
		EQOS_LOCK(sc);
		eqos_stop(sc);
		EQOS_UNLOCK(sc);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	bus_generic_detach(dev);

	if (sc->irq_handle)
		bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0],
		    sc->irq_handle);

	if (sc->ifp)
		if_free(sc->ifp);

	bus_release_resources(dev, eqos_spec, sc->res);

	if (sc->tx.desc_tag) {
		if (sc->tx.desc_map) {
			bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map);
			bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring,
			    sc->tx.desc_map);
		}
		bus_dma_tag_destroy(sc->tx.desc_tag);
	}
	if (sc->tx.buf_tag) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			m_free(sc->tx.buf_map[i].mbuf);
			bus_dmamap_destroy(sc->tx.buf_tag,
			    sc->tx.buf_map[i].map);
		}
		bus_dma_tag_destroy(sc->tx.buf_tag);
	}

	if (sc->rx.desc_tag) {
		if (sc->rx.desc_map) {
			bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map);
			bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring,
			    sc->rx.desc_map);
		}
		bus_dma_tag_destroy(sc->rx.desc_tag);
	}
	if (sc->rx.buf_tag) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			m_free(sc->rx.buf_map[i].mbuf);
			bus_dmamap_destroy(sc->rx.buf_tag,
			    sc->rx.buf_map[i].map);
		}
		bus_dma_tag_destroy(sc->rx.buf_tag);
	}

	mtx_destroy(&sc->lock);

	return (0);
}


static device_method_t eqos_methods[] = {
	/* Device Interface */
	DEVMETHOD(device_attach,	eqos_attach),
	DEVMETHOD(device_detach,	eqos_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	eqos_miibus_readreg),
	DEVMETHOD(miibus_writereg,	eqos_miibus_writereg),
	DEVMETHOD(miibus_statchg,	eqos_miibus_statchg),

	DEVMETHOD_END
};

driver_t eqos_driver = {
	"eqos",
	eqos_methods,
	sizeof(struct eqos_softc),
};

DRIVER_MODULE(miibus, eqos, miibus_driver, 0, 0);