1 /*-
2 * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
18 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
20 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
21 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 /*
27 * Allwinner Gigabit Ethernet MAC (EMAC) controller
28 */
29
30 #include "opt_device_polling.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/bus.h>
35 #include <sys/rman.h>
36 #include <sys/kernel.h>
37 #include <sys/endian.h>
38 #include <sys/mbuf.h>
39 #include <sys/socket.h>
40 #include <sys/sockio.h>
41 #include <sys/module.h>
42 #include <sys/gpio.h>
43
44 #include <net/bpf.h>
45 #include <net/if.h>
46 #include <net/ethernet.h>
47 #include <net/if_dl.h>
48 #include <net/if_media.h>
49 #include <net/if_types.h>
50 #include <net/if_var.h>
51
52 #include <machine/bus.h>
53
54 #include <dev/ofw/ofw_bus.h>
55 #include <dev/ofw/ofw_bus_subr.h>
56
57 #include <arm/allwinner/if_awgreg.h>
58 #include <arm/allwinner/aw_sid.h>
59 #include <dev/mii/mii.h>
60 #include <dev/mii/miivar.h>
61
62 #include <dev/clk/clk.h>
63 #include <dev/hwreset/hwreset.h>
64 #include <dev/regulator/regulator.h>
65 #include <dev/syscon/syscon.h>
66
67 #include "syscon_if.h"
68 #include "miibus_if.h"
69 #include "gpio_if.h"
70
71 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg))
72 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val))
73
/*
 * Driver lock helpers.  Note: no trailing semicolon inside the macro
 * bodies — callers supply it, and a stray one would break constructs
 * such as "if (x) AWG_UNLOCK(sc); else ...".
 */
#define	AWG_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	AWG_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	AWG_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	AWG_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)
78
79 #define DESC_ALIGN 4
80 #define TX_DESC_COUNT 1024
81 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT)
82 #define RX_DESC_COUNT 256
83 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT)
84
85 #define DESC_OFF(n) ((n) * sizeof(struct emac_desc))
86 #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1))
87 #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1))
88 #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1))
89
90 #define TX_MAX_SEGS 20
91
92 #define SOFT_RST_RETRY 1000
93 #define MII_BUSY_RETRY 1000
94 #define MDIO_FREQ 2500000
95
96 #define BURST_LEN_DEFAULT 8
97 #define RX_TX_PRI_DEFAULT 0
98 #define PAUSE_TIME_DEFAULT 0x400
99 #define TX_INTERVAL_DEFAULT 64
100 #define RX_BATCH_DEFAULT 64
101
102 /* syscon EMAC clock register */
103 #define EMAC_CLK_REG 0x30
104 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */
105 #define EMAC_CLK_EPHY_ADDR_SHIFT 20
106 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */
107 #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */
108 #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */
109 #define EMAC_CLK_RMII_EN (1 << 13)
110 #define EMAC_CLK_ETXDC (0x7 << 10)
111 #define EMAC_CLK_ETXDC_SHIFT 10
112 #define EMAC_CLK_ERXDC (0x1f << 5)
113 #define EMAC_CLK_ERXDC_SHIFT 5
114 #define EMAC_CLK_PIT (0x1 << 2)
115 #define EMAC_CLK_PIT_MII (0 << 2)
116 #define EMAC_CLK_PIT_RGMII (1 << 2)
117 #define EMAC_CLK_SRC (0x3 << 0)
118 #define EMAC_CLK_SRC_MII (0 << 0)
119 #define EMAC_CLK_SRC_EXT_RGMII (1 << 0)
120 #define EMAC_CLK_SRC_RGMII (2 << 0)
121
122 /* Burst length of RX and TX DMA transfers */
123 static int awg_burst_len = BURST_LEN_DEFAULT;
124 TUNABLE_INT("hw.awg.burst_len", &awg_burst_len);
125
126 /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */
127 static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT;
128 TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri);
129
130 /* Pause time field in the transmitted control frame */
131 static int awg_pause_time = PAUSE_TIME_DEFAULT;
132 TUNABLE_INT("hw.awg.pause_time", &awg_pause_time);
133
134 /* Request a TX interrupt every <n> descriptors */
135 static int awg_tx_interval = TX_INTERVAL_DEFAULT;
136 TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval);
137
138 /* Maximum number of mbufs to send to if_input */
139 static int awg_rx_batch = RX_BATCH_DEFAULT;
140 TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch);
141
/* EMAC hardware generations supported by this driver. */
enum awg_type {
	EMAC_A83T = 1,	/* Allwinner A83T */
	EMAC_H3,	/* Allwinner H3 (EPHY clock bits apply — see EMAC_CLK_EPHY_*) */
	EMAC_A64,	/* Allwinner A64 */
	EMAC_D1,	/* Allwinner D1 */
};
148
/* FDT compatible strings mapped to the EMAC hardware generation. */
static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun8i-a83t-emac",		EMAC_A83T },
	{ "allwinner,sun8i-h3-emac",		EMAC_H3 },
	{ "allwinner,sun50i-a64-emac",		EMAC_A64 },
	{ "allwinner,sun20i-d1-emac",		EMAC_D1 },
	{ NULL,					0 }
};
156
/*
 * Per-slot buffer bookkeeping: the busdma map and the mbuf currently
 * loaded into it (NULL when the slot holds no buffer).
 */
struct awg_bufmap {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};
161
/* Transmit descriptor ring state. */
struct awg_txring {
	bus_dma_tag_t		desc_tag;	/* descriptor ring tag */
	bus_dmamap_t		desc_map;	/* descriptor ring map */
	struct emac_desc	*desc_ring;	/* KVA of descriptor ring */
	bus_addr_t		desc_ring_paddr; /* bus address of ring */
	bus_dma_tag_t		buf_tag;	/* tag for mbuf buffers */
	struct awg_bufmap	buf_map[TX_DESC_COUNT];
	u_int			cur, next, queued; /* producer, consumer, in-flight */
	u_int			segs;		/* segments since last TX_INT_CTL request */
};
172
/* Receive descriptor ring state. */
struct awg_rxring {
	bus_dma_tag_t		desc_tag;	/* descriptor ring tag */
	bus_dmamap_t		desc_map;	/* descriptor ring map */
	struct emac_desc	*desc_ring;	/* KVA of descriptor ring */
	bus_addr_t		desc_ring_paddr; /* bus address of ring */
	bus_dma_tag_t		buf_tag;	/* tag for mbuf buffers */
	struct awg_bufmap	buf_map[RX_DESC_COUNT];
	bus_dmamap_t		buf_spare_map;	/* spare map swapped in by awg_newbuf_rx() */
	u_int			cur;		/* next descriptor to examine */
};
183
/* Indices into sc->res[], matching awg_spec[] below. */
enum {
	_RES_EMAC,	/* EMAC register window */
	_RES_IRQ,	/* interrupt line */
	_RES_SYSCON,	/* optional syscon register window */
	_RES_NITEMS
};
190
/* Per-device driver state. */
struct awg_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;		/* driver lock (AWG_LOCK) */
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	struct callout		stat_ch;	/* periodic awg_tick callout */
	void			*ih;		/* interrupt handler cookie */
	u_int			mdc_div_ratio_m; /* MDC divider for MII accesses */
	int			link;		/* nonzero while link is up */
	int			if_flags;	/* if_getflags() snapshot from last ioctl */
	enum awg_type		type;
	struct syscon		*syscon;

	struct awg_txring	tx;
	struct awg_rxring	rx;
};
208
/* Resource layout; the second memory range (syscon) may be absent. */
static struct resource_spec awg_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};
215
216 static void awg_txeof(struct awg_softc *sc);
217 static void awg_start_locked(struct awg_softc *sc);
218
219 static void awg_tick(void *softc);
220
221 static int awg_parse_delay(device_t dev, uint32_t *tx_delay,
222 uint32_t *rx_delay);
223 static uint32_t syscon_read_emac_clk_reg(device_t dev);
224 static void syscon_write_emac_clk_reg(device_t dev, uint32_t val);
225 static phandle_t awg_get_phy_node(device_t dev);
226 static bool awg_has_internal_phy(device_t dev);
227
228 /*
229 * MII functions
230 */
231
232 static int
awg_miibus_readreg(device_t dev,int phy,int reg)233 awg_miibus_readreg(device_t dev, int phy, int reg)
234 {
235 struct awg_softc *sc;
236 int retry, val;
237
238 sc = device_get_softc(dev);
239 val = 0;
240
241 WR4(sc, EMAC_MII_CMD,
242 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
243 (phy << PHY_ADDR_SHIFT) |
244 (reg << PHY_REG_ADDR_SHIFT) |
245 MII_BUSY);
246 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
247 if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
248 val = RD4(sc, EMAC_MII_DATA);
249 break;
250 }
251 DELAY(10);
252 }
253
254 if (retry == 0)
255 device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
256 phy, reg);
257
258 return (val);
259 }
260
261 static int
awg_miibus_writereg(device_t dev,int phy,int reg,int val)262 awg_miibus_writereg(device_t dev, int phy, int reg, int val)
263 {
264 struct awg_softc *sc;
265 int retry;
266
267 sc = device_get_softc(dev);
268
269 WR4(sc, EMAC_MII_DATA, val);
270 WR4(sc, EMAC_MII_CMD,
271 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
272 (phy << PHY_ADDR_SHIFT) |
273 (reg << PHY_REG_ADDR_SHIFT) |
274 MII_WR | MII_BUSY);
275 for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
276 if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
277 break;
278 DELAY(10);
279 }
280
281 if (retry == 0)
282 device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
283 phy, reg);
284
285 return (0);
286 }
287
/*
 * miibus status-change callback.  Derives link state from the PHY and
 * reprograms MAC speed/duplex and flow-control to match the negotiated
 * media.  Must be called with the driver lock held; does nothing until
 * the interface is running.
 */
static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	uint32_t val;

	sc = device_get_softc(dev);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	/* Link is up only for media subtypes this MAC supports. */
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	/* Program MAC speed and duplex from the negotiated media. */
	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	/* Honor negotiated RX pause. */
	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	/* Honor negotiated TX pause; pause time only applies full-duplex. */
	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}
352
353 /*
354 * Media functions
355 */
356
357 static void
awg_media_status(if_t ifp,struct ifmediareq * ifmr)358 awg_media_status(if_t ifp, struct ifmediareq *ifmr)
359 {
360 struct awg_softc *sc;
361 struct mii_data *mii;
362
363 sc = if_getsoftc(ifp);
364 mii = device_get_softc(sc->miibus);
365
366 AWG_LOCK(sc);
367 mii_pollstat(mii);
368 ifmr->ifm_active = mii->mii_media_active;
369 ifmr->ifm_status = mii->mii_media_status;
370 AWG_UNLOCK(sc);
371 }
372
373 static int
awg_media_change(if_t ifp)374 awg_media_change(if_t ifp)
375 {
376 struct awg_softc *sc;
377 struct mii_data *mii;
378 int error;
379
380 sc = if_getsoftc(ifp);
381 mii = device_get_softc(sc->miibus);
382
383 AWG_LOCK(sc);
384 error = mii_mediachg(mii);
385 AWG_UNLOCK(sc);
386
387 return (error);
388 }
389
390 /*
391 * Core functions
392 */
393
394 /* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
/*
 * Reverse the bit order of a 32-bit word (bit 0 becomes bit 31, etc.).
 * Straightforward shift-and-accumulate formulation.
 */
static uint32_t
bitrev32(uint32_t x)
{
	uint32_t result;
	int bit;

	result = 0;
	for (bit = 0; bit < 32; bit++) {
		result = (result << 1) | (x & 1);
		x >>= 1;
	}

	return (result);
}
405
/*
 * if_foreach_llmaddr() callback: fold one multicast address into the
 * 64-bit hash filter pointed to by arg (two 32-bit words).  The bit index
 * comes from the low 6 bits of the inverted, bit-reversed CRC32 of the
 * address.  Note the 0x7f mask does not change the result: only bits 0-5
 * of the CRC survive the bitrev32()/>>26 combination anyway.
 * Always returns 1 so the caller counts processed addresses.
 */
static u_int
awg_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, hashreg, hashbit, *hash = arg;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7f;
	crc = bitrev32(~crc) >> 26;
	hashreg = (crc >> 5);	/* which 32-bit hash word */
	hashbit = (crc & 0x1f);	/* which bit within the word */
	hash[hashreg] |= (1 << hashbit);

	return (1);
}
419
/*
 * Program the RX frame filter: promiscuous / all-multicast modes, the
 * multicast hash table and the station (unicast) address.  Called with
 * the driver lock held.
 */
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, hash[2], machi, maclo;
	uint8_t *eaddr;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (if_foreach_llmaddr(ifp, awg_hash_maddr, hash) > 0)
		val |= HASH_MULTICAST;

	/* Write our unicast address (little-endian byte packing) */
	eaddr = if_getlladdr(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	   (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/*
	 * Multicast hash filters.  NOTE(review): hash[1] goes to HASH_0 and
	 * vice versa — this swap appears intentional for the hardware's bit
	 * ordering; do not "fix" without consulting the EMAC documentation.
	 */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}
456
457 static void
awg_setup_core(struct awg_softc * sc)458 awg_setup_core(struct awg_softc *sc)
459 {
460 uint32_t val;
461
462 AWG_ASSERT_LOCKED(sc);
463 /* Configure DMA burst length and priorities */
464 val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
465 if (awg_rx_tx_pri)
466 val |= BASIC_CTL_RX_TX_PRI;
467 WR4(sc, EMAC_BASIC_CTL_1, val);
468
469 }
470
471 static void
awg_enable_mac(struct awg_softc * sc,bool enable)472 awg_enable_mac(struct awg_softc *sc, bool enable)
473 {
474 uint32_t tx, rx;
475
476 AWG_ASSERT_LOCKED(sc);
477
478 tx = RD4(sc, EMAC_TX_CTL_0);
479 rx = RD4(sc, EMAC_RX_CTL_0);
480 if (enable) {
481 tx |= TX_EN;
482 rx |= RX_EN | CHECK_CRC;
483 } else {
484 tx &= ~TX_EN;
485 rx &= ~(RX_EN | CHECK_CRC);
486 }
487
488 WR4(sc, EMAC_TX_CTL_0, tx);
489 WR4(sc, EMAC_RX_CTL_0, rx);
490 }
491
/*
 * Determine the interface MAC address.  Prefer whatever firmware left in
 * the address registers; if that is all-ones (invalid), derive a locally
 * administered address from the SID root key, falling back to a random
 * one.  The result is written to eaddr[] in transmit byte order.
 */
static void
awg_get_eaddr(device_t dev, uint8_t *eaddr)
{
	struct awg_softc *sc;
	uint32_t maclo, machi, rnd;
	u_char rootkey[16];
	uint32_t rootkey_size;

	sc = device_get_softc(dev);

	machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff;
	maclo = RD4(sc, EMAC_ADDR_LOW(0));

	rootkey_size = sizeof(rootkey);
	if (maclo == 0xffffffff && machi == 0xffff) {
		/* MAC address in hardware is invalid, create one */
		if (aw_sid_get_fuse(AW_SID_FUSE_ROOTKEY, rootkey,
		    &rootkey_size) == 0 &&
		    (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] |
		     rootkey[15]) != 0) {
			/*
			 * MAC address is derived from the root key in SID.
			 * 0x02 in the low byte marks it locally administered.
			 */
			maclo = (rootkey[13] << 24) | (rootkey[12] << 16) |
				(rootkey[3] << 8) | 0x02;
			machi = (rootkey[15] << 8) | rootkey[14];
		} else {
			/* Create one (random, locally administered) */
			rnd = arc4random();
			maclo = 0x00f2 | (rnd & 0xffff0000);
			machi = rnd & 0xffff;
		}
	}

	/* Unpack into byte order: maclo holds bytes 0-3, machi bytes 4-5. */
	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}
531
532 /*
533 * DMA functions
534 */
535
536 static void
awg_enable_dma_intr(struct awg_softc * sc)537 awg_enable_dma_intr(struct awg_softc *sc)
538 {
539 /* Enable interrupts */
540 WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
541 }
542
543 static void
awg_disable_dma_intr(struct awg_softc * sc)544 awg_disable_dma_intr(struct awg_softc *sc)
545 {
546 /* Disable interrupts */
547 WR4(sc, EMAC_INT_EN, 0);
548 }
549
/*
 * Start the DMA engines and (unless polling is active) enable their
 * interrupts.  Lock must be held.
 */
static void
awg_init_dma(struct awg_softc *sc)
{
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	/* Enable interrupts, unless DEVICE_POLLING has claimed this ifp */
#ifdef DEVICE_POLLING
	if ((if_getcapenable(sc->ifp) & IFCAP_POLLING) == 0)
		awg_enable_dma_intr(sc);
	else
		awg_disable_dma_intr(sc);
#else
	awg_enable_dma_intr(sc);
#endif

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);
}
575
/*
 * Stop both DMA engines and mask interrupts.  The TX FIFO is flushed
 * first (while requesting TX_DMA_EN clear) so no stale data lingers;
 * the ordering of these register writes is deliberate.  Lock must be held.
 */
static void
awg_stop_dma(struct awg_softc *sc)
{
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable interrupts */
	awg_disable_dma_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);
}
600
/*
 * Map an mbuf chain for transmit and fill TX descriptors for it.
 * On success the chain is owned by the ring (freed in awg_clean_txbuf()).
 * On failure *mp is either freed and NULLed (unrecoverable) or left
 * intact for the caller to requeue (ENOBUFS: ring full).
 *
 * The valid (TX_DESC_CTL) bit of the FIRST descriptor is written last,
 * after the whole chain is set up, so the hardware never sees a
 * half-built chain.  Do not reorder those stores.
 */
static int
awg_encap(struct awg_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, last, i;
	u_int csum_flags;
	uint32_t flags, status;
	struct mbuf *m;

	cur = first = sc->tx.cur;
	map = sc->tx.buf_map[first].map;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: defragment and retry once. */
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev, "awg_encap: m_collapse failed\n");
			m_freem(*mp);
			*mp = NULL;
			return (ENOMEM);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*mp);
			*mp = NULL;
		}
	}
	if (error != 0) {
		device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n");
		return (error);
	}
	if (nsegs == 0) {
		m_freem(*mp);
		*mp = NULL;
		return (EIO);
	}

	/* Ring full: unload and let the caller requeue the packet. */
	if (sc->tx.queued + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->tx.buf_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	status = 0;
	/* Request hardware checksum insertion if the stack asked for it. */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (i = 0; i < nsegs; i++) {
		sc->tx.segs++;
		if (i == nsegs - 1) {
			flags |= TX_LAST_DESC;
			/*
			 * Can only request TX completion
			 * interrupt on last descriptor.
			 */
			if (sc->tx.segs >= awg_tx_interval) {
				sc->tx.segs = 0;
				flags |= TX_INT_CTL;
			}
		}

		sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr);
		sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len);
		sc->tx.desc_ring[cur].status = htole32(status);

		flags &= ~TX_FIR_DESC;
		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully set up.
		 */
		status = TX_DESC_CTL;

		++sc->tx.queued;
		cur = TX_NEXT(cur);
	}

	sc->tx.cur = cur;

	/* Store mapping and mbuf in the last segment */
	last = TX_SKIP(cur, TX_DESC_COUNT - 1);
	sc->tx.buf_map[first].map = sc->tx.buf_map[last].map;
	sc->tx.buf_map[last].map = map;
	sc->tx.buf_map[last].mbuf = m;

	/*
	 * The whole mbuf chain has been DMA mapped,
	 * fix the first descriptor.
	 */
	sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL);

	return (0);
}
705
706 static void
awg_clean_txbuf(struct awg_softc * sc,int index)707 awg_clean_txbuf(struct awg_softc *sc, int index)
708 {
709 struct awg_bufmap *bmap;
710
711 --sc->tx.queued;
712
713 bmap = &sc->tx.buf_map[index];
714 if (bmap->mbuf != NULL) {
715 bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
716 BUS_DMASYNC_POSTWRITE);
717 bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
718 m_freem(bmap->mbuf);
719 bmap->mbuf = NULL;
720 }
721 }
722
723 static void
awg_setup_rxdesc(struct awg_softc * sc,int index,bus_addr_t paddr)724 awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
725 {
726 uint32_t status, size;
727
728 status = RX_DESC_CTL;
729 size = MCLBYTES - 1;
730
731 sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
732 sc->rx.desc_ring[index].size = htole32(size);
733 sc->rx.desc_ring[index].status = htole32(status);
734 }
735
736 static void
awg_reuse_rxdesc(struct awg_softc * sc,int index)737 awg_reuse_rxdesc(struct awg_softc *sc, int index)
738 {
739
740 sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL);
741 }
742
/*
 * Replace the mbuf in RX slot 'index' with a freshly allocated cluster.
 * The new buffer is loaded into the spare map first; only once that
 * succeeds are the slot's map and the spare map swapped, so on failure
 * the old buffer/mapping remains intact and the caller can reuse the
 * descriptor.  Returns 0 on success or ENOBUFS.
 */
static int
awg_newbuf_rx(struct awg_softc *sc, int index)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	/* Align the IP header (ETHER_ALIGN offset into the cluster). */
	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map,
	    m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Tear down the old buffer's mapping, if any. */
	if (sc->rx.buf_map[index].mbuf != NULL) {
		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);
	}
	/* Swap slot map and spare map; the slot now holds the new mapping. */
	map = sc->rx.buf_map[index].map;
	sc->rx.buf_map[index].map = sc->rx.buf_spare_map;
	sc->rx.buf_spare_map = map;
	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}
780
781 static void
awg_dmamap_cb(void * arg,bus_dma_segment_t * segs,int nseg,int error)782 awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
783 {
784 if (error != 0)
785 return;
786 *(bus_addr_t *)arg = segs[0].ds_addr;
787 }
788
/*
 * Allocate and initialize the TX and RX descriptor rings and their
 * buffer DMA tags/maps, link each descriptor's 'next' pointer into a
 * circular list, pre-fill the RX ring with cluster mbufs, and program
 * the ring base addresses into the hardware.  Returns 0 or a busdma
 * error.  On failure, intermediate allocations are left for detach to
 * clean up (the caller fails the attach).
 */
static int
awg_setup_dma(device_t dev)
{
	struct awg_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Setup TX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate TX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
	    &sc->tx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load TX descriptor ring\n");
		return (error);
	}

	/* Chain TX descriptors into a hardware-visible circular list. */
	for (i = 0; i < TX_DESC_COUNT; i++)
		sc->tx.desc_ring[i].next =
		    htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	sc->tx.queued = 0;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx.buf_tag, 0,
		    &sc->tx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	/* Setup RX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate RX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
	    &sc->rx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load RX descriptor ring\n");
		return (error);
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	/* Spare map used by awg_newbuf_rx() for safe buffer replacement. */
	error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map);
	if (error != 0) {
		device_printf(dev,
		    "cannot create RX buffer spare map\n");
		return (error);
	}

	/* Chain RX descriptors and populate each with a fresh cluster. */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		sc->rx.desc_ring[i].next =
		    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i)));

		error = bus_dmamap_create(sc->rx.buf_tag, 0,
		    &sc->rx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
		sc->rx.buf_map[i].mbuf = NULL;
		error = awg_newbuf_rx(sc, i);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer\n");
			return (error);
		}
	}
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);

	return (0);
}
940
941 static void
awg_dma_start_tx(struct awg_softc * sc)942 awg_dma_start_tx(struct awg_softc *sc)
943 {
944 uint32_t val;
945
946 AWG_ASSERT_LOCKED(sc);
947
948 /* Start and run TX DMA */
949 val = RD4(sc, EMAC_TX_CTL_1);
950 WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
951 }
952
953 /*
954 * if_ functions
955 */
956
/*
 * Drain the interface send queue into the TX ring.  Stops on ring-full
 * (ENOBUFS: sets OACTIVE and requeues the packet) or when the queue is
 * empty; only kicks the DMA engine if something was enqueued.  Requires
 * the driver lock and an up link.
 */
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int cnt, err;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = awg_encap(sc, &m);
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			/* awg_encap() leaves m intact only when requeuable. */
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}

	if (cnt != 0) {
		/* Make descriptor writes visible before starting DMA. */
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		awg_dma_start_tx(sc);
	}
}
998
999 static void
awg_start(if_t ifp)1000 awg_start(if_t ifp)
1001 {
1002 struct awg_softc *sc;
1003
1004 sc = if_getsoftc(ifp);
1005
1006 AWG_LOCK(sc);
1007 awg_start_locked(sc);
1008 AWG_UNLOCK(sc);
1009 }
1010
/*
 * Bring the interface up: program the RX filter and core parameters,
 * enable the MAC and DMA, mark the interface running, renegotiate media
 * and start the periodic tick.  No-op if already running.  Lock held.
 */
static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);
	awg_setup_core(sc);
	awg_enable_mac(sc, true);
	awg_init_dma(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}
1035
/* if_init entry point: run the locked init path under the driver lock. */
static void
awg_init(void *softc)
{
	struct awg_softc *sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}
1047
/*
 * Bring the interface down: stop the tick, DMA and MAC, reclaim every
 * TX buffer (completed and untransmitted alike), rearm all RX
 * descriptors for reuse and clear RUNNING/OACTIVE.  The two TX loops
 * and the intervening sc->tx.next assignment are order-sensitive.
 * Lock must be held.
 */
static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;
	int i;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	awg_stop_dma(sc);
	awg_enable_mac(sc, false);

	sc->link = 0;

	/* Finish handling transmitted buffers */
	awg_txeof(sc);

	/* Release any untransmitted buffers. */
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		val = le32toh(sc->tx.desc_ring[i].status);
		if ((val & TX_DESC_CTL) != 0)
			break;
		awg_clean_txbuf(sc, i);
	}
	sc->tx.next = i;
	/* Anything still queued was never completed; force-release it. */
	for (; sc->tx.queued > 0; i = TX_NEXT(i)) {
		sc->tx.desc_ring[i].status = 0;
		awg_clean_txbuf(sc, i);
	}
	sc->tx.cur = sc->tx.next;
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Setup RX buffers for reuse */
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = sc->rx.cur; ; i = RX_NEXT(i)) {
		val = le32toh(sc->rx.desc_ring[i].status);
		if ((val & RX_DESC_CTL) != 0)
			break;
		awg_reuse_rxdesc(sc, i);
	}
	sc->rx.cur = i;
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
1101
/*
 * ioctl handler: interface flags (up/down, promisc/allmulti), multicast
 * membership, media selection and capability toggles (polling, VLAN MTU,
 * RX/TX checksum offload).  Everything else defers to ether_ioctl().
 */
static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only reprogram the filter on relevant flag changes. */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(awg_poll, ifp);
				if (error != 0)
					break;
				AWG_LOCK(sc);
				/* Polling replaces interrupts while enabled. */
				awg_disable_dma_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				AWG_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				AWG_LOCK(sc);
				awg_enable_dma_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				AWG_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		/* Keep if_hwassist in step with the TXCSUM capability. */
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
1183
/*
 * Interrupt functions
 */
1187
/*
 * Drain completed descriptors from the RX ring.
 *
 * Walks the ring from rx.cur until a descriptor still owned by the DMA
 * engine (RX_DESC_CTL set) is found.  Each received buffer is replaced
 * with a fresh mbuf and the filled mbufs are chained and handed to the
 * stack in batches of at most awg_rx_batch; the driver lock is dropped
 * around if_input().
 *
 * Returns the number of packets received (used by the polling path).
 */
static int
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *mh, *mt;
	int error, index, len, cnt, npkt;
	uint32_t status;

	ifp = sc->ifp;
	mh = mt = NULL;	/* head/tail of the batched packet chain */
	cnt = 0;	/* packets in the current batch */
	npkt = 0;	/* total packets for this call */

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		/* Descriptor still owned by the hardware: stop here. */
		if ((status & RX_DESC_CTL) != 0)
			break;

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;

		if (len == 0) {
			/* Zero-length frame: count errors, recycle buffer. */
			if ((status & (RX_NO_ENOUGH_BUF_ERR | RX_OVERFLOW_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}

		m = sc->rx.buf_map[index].mbuf;

		/* Attach a replacement buffer before handing m upstream. */
		error = awg_newbuf_rx(sc, index);
		if (error != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			awg_reuse_rxdesc(sc, index);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		m->m_len = len;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		/* Translate hardware checksum status if offload enabled. */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (status & RX_FRM_TYPE) != 0) {
			m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			if ((status & RX_HEADER_ERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & RX_PAYLOAD_ERR) == 0) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Append to the batch chain. */
		m->m_nextpkt = NULL;
		if (mh == NULL)
			mh = m;
		else
			mt->m_nextpkt = m;
		mt = m;
		++cnt;
		++npkt;

		/* Full batch: pass it up with the lock dropped. */
		if (cnt == awg_rx_batch) {
			AWG_UNLOCK(sc);
			if_input(ifp, mh);
			AWG_LOCK(sc);
			mh = mt = NULL;
			cnt = 0;
		}
	}

	/* Descriptors were given back to hardware; sync the ring. */
	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Pass up any remaining partial batch. */
	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}
1277
/*
 * Reclaim completed TX descriptors.
 *
 * Walks the ring from tx.next while descriptors remain queued, stopping
 * at the first descriptor still owned by the DMA engine (TX_DESC_CTL
 * set).  Output statistics are accounted on the last descriptor of each
 * frame; awg_clean_txbuf() releases the mbuf/DMA state per descriptor.
 */
static void
awg_txeof(struct awg_softc *sc)
{
	struct emac_desc *desc;
	uint32_t status, size;
	if_t ifp;
	int i, prog;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;

	prog = 0;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		/* Still owned by the hardware; stop reclaiming. */
		if ((status & TX_DESC_CTL) != 0)
			break;
		size = le32toh(desc->size);
		/* Account the frame on its last descriptor only. */
		if (size & TX_LAST_DESC) {
			if ((status & (TX_HEADER_ERR | TX_PAYLOAD_ERR)) != 0)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			else
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		prog++;
		awg_clean_txbuf(sc, i);
	}

	if (prog > 0) {
		/* Ring space was freed; allow transmission again. */
		sc->tx.next = i;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
	}
}
1315
1316 static void
awg_intr(void * arg)1317 awg_intr(void *arg)
1318 {
1319 struct awg_softc *sc;
1320 uint32_t val;
1321
1322 sc = arg;
1323
1324 AWG_LOCK(sc);
1325 val = RD4(sc, EMAC_INT_STA);
1326 WR4(sc, EMAC_INT_STA, val);
1327
1328 if (val & RX_INT)
1329 awg_rxintr(sc);
1330
1331 if (val & TX_INT)
1332 awg_txeof(sc);
1333
1334 if (val & (TX_INT | TX_BUF_UA_INT)) {
1335 if (!if_sendq_empty(sc->ifp))
1336 awg_start_locked(sc);
1337 }
1338
1339 AWG_UNLOCK(sc);
1340 }
1341
#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: service the RX/TX rings without relying
 * on interrupts, and on POLL_AND_CHECK_STATUS acknowledge any latched
 * interrupt status.  Returns the number of packets received.
 *
 * NOTE(review): the poll budget 'count' is not consulted here; RX
 * processing drains as many descriptors as are complete.  Confirm this
 * is intended.
 */
static int
awg_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct awg_softc *sc;
	uint32_t val;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	AWG_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AWG_UNLOCK(sc);
		return (0);
	}

	rx_npkts = awg_rxintr(sc);
	awg_txeof(sc);
	if (!if_sendq_empty(ifp))
		awg_start_locked(sc);

	/* Periodically acknowledge latched interrupt status. */
	if (cmd == POLL_AND_CHECK_STATUS) {
		val = RD4(sc, EMAC_INT_STA);
		if (val != 0)
			WR4(sc, EMAC_INT_STA, val);
	}

	AWG_UNLOCK(sc);

	return (rx_npkts);
}
#endif
1376
1377 /*
1378 * syscon functions
1379 */
1380 static uint32_t
syscon_read_emac_clk_reg(device_t dev)1381 syscon_read_emac_clk_reg(device_t dev)
1382 {
1383 struct awg_softc *sc;
1384
1385 sc = device_get_softc(dev);
1386 if (sc->syscon != NULL)
1387 return (SYSCON_READ_4(sc->syscon, EMAC_CLK_REG));
1388 else if (sc->res[_RES_SYSCON] != NULL)
1389 return (bus_read_4(sc->res[_RES_SYSCON], 0));
1390
1391 return (0);
1392 }
1393
1394 static void
syscon_write_emac_clk_reg(device_t dev,uint32_t val)1395 syscon_write_emac_clk_reg(device_t dev, uint32_t val)
1396 {
1397 struct awg_softc *sc;
1398
1399 sc = device_get_softc(dev);
1400 if (sc->syscon != NULL)
1401 SYSCON_WRITE_4(sc->syscon, EMAC_CLK_REG, val);
1402 else if (sc->res[_RES_SYSCON] != NULL)
1403 bus_write_4(sc->res[_RES_SYSCON], 0, val);
1404 }
1405
1406 /*
1407 * PHY functions
1408 */
1409
1410 static phandle_t
awg_get_phy_node(device_t dev)1411 awg_get_phy_node(device_t dev)
1412 {
1413 phandle_t node;
1414 pcell_t phy_handle;
1415
1416 node = ofw_bus_get_node(dev);
1417 if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
1418 sizeof(phy_handle)) <= 0)
1419 return (0);
1420
1421 return (OF_node_from_xref(phy_handle));
1422 }
1423
1424 static bool
awg_has_internal_phy(device_t dev)1425 awg_has_internal_phy(device_t dev)
1426 {
1427 phandle_t node, phy_node;
1428
1429 node = ofw_bus_get_node(dev);
1430 /* Legacy binding */
1431 if (OF_hasprop(node, "allwinner,use-internal-phy"))
1432 return (true);
1433
1434 phy_node = awg_get_phy_node(dev);
1435 return (phy_node != 0 && ofw_bus_node_is_compatible(OF_parent(phy_node),
1436 "allwinner,sun8i-h3-mdio-internal") != 0);
1437 }
1438
1439 static int
awg_parse_delay(device_t dev,uint32_t * tx_delay,uint32_t * rx_delay)1440 awg_parse_delay(device_t dev, uint32_t *tx_delay, uint32_t *rx_delay)
1441 {
1442 phandle_t node;
1443 uint32_t delay;
1444
1445 if (tx_delay == NULL || rx_delay == NULL)
1446 return (EINVAL);
1447 *tx_delay = *rx_delay = 0;
1448 node = ofw_bus_get_node(dev);
1449
1450 if (OF_getencprop(node, "tx-delay", &delay, sizeof(delay)) >= 0)
1451 *tx_delay = delay;
1452 else if (OF_getencprop(node, "allwinner,tx-delay-ps", &delay,
1453 sizeof(delay)) >= 0) {
1454 if ((delay % 100) != 0) {
1455 device_printf(dev, "tx-delay-ps is not a multiple of 100\n");
1456 return (EDOM);
1457 }
1458 *tx_delay = delay / 100;
1459 }
1460 if (*tx_delay > 7) {
1461 device_printf(dev, "tx-delay out of range\n");
1462 return (ERANGE);
1463 }
1464
1465 if (OF_getencprop(node, "rx-delay", &delay, sizeof(delay)) >= 0)
1466 *rx_delay = delay;
1467 else if (OF_getencprop(node, "allwinner,rx-delay-ps", &delay,
1468 sizeof(delay)) >= 0) {
1469 if ((delay % 100) != 0) {
1470 device_printf(dev, "rx-delay-ps is not within documented domain\n");
1471 return (EDOM);
1472 }
1473 *rx_delay = delay / 100;
1474 }
1475 if (*rx_delay > 31) {
1476 device_printf(dev, "rx-delay out of range\n");
1477 return (ERANGE);
1478 }
1479
1480 return (0);
1481 }
1482
/*
 * Configure the PHY clock path according to the "phy-mode" property.
 *
 * Newer device trees expose the EMAC clock register through syscon
 * (either the syscon driver handle or a directly mapped resource);
 * older ones model the TX path as a mux clock whose parent selects
 * MII vs RGMII.  Returns 0 immediately when no "phy-mode" property is
 * present.
 */
static int
awg_setup_phy(device_t dev)
{
	struct awg_softc *sc;
	clk_t clk_tx, clk_tx_parent;
	const char *tx_parent_name;
	char *phy_type;
	phandle_t node;
	uint32_t reg, tx_delay, rx_delay;
	int error;
	bool use_syscon;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	use_syscon = false;

	if (OF_getprop_alloc(node, "phy-mode", (void **)&phy_type) == 0)
		return (0);

	if (sc->syscon != NULL || sc->res[_RES_SYSCON] != NULL)
		use_syscon = true;

	if (bootverbose)
		device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type,
		    use_syscon ? "reg" : "clk");

	if (use_syscon) {
		/*
		 * Abstract away writing to syscon for devices like the pine64.
		 * For the pine64, we get dtb from U-Boot and it still uses the
		 * legacy setup of specifying syscon register in emac node
		 * rather than as its own node and using an xref in emac.
		 * These abstractions can go away once U-Boot dts is up-to-date.
		 */
		reg = syscon_read_emac_clk_reg(dev);
		reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN);
		/* Select pin interface type and clock source per phy-mode. */
		if (strncmp(phy_type, "rgmii", 5) == 0)
			reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII;
		else if (strcmp(phy_type, "rmii") == 0)
			reg |= EMAC_CLK_RMII_EN;
		else
			reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII;

		/*
		 * Fail attach if we fail to parse either of the delay
		 * parameters.  If we don't have the proper delay to write
		 * to syscon, then awg likely won't function properly
		 * anyways.  Lack of a delay property is not an error --
		 * awg_parse_delay() defaults to 0 in that case.
		 */
		error = awg_parse_delay(dev, &tx_delay, &rx_delay);
		if (error != 0)
			goto fail;

		/* Default to 0 and we'll increase it if we need to. */
		reg &= ~(EMAC_CLK_ETXDC | EMAC_CLK_ERXDC);
		if (tx_delay > 0)
			reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT);
		if (rx_delay > 0)
			reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT);

		if (sc->type == EMAC_H3) {
			/* H3 also carries internal-PHY controls here. */
			if (awg_has_internal_phy(dev)) {
				reg |= EMAC_CLK_EPHY_SELECT;
				reg &= ~EMAC_CLK_EPHY_SHUTDOWN;
				if (OF_hasprop(node,
				    "allwinner,leds-active-low"))
					reg |= EMAC_CLK_EPHY_LED_POL;
				else
					reg &= ~EMAC_CLK_EPHY_LED_POL;

				/* Set internal PHY addr to 1 */
				reg &= ~EMAC_CLK_EPHY_ADDR;
				reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT);
			} else {
				reg &= ~EMAC_CLK_EPHY_SELECT;
			}
		}

		if (bootverbose)
			device_printf(dev, "EMAC clock: 0x%08x\n", reg);
		syscon_write_emac_clk_reg(dev, reg);
	} else {
		if (strncmp(phy_type, "rgmii", 5) == 0)
			tx_parent_name = "emac_int_tx";
		else
			tx_parent_name = "mii_phy_tx";

		/* Get the TX clock */
		error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot get tx clock\n");
			goto fail;
		}

		/* Find the desired parent clock based on phy-mode property */
		error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot get clock '%s'\n",
			    tx_parent_name);
			goto fail;
		}

		/* Set TX clock parent */
		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot set tx clock parent\n");
			goto fail;
		}

		/* Enable TX clock */
		error = clk_enable(clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	error = 0;

fail:
	OF_prop_free(phy_type);
	return (error);
}
1606
1607 static int
awg_setup_extres(device_t dev)1608 awg_setup_extres(device_t dev)
1609 {
1610 struct awg_softc *sc;
1611 phandle_t node, phy_node;
1612 hwreset_t rst_ahb, rst_ephy;
1613 clk_t clk_ahb, clk_ephy;
1614 regulator_t reg;
1615 uint64_t freq;
1616 int error, div;
1617
1618 sc = device_get_softc(dev);
1619 rst_ahb = rst_ephy = NULL;
1620 clk_ahb = clk_ephy = NULL;
1621 reg = NULL;
1622 node = ofw_bus_get_node(dev);
1623 phy_node = awg_get_phy_node(dev);
1624
1625 if (phy_node == 0 && OF_hasprop(node, "phy-handle")) {
1626 error = ENXIO;
1627 device_printf(dev, "cannot get phy handle\n");
1628 goto fail;
1629 }
1630
1631 /* Get AHB clock and reset resources */
1632 error = hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst_ahb);
1633 if (error != 0)
1634 error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb);
1635 if (error != 0) {
1636 device_printf(dev, "cannot get ahb reset\n");
1637 goto fail;
1638 }
1639 if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0)
1640 if (phy_node == 0 || hwreset_get_by_ofw_idx(dev, phy_node, 0,
1641 &rst_ephy) != 0)
1642 rst_ephy = NULL;
1643 error = clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk_ahb);
1644 if (error != 0)
1645 error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb);
1646 if (error != 0) {
1647 device_printf(dev, "cannot get ahb clock\n");
1648 goto fail;
1649 }
1650 if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0)
1651 if (phy_node == 0 || clk_get_by_ofw_index(dev, phy_node, 0,
1652 &clk_ephy) != 0)
1653 clk_ephy = NULL;
1654
1655 if (OF_hasprop(node, "syscon") && syscon_get_by_ofw_property(dev, node,
1656 "syscon", &sc->syscon) != 0) {
1657 device_printf(dev, "cannot get syscon driver handle\n");
1658 goto fail;
1659 }
1660
1661 /* Configure PHY for MII or RGMII mode */
1662 if (awg_setup_phy(dev) != 0)
1663 goto fail;
1664
1665 /* Enable clocks */
1666 error = clk_enable(clk_ahb);
1667 if (error != 0) {
1668 device_printf(dev, "cannot enable ahb clock\n");
1669 goto fail;
1670 }
1671 if (clk_ephy != NULL) {
1672 error = clk_enable(clk_ephy);
1673 if (error != 0) {
1674 device_printf(dev, "cannot enable ephy clock\n");
1675 goto fail;
1676 }
1677 }
1678
1679 /* De-assert reset */
1680 error = hwreset_deassert(rst_ahb);
1681 if (error != 0) {
1682 device_printf(dev, "cannot de-assert ahb reset\n");
1683 goto fail;
1684 }
1685 if (rst_ephy != NULL) {
1686 /*
1687 * The ephy reset is left de-asserted by U-Boot. Assert it
1688 * here to make sure that we're in a known good state going
1689 * into the PHY reset.
1690 */
1691 hwreset_assert(rst_ephy);
1692 error = hwreset_deassert(rst_ephy);
1693 if (error != 0) {
1694 device_printf(dev, "cannot de-assert ephy reset\n");
1695 goto fail;
1696 }
1697 }
1698
1699 /* Enable PHY regulator if applicable */
1700 if (regulator_get_by_ofw_property(dev, 0, "phy-supply", ®) == 0) {
1701 error = regulator_enable(reg);
1702 if (error != 0) {
1703 device_printf(dev, "cannot enable PHY regulator\n");
1704 goto fail;
1705 }
1706 }
1707
1708 /* Determine MDC clock divide ratio based on AHB clock */
1709 error = clk_get_freq(clk_ahb, &freq);
1710 if (error != 0) {
1711 device_printf(dev, "cannot get AHB clock frequency\n");
1712 goto fail;
1713 }
1714 div = freq / MDIO_FREQ;
1715 if (div <= 16)
1716 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16;
1717 else if (div <= 32)
1718 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32;
1719 else if (div <= 64)
1720 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64;
1721 else if (div <= 128)
1722 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128;
1723 else {
1724 device_printf(dev, "cannot determine MDC clock divide ratio\n");
1725 error = ENXIO;
1726 goto fail;
1727 }
1728
1729 if (bootverbose)
1730 device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n",
1731 (uintmax_t)freq, sc->mdc_div_ratio_m);
1732
1733 return (0);
1734
1735 fail:
1736 if (reg != NULL)
1737 regulator_release(reg);
1738 if (clk_ephy != NULL)
1739 clk_release(clk_ephy);
1740 if (clk_ahb != NULL)
1741 clk_release(clk_ahb);
1742 if (rst_ephy != NULL)
1743 hwreset_release(rst_ephy);
1744 if (rst_ahb != NULL)
1745 hwreset_release(rst_ahb);
1746 return (error);
1747 }
1748
#ifdef AWG_DEBUG
/*
 * Debug helper: print the contents of the main EMAC registers, one per
 * line.  Compiled in only when AWG_DEBUG is defined.
 */
static void
awg_dump_regs(device_t dev)
{
	/* Name/offset table of the registers worth dumping. */
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0", EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1", EMAC_BASIC_CTL_1 },
		{ "INT_STA", EMAC_INT_STA },
		{ "INT_EN", EMAC_INT_EN },
		{ "TX_CTL_0", EMAC_TX_CTL_0 },
		{ "TX_CTL_1", EMAC_TX_CTL_1 },
		{ "TX_FLOW_CTL", EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST", EMAC_TX_DMA_LIST },
		{ "RX_CTL_0", EMAC_RX_CTL_0 },
		{ "RX_CTL_1", EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST", EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT", EMAC_RX_FRM_FLT },
		{ "RX_HASH_0", EMAC_RX_HASH_0 },
		{ "RX_HASH_1", EMAC_RX_HASH_1 },
		{ "MII_CMD", EMAC_MII_CMD },
		{ "ADDR_HIGH0", EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0", EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA", EMAC_TX_DMA_STA },
		{ "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA", EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA", EMAC_RGMII_STA },
	};
	struct awg_softc *sc;
	unsigned int n;

	sc = device_get_softc(dev);

	for (n = 0; n < nitems(regs); n++)
		device_printf(dev, "  %-20s %08x\n", regs[n].name,
		    RD4(sc, regs[n].reg));
}
#endif
1792
1793 #define GPIO_ACTIVE_LOW 1
1794
/*
 * Reset the PHY through a GPIO described by the legacy
 * "allwinner,reset-gpio" property, using the three delays (in
 * microseconds) from "allwinner,reset-delays-us" around the
 * assert/de-assert/assert sequence.
 *
 * Returns 0 when no reset GPIO is configured (nothing to do) or after
 * a successful reset; ENXIO when the binding is incomplete.
 */
static int
awg_phy_reset(device_t dev)
{
	pcell_t gpio_prop[4], delay_prop[3];
	phandle_t node, gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop,
	    sizeof(gpio_prop)) <= 0)
		return (0);

	if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop,
	    sizeof(delay_prop)) <= 0)
		return (ENXIO);

	gpio_node = OF_node_from_xref(gpio_prop[0]);
	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL)
		return (ENXIO);

	if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1,
	    gpio_prop + 1, &pin, &flags) != 0)
		return (ENXIO);

	/*
	 * Determine the "idle" pin level; the reset pulse below drives
	 * the opposite level in the middle step.  The level is inverted
	 * by "allwinner,reset-active-low" and again by the GPIO flag.
	 */
	pin_value = GPIO_PIN_LOW;
	if (OF_hasprop(node, "allwinner,reset-active-low"))
		pin_value = GPIO_PIN_HIGH;

	if (flags & GPIO_ACTIVE_LOW)
		pin_value = !pin_value;

	/* Drive idle / active / idle with the DT-specified delays. */
	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[0]);
	GPIO_PIN_SET(gpio, pin, !pin_value);
	DELAY(delay_prop[1]);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[2]);

	return (0);
}
1838
1839 static int
awg_reset(device_t dev)1840 awg_reset(device_t dev)
1841 {
1842 struct awg_softc *sc;
1843 int retry;
1844
1845 sc = device_get_softc(dev);
1846
1847 /* Reset PHY if necessary */
1848 if (awg_phy_reset(dev) != 0) {
1849 device_printf(dev, "failed to reset PHY\n");
1850 return (ENXIO);
1851 }
1852
1853 /* Soft reset all registers and logic */
1854 WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);
1855
1856 /* Wait for soft reset bit to self-clear */
1857 for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
1858 if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
1859 break;
1860 DELAY(10);
1861 }
1862 if (retry == 0) {
1863 device_printf(dev, "soft reset timed out\n");
1864 #ifdef AWG_DEBUG
1865 awg_dump_regs(dev);
1866 #endif
1867 return (ETIMEDOUT);
1868 }
1869
1870 return (0);
1871 }
1872
1873 /*
1874 * Stats
1875 */
1876
1877 static void
awg_tick(void * softc)1878 awg_tick(void *softc)
1879 {
1880 struct awg_softc *sc;
1881 struct mii_data *mii;
1882 if_t ifp;
1883 int link;
1884
1885 sc = softc;
1886 ifp = sc->ifp;
1887 mii = device_get_softc(sc->miibus);
1888
1889 AWG_ASSERT_LOCKED(sc);
1890
1891 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
1892 return;
1893
1894 link = sc->link;
1895 mii_tick(mii);
1896 if (sc->link && !link)
1897 awg_start_locked(sc);
1898
1899 callout_reset(&sc->stat_ch, hz, awg_tick, sc);
1900 }
1901
1902 /*
1903 * Probe/attach functions
1904 */
1905
1906 static int
awg_probe(device_t dev)1907 awg_probe(device_t dev)
1908 {
1909 if (!ofw_bus_status_okay(dev))
1910 return (ENXIO);
1911
1912 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
1913 return (ENXIO);
1914
1915 device_set_desc(dev, "Allwinner Gigabit Ethernet");
1916 return (BUS_PROBE_DEFAULT);
1917 }
1918
/*
 * Attach: allocate bus resources, set up external clocks/resets, reset
 * the MAC, create the DMA rings, hook the interrupt, and register the
 * ethernet interface and MII bus.
 *
 * NOTE(review): the error paths after bus_alloc_resources()/mtx_init()
 * return without releasing the resources acquired so far (bus
 * resources, mutex, DMA state, ifp) -- confirm whether a detach or
 * teardown path is expected to cover this.
 */
static int
awg_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct awg_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);

	/* Setup clocks and regulators */
	error = awg_setup_extres(dev);
	if (error != 0)
		return (error);

	/* Read MAC address before resetting the chip */
	awg_get_eaddr(dev, eaddr);

	/* Soft reset EMAC core */
	error = awg_reset(dev);
	if (error != 0)
		return (error);

	/* Setup DMA descriptors */
	error = awg_setup_dma(dev);
	if (error != 0)
		return (error);

	/* Install interrupt handler */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (error);
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, awg_start);
	if_setioctlfn(sc->ifp, awg_ioctl);
	if_setinitfn(sc->ifp, awg_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0);
#endif

	/* Attach MII driver */
	error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
	    awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		return (error);
	}

	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr);

	return (0);
}
1995
/* Device and MII method dispatch table for the awg driver. */
static device_method_t awg_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, awg_probe),
	DEVMETHOD(device_attach, awg_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg, awg_miibus_readreg),
	DEVMETHOD(miibus_writereg, awg_miibus_writereg),
	DEVMETHOD(miibus_statchg, awg_miibus_statchg),

	DEVMETHOD_END
};
2008
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t awg_driver = {
	"awg",
	awg_methods,
	sizeof(struct awg_softc),
};
2014
/* Register the driver under simplebus and declare module dependencies. */
DRIVER_MODULE(awg, simplebus, awg_driver, 0, 0);
DRIVER_MODULE(miibus, awg, miibus_driver, 0, 0);
MODULE_DEPEND(awg, ether, 1, 1, 1);
MODULE_DEPEND(awg, miibus, 1, 1, 1);
MODULE_DEPEND(awg, aw_sid, 1, 1, 1);
SIMPLEBUS_PNP_INFO(compat_data);
2021