1 /*
2 * Copyright (c) 2017 Stormshield.
3 * Copyright (c) 2017 Semihalf.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #include "opt_platform.h"
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/endian.h>
33 #include <sys/mbuf.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/smp.h>
41 #include <sys/taskqueue.h>
42 #ifdef MVNETA_KTR
43 #include <sys/ktr.h>
44 #endif
45
46 #include <net/ethernet.h>
47 #include <net/bpf.h>
48 #include <net/if.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/if_types.h>
53 #include <net/if_vlan_var.h>
54
55 #include <netinet/in_systm.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp_lro.h>
59
60 #include <sys/sockio.h>
61 #include <sys/bus.h>
62 #include <machine/bus.h>
63 #include <sys/rman.h>
64 #include <machine/resource.h>
65
66 #include <dev/clk/clk.h>
67
68 #include <dev/mii/mii.h>
69 #include <dev/mii/miivar.h>
70
71 #include <dev/mdio/mdio.h>
72
73 #include <arm/mv/mvvar.h>
74
75 #if !defined(__aarch64__)
76 #include <arm/mv/mvreg.h>
77 #include <arm/mv/mvwin.h>
78 #endif
79
80 #include "if_mvnetareg.h"
81 #include "if_mvnetavar.h"
82
83 #include "miibus_if.h"
84 #include "mdio_if.h"
85
86 #ifdef MVNETA_DEBUG
87 #define STATIC /* nothing */
88 #else
89 #define STATIC static
90 #endif
91
92 #define DASSERT(x) KASSERT((x), (#x))
93
94 #define A3700_TCLK_250MHZ 250000000
95
96 /* Device Register Initialization */
97 STATIC int mvneta_initreg(if_t);
98
99 /* Descriptor Ring Control for each queue */
100 STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int);
101 STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int);
102 STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int);
103 STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int);
104 STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int);
105 STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int);
106 STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int);
107 STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int);
108 STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int);
109 STATIC int mvneta_dma_create(struct mvneta_softc *);
110
111 /* Rx/Tx Queue Control */
112 STATIC int mvneta_rx_queue_init(if_t, int);
113 STATIC int mvneta_tx_queue_init(if_t, int);
114 STATIC int mvneta_rx_queue_enable(if_t, int);
115 STATIC int mvneta_tx_queue_enable(if_t, int);
116 STATIC void mvneta_rx_lockq(struct mvneta_softc *, int);
117 STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int);
118 STATIC void mvneta_tx_lockq(struct mvneta_softc *, int);
119 STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int);
120
121 /* Interrupt Handlers */
122 STATIC void mvneta_disable_intr(struct mvneta_softc *);
123 STATIC void mvneta_enable_intr(struct mvneta_softc *);
124 STATIC void mvneta_rxtxth_intr(void *);
125 STATIC int mvneta_misc_intr(struct mvneta_softc *);
126 STATIC void mvneta_tick(void *);
127 /* struct ifnet and mii callbacks */
128 STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **);
129 STATIC int mvneta_xmit_locked(struct mvneta_softc *, int);
130 #ifdef MVNETA_MULTIQUEUE
131 STATIC int mvneta_transmit(if_t, struct mbuf *);
132 #else /* !MVNETA_MULTIQUEUE */
133 STATIC void mvneta_start(if_t);
134 #endif
135 STATIC void mvneta_qflush(if_t);
136 STATIC void mvneta_tx_task(void *, int);
137 STATIC int mvneta_ioctl(if_t, u_long, caddr_t);
138 STATIC void mvneta_init(void *);
139 STATIC void mvneta_init_locked(void *);
140 STATIC void mvneta_stop(struct mvneta_softc *);
141 STATIC void mvneta_stop_locked(struct mvneta_softc *);
142 STATIC int mvneta_mediachange(if_t);
143 STATIC void mvneta_mediastatus(if_t, struct ifmediareq *);
144 STATIC void mvneta_portup(struct mvneta_softc *);
145 STATIC void mvneta_portdown(struct mvneta_softc *);
146
147 /* Link State Notify */
148 STATIC void mvneta_update_autoneg(struct mvneta_softc *, int);
149 STATIC int mvneta_update_media(struct mvneta_softc *, int);
150 STATIC void mvneta_adjust_link(struct mvneta_softc *);
151 STATIC void mvneta_update_eee(struct mvneta_softc *);
152 STATIC void mvneta_update_fc(struct mvneta_softc *);
153 STATIC void mvneta_link_isr(struct mvneta_softc *);
154 STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t);
155 STATIC void mvneta_linkup(struct mvneta_softc *);
156 STATIC void mvneta_linkdown(struct mvneta_softc *);
157 STATIC void mvneta_linkreset(struct mvneta_softc *);
158
159 /* Tx Subroutines */
160 STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int);
161 STATIC void mvneta_tx_set_csumflag(if_t,
162 struct mvneta_tx_desc *, struct mbuf *);
163 STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int);
164 STATIC void mvneta_tx_drain(struct mvneta_softc *);
165
166 /* Rx Subroutines */
167 STATIC int mvneta_rx(struct mvneta_softc *, int, int);
168 STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int);
169 STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int);
170 STATIC void mvneta_rx_set_csumflag(if_t,
171 struct mvneta_rx_desc *, struct mbuf *);
172 STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *);
173
174 /* MAC address filter */
175 STATIC void mvneta_filter_setup(struct mvneta_softc *);
176
177 /* sysctl(9) */
178 STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS);
179 STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS);
180 STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS);
181 STATIC void sysctl_mvneta_init(struct mvneta_softc *);
182
183 /* MIB */
184 STATIC void mvneta_clear_mib(struct mvneta_softc *);
185 STATIC uint64_t mvneta_read_mib(struct mvneta_softc *, int);
186 STATIC void mvneta_update_mib(struct mvneta_softc *);
187
188 /* Switch */
189 STATIC boolean_t mvneta_has_switch(device_t);
190
191 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
192 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
193
194 STATIC struct mtx mii_mutex;
195 STATIC int mii_init = 0;
196
197 /* Device */
198 STATIC int mvneta_detach(device_t);
199 /* MII */
200 STATIC int mvneta_miibus_readreg(device_t, int, int);
201 STATIC int mvneta_miibus_writereg(device_t, int, int, int);
202
203 static device_method_t mvneta_methods[] = {
204 /* Device interface */
205 DEVMETHOD(device_detach, mvneta_detach),
206 /* MII interface */
207 DEVMETHOD(miibus_readreg, mvneta_miibus_readreg),
208 DEVMETHOD(miibus_writereg, mvneta_miibus_writereg),
209 /* MDIO interface */
210 DEVMETHOD(mdio_readreg, mvneta_miibus_readreg),
211 DEVMETHOD(mdio_writereg, mvneta_miibus_writereg),
212
213 /* End */
214 DEVMETHOD_END
215 };
216
217 DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc));
218
219 DRIVER_MODULE(miibus, mvneta, miibus_driver, 0, 0);
220 DRIVER_MODULE(mdio, mvneta, mdio_driver, 0, 0);
221 MODULE_DEPEND(mvneta, mdio, 1, 1, 1);
222 MODULE_DEPEND(mvneta, ether, 1, 1, 1);
223 MODULE_DEPEND(mvneta, miibus, 1, 1, 1);
224 MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1);
225
226 /*
227 * List of MIB registers and names
228 */
229 enum mvneta_mib_idx
230 {
231 MVNETA_MIB_RX_GOOD_OCT_IDX,
232 MVNETA_MIB_RX_BAD_OCT_IDX,
233 MVNETA_MIB_TX_MAC_TRNS_ERR_IDX,
234 MVNETA_MIB_RX_GOOD_FRAME_IDX,
235 MVNETA_MIB_RX_BAD_FRAME_IDX,
236 MVNETA_MIB_RX_BCAST_FRAME_IDX,
237 MVNETA_MIB_RX_MCAST_FRAME_IDX,
238 MVNETA_MIB_RX_FRAME64_OCT_IDX,
239 MVNETA_MIB_RX_FRAME127_OCT_IDX,
240 MVNETA_MIB_RX_FRAME255_OCT_IDX,
241 MVNETA_MIB_RX_FRAME511_OCT_IDX,
242 MVNETA_MIB_RX_FRAME1023_OCT_IDX,
243 MVNETA_MIB_RX_FRAMEMAX_OCT_IDX,
244 MVNETA_MIB_TX_GOOD_OCT_IDX,
245 MVNETA_MIB_TX_GOOD_FRAME_IDX,
246 MVNETA_MIB_TX_EXCES_COL_IDX,
247 MVNETA_MIB_TX_MCAST_FRAME_IDX,
248 MVNETA_MIB_TX_BCAST_FRAME_IDX,
249 MVNETA_MIB_TX_MAC_CTL_ERR_IDX,
250 MVNETA_MIB_FC_SENT_IDX,
251 MVNETA_MIB_FC_GOOD_IDX,
252 MVNETA_MIB_FC_BAD_IDX,
253 MVNETA_MIB_PKT_UNDERSIZE_IDX,
254 MVNETA_MIB_PKT_FRAGMENT_IDX,
255 MVNETA_MIB_PKT_OVERSIZE_IDX,
256 MVNETA_MIB_PKT_JABBER_IDX,
257 MVNETA_MIB_MAC_RX_ERR_IDX,
258 MVNETA_MIB_MAC_CRC_ERR_IDX,
259 MVNETA_MIB_MAC_COL_IDX,
260 MVNETA_MIB_MAC_LATE_COL_IDX,
261 };
262
263 STATIC struct mvneta_mib_def {
264 uint32_t regnum;
265 int reg64;
266 const char *sysctl_name;
267 const char *desc;
268 } mvneta_mib_list[] = {
269 [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1,
270 "rx_good_oct", "Good Octets Rx"},
271 [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0,
272 "rx_bad_oct", "Bad Octets Rx"},
273 [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0,
274 "tx_mac_err", "MAC Transmit Error"},
275 [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0,
276 "rx_good_frame", "Good Frames Rx"},
277 [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0,
278 "rx_bad_frame", "Bad Frames Rx"},
279 [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0,
280 "rx_bcast_frame", "Broadcast Frames Rx"},
281 [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0,
282 "rx_mcast_frame", "Multicast Frames Rx"},
283 [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0,
284 "rx_frame_1_64", "Frame Size 1 - 64"},
285 [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0,
286 "rx_frame_65_127", "Frame Size 65 - 127"},
287 [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0,
288 "rx_frame_128_255", "Frame Size 128 - 255"},
289 [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0,
290 "rx_frame_256_511", "Frame Size 256 - 511"},
291 [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0,
292 "rx_frame_512_1023", "Frame Size 512 - 1023"},
293 [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0,
294 "rx_frame_1024_max", "Frame Size 1024 - Max"},
295 [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1,
296 "tx_good_oct", "Good Octets Tx"},
297 [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0,
298 "tx_good_frame", "Good Frames Tx"},
299 [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0,
300 "tx_exces_collision", "Excessive Collision"},
301 [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0,
302 "tx_mcast_frame", "Multicast Frames Tx"},
303 [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0,
304 "tx_bcast_frame", "Broadcast Frames Tx"},
305 [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0,
306 "tx_mac_ctl_err", "Unknown MAC Control"},
307 [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0,
308 "fc_tx", "Flow Control Tx"},
309 [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0,
310 "fc_rx_good", "Good Flow Control Rx"},
311 [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0,
312 "fc_rx_bad", "Bad Flow Control Rx"},
313 [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0,
314 "pkt_undersize", "Undersized Packets Rx"},
315 [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0,
316 "pkt_fragment", "Fragmented Packets Rx"},
317 [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0,
318 "pkt_oversize", "Oversized Packets Rx"},
319 [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0,
320 "pkt_jabber", "Jabber Packets Rx"},
321 [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0,
322 "mac_rx_err", "MAC Rx Errors"},
323 [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0,
324 "mac_crc_err", "MAC CRC Errors"},
325 [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0,
326 "mac_collision", "MAC Collision"},
327 [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0,
328 "mac_late_collision", "MAC Late Collision"},
329 };
330
331 static struct resource_spec res_spec[] = {
332 { SYS_RES_MEMORY, 0, RF_ACTIVE },
333 { SYS_RES_IRQ, 0, RF_ACTIVE },
334 { -1, 0}
335 };
336
337 static struct {
338 driver_intr_t *handler;
339 char * description;
340 } mvneta_intrs[] = {
341 { mvneta_rxtxth_intr, "MVNETA aggregated interrupt" },
342 };
343
344 static int
345 mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr)
346 {
347 unsigned int mac_h;
348 unsigned int mac_l;
349
350 mac_l = (addr[4] << 8) | (addr[5]);
351 mac_h = (addr[0] << 24) | (addr[1] << 16) |
352 (addr[2] << 8) | (addr[3] << 0);
353
354 MVNETA_WRITE(sc, MVNETA_MACAL, mac_l);
355 MVNETA_WRITE(sc, MVNETA_MACAH, mac_h);
356 return (0);
357 }
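/*
 * Illustration only (the address below is hypothetical, not read from
 * hardware): the MAC is split across two registers, with the first four
 * octets in MACAH and the last two in the low half of MACAL. Programming
 * 00:11:22:33:44:55 therefore results in
 *
 *	MVNETA_MACAH = 0x00112233	(addr[0]..addr[3])
 *	MVNETA_MACAL = 0x00004455	(addr[4]..addr[5])
 *
 * mvneta_get_mac_address() below performs the inverse unpacking.
 */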
358
359 static int
360 mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr)
361 {
362 uint32_t mac_l, mac_h;
363
364 #ifdef FDT
365 if (mvneta_fdt_mac_address(sc, addr) == 0)
366 return (0);
367 #endif
368 /*
369 * Fall back -- use the currently programmed address.
370 */
371 mac_l = MVNETA_READ(sc, MVNETA_MACAL);
372 mac_h = MVNETA_READ(sc, MVNETA_MACAH);
373 if (mac_l == 0 && mac_h == 0) {
374 /*
375 * Generate pseudo-random MAC.
376 * Set lower part to random number | unit number.
377 */
378 mac_l = arc4random() & ~0xff;
379 mac_l |= device_get_unit(sc->dev) & 0xff;
380 mac_h = arc4random();
381 mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */
382 if (bootverbose) {
383 device_printf(sc->dev,
384 "Could not acquire MAC address. "
385 "Using randomized one.\n");
386 }
387 }
388
389 addr[0] = (mac_h & 0xff000000) >> 24;
390 addr[1] = (mac_h & 0x00ff0000) >> 16;
391 addr[2] = (mac_h & 0x0000ff00) >> 8;
392 addr[3] = (mac_h & 0x000000ff);
393 addr[4] = (mac_l & 0x0000ff00) >> 8;
394 addr[5] = (mac_l & 0x000000ff);
395 return (0);
396 }
397
398 STATIC boolean_t
399 mvneta_has_switch(device_t self)
400 {
401 #ifdef FDT
402 return (mvneta_has_switch_fdt(self));
403 #endif
404
405 return (false);
406 }
407
408 STATIC int
409 mvneta_dma_create(struct mvneta_softc *sc)
410 {
411 size_t maxsize, maxsegsz;
412 size_t q;
413 int error;
414
415 /*
416 * Create Tx DMA
417 */
418 maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT;
419
420 error = bus_dma_tag_create(
421 bus_get_dma_tag(sc->dev), /* parent */
422 16, 0, /* alignment, boundary */
423 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
424 BUS_SPACE_MAXADDR, /* highaddr */
425 NULL, NULL, /* filtfunc, filtfuncarg */
426 maxsize, /* maxsize */
427 1, /* nsegments */
428 maxsegsz, /* maxsegsz */
429 0, /* flags */
430 NULL, NULL, /* lockfunc, lockfuncarg */
431 &sc->tx_dtag); /* dmat */
432 if (error != 0) {
433 device_printf(sc->dev,
434 "Failed to create DMA tag for Tx descriptors.\n");
435 goto fail;
436 }
437 error = bus_dma_tag_create(
438 bus_get_dma_tag(sc->dev), /* parent */
439 1, 0, /* alignment, boundary */
440 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
441 BUS_SPACE_MAXADDR, /* highaddr */
442 NULL, NULL, /* filtfunc, filtfuncarg */
443 MVNETA_MAX_FRAME, /* maxsize */
444 MVNETA_TX_SEGLIMIT, /* nsegments */
445 MVNETA_MAX_FRAME, /* maxsegsz */
446 BUS_DMA_ALLOCNOW, /* flags */
447 NULL, NULL, /* lockfunc, lockfuncarg */
448 &sc->txmbuf_dtag);
449 if (error != 0) {
450 device_printf(sc->dev,
451 "Failed to create DMA tag for Tx mbufs.\n");
452 goto fail;
453 }
454
455 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
456 error = mvneta_ring_alloc_tx_queue(sc, q);
457 if (error != 0) {
458 device_printf(sc->dev,
459 "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
460 goto fail;
461 }
462 }
463
464 /*
465 * Create Rx DMA.
466 */
467 /* Create tag for Rx descriptors */
468 error = bus_dma_tag_create(
469 bus_get_dma_tag(sc->dev), /* parent */
470 32, 0, /* alignment, boundary */
471 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
472 BUS_SPACE_MAXADDR, /* highaddr */
473 NULL, NULL, /* filtfunc, filtfuncarg */
474 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
475 1, /* nsegments */
476 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
477 0, /* flags */
478 NULL, NULL, /* lockfunc, lockfuncarg */
479 &sc->rx_dtag); /* dmat */
480 if (error != 0) {
481 device_printf(sc->dev,
482 "Failed to create DMA tag for Rx descriptors.\n");
483 goto fail;
484 }
485
486 /* Create tag for Rx buffers */
487 error = bus_dma_tag_create(
488 bus_get_dma_tag(sc->dev), /* parent */
489 32, 0, /* alignment, boundary */
490 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
491 BUS_SPACE_MAXADDR, /* highaddr */
492 NULL, NULL, /* filtfunc, filtfuncarg */
493 MVNETA_MAX_FRAME, 1, /* maxsize, nsegments */
494 MVNETA_MAX_FRAME, /* maxsegsz */
495 0, /* flags */
496 NULL, NULL, /* lockfunc, lockfuncarg */
497 &sc->rxbuf_dtag); /* dmat */
498 if (error != 0) {
499 device_printf(sc->dev,
500 "Failed to create DMA tag for Rx buffers.\n");
501 goto fail;
502 }
503
504 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
505 if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
506 device_printf(sc->dev,
507 "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
508 goto fail;
509 }
510 }
511
512 return (0);
513 fail:
514 mvneta_detach(sc->dev);
515
516 return (error);
517 }
518
519 /* ARGSUSED */
520 int
521 mvneta_attach(device_t self)
522 {
523 struct mvneta_softc *sc;
524 if_t ifp;
525 device_t child;
526 int ifm_target;
527 int q, error;
528 #if !defined(__aarch64__)
529 uint32_t reg;
530 #endif
531 clk_t clk;
532
533 sc = device_get_softc(self);
534 sc->dev = self;
535
536 mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
537
538 error = bus_alloc_resources(self, res_spec, sc->res);
539 if (error) {
540 device_printf(self, "could not allocate resources\n");
541 return (ENXIO);
542 }
543
544 sc->version = MVNETA_READ(sc, MVNETA_PV);
545 device_printf(self, "version is %x\n", sc->version);
546 callout_init(&sc->tick_ch, 0);
547
548 /*
549 * Make sure the DMA engines are in reset state.
550 */
551 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
552 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
553
554 error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0,
555 &clk);
556 if (error != 0) {
557 #if defined(__aarch64__)
558 device_printf(sc->dev,
559 "Cannot get clock, using default frequency: %d\n",
560 A3700_TCLK_250MHZ);
561 sc->clk_freq = A3700_TCLK_250MHZ;
562 #else
563 device_printf(sc->dev,
564 "Cannot get clock, using get_tclk()\n");
565 sc->clk_freq = get_tclk();
566 #endif
567 } else {
568 error = clk_get_freq(clk, &sc->clk_freq);
569 if (error != 0) {
570 device_printf(sc->dev,
571 "Cannot obtain frequency from parent clock\n");
572 bus_release_resources(sc->dev, res_spec, sc->res);
573 return (error);
574 }
575 }
576
577 #if !defined(__aarch64__)
578 /*
579 * Disable port snoop for buffers and descriptors
580 * to avoid L2 caching of both without DRAM copy.
581 * Obtain coherency settings from the first MBUS
582 * window attribute.
583 */
584 if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
585 reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
586 reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
587 reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
588 MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
589 }
590 #endif
591
592 error = bus_setup_intr(self, sc->res[1],
593 INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc,
594 &sc->ih_cookie[0]);
595 if (error) {
596 device_printf(self, "could not setup %s\n",
597 mvneta_intrs[0].description);
598 mvneta_detach(self);
599 return (error);
600 }
601
602 /*
603 * MAC address
604 */
605 if (mvneta_get_mac_address(sc, sc->enaddr)) {
606 device_printf(self, "no mac address.\n");
607 return (ENXIO);
608 }
609 mvneta_set_mac_address(sc, sc->enaddr);
610
611 mvneta_disable_intr(sc);
612
613 /* Allocate network interface */
614 ifp = sc->ifp = if_alloc(IFT_ETHER);
615 if_initname(ifp, device_get_name(self), device_get_unit(self));
616
617 /*
618 * We can support 802.1Q VLAN-sized frames and jumbo
619 * Ethernet frames.
620 */
621 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU, 0);
622
623 if_setsoftc(ifp, sc);
624 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
625 #ifdef MVNETA_MULTIQUEUE
626 if_settransmitfn(ifp, mvneta_transmit);
627 if_setqflushfn(ifp, mvneta_qflush);
628 #else /* !MVNETA_MULTIQUEUE */
629 if_setstartfn(ifp, mvneta_start);
630 if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1);
631 if_setsendqready(ifp);
632 #endif
633 if_setinitfn(ifp, mvneta_init);
634 if_setioctlfn(ifp, mvneta_ioctl);
635
636 /*
637 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware.
638 */
639 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
640
641 /*
642 * VLAN hardware tagging is not supported, but it is required to
643 * perform VLAN hardware checksums, so the tagging is done in
644 * the driver.
645 */
646 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM, 0);
647
648 /*
649 * Currently IPv6 HW checksum is broken, so make sure it is disabled.
650 */
651 if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM_IPV6);
652 if_setcapenable(ifp, if_getcapabilities(ifp));
653
654 /*
655 * Disabled option(s):
656 * - Support for Large Receive Offload
657 */
658 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
659
660 if_sethwassist(ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
661
662 sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */
663
664 /*
665 * Device DMA Buffer allocation.
666 * Handles resource deallocation in case of failure.
667 */
668 error = mvneta_dma_create(sc);
669 if (error != 0) {
670 mvneta_detach(self);
671 return (error);
672 }
673
674 /* Initialize queues */
675 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
676 error = mvneta_ring_init_tx_queue(sc, q);
677 if (error != 0) {
678 mvneta_detach(self);
679 return (error);
680 }
681 }
682
683 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
684 error = mvneta_ring_init_rx_queue(sc, q);
685 if (error != 0) {
686 mvneta_detach(self);
687 return (error);
688 }
689 }
690
691 /*
692 * Enable DMA engines and Initialize Device Registers.
693 */
694 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
695 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
696 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
697 mvneta_sc_lock(sc);
698 mvneta_filter_setup(sc);
699 mvneta_sc_unlock(sc);
700 mvneta_initreg(ifp);
701
702 /*
703 * Now MAC is working, setup MII.
704 */
705 if (mii_init == 0) {
706 /*
707 * The MII bus is shared by all MACs and all PHYs in the SoC,
708 * so serializing the bus access should be safe.
709 */
710 mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF);
711 mii_init = 1;
712 }
713
714 /* Attach PHY(s) */
715 if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
716 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
717 mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
718 MII_OFFSET_ANY, 0);
719 if (error != 0) {
720 device_printf(self, "MII attach failed, error: %d\n",
721 error);
722 ether_ifdetach(sc->ifp);
723 mvneta_detach(self);
724 return (error);
725 }
726 sc->mii = device_get_softc(sc->miibus);
727 sc->phy_attached = 1;
728
729 /* Disable auto-negotiation in MAC - rely on PHY layer */
730 mvneta_update_autoneg(sc, FALSE);
731 } else if (sc->use_inband_status == TRUE) {
732 /* In-band link status */
733 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
734 mvneta_mediastatus);
735
736 /* Configure media */
737 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
738 0, NULL);
739 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
740 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
741 0, NULL);
742 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
743 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
744 0, NULL);
745 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
746 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
747
748 /* Enable auto-negotiation */
749 mvneta_update_autoneg(sc, TRUE);
750
751 mvneta_sc_lock(sc);
752 if (MVNETA_IS_LINKUP(sc))
753 mvneta_linkup(sc);
754 else
755 mvneta_linkdown(sc);
756 mvneta_sc_unlock(sc);
757
758 } else {
759 /* Fixed-link, use predefined values */
760 mvneta_update_autoneg(sc, FALSE);
761 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
762 mvneta_mediastatus);
763
764 ifm_target = IFM_ETHER;
765 switch (sc->phy_speed) {
766 case 2500:
767 if (sc->phy_mode != MVNETA_PHY_SGMII &&
768 sc->phy_mode != MVNETA_PHY_QSGMII) {
769 device_printf(self,
770 "2.5G speed can work only in (Q)SGMII mode\n");
771 ether_ifdetach(sc->ifp);
772 mvneta_detach(self);
773 return (ENXIO);
774 }
775 ifm_target |= IFM_2500_T;
776 break;
777 case 1000:
778 ifm_target |= IFM_1000_T;
779 break;
780 case 100:
781 ifm_target |= IFM_100_TX;
782 break;
783 case 10:
784 ifm_target |= IFM_10_T;
785 break;
786 default:
787 ether_ifdetach(sc->ifp);
788 mvneta_detach(self);
789 return (ENXIO);
790 }
791
792 if (sc->phy_fdx)
793 ifm_target |= IFM_FDX;
794 else
795 ifm_target |= IFM_HDX;
796
797 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
798 ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
799 if_link_state_change(sc->ifp, LINK_STATE_UP);
800
801 if (mvneta_has_switch(self)) {
802 if (bootverbose)
803 device_printf(self, "This device is attached to a switch\n");
804 child = device_add_child(sc->dev, "mdio", DEVICE_UNIT_ANY);
805 if (child == NULL) {
806 ether_ifdetach(sc->ifp);
807 mvneta_detach(self);
808 return (ENXIO);
809 }
810 bus_generic_attach(sc->dev);
811 bus_generic_attach(child);
812 }
813
814 /* Configure MAC media */
815 mvneta_update_media(sc, ifm_target);
816 }
817
818 ether_ifattach(ifp, sc->enaddr);
819
820 callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
821
822 sysctl_mvneta_init(sc);
823
824 return (0);
825 }
826
827 STATIC int
828 mvneta_detach(device_t dev)
829 {
830 struct mvneta_softc *sc;
831 int q;
832
833 sc = device_get_softc(dev);
834
835 if (device_is_attached(dev)) {
836 mvneta_stop(sc);
837 callout_drain(&sc->tick_ch);
838 ether_ifdetach(sc->ifp);
839 }
840
841 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
842 mvneta_ring_dealloc_rx_queue(sc, q);
843 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
844 mvneta_ring_dealloc_tx_queue(sc, q);
845
846 device_delete_children(dev);
847
848 if (sc->ih_cookie[0] != NULL)
849 bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);
850
851 if (sc->tx_dtag != NULL)
852 bus_dma_tag_destroy(sc->tx_dtag);
853 if (sc->rx_dtag != NULL)
854 bus_dma_tag_destroy(sc->rx_dtag);
855 if (sc->txmbuf_dtag != NULL)
856 bus_dma_tag_destroy(sc->txmbuf_dtag);
857 if (sc->rxbuf_dtag != NULL)
858 bus_dma_tag_destroy(sc->rxbuf_dtag);
859
860 bus_release_resources(dev, res_spec, sc->res);
861
862 if (sc->ifp)
863 if_free(sc->ifp);
864
865 if (mtx_initialized(&sc->mtx))
866 mtx_destroy(&sc->mtx);
867
868 return (0);
869 }
870
871 /*
872 * MII
873 */
874 STATIC int
875 mvneta_miibus_readreg(device_t dev, int phy, int reg)
876 {
877 struct mvneta_softc *sc;
878 if_t ifp;
879 uint32_t smi, val;
880 int i;
881
882 sc = device_get_softc(dev);
883 ifp = sc->ifp;
884
885 mtx_lock(&mii_mutex);
886
887 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
888 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
889 break;
890 DELAY(1);
891 }
892 if (i == MVNETA_PHY_TIMEOUT) {
893 if_printf(ifp, "SMI busy timeout\n");
894 mtx_unlock(&mii_mutex);
895 return (-1);
896 }
897
898 smi = MVNETA_SMI_PHYAD(phy) |
899 MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ;
900 MVNETA_WRITE(sc, MVNETA_SMI, smi);
901
902 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
903 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
904 break;
905 DELAY(1);
906 }
907
908 if (i == MVNETA_PHY_TIMEOUT) {
909 if_printf(ifp, "SMI busy timeout\n");
910 mtx_unlock(&mii_mutex);
911 return (-1);
912 }
913 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
914 smi = MVNETA_READ(sc, MVNETA_SMI);
915 if (smi & MVNETA_SMI_READVALID)
916 break;
917 DELAY(1);
918 }
919
920 if (i == MVNETA_PHY_TIMEOUT) {
921 if_printf(ifp, "SMI busy timeout\n");
922 mtx_unlock(&mii_mutex);
923 return (-1);
924 }
925
926 mtx_unlock(&mii_mutex);
927
928 #ifdef MVNETA_KTR
929 CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", if_getname(ifp), i,
930 MVNETA_PHY_TIMEOUT);
931 #endif
932
933 val = smi & MVNETA_SMI_DATA_MASK;
934
935 #ifdef MVNETA_KTR
936 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_getname(ifp), phy,
937 reg, val);
938 #endif
939 return (val);
940 }
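/*
 * For illustration (a hypothetical request, using only the macros
 * referenced above): reading the BMSR (register 1) of the PHY at
 * address 0 would issue the SMI command word
 *
 *	smi = MVNETA_SMI_PHYAD(0) | MVNETA_SMI_REGAD(1) |
 *	    MVNETA_SMI_OPCODE_READ;
 *
 * and the result is taken from the low data bits of MVNETA_SMI once
 * MVNETA_SMI_READVALID is set, exactly as done in the function above.
 */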
941
942 STATIC int
943 mvneta_miibus_writereg(device_t dev, int phy, int reg, int val)
944 {
945 struct mvneta_softc *sc;
946 if_t ifp;
947 uint32_t smi;
948 int i;
949
950 sc = device_get_softc(dev);
951 ifp = sc->ifp;
952 #ifdef MVNETA_KTR
953 CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", if_name(ifp),
954 phy, reg, val);
955 #endif
956
957 mtx_lock(&mii_mutex);
958
959 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
960 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
961 break;
962 DELAY(1);
963 }
964 if (i == MVNETA_PHY_TIMEOUT) {
965 if_printf(ifp, "SMI busy timeout\n");
966 mtx_unlock(&mii_mutex);
967 return (0);
968 }
969
970 smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) |
971 MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK);
972 MVNETA_WRITE(sc, MVNETA_SMI, smi);
973
974 for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) {
975 if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0)
976 break;
977 DELAY(1);
978 }
979
980 mtx_unlock(&mii_mutex);
981
982 if (i == MVNETA_PHY_TIMEOUT)
983 if_printf(ifp, "phy write timed out\n");
984
985 return (0);
986 }
987
988 STATIC void
989 mvneta_portup(struct mvneta_softc *sc)
990 {
991 int q;
992
993 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
994 mvneta_rx_lockq(sc, q);
995 mvneta_rx_queue_enable(sc->ifp, q);
996 mvneta_rx_unlockq(sc, q);
997 }
998
999 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1000 mvneta_tx_lockq(sc, q);
1001 mvneta_tx_queue_enable(sc->ifp, q);
1002 mvneta_tx_unlockq(sc, q);
1003 }
1004
1005 }
1006
1007 STATIC void
1008 mvneta_portdown(struct mvneta_softc *sc)
1009 {
1010 struct mvneta_rx_ring *rx;
1011 struct mvneta_tx_ring *tx;
1012 int q, cnt;
1013 uint32_t reg;
1014
1015 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1016 rx = MVNETA_RX_RING(sc, q);
1017 mvneta_rx_lockq(sc, q);
1018 rx->queue_status = MVNETA_QUEUE_DISABLED;
1019 mvneta_rx_unlockq(sc, q);
1020 }
1021
1022 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1023 tx = MVNETA_TX_RING(sc, q);
1024 mvneta_tx_lockq(sc, q);
1025 tx->queue_status = MVNETA_QUEUE_DISABLED;
1026 mvneta_tx_unlockq(sc, q);
1027 }
1028
1029 /* Wait for all Rx activity to terminate. */
1030 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1031 reg = MVNETA_RQC_DIS(reg);
1032 MVNETA_WRITE(sc, MVNETA_RQC, reg);
1033 cnt = 0;
1034 do {
1035 if (cnt >= RX_DISABLE_TIMEOUT) {
1036 if_printf(sc->ifp,
1037 "timeout for RX stopped. rqc 0x%x\n", reg);
1038 break;
1039 }
1040 cnt++;
1041 reg = MVNETA_READ(sc, MVNETA_RQC);
1042 } while ((reg & MVNETA_RQC_EN_MASK) != 0);
1043
1044 /* Wait for all Tx activity to terminate. */
1045 reg = MVNETA_READ(sc, MVNETA_PIE);
1046 reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK;
1047 MVNETA_WRITE(sc, MVNETA_PIE, reg);
1048
1049 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1050 reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK;
1051 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1052
1053 reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK;
1054 reg = MVNETA_TQC_DIS(reg);
1055 MVNETA_WRITE(sc, MVNETA_TQC, reg);
1056 cnt = 0;
1057 do {
1058 if (cnt >= TX_DISABLE_TIMEOUT) {
1059 if_printf(sc->ifp,
1060 "timeout for TX stopped. tqc 0x%x\n", reg);
1061 break;
1062 }
1063 cnt++;
1064 reg = MVNETA_READ(sc, MVNETA_TQC);
1065 } while ((reg & MVNETA_TQC_EN_MASK) != 0);
1066
1067 /* Wait until the Tx FIFO is empty. */
1068 cnt = 0;
1069 do {
1070 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) {
1071 if_printf(sc->ifp,
1072 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1073 break;
1074 }
1075 cnt++;
1076 reg = MVNETA_READ(sc, MVNETA_PS0);
1077 } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) &&
1078 ((reg & MVNETA_PS0_TXINPROG) != 0));
1079 }
1080
1081 /*
1082 * Device Register Initialization
1083 * Reset device registers to the device driver's default values.
1084 * The device is not enabled here.
1085 */
1086 STATIC int
1087 mvneta_initreg(if_t ifp)
1088 {
1089 struct mvneta_softc *sc;
1090 int q;
1091 uint32_t reg;
1092
1093 sc = if_getsoftc(ifp);
1094 #ifdef MVNETA_KTR
1095 CTR1(KTR_SPARE2, "%s initializing device register", if_name(ifp));
1096 #endif
1097
1098 /* Disable Legacy WRR, Disable EJP, Release from reset. */
1099 MVNETA_WRITE(sc, MVNETA_TQC_1, 0);
1100 /* Enable mbus retry. */
1101 MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN);
1102
1103 /* Init TX/RX Queue Registers */
1104 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1105 mvneta_rx_lockq(sc, q);
1106 if (mvneta_rx_queue_init(ifp, q) != 0) {
1107 device_printf(sc->dev,
1108 "initialization failed: cannot initialize queue\n");
1109 mvneta_rx_unlockq(sc, q);
1110 return (ENOBUFS);
1111 }
1112 mvneta_rx_unlockq(sc, q);
1113 }
1114 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1115 mvneta_tx_lockq(sc, q);
1116 if (mvneta_tx_queue_init(ifp, q) != 0) {
1117 device_printf(sc->dev,
1118 "initialization failed: cannot initialize queue\n");
1119 mvneta_tx_unlockq(sc, q);
1120 return (ENOBUFS);
1121 }
1122 mvneta_tx_unlockq(sc, q);
1123 }
1124
1125 /*
1126 * Ethernet Unit Control - disable automatic PHY management by HW.
1127 * In case the port uses SMI-controlled PHY, poll its status with
1128 * mii_tick() and update MAC settings accordingly.
1129 */
1130 reg = MVNETA_READ(sc, MVNETA_EUC);
1131 reg &= ~MVNETA_EUC_POLLING;
1132 MVNETA_WRITE(sc, MVNETA_EUC, reg);
1133
1134 /* EEE: Low Power Idle */
1135 reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI);
1136 reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS);
1137 MVNETA_WRITE(sc, MVNETA_LPIC0, reg);
1138
1139 reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW);
1140 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
1141
1142 reg = MVNETA_LPIC2_MUSTSET;
1143 MVNETA_WRITE(sc, MVNETA_LPIC2, reg);
1144
1145 /* Port MAC Control set 0 */
1146 reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */
1147 reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */
1148 reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
1149 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
1150
1151 /* Port MAC Control set 2 */
1152 reg = MVNETA_READ(sc, MVNETA_PMACC2);
1153 switch (sc->phy_mode) {
1154 case MVNETA_PHY_QSGMII:
1155 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1156 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII);
1157 break;
1158 case MVNETA_PHY_SGMII:
1159 reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN);
1160 MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII);
1161 break;
1162 case MVNETA_PHY_RGMII:
1163 case MVNETA_PHY_RGMII_ID:
1164 reg |= MVNETA_PMACC2_RGMIIEN;
1165 break;
1166 }
1167 reg |= MVNETA_PMACC2_MUSTSET;
1168 reg &= ~MVNETA_PMACC2_PORTMACRESET;
1169 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
1170
1171 /* Port Configuration Extended: enable Tx CRC generation */
1172 reg = MVNETA_READ(sc, MVNETA_PXCX);
1173 reg &= ~MVNETA_PXCX_TXCRCDIS;
1174 MVNETA_WRITE(sc, MVNETA_PXCX, reg);
1175
1176 /* Clear MIB counter registers (cleared by read). */
1177 mvneta_sc_lock(sc);
1178 mvneta_clear_mib(sc);
1179 mvneta_sc_unlock(sc);
1180
1181 /* Set SDC register except IPGINT bits */
1182 reg = MVNETA_SDC_RXBSZ_16_64BITWORDS;
1183 reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS;
1184 reg |= MVNETA_SDC_BLMR;
1185 reg |= MVNETA_SDC_BLMT;
1186 MVNETA_WRITE(sc, MVNETA_SDC, reg);
1187
1188 return (0);
1189 }
1190
1191 STATIC void
1192 mvneta_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1193 {
1194
1195 if (error != 0)
1196 return;
1197 *(bus_addr_t *)arg = segs->ds_addr;
1198 }
1199
1200 STATIC int
1201 mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
1202 {
1203 struct mvneta_rx_ring *rx;
1204 struct mvneta_buf *rxbuf;
1205 bus_dmamap_t dmap;
1206 int i, error;
1207
1208 if (q >= MVNETA_RX_QNUM_MAX)
1209 return (EINVAL);
1210
1211 rx = MVNETA_RX_RING(sc, q);
1212 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
1213 /* Allocate DMA memory for Rx descriptors */
1214 error = bus_dmamem_alloc(sc->rx_dtag,
1215 (void**)&(rx->desc),
1216 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1217 &rx->desc_map);
1218 if (error != 0 || rx->desc == NULL)
1219 goto fail;
1220 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
1221 rx->desc,
1222 sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT,
1223 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
1224 if (error != 0)
1225 goto fail;
1226
1227 for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1228 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
1229 if (error != 0) {
1230 device_printf(sc->dev,
1231 "Failed to create DMA map for Rx buffer num: %d\n", i);
1232 goto fail;
1233 }
1234 rxbuf = &rx->rxbuf[i];
1235 rxbuf->dmap = dmap;
1236 rxbuf->m = NULL;
1237 }
1238
1239 return (0);
1240 fail:
1241 mvneta_rx_lockq(sc, q);
1242 mvneta_ring_flush_rx_queue(sc, q);
1243 mvneta_rx_unlockq(sc, q);
1244 mvneta_ring_dealloc_rx_queue(sc, q);
1245 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1246 return (error);
1247 }
1248
1249 STATIC int
1250 mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
1251 {
1252 struct mvneta_tx_ring *tx;
1253 int error;
1254
1255 if (q >= MVNETA_TX_QNUM_MAX)
1256 return (EINVAL);
1257 tx = MVNETA_TX_RING(sc, q);
1258 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
1259 error = bus_dmamem_alloc(sc->tx_dtag,
1260 (void**)&(tx->desc),
1261 BUS_DMA_NOWAIT | BUS_DMA_ZERO,
1262 &tx->desc_map);
1263 if (error != 0 || tx->desc == NULL)
1264 goto fail;
1265 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
1266 tx->desc,
1267 sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT,
1268 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
1269 if (error != 0)
1270 goto fail;
1271
1272 #ifdef MVNETA_MULTIQUEUE
1273 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
1274 &tx->ring_mtx);
1275 if (tx->br == NULL) {
1276 device_printf(sc->dev,
1277 "Could not setup buffer ring for TxQ(%d)\n", q);
1278 error = ENOMEM;
1279 goto fail;
1280 }
1281 #endif
1282
1283 return (0);
1284 fail:
1285 mvneta_tx_lockq(sc, q);
1286 mvneta_ring_flush_tx_queue(sc, q);
1287 mvneta_tx_unlockq(sc, q);
1288 mvneta_ring_dealloc_tx_queue(sc, q);
1289 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1290 return (error);
1291 }
1292
1293 STATIC void
1294 mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
1295 {
1296 struct mvneta_tx_ring *tx;
1297 struct mvneta_buf *txbuf;
1298 void *kva;
1299 int error;
1300 int i;
1301
1302 if (q >= MVNETA_TX_QNUM_MAX)
1303 return;
1304 tx = MVNETA_TX_RING(sc, q);
1305
1306 if (tx->taskq != NULL) {
1307 /* Remove task */
1308 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
1309 taskqueue_drain(tx->taskq, &tx->task);
1310 }
1311 #ifdef MVNETA_MULTIQUEUE
1312 if (tx->br != NULL)
1313 drbr_free(tx->br, M_DEVBUF);
1314 #endif
1315
1316 if (sc->txmbuf_dtag != NULL) {
1317 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1318 txbuf = &tx->txbuf[i];
1319 if (txbuf->dmap != NULL) {
1320 error = bus_dmamap_destroy(sc->txmbuf_dtag,
1321 txbuf->dmap);
1322 if (error != 0) {
1323 panic("%s: map busy for Tx descriptor (Q%d, %d)",
1324 __func__, q, i);
1325 }
1326 }
1327 }
1328 }
1329
1330 if (tx->desc_pa != 0)
1331 bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
1332
1333 kva = (void *)tx->desc;
1334 if (kva != NULL)
1335 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
1336
1337 if (mtx_name(&tx->ring_mtx) != NULL)
1338 mtx_destroy(&tx->ring_mtx);
1339
1340 memset(tx, 0, sizeof(*tx));
1341 }
1342
1343 STATIC void
1344 mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
1345 {
1346 struct mvneta_rx_ring *rx;
1347 struct lro_ctrl *lro;
1348 void *kva;
1349
1350 if (q >= MVNETA_RX_QNUM_MAX)
1351 return;
1352
1353 rx = MVNETA_RX_RING(sc, q);
1354
1355 if (rx->desc_pa != 0)
1356 bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
1357
1358 kva = (void *)rx->desc;
1359 if (kva != NULL)
1360 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
1361
1362 lro = &rx->lro;
1363 tcp_lro_free(lro);
1364
1365 if (mtx_name(&rx->ring_mtx) != NULL)
1366 mtx_destroy(&rx->ring_mtx);
1367
1368 memset(rx, 0, sizeof(*rx));
1369 }
1370
1371 STATIC int
1372 mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
1373 {
1374 struct mvneta_rx_ring *rx;
1375 struct lro_ctrl *lro;
1376 int error;
1377
1378 if (q >= MVNETA_RX_QNUM_MAX)
1379 return (0);
1380
1381 rx = MVNETA_RX_RING(sc, q);
1382 rx->dma = rx->cpu = 0;
1383 rx->queue_th_received = MVNETA_RXTH_COUNT;
1384 rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */
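/*
 * Worked example (illustrative): with the default Armada 3700 TCLK of
 * 250 MHz (A3700_TCLK_250MHZ) this evaluates to
 * (250000000 / 1000) / 10 = 25000 clock cycles, i.e. roughly 0.1 ms of
 * interrupt coalescing time, programmed via MVNETA_PRXITTH below.
 */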
1385
1386 /* Initialize LRO */
1387 rx->lro_enabled = FALSE;
1388 if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) {
1389 lro = &rx->lro;
1390 error = tcp_lro_init(lro);
1391 if (error != 0)
1392 device_printf(sc->dev, "LRO Initialization failed!\n");
1393 else {
1394 rx->lro_enabled = TRUE;
1395 lro->ifp = sc->ifp;
1396 }
1397 }
1398
1399 return (0);
1400 }
1401
1402 STATIC int
1403 mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
1404 {
1405 struct mvneta_tx_ring *tx;
1406 struct mvneta_buf *txbuf;
1407 int i, error;
1408
1409 if (q >= MVNETA_TX_QNUM_MAX)
1410 return (0);
1411
1412 tx = MVNETA_TX_RING(sc, q);
1413
1414 /* Tx handle */
1415 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1416 txbuf = &tx->txbuf[i];
1417 txbuf->m = NULL;
1418 /* Tx handle needs DMA map for busdma_load_mbuf() */
1419 error = bus_dmamap_create(sc->txmbuf_dtag, 0,
1420 &txbuf->dmap);
1421 if (error != 0) {
1422 device_printf(sc->dev,
1423 "can't create dma map (tx ring %d)\n", i);
1424 return (error);
1425 }
1426 }
1427 tx->dma = tx->cpu = 0;
1428 tx->used = 0;
1429 tx->drv_error = 0;
1430 tx->queue_status = MVNETA_QUEUE_DISABLED;
1431 tx->queue_hung = FALSE;
1432
1433 tx->ifp = sc->ifp;
1434 tx->qidx = q;
1435 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
1436 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
1437 taskqueue_thread_enqueue, &tx->taskq);
1438 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
1439 device_get_nameunit(sc->dev), q);
1440
1441 return (0);
1442 }
1443
1444 STATIC void
1445 mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
1446 {
1447 struct mvneta_tx_ring *tx;
1448 struct mvneta_buf *txbuf;
1449 int i;
1450
1451 tx = MVNETA_TX_RING(sc, q);
1452 KASSERT_TX_MTX(sc, q);
1453
1454 /* Tx handle */
1455 for (i = 0; i < MVNETA_TX_RING_CNT; i++) {
1456 txbuf = &tx->txbuf[i];
1457 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
1458 if (txbuf->m != NULL) {
1459 m_freem(txbuf->m);
1460 txbuf->m = NULL;
1461 }
1462 }
1463 tx->dma = tx->cpu = 0;
1464 tx->used = 0;
1465 }
1466
1467 STATIC void
1468 mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
1469 {
1470 struct mvneta_rx_ring *rx;
1471 struct mvneta_buf *rxbuf;
1472 int i;
1473
1474 rx = MVNETA_RX_RING(sc, q);
1475 KASSERT_RX_MTX(sc, q);
1476
1477 /* Rx handle */
1478 for (i = 0; i < MVNETA_RX_RING_CNT; i++) {
1479 rxbuf = &rx->rxbuf[i];
1480 mvneta_rx_buf_free(sc, rxbuf);
1481 }
1482 rx->dma = rx->cpu = 0;
1483 }
1484
1485 /*
1486 * Rx/Tx Queue Control
1487 */
1488 STATIC int
1489 mvneta_rx_queue_init(if_t ifp, int q)
1490 {
1491 struct mvneta_softc *sc;
1492 struct mvneta_rx_ring *rx;
1493 uint32_t reg;
1494
1495 sc = if_getsoftc(ifp);
1496 KASSERT_RX_MTX(sc, q);
1497 rx = MVNETA_RX_RING(sc, q);
1498 DASSERT(rx->desc_pa != 0);
1499
1500 /* descriptor address */
1501 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1502
1503 /* Rx buffer size and descriptor ring size */
1504 reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
1505 reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT);
1506 MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
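/*
 * Note (illustrative): the buffer size field is programmed in 8-byte
 * units, hence the ">> 3" above.  With the default rx_frame_size of
 * MCLBYTES (typically 2048 bytes) the value written is 2048 / 8 = 256.
 */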
1507 #ifdef MVNETA_KTR
1508 CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", if_name(ifp), q,
1509 MVNETA_READ(sc, MVNETA_PRXDQS(q)));
1510 #endif
1511 /* Rx packet offset address */
1512 reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3);
1513 MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
1514 #ifdef MVNETA_KTR
1515 CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", if_name(ifp), q,
1516 MVNETA_READ(sc, MVNETA_PRXC(q)));
1517 #endif
1518
1519 /* if DMA is not working, register is not updated */
1520 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1521 return (0);
1522 }
1523
1524 STATIC int
1525 mvneta_tx_queue_init(if_t ifp, int q)
1526 {
1527 struct mvneta_softc *sc;
1528 struct mvneta_tx_ring *tx;
1529 uint32_t reg;
1530
1531 sc = if_getsoftc(ifp);
1532 KASSERT_TX_MTX(sc, q);
1533 tx = MVNETA_TX_RING(sc, q);
1534 DASSERT(tx->desc_pa != 0);
1535
1536 /* descriptor address */
1537 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1538
1539 /* descriptor ring size */
1540 reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT);
1541 MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
1542
1543 /* if DMA is not working, register is not updated */
1544 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
1545 return (0);
1546 }
1547
1548 STATIC int
1549 mvneta_rx_queue_enable(if_t ifp, int q)
1550 {
1551 struct mvneta_softc *sc;
1552 struct mvneta_rx_ring *rx;
1553 uint32_t reg;
1554
1555 sc = if_getsoftc(ifp);
1556 rx = MVNETA_RX_RING(sc, q);
1557 KASSERT_RX_MTX(sc, q);
1558
1559 /* Set Rx interrupt threshold */
1560 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
1561 MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
1562
1563 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
1564 MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
1565
1566 /* Unmask RXTX_TH Intr. */
1567 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1568 reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1569 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1570
1571 /* Enable Rx queue */
1572 reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK;
1573 reg |= MVNETA_RQC_ENQ(q);
1574 MVNETA_WRITE(sc, MVNETA_RQC, reg);
1575
1576 rx->queue_status = MVNETA_QUEUE_WORKING;
1577 return (0);
1578 }
1579
1580 STATIC int
1581 mvneta_tx_queue_enable(if_t ifp, int q)
1582 {
1583 struct mvneta_softc *sc;
1584 struct mvneta_tx_ring *tx;
1585
1586 sc = if_getsoftc(ifp);
1587 tx = MVNETA_TX_RING(sc, q);
1588 KASSERT_TX_MTX(sc, q);
1589
1590 /* Enable Tx queue */
1591 MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
1592
1593 tx->queue_status = MVNETA_QUEUE_IDLE;
1594 tx->queue_hung = FALSE;
1595 return (0);
1596 }
1597
1598 STATIC __inline void
1599 mvneta_rx_lockq(struct mvneta_softc *sc, int q)
1600 {
1601
1602 DASSERT(q >= 0);
1603 DASSERT(q < MVNETA_RX_QNUM_MAX);
1604 mtx_lock(&sc->rx_ring[q].ring_mtx);
1605 }
1606
1607 STATIC __inline void
1608 mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
1609 {
1610
1611 DASSERT(q >= 0);
1612 DASSERT(q < MVNETA_RX_QNUM_MAX);
1613 mtx_unlock(&sc->rx_ring[q].ring_mtx);
1614 }
1615
1616 STATIC __inline int __unused
1617 mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
1618 {
1619
1620 DASSERT(q >= 0);
1621 DASSERT(q < MVNETA_TX_QNUM_MAX);
1622 return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
1623 }
1624
1625 STATIC __inline void
1626 mvneta_tx_lockq(struct mvneta_softc *sc, int q)
1627 {
1628
1629 DASSERT(q >= 0);
1630 DASSERT(q < MVNETA_TX_QNUM_MAX);
1631 mtx_lock(&sc->tx_ring[q].ring_mtx);
1632 }
1633
1634 STATIC __inline void
1635 mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
1636 {
1637
1638 DASSERT(q >= 0);
1639 DASSERT(q < MVNETA_TX_QNUM_MAX);
1640 mtx_unlock(&sc->tx_ring[q].ring_mtx);
1641 }
1642
1643 /*
1644 * Interrupt Handlers
1645 */
1646 STATIC void
1647 mvneta_disable_intr(struct mvneta_softc *sc)
1648 {
1649
1650 MVNETA_WRITE(sc, MVNETA_EUIM, 0);
1651 MVNETA_WRITE(sc, MVNETA_EUIC, 0);
1652 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0);
1653 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0);
1654 MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0);
1655 MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0);
1656 MVNETA_WRITE(sc, MVNETA_PMIM, 0);
1657 MVNETA_WRITE(sc, MVNETA_PMIC, 0);
1658 MVNETA_WRITE(sc, MVNETA_PIE, 0);
1659 }
1660
1661 STATIC void
1662 mvneta_enable_intr(struct mvneta_softc *sc)
1663 {
1664 uint32_t reg;
1665
1666 /* Enable Summary Bit to check all interrupt causes. */
1667 reg = MVNETA_READ(sc, MVNETA_PRXTXTIM);
1668 reg |= MVNETA_PRXTXTI_PMISCICSUMMARY;
1669 MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg);
1670
1671 if (!sc->phy_attached || sc->use_inband_status) {
1672 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */
1673 MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG |
1674 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE);
1675 }
1676
1677 /* Enable All Queue Interrupt */
1678 reg = MVNETA_READ(sc, MVNETA_PIE);
1679 reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK;
1680 reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK;
1681 MVNETA_WRITE(sc, MVNETA_PIE, reg);
1682 }
1683
1684 STATIC void
1685 mvneta_rxtxth_intr(void *arg)
1686 {
1687 struct mvneta_softc *sc;
1688 if_t ifp;
1689 uint32_t ic, queues;
1690
1691 sc = arg;
1692 ifp = sc->ifp;
1693 #ifdef MVNETA_KTR
1694 CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", if_name(ifp));
1695 #endif
1696 ic = MVNETA_READ(sc, MVNETA_PRXTXTIC);
1697 if (ic == 0)
1698 return;
1699 MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic);
1700
1701 /* Ack maintenance interrupt first */
1702 if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) &&
1703 (!sc->phy_attached || sc->use_inband_status))) {
1704 mvneta_sc_lock(sc);
1705 mvneta_misc_intr(sc);
1706 mvneta_sc_unlock(sc);
1707 }
1708 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)))
1709 return;
1710 /* RxTxTH interrupt */
1711 queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic);
1712 if (__predict_true(queues)) {
1713 #ifdef MVNETA_KTR
1714 CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", if_name(ifp));
1715 #endif
1716 /* At the moment the driver supports only one RX queue. */
1717 DASSERT(MVNETA_IS_QUEUE_SET(queues, 0));
1718 mvneta_rx(sc, 0, 0);
1719 }
1720 }
1721
1722 STATIC int
1723 mvneta_misc_intr(struct mvneta_softc *sc)
1724 {
1725 uint32_t ic;
1726 int claimed = 0;
1727
1728 #ifdef MVNETA_KTR
1729 CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp));
1730 #endif
1731 KASSERT_SC_MTX(sc);
1732
1733 for (;;) {
1734 ic = MVNETA_READ(sc, MVNETA_PMIC);
1735 ic &= MVNETA_READ(sc, MVNETA_PMIM);
1736 if (ic == 0)
1737 break;
1738 MVNETA_WRITE(sc, MVNETA_PMIC, ~ic);
1739 claimed = 1;
1740
1741 if (ic & (MVNETA_PMI_PHYSTATUSCHNG |
1742 MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE))
1743 mvneta_link_isr(sc);
1744 }
1745 return (claimed);
1746 }
1747
1748 STATIC void
1749 mvneta_tick(void *arg)
1750 {
1751 struct mvneta_softc *sc;
1752 struct mvneta_tx_ring *tx;
1753 struct mvneta_rx_ring *rx;
1754 int q;
1755 uint32_t fc_prev, fc_curr;
1756
1757 sc = arg;
1758
1759 /*
1760 * This is done before mib update to get the right stats
1761 * for this tick.
1762 */
1763 mvneta_tx_drain(sc);
1764
1765 /* Extract previous flow-control frame received counter. */
1766 fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1767 /* Read mib registers (clear by read). */
1768 mvneta_update_mib(sc);
1769 /* Extract current flow-control frame received counter. */
1770 fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1771
1772
1773 if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) {
1774 mvneta_sc_lock(sc);
1775 mii_tick(sc->mii);
1776
1777 /* Adjust MAC settings */
1778 mvneta_adjust_link(sc);
1779 mvneta_sc_unlock(sc);
1780 }
1781
1782 /*
1783 * If we were unable to refill the Rx queue and left the Rx handler, the
1784 * ring is left without mbufs and with no way to call the refill function.
1785 */
1786 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1787 rx = MVNETA_RX_RING(sc, q);
1788 if (rx->needs_refill == TRUE) {
1789 mvneta_rx_lockq(sc, q);
1790 mvneta_rx_queue_refill(sc, q);
1791 mvneta_rx_unlockq(sc, q);
1792 }
1793 }
1794
1795 /*
1796 * Watchdog:
1797 * - check if the queue is marked as hung.
1798 * - ignore the hung status if we received some pause frames,
1799 * as the hardware may have paused packet transmission.
1800 */
1801 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1802 /*
1803 * We should take the queue lock, but as we only read the
1804 * queue status we can do it without the lock; at worst we
1805 * misdetect the queue status for one tick.
1806 */
1807 tx = MVNETA_TX_RING(sc, q);
1808
1809 if (tx->queue_hung && (fc_curr - fc_prev) == 0)
1810 goto timeout;
1811 }
1812
1813 callout_schedule(&sc->tick_ch, hz);
1814 return;
1815
1816 timeout:
1817 if_printf(sc->ifp, "watchdog timeout\n");
1818
1819 mvneta_sc_lock(sc);
1820 sc->counter_watchdog++;
1821 sc->counter_watchdog_mib++;
1822 /* Trigger reinitialize sequence. */
1823 mvneta_stop_locked(sc);
1824 mvneta_init_locked(sc);
1825 mvneta_sc_unlock(sc);
1826 }
1827
1828 STATIC void
1829 mvneta_qflush(if_t ifp)
1830 {
1831 #ifdef MVNETA_MULTIQUEUE
1832 struct mvneta_softc *sc;
1833 struct mvneta_tx_ring *tx;
1834 struct mbuf *m;
1835 size_t q;
1836
1837 sc = if_getsoftc(ifp);
1838
1839 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1840 tx = MVNETA_TX_RING(sc, q);
1841 mvneta_tx_lockq(sc, q);
1842 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
1843 m_freem(m);
1844 mvneta_tx_unlockq(sc, q);
1845 }
1846 #endif
1847 if_qflush(ifp);
1848 }
1849
1850 STATIC void
1851 mvneta_tx_task(void *arg, int pending)
1852 {
1853 struct mvneta_softc *sc;
1854 struct mvneta_tx_ring *tx;
1855 if_t ifp;
1856 int error;
1857
1858 tx = arg;
1859 ifp = tx->ifp;
1860 sc = if_getsoftc(ifp);
1861
1862 mvneta_tx_lockq(sc, tx->qidx);
1863 error = mvneta_xmit_locked(sc, tx->qidx);
1864 mvneta_tx_unlockq(sc, tx->qidx);
1865
1866 /* Try again */
1867 if (__predict_false(error != 0 && error != ENETDOWN)) {
1868 pause("mvneta_tx_task_sleep", 1);
1869 taskqueue_enqueue(tx->taskq, &tx->task);
1870 }
1871 }
1872
1873 STATIC int
1874 mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
1875 {
1876 struct mvneta_tx_ring *tx;
1877 if_t ifp;
1878 int error;
1879
1880 KASSERT_TX_MTX(sc, q);
1881 tx = MVNETA_TX_RING(sc, q);
1882 error = 0;
1883
1884 ifp = sc->ifp;
1885
1886 /* Don't enqueue the packet if the queue is disabled. */
1887 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
1888 m_freem(*m);
1889 *m = NULL;
1890 return (ENETDOWN);
1891 }
1892
1893 /* Reclaim mbuf if above threshold. */
1894 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
1895 mvneta_tx_queue_complete(sc, q);
1896
1897 /* Do not call transmit path if queue is already too full. */
1898 if (__predict_false(tx->used >
1899 MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
1900 return (ENOBUFS);
1901
1902 error = mvneta_tx_queue(sc, m, q);
1903 if (__predict_false(error != 0))
1904 return (error);
1905
1906 /* Send a copy of the frame to the BPF listener */
1907 ETHER_BPF_MTAP(ifp, *m);
1908
1909 /* Set watchdog on */
1910 tx->watchdog_time = ticks;
1911 tx->queue_status = MVNETA_QUEUE_WORKING;
1912
1913 return (error);
1914 }
1915
1916 #ifdef MVNETA_MULTIQUEUE
1917 STATIC int
1918 mvneta_transmit(if_t ifp, struct mbuf *m)
1919 {
1920 struct mvneta_softc *sc;
1921 struct mvneta_tx_ring *tx;
1922 int error;
1923 int q;
1924
1925 sc = if_getsoftc(ifp);
1926
1927 	/* Use the default queue if there is no flow ID, as the thread can migrate. */
1928 if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE))
1929 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
1930 else
1931 q = 0;
1932
1933 tx = MVNETA_TX_RING(sc, q);
1934
1935 	/* If the buf_ring is full, start transmitting immediately. */
1936 if (buf_ring_full(tx->br)) {
1937 mvneta_tx_lockq(sc, q);
1938 mvneta_xmit_locked(sc, q);
1939 mvneta_tx_unlockq(sc, q);
1940 }
1941
1942 /*
1943 * If the buf_ring is empty we will not reorder packets.
1944 * If the lock is available transmit without using buf_ring.
1945 */
1946 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1947 error = mvneta_xmitfast_locked(sc, q, &m);
1948 mvneta_tx_unlockq(sc, q);
1949 if (__predict_true(error == 0))
1950 return (0);
1951
1952 /* Transmit can fail in fastpath. */
1953 if (__predict_false(m == NULL))
1954 return (error);
1955 }
1956
1957 /* Enqueue then schedule taskqueue. */
1958 error = drbr_enqueue(ifp, tx->br, m);
1959 if (__predict_false(error != 0))
1960 return (error);
1961
1962 taskqueue_enqueue(tx->taskq, &tx->task);
1963 return (0);
1964 }
1965
1966 STATIC int
1967 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1968 {
1969 if_t ifp;
1970 struct mvneta_tx_ring *tx;
1971 struct mbuf *m;
1972 int error;
1973
1974 KASSERT_TX_MTX(sc, q);
1975 ifp = sc->ifp;
1976 tx = MVNETA_TX_RING(sc, q);
1977 error = 0;
1978
1979 while ((m = drbr_peek(ifp, tx->br)) != NULL) {
1980 error = mvneta_xmitfast_locked(sc, q, &m);
1981 if (__predict_false(error != 0)) {
1982 if (m != NULL)
1983 drbr_putback(ifp, tx->br, m);
1984 else
1985 drbr_advance(ifp, tx->br);
1986 break;
1987 }
1988 drbr_advance(ifp, tx->br);
1989 }
1990
1991 return (error);
1992 }
1993 #else /* !MVNETA_MULTIQUEUE */
1994 STATIC void
1995 mvneta_start(if_t ifp)
1996 {
1997 struct mvneta_softc *sc;
1998 struct mvneta_tx_ring *tx;
1999 int error;
2000
2001 sc = if_getsoftc(ifp);
2002 tx = MVNETA_TX_RING(sc, 0);
2003
2004 mvneta_tx_lockq(sc, 0);
2005 error = mvneta_xmit_locked(sc, 0);
2006 mvneta_tx_unlockq(sc, 0);
2007 /* Handle retransmit in the background taskq. */
2008 if (__predict_false(error != 0 && error != ENETDOWN))
2009 taskqueue_enqueue(tx->taskq, &tx->task);
2010 }
2011
2012 STATIC int
2013 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
2014 {
2015 if_t ifp;
2016 struct mbuf *m;
2017 int error;
2018
2019 KASSERT_TX_MTX(sc, q);
2020 ifp = sc->ifp;
2021 error = 0;
2022
2023 while (!if_sendq_empty(ifp)) {
2024 m = if_dequeue(ifp);
2025 if (m == NULL)
2026 break;
2027
2028 error = mvneta_xmitfast_locked(sc, q, &m);
2029 if (__predict_false(error != 0)) {
2030 if (m != NULL)
2031 if_sendq_prepend(ifp, m);
2032 break;
2033 }
2034 }
2035
2036 return (error);
2037 }
2038 #endif
2039
2040 STATIC int
2041 mvneta_ioctl(if_t ifp, u_long cmd, caddr_t data)
2042 {
2043 struct mvneta_softc *sc;
2044 struct mvneta_rx_ring *rx;
2045 struct ifreq *ifr;
2046 int error, mask;
2047 uint32_t flags;
2048 bool reinit;
2049 int q;
2050
2051 error = 0;
2052 reinit = false;
2053 sc = if_getsoftc(ifp);
2054 ifr = (struct ifreq *)data;
2055 switch (cmd) {
2056 case SIOCSIFFLAGS:
2057 mvneta_sc_lock(sc);
2058 if (if_getflags(ifp) & IFF_UP) {
2059 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2060 flags = if_getflags(ifp) ^ sc->mvneta_if_flags;
2061
2062 if (flags != 0)
2063 sc->mvneta_if_flags = if_getflags(ifp);
2064
2065 if ((flags & IFF_PROMISC) != 0)
2066 mvneta_filter_setup(sc);
2067 } else {
2068 mvneta_init_locked(sc);
2069 sc->mvneta_if_flags = if_getflags(ifp);
2070 if (sc->phy_attached)
2071 mii_mediachg(sc->mii);
2072 mvneta_sc_unlock(sc);
2073 break;
2074 }
2075 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2076 mvneta_stop_locked(sc);
2077
2078 sc->mvneta_if_flags = if_getflags(ifp);
2079 mvneta_sc_unlock(sc);
2080 break;
2081 case SIOCSIFCAP:
2082 if (if_getmtu(ifp) > sc->tx_csum_limit &&
2083 ifr->ifr_reqcap & IFCAP_TXCSUM)
2084 ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
2085 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2086 if (mask & IFCAP_HWCSUM) {
2087 if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap,
2088 IFCAP_HWCSUM);
2089 if (if_getcapenable(ifp) & IFCAP_TXCSUM)
2090 if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
2091 CSUM_UDP);
2092 else
2093 if_sethwassist(ifp, 0);
2094 }
2095 if (mask & IFCAP_LRO) {
2096 mvneta_sc_lock(sc);
2097 if_togglecapenable(ifp, IFCAP_LRO);
2098 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2099 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2100 rx = MVNETA_RX_RING(sc, q);
2101 rx->lro_enabled = !rx->lro_enabled;
2102 }
2103 }
2104 mvneta_sc_unlock(sc);
2105 }
2106 VLAN_CAPABILITIES(ifp);
2107 break;
2108 case SIOCSIFMEDIA:
2109 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
2110 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
2111 (ifr->ifr_media & IFM_FDX) == 0) {
2112 device_printf(sc->dev,
2113 "%s half-duplex unsupported\n",
2114 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
2115 "1000Base-T" :
2116 "2500Base-T");
2117 error = EINVAL;
2118 break;
2119 }
2120 case SIOCGIFMEDIA: /* FALLTHROUGH */
2121 case SIOCGIFXMEDIA:
2122 if (!sc->phy_attached)
2123 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
2124 cmd);
2125 else
2126 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
2127 cmd);
2128 break;
2129 case SIOCSIFMTU:
2130 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
2131 MVNETA_ETHER_SIZE) {
2132 error = EINVAL;
2133 } else {
2134 if_setmtu(ifp, ifr->ifr_mtu);
2135 mvneta_sc_lock(sc);
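			/*
			 * Pick an RX cluster size large enough for the new
			 * MTU: regular 2 KB clusters when the frame fits,
			 * 9 KB jumbo clusters otherwise.
			 */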
2136 if (if_getmtu(ifp) + MVNETA_ETHER_SIZE <= MCLBYTES) {
2137 sc->rx_frame_size = MCLBYTES;
2138 } else {
2139 sc->rx_frame_size = MJUM9BYTES;
2140 }
2141 if (if_getmtu(ifp) > sc->tx_csum_limit) {
2142 if_setcapenablebit(ifp, 0, IFCAP_TXCSUM);
2143 if_sethwassist(ifp, 0);
2144 } else {
2145 if_setcapenablebit(ifp, IFCAP_TXCSUM, 0);
2146 if_sethwassist(ifp, CSUM_IP | CSUM_TCP |
2147 CSUM_UDP);
2148 }
2149 /*
2150 * Reinitialize RX queues.
2151 * We need to update RX descriptor size.
2152 */
2153 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
2154 reinit = true;
2155 mvneta_stop_locked(sc);
2156 }
2157
2158 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2159 mvneta_rx_lockq(sc, q);
2160 if (mvneta_rx_queue_init(ifp, q) != 0) {
2161 device_printf(sc->dev,
2162 "initialization failed:"
2163 " cannot initialize queue\n");
2164 mvneta_rx_unlockq(sc, q);
2165 error = ENOBUFS;
2166 break;
2167 }
2168 mvneta_rx_unlockq(sc, q);
2169 }
2170 if (reinit)
2171 mvneta_init_locked(sc);
2172
2173 mvneta_sc_unlock(sc);
2174 }
2175 break;
2176
2177 default:
2178 error = ether_ioctl(ifp, cmd, data);
2179 break;
2180 }
2181
2182 return (error);
2183 }
2184
2185 STATIC void
2186 mvneta_init_locked(void *arg)
2187 {
2188 struct mvneta_softc *sc;
2189 if_t ifp;
2190 uint32_t reg;
2191 int q, cpu;
2192
2193 sc = arg;
2194 ifp = sc->ifp;
2195
2196 if (!device_is_attached(sc->dev) ||
2197 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
2198 return;
2199
2200 mvneta_disable_intr(sc);
2201 callout_stop(&sc->tick_ch);
2202
2203 /* Get the latest mac address */
2204 bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN);
2205 mvneta_set_mac_address(sc, sc->enaddr);
2206 mvneta_filter_setup(sc);
2207
2208 /* Start DMA Engine */
2209 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000);
2210 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000);
2211 MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM);
2212
2213 /* Enable port */
2214 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2215 reg |= MVNETA_PMACC0_PORTEN;
2216 reg &= ~MVNETA_PMACC0_FRAMESIZELIMIT_MASK;
2217 reg |= MVNETA_PMACC0_FRAMESIZELIMIT(if_getmtu(ifp) + MVNETA_ETHER_SIZE);
2218 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2219
2220 	/* Allow access to each TXQ/RXQ from all CPUs */
2221 for (cpu = 0; cpu < mp_ncpus; ++cpu)
2222 MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu),
2223 MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK);
2224
2225 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2226 mvneta_rx_lockq(sc, q);
2227 mvneta_rx_queue_refill(sc, q);
2228 mvneta_rx_unlockq(sc, q);
2229 }
2230
2231 if (!sc->phy_attached)
2232 mvneta_linkup(sc);
2233
2234 /* Enable interrupt */
2235 mvneta_enable_intr(sc);
2236
2237 	/* Schedule the periodic tick callout */
2238 callout_schedule(&sc->tick_ch, hz);
2239
2240 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2241 }
2242
2243 STATIC void
2244 mvneta_init(void *arg)
2245 {
2246 struct mvneta_softc *sc;
2247
2248 sc = arg;
2249 mvneta_sc_lock(sc);
2250 mvneta_init_locked(sc);
2251 if (sc->phy_attached)
2252 mii_mediachg(sc->mii);
2253 mvneta_sc_unlock(sc);
2254 }
2255
2256 /* ARGSUSED */
2257 STATIC void
2258 mvneta_stop_locked(struct mvneta_softc *sc)
2259 {
2260 if_t ifp;
2261 uint32_t reg;
2262 int q;
2263
2264 ifp = sc->ifp;
2265 if (ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
2266 return;
2267
2268 mvneta_disable_intr(sc);
2269
2270 callout_stop(&sc->tick_ch);
2271
2272 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2273
2274 /* Link down */
2275 if (sc->linkup == TRUE)
2276 mvneta_linkdown(sc);
2277
2278 /* Reset the MAC Port Enable bit */
2279 reg = MVNETA_READ(sc, MVNETA_PMACC0);
2280 reg &= ~MVNETA_PMACC0_PORTEN;
2281 MVNETA_WRITE(sc, MVNETA_PMACC0, reg);
2282
2283 	/* Flush each of the RX queues */
2284 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2285 mvneta_rx_lockq(sc, q);
2286 mvneta_ring_flush_rx_queue(sc, q);
2287 mvneta_rx_unlockq(sc, q);
2288 }
2289
2290 /*
2291 * Hold Reset state of DMA Engine
2292 * (must write 0x0 to restart it)
2293 */
2294 MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
2295 MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
2296
2297 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2298 mvneta_tx_lockq(sc, q);
2299 mvneta_ring_flush_tx_queue(sc, q);
2300 mvneta_tx_unlockq(sc, q);
2301 }
2302 }
2303
2304 STATIC void
2305 mvneta_stop(struct mvneta_softc *sc)
2306 {
2307
2308 mvneta_sc_lock(sc);
2309 mvneta_stop_locked(sc);
2310 mvneta_sc_unlock(sc);
2311 }
2312
2313 STATIC int
2314 mvneta_mediachange(if_t ifp)
2315 {
2316 struct mvneta_softc *sc;
2317
2318 sc = if_getsoftc(ifp);
2319
2320 if (!sc->phy_attached && !sc->use_inband_status) {
2321 /* We shouldn't be here */
2322 if_printf(ifp, "Cannot change media in fixed-link mode!\n");
2323 return (0);
2324 }
2325
2326 if (sc->use_inband_status) {
2327 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
2328 return (0);
2329 }
2330
2331 mvneta_sc_lock(sc);
2332
2333 /* Update PHY */
2334 mii_mediachg(sc->mii);
2335
2336 mvneta_sc_unlock(sc);
2337
2338 return (0);
2339 }
2340
2341 STATIC void
2342 mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr)
2343 {
2344 uint32_t psr;
2345
2346 psr = MVNETA_READ(sc, MVNETA_PSR);
2347
2348 /* Speed */
2349 if (psr & MVNETA_PSR_GMIISPEED)
2350 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
2351 else if (psr & MVNETA_PSR_MIISPEED)
2352 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
2353 else if (psr & MVNETA_PSR_LINKUP)
2354 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
2355
2356 /* Duplex */
2357 if (psr & MVNETA_PSR_FULLDX)
2358 ifmr->ifm_active |= IFM_FDX;
2359
2360 /* Link */
2361 ifmr->ifm_status = IFM_AVALID;
2362 if (psr & MVNETA_PSR_LINKUP)
2363 ifmr->ifm_status |= IFM_ACTIVE;
2364 }
2365
2366 STATIC void
2367 mvneta_mediastatus(if_t ifp, struct ifmediareq *ifmr)
2368 {
2369 struct mvneta_softc *sc;
2370 struct mii_data *mii;
2371
2372 sc = if_getsoftc(ifp);
2373
2374 if (!sc->phy_attached && !sc->use_inband_status) {
2375 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2376 return;
2377 }
2378
2379 mvneta_sc_lock(sc);
2380
2381 if (sc->use_inband_status) {
2382 mvneta_get_media(sc, ifmr);
2383 mvneta_sc_unlock(sc);
2384 return;
2385 }
2386
2387 mii = sc->mii;
2388 mii_pollstat(mii);
2389
2390 ifmr->ifm_active = mii->mii_media_active;
2391 ifmr->ifm_status = mii->mii_media_status;
2392
2393 mvneta_sc_unlock(sc);
2394 }
2395
2396 /*
2397 * Link State Notify
2398 */
2399 STATIC void
2400 mvneta_update_autoneg(struct mvneta_softc *sc, int enable)
2401 {
2402 int reg;
2403
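	/*
	 * When enabled, clear the forced-link overrides and let the MAC
	 * negotiate speed and duplex in-band; when disabled, in-band
	 * auto-negotiation is turned off and the callers force the media
	 * settings through MVNETA_PANC instead.
	 */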
2404 if (enable) {
2405 reg = MVNETA_READ(sc, MVNETA_PANC);
2406 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2407 MVNETA_PANC_ANFCEN);
2408 reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2409 MVNETA_PANC_INBANDANEN;
2410 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2411
2412 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2413 reg |= MVNETA_PMACC2_INBANDANMODE;
2414 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2415
2416 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2417 reg |= MVNETA_PSOMSCD_ENABLE;
2418 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2419 } else {
2420 reg = MVNETA_READ(sc, MVNETA_PANC);
2421 reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS |
2422 MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN |
2423 MVNETA_PANC_INBANDANEN);
2424 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2425
2426 reg = MVNETA_READ(sc, MVNETA_PMACC2);
2427 reg &= ~MVNETA_PMACC2_INBANDANMODE;
2428 MVNETA_WRITE(sc, MVNETA_PMACC2, reg);
2429
2430 reg = MVNETA_READ(sc, MVNETA_PSOMSCD);
2431 reg &= ~MVNETA_PSOMSCD_ENABLE;
2432 MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg);
2433 }
2434 }
2435
2436 STATIC int
2437 mvneta_update_media(struct mvneta_softc *sc, int media)
2438 {
2439 int reg, err;
2440 boolean_t running;
2441
2442 err = 0;
2443
2444 mvneta_sc_lock(sc);
2445
2446 mvneta_linkreset(sc);
2447
2448 running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0;
2449 if (running)
2450 mvneta_stop_locked(sc);
2451
2452 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
2453
2454 if (!sc->phy_attached || sc->use_inband_status)
2455 mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO);
2456
2457 mvneta_update_eee(sc);
2458 mvneta_update_fc(sc);
2459
2460 if (IFM_SUBTYPE(media) != IFM_AUTO) {
2461 reg = MVNETA_READ(sc, MVNETA_PANC);
2462 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2463 MVNETA_PANC_SETMIISPEED |
2464 MVNETA_PANC_SETFULLDX);
2465 if (IFM_SUBTYPE(media) == IFM_1000_T ||
2466 IFM_SUBTYPE(media) == IFM_2500_T) {
2467 if ((media & IFM_FDX) == 0) {
2468 device_printf(sc->dev,
2469 "%s half-duplex unsupported\n",
2470 IFM_SUBTYPE(media) == IFM_1000_T ?
2471 "1000Base-T" :
2472 "2500Base-T");
2473 err = EINVAL;
2474 goto out;
2475 }
2476 reg |= MVNETA_PANC_SETGMIISPEED;
2477 } else if (IFM_SUBTYPE(media) == IFM_100_TX)
2478 reg |= MVNETA_PANC_SETMIISPEED;
2479
2480 if (media & IFM_FDX)
2481 reg |= MVNETA_PANC_SETFULLDX;
2482
2483 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2484 }
2485 out:
2486 if (running)
2487 mvneta_init_locked(sc);
2488 mvneta_sc_unlock(sc);
2489 return (err);
2490 }
2491
2492 STATIC void
2493 mvneta_adjust_link(struct mvneta_softc *sc)
2494 {
2495 boolean_t phy_linkup;
2496 int reg;
2497
2498 /* Update eee/fc */
2499 mvneta_update_eee(sc);
2500 mvneta_update_fc(sc);
2501
2502 /* Check for link change */
2503 phy_linkup = (sc->mii->mii_media_status &
2504 (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE);
2505
2506 if (sc->linkup != phy_linkup)
2507 mvneta_linkupdate(sc, phy_linkup);
2508
2509 /* Don't update media on disabled link */
2510 if (!phy_linkup)
2511 return;
2512
2513 /* Check for media type change */
2514 if (sc->mvneta_media != sc->mii->mii_media_active) {
2515 sc->mvneta_media = sc->mii->mii_media_active;
2516
2517 reg = MVNETA_READ(sc, MVNETA_PANC);
2518 reg &= ~(MVNETA_PANC_SETGMIISPEED |
2519 MVNETA_PANC_SETMIISPEED |
2520 MVNETA_PANC_SETFULLDX);
2521 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
2522 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
2523 reg |= MVNETA_PANC_SETGMIISPEED;
2524 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
2525 reg |= MVNETA_PANC_SETMIISPEED;
2526
2527 if (sc->mvneta_media & IFM_FDX)
2528 reg |= MVNETA_PANC_SETFULLDX;
2529
2530 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2531 }
2532 }
2533
2534 STATIC void
2535 mvneta_link_isr(struct mvneta_softc *sc)
2536 {
2537 int linkup;
2538
2539 KASSERT_SC_MTX(sc);
2540
2541 linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE;
2542 if (sc->linkup == linkup)
2543 return;
2544
2545 if (linkup == TRUE)
2546 mvneta_linkup(sc);
2547 else
2548 mvneta_linkdown(sc);
2549
2550 #ifdef DEBUG
2551 device_printf(sc->dev,
2552 "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
2553 #endif
2554 }
2555
2556 STATIC void
2557 mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup)
2558 {
2559
2560 KASSERT_SC_MTX(sc);
2561
2562 if (linkup == TRUE)
2563 mvneta_linkup(sc);
2564 else
2565 mvneta_linkdown(sc);
2566
2567 #ifdef DEBUG
2568 device_printf(sc->dev,
2569 "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
2570 #endif
2571 }
2572
2573 STATIC void
2574 mvneta_update_eee(struct mvneta_softc *sc)
2575 {
2576 uint32_t reg;
2577
2578 KASSERT_SC_MTX(sc);
2579
2580 /* set EEE parameters */
2581 reg = MVNETA_READ(sc, MVNETA_LPIC1);
2582 if (sc->cf_lpi)
2583 reg |= MVNETA_LPIC1_LPIRE;
2584 else
2585 reg &= ~MVNETA_LPIC1_LPIRE;
2586 MVNETA_WRITE(sc, MVNETA_LPIC1, reg);
2587 }
2588
2589 STATIC void
2590 mvneta_update_fc(struct mvneta_softc *sc)
2591 {
2592 uint32_t reg;
2593
2594 KASSERT_SC_MTX(sc);
2595
2596 reg = MVNETA_READ(sc, MVNETA_PANC);
2597 if (sc->cf_fc) {
2598 /* Flow control negotiation */
2599 reg |= MVNETA_PANC_PAUSEADV;
2600 reg |= MVNETA_PANC_ANFCEN;
2601 } else {
2602 /* Disable flow control negotiation */
2603 reg &= ~MVNETA_PANC_PAUSEADV;
2604 reg &= ~MVNETA_PANC_ANFCEN;
2605 }
2606
2607 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2608 }
2609
2610 STATIC void
2611 mvneta_linkup(struct mvneta_softc *sc)
2612 {
2613 uint32_t reg;
2614
2615 KASSERT_SC_MTX(sc);
2616
2617 if (!sc->phy_attached || !sc->use_inband_status) {
2618 reg = MVNETA_READ(sc, MVNETA_PANC);
2619 reg |= MVNETA_PANC_FORCELINKPASS;
2620 reg &= ~MVNETA_PANC_FORCELINKFAIL;
2621 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2622 }
2623
2624 mvneta_qflush(sc->ifp);
2625 mvneta_portup(sc);
2626 sc->linkup = TRUE;
2627 if_link_state_change(sc->ifp, LINK_STATE_UP);
2628 }
2629
2630 STATIC void
2631 mvneta_linkdown(struct mvneta_softc *sc)
2632 {
2633 uint32_t reg;
2634
2635 KASSERT_SC_MTX(sc);
2636
2637 if (!sc->phy_attached || !sc->use_inband_status) {
2638 reg = MVNETA_READ(sc, MVNETA_PANC);
2639 reg &= ~MVNETA_PANC_FORCELINKPASS;
2640 reg |= MVNETA_PANC_FORCELINKFAIL;
2641 MVNETA_WRITE(sc, MVNETA_PANC, reg);
2642 }
2643
2644 mvneta_portdown(sc);
2645 mvneta_qflush(sc->ifp);
2646 sc->linkup = FALSE;
2647 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2648 }
2649
2650 STATIC void
2651 mvneta_linkreset(struct mvneta_softc *sc)
2652 {
2653 struct mii_softc *mii;
2654
2655 if (sc->phy_attached) {
2656 /* Force reset PHY */
2657 mii = LIST_FIRST(&sc->mii->mii_phys);
2658 if (mii)
2659 mii_phy_reset(mii);
2660 }
2661 }
2662
2663 /*
2664 * Tx Subroutines
2665 */
2666 STATIC int
2667 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2668 {
2669 if_t ifp;
2670 bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT];
2671 struct mbuf *mtmp, *mbuf;
2672 struct mvneta_tx_ring *tx;
2673 struct mvneta_buf *txbuf;
2674 struct mvneta_tx_desc *t;
2675 uint32_t ptxsu;
2676 int used, error, i, txnsegs;
2677
2678 mbuf = *mbufp;
2679 tx = MVNETA_TX_RING(sc, q);
2680 DASSERT(tx->used >= 0);
2681 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2682 t = NULL;
2683 ifp = sc->ifp;
2684
2685 if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2686 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2687 if (mbuf == NULL) {
2688 tx->drv_error++;
2689 *mbufp = NULL;
2690 return (ENOBUFS);
2691 }
2692 mbuf->m_flags &= ~M_VLANTAG;
2693 *mbufp = mbuf;
2694 }
2695
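	/*
	 * A fragmented, read-only mbuf that requests checksum offload is
	 * duplicated first; m_dup() yields a writable copy, presumably so
	 * the header parsing for the offload flags can be done safely.
	 */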
2696 if (__predict_false(mbuf->m_next != NULL &&
2697 (mbuf->m_pkthdr.csum_flags &
2698 (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) {
2699 if (M_WRITABLE(mbuf) == 0) {
2700 mtmp = m_dup(mbuf, M_NOWAIT);
2701 m_freem(mbuf);
2702 if (mtmp == NULL) {
2703 tx->drv_error++;
2704 *mbufp = NULL;
2705 return (ENOBUFS);
2706 }
2707 *mbufp = mbuf = mtmp;
2708 }
2709 }
2710
2711 /* load mbuf using dmamap of 1st descriptor */
2712 txbuf = &tx->txbuf[tx->cpu];
2713 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2714 txbuf->dmap, mbuf, txsegs, &txnsegs,
2715 BUS_DMA_NOWAIT);
2716 if (__predict_false(error != 0)) {
2717 #ifdef MVNETA_KTR
2718 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", if_name(ifp), q, error);
2719 #endif
2720 /* This is the only recoverable error (except EFBIG). */
2721 if (error != ENOMEM) {
2722 tx->drv_error++;
2723 m_freem(mbuf);
2724 *mbufp = NULL;
2725 return (ENOBUFS);
2726 }
2727 return (error);
2728 }
2729
2730 if (__predict_false(txnsegs <= 0
2731 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2732 		/* not enough descriptors, or the mbuf is broken */
2733 #ifdef MVNETA_KTR
2734 CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d",
2735 if_name(ifp), q, txnsegs);
2736 #endif
2737 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2738 return (ENOBUFS);
2739 }
2740 DASSERT(txbuf->m == NULL);
2741
2742 /* remember mbuf using 1st descriptor */
2743 txbuf->m = mbuf;
2744 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2745 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2746
2747 /* load to tx descriptors */
2748 used = 0;
2749 for (i = 0; i < txnsegs; i++) {
2750 t = &tx->desc[tx->cpu];
2751 t->command = 0;
2752 t->l4ichk = 0;
2753 t->flags = 0;
2754 if (__predict_true(i == 0)) {
2755 /* 1st descriptor */
2756 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2757 t->command |= MVNETA_TX_CMD_F;
2758 mvneta_tx_set_csumflag(ifp, t, mbuf);
2759 }
2760 t->bufptr_pa = txsegs[i].ds_addr;
2761 t->bytecnt = txsegs[i].ds_len;
2762 tx->cpu = tx_counter_adv(tx->cpu, 1);
2763
2764 tx->used++;
2765 used++;
2766 }
2767 /* t is last descriptor here */
2768 DASSERT(t != NULL);
2769 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2770
2771 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2772 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2773
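	/*
	 * The PTXSU "number of written descriptors" field only holds values
	 * up to 255, so larger updates are issued in 255-descriptor chunks.
	 */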
2774 while (__predict_false(used > 255)) {
2775 ptxsu = MVNETA_PTXSU_NOWD(255);
2776 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2777 used -= 255;
2778 }
2779 if (__predict_true(used > 0)) {
2780 ptxsu = MVNETA_PTXSU_NOWD(used);
2781 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2782 }
2783 return (0);
2784 }
2785
2786 STATIC void
2787 mvneta_tx_set_csumflag(if_t ifp,
2788 struct mvneta_tx_desc *t, struct mbuf *m)
2789 {
2790 struct ether_header *eh;
2791 struct ether_vlan_header *evh;
2792 int csum_flags;
2793 uint32_t iphl, ipoff;
2794 struct ip *ip;
2795
2796 iphl = ipoff = 0;
2797 csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags;
2798 eh = mtod(m, struct ether_header *);
2799
2800 switch (ntohs(eh->ether_type)) {
2801 case ETHERTYPE_IP:
2802 ipoff = ETHER_HDR_LEN;
2803 break;
2804 case ETHERTYPE_VLAN:
2805 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2806 evh = mtod(m, struct ether_vlan_header *);
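		/* Double-tagged (QinQ) frame: skip the second VLAN header. */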
2807 if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
2808 ipoff += ETHER_VLAN_ENCAP_LEN;
2809 break;
2810 default:
2811 csum_flags = 0;
2812 }
2813
2814 if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) {
2815 ip = (struct ip *)(m->m_data + ipoff);
2816 iphl = ip->ip_hl<<2;
2817 t->command |= MVNETA_TX_CMD_L3_IP4;
2818 } else {
2819 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2820 return;
2821 }
2822
2823
2824 /* L3 */
2825 if (csum_flags & CSUM_IP) {
2826 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2827 }
2828
2829 /* L4 */
2830 if (csum_flags & CSUM_IP_TCP) {
2831 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2832 t->command |= MVNETA_TX_CMD_L4_TCP;
2833 } else if (csum_flags & CSUM_IP_UDP) {
2834 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2835 t->command |= MVNETA_TX_CMD_L4_UDP;
2836 } else
2837 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2838
2839 t->l4ichk = 0;
2840 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2841 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
2842 }
2843
2844 STATIC void
2845 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2846 {
2847 struct mvneta_tx_ring *tx;
2848 struct mvneta_buf *txbuf;
2849 struct mvneta_tx_desc *t __diagused;
2850 uint32_t ptxs, ptxsu, ndesc;
2851 int i;
2852
2853 KASSERT_TX_MTX(sc, q);
2854
2855 tx = MVNETA_TX_RING(sc, q);
2856 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2857 return;
2858
2859 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2860 ndesc = MVNETA_PTXS_GET_TBC(ptxs);
2861
2862 if (__predict_false(ndesc == 0)) {
2863 if (tx->used == 0)
2864 tx->queue_status = MVNETA_QUEUE_IDLE;
2865 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2866 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2867 tx->queue_hung = TRUE;
2868 return;
2869 }
2870
2871 #ifdef MVNETA_KTR
2872 CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u",
2873 if_name(sc->ifp), q, ndesc);
2874 #endif
2875
2876 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2877 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2878
2879 for (i = 0; i < ndesc; i++) {
2880 t = &tx->desc[tx->dma];
2881 #ifdef MVNETA_KTR
2882 if (t->flags & MVNETA_TX_F_ES)
2883 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2884 if_name(sc->ifp), q, tx->dma);
2885 #endif
2886 txbuf = &tx->txbuf[tx->dma];
2887 if (__predict_true(txbuf->m != NULL)) {
2888 DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2889 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2890 m_freem(txbuf->m);
2891 txbuf->m = NULL;
2892 		} else
2894 DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
2895 tx->dma = tx_counter_adv(tx->dma, 1);
2896 tx->used--;
2897 }
2898 DASSERT(tx->used >= 0);
2899 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
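	/*
	 * As on the transmit side, the PTXSU released-buffers (NORB) field
	 * is limited to 255, so the completion count is written back in
	 * 255-descriptor chunks.
	 */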
2900 while (__predict_false(ndesc > 255)) {
2901 ptxsu = MVNETA_PTXSU_NORB(255);
2902 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2903 ndesc -= 255;
2904 }
2905 if (__predict_true(ndesc > 0)) {
2906 ptxsu = MVNETA_PTXSU_NORB(ndesc);
2907 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2908 }
2909 #ifdef MVNETA_KTR
2910 CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d",
2911 if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
2912 #endif
2913
2914 tx->watchdog_time = ticks;
2915
2916 if (tx->used == 0)
2917 tx->queue_status = MVNETA_QUEUE_IDLE;
2918 }
2919
2920 /*
2921 * Do a final TX complete when TX is idle.
2922 */
2923 STATIC void
2924 mvneta_tx_drain(struct mvneta_softc *sc)
2925 {
2926 struct mvneta_tx_ring *tx;
2927 int q;
2928
2929 /*
2930 * Handle trailing mbuf on TX queue.
2931 	 * The check is done locklessly to avoid TX path contention.
2932 */
2933 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2934 tx = MVNETA_TX_RING(sc, q);
2935 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2936 tx->used > 0) {
2937 mvneta_tx_lockq(sc, q);
2938 mvneta_tx_queue_complete(sc, q);
2939 mvneta_tx_unlockq(sc, q);
2940 }
2941 }
2942 }
2943
2944 /*
2945 * Rx Subroutines
2946 */
2947 STATIC int
2948 mvneta_rx(struct mvneta_softc *sc, int q, int count)
2949 {
2950 uint32_t prxs, npkt;
2951 int more;
2952
2953 more = 0;
2954 mvneta_rx_lockq(sc, q);
2955 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2956 npkt = MVNETA_PRXS_GET_ODC(prxs);
2957 if (__predict_false(npkt == 0))
2958 goto out;
2959
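	/*
	 * Process at most "count" packets per call; if more are pending,
	 * return non-zero so the caller knows more work remains.
	 */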
2960 if (count > 0 && npkt > count) {
2961 more = 1;
2962 npkt = count;
2963 }
2964 mvneta_rx_queue(sc, q, npkt);
2965 out:
2966 mvneta_rx_unlockq(sc, q);
2967 	return (more);
2968 }
2969
2970 /*
2971 * Helper routine for updating PRXSU register of a given queue.
2972 * Handles number of processed descriptors bigger than maximum acceptable value.
2973 */
2974 STATIC __inline void
2975 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2976 {
2977 uint32_t prxsu;
2978
2979 while (__predict_false(processed > 255)) {
2980 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255);
2981 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2982 processed -= 255;
2983 }
2984 prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed);
2985 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2986 }
2987
2988 static __inline void
2989 mvneta_prefetch(void *p)
2990 {
2991
2992 __builtin_prefetch(p);
2993 }
2994
2995 STATIC void
2996 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
2997 {
2998 if_t ifp;
2999 struct mvneta_rx_ring *rx;
3000 struct mvneta_rx_desc *r;
3001 struct mvneta_buf *rxbuf;
3002 struct mbuf *m;
3003 void *pktbuf;
3004 int i, pktlen, processed, ndma;
3005
3006 KASSERT_RX_MTX(sc, q);
3007
3008 ifp = sc->ifp;
3009 rx = MVNETA_RX_RING(sc, q);
3010 processed = 0;
3011
3012 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3013 return;
3014
3015 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
3016 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3017
3018 for (i = 0; i < npkt; i++) {
3019 /* Prefetch next desc, rxbuf. */
3020 ndma = rx_counter_adv(rx->dma, 1);
3021 mvneta_prefetch(&rx->desc[ndma]);
3022 mvneta_prefetch(&rx->rxbuf[ndma]);
3023
3024 /* get descriptor and packet */
3025 r = &rx->desc[rx->dma];
3026 rxbuf = &rx->rxbuf[rx->dma];
3027 m = rxbuf->m;
3028 rxbuf->m = NULL;
3029 DASSERT(m != NULL);
3030 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
3031 BUS_DMASYNC_POSTREAD);
3032 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3033 /* Prefetch mbuf header. */
3034 mvneta_prefetch(m);
3035
3036 processed++;
3037 		/* Drop descriptors with an error status or frames spanning multiple buffers. */
3038 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3039 (MVNETA_RX_F|MVNETA_RX_L));
3040 if (__predict_false((r->status & MVNETA_RX_ES) ||
3041 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3042 (MVNETA_RX_F|MVNETA_RX_L)))
3043 goto rx_error;
3044
3045 /*
3046 * [ OFF | MH | PKT | CRC ]
3047 		 * bytecnt covers MH, PKT, and CRC
3048 */
3049 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3050 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3051 MVNETA_HWHEADER_SIZE;
3052
3053 /* Prefetch mbuf data. */
3054 mvneta_prefetch(pktbuf);
3055
3056 /* Write value to mbuf (avoid read). */
3057 m->m_data = pktbuf;
3058 m->m_len = m->m_pkthdr.len = pktlen;
3059 m->m_pkthdr.rcvif = ifp;
3060 mvneta_rx_set_csumflag(ifp, r, m);
3061
3062 /* Increase rx_dma before releasing the lock. */
3063 rx->dma = ndma;
3064
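		/*
		 * Hand IPv4 TCP frames with a hardware-validated checksum to
		 * LRO when it is enabled; anything else goes straight up the
		 * stack via if_input().
		 */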
3065 if (__predict_false(rx->lro_enabled &&
3066 ((r->status & MVNETA_RX_L3_IP) != 0) &&
3067 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3068 (m->m_pkthdr.csum_flags &
3069 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
3070 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) {
3071 if (rx->lro.lro_cnt != 0) {
3072 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3073 goto rx_done;
3074 }
3075 }
3076
3077 mvneta_rx_unlockq(sc, q);
3078 if_input(ifp, m);
3079 mvneta_rx_lockq(sc, q);
3080 /*
3081 * Check whether this queue has been disabled in the
3082 * meantime. If yes, then clear LRO and exit.
3083 */
3084 if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3085 goto rx_lro;
3086 rx_done:
3087 /* Refresh receive ring to avoid stall and minimize jitter. */
3088 if (processed >= MVNETA_RX_REFILL_COUNT) {
3089 mvneta_prxsu_update(sc, q, processed);
3090 mvneta_rx_queue_refill(sc, q);
3091 processed = 0;
3092 }
3093 continue;
3094 rx_error:
3095 m_freem(m);
3096 rx->dma = ndma;
3097 /* Refresh receive ring to avoid stall and minimize jitter. */
3098 if (processed >= MVNETA_RX_REFILL_COUNT) {
3099 mvneta_prxsu_update(sc, q, processed);
3100 mvneta_rx_queue_refill(sc, q);
3101 processed = 0;
3102 }
3103 }
3104 #ifdef MVNETA_KTR
3105 CTR3(KTR_SPARE2, "%s:%u %u packets received", if_name(ifp), q, npkt);
3106 #endif
3107 /* DMA status update */
3108 mvneta_prxsu_update(sc, q, processed);
3109 /* Refill the rest of buffers if there are any to refill */
3110 mvneta_rx_queue_refill(sc, q);
3111
3112 rx_lro:
3113 /*
3114 * Flush any outstanding LRO work
3115 */
3116 tcp_lro_flush_all(&rx->lro);
3117 }
3118
3119 STATIC void
3120 mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
3121 {
3122
3123 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3124 	/* m_freem() releases the entire mbuf chain at once */
3125 m_freem(rxbuf->m);
3126 }
3127
3128 STATIC void
3129 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3130 {
3131 struct mvneta_rx_ring *rx;
3132 struct mvneta_rx_desc *r;
3133 struct mvneta_buf *rxbuf;
3134 bus_dma_segment_t segs;
3135 struct mbuf *m;
3136 uint32_t prxs, prxsu, ndesc;
3137 int npkt, refill, nsegs, error;
3138
3139 KASSERT_RX_MTX(sc, q);
3140
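	/*
	 * Descriptors still posted to the hardware are the occupied (ODC)
	 * plus non-occupied (NODC) counts; the remainder of the ring can be
	 * refilled with fresh mbufs.
	 */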
3141 rx = MVNETA_RX_RING(sc, q);
3142 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3143 ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
3144 refill = MVNETA_RX_RING_CNT - ndesc;
3145 #ifdef MVNETA_KTR
3146 CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
3147 refill);
3148 #endif
3149 if (__predict_false(refill <= 0))
3150 return;
3151
3152 for (npkt = 0; npkt < refill; npkt++) {
3153 rxbuf = &rx->rxbuf[rx->cpu];
3154 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
3155 if (__predict_false(m == NULL)) {
3156 error = ENOBUFS;
3157 break;
3158 }
3159 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3160
3161 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3162 m, &segs, &nsegs, BUS_DMA_NOWAIT);
3163 if (__predict_false(error != 0 || nsegs != 1)) {
3164 KASSERT(1, ("Failed to load Rx mbuf DMA map"));
3165 m_freem(m);
3166 break;
3167 }
3168
3169 /* Add the packet to the ring */
3170 rxbuf->m = m;
3171 r = &rx->desc[rx->cpu];
3172 r->bufptr_pa = segs.ds_addr;
3173 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3174
3175 rx->cpu = rx_counter_adv(rx->cpu, 1);
3176 }
3177 if (npkt == 0) {
3178 if (refill == MVNETA_RX_RING_CNT)
3179 rx->needs_refill = TRUE;
3180 return;
3181 }
3182
3183 rx->needs_refill = FALSE;
3184 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3185
3186 while (__predict_false(npkt > 255)) {
3187 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
3188 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3189 npkt -= 255;
3190 }
3191 if (__predict_true(npkt > 0)) {
3192 prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
3193 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3194 }
3195 }
3196
3197 STATIC __inline void
3198 mvneta_rx_set_csumflag(if_t ifp,
3199 struct mvneta_rx_desc *r, struct mbuf *m)
3200 {
3201 uint32_t csum_flags;
3202
3203 csum_flags = 0;
3204 if (__predict_false((r->status &
3205 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
3206 		return; /* not an IP packet */
3207
3208 /* L3 */
3209 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3210 MVNETA_RX_IP_HEADER_OK))
3211 csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
3212
3213 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3214 (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
3215 /* L4 */
3216 switch (r->status & MVNETA_RX_L4_MASK) {
3217 case MVNETA_RX_L4_TCP:
3218 case MVNETA_RX_L4_UDP:
3219 csum_flags |= CSUM_L4_CALC;
3220 if (__predict_true((r->status &
3221 MVNETA_RX_L4_CHECKSUM_OK) == MVNETA_RX_L4_CHECKSUM_OK)) {
3222 csum_flags |= CSUM_L4_VALID;
3223 m->m_pkthdr.csum_data = htons(0xffff);
3224 }
3225 break;
3226 case MVNETA_RX_L4_OTH:
3227 default:
3228 break;
3229 }
3230 }
3231 m->m_pkthdr.csum_flags = csum_flags;
3232 }
3233
3234 /*
3235 * MAC address filter
3236 */
3237 STATIC void
3238 mvneta_filter_setup(struct mvneta_softc *sc)
3239 {
3240 if_t ifp;
3241 uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
3242 uint32_t pxc;
3243 int i;
3244
3245 KASSERT_SC_MTX(sc);
3246
3247 memset(dfut, 0, sizeof(dfut));
3248 memset(dfsmt, 0, sizeof(dfsmt));
3249 memset(dfomt, 0, sizeof(dfomt));
3250
3251 ifp = sc->ifp;
3252 if_setflagbits(ifp, IFF_ALLMULTI, 0);
3253 if (if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) {
3254 for (i = 0; i < MVNETA_NDFSMT; i++) {
3255 dfsmt[i] = dfomt[i] =
3256 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3257 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3258 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3259 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3260 }
3261 }
3262
3263 pxc = MVNETA_READ(sc, MVNETA_PXC);
3264 pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
3265 MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
3266 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3267 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3268 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3269 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3270 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3271 pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
3272 if (if_getflags(ifp) & IFF_BROADCAST) {
3273 pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
3274 }
3275 if (if_getflags(ifp) & IFF_PROMISC) {
3276 pxc |= MVNETA_PXC_UPM;
3277 }
3278 MVNETA_WRITE(sc, MVNETA_PXC, pxc);
3279
3280 /* Set Destination Address Filter Unicast Table */
3281 if (if_getflags(ifp) & IFF_PROMISC) {
3282 /* pass all unicast addresses */
3283 for (i = 0; i < MVNETA_NDFUT; i++) {
3284 dfut[i] =
3285 MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3286 MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3287 MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
3288 MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3289 }
3290 } else {
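		/*
		 * Accept only our own unicast address: the table is indexed
		 * by the low nibble of the last MAC byte, four entries per
		 * 32-bit word.
		 */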
3291 i = sc->enaddr[5] & 0xf; /* last nibble */
3292 dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
3293 }
3294 MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
3295
3296 /* Set Destination Address Filter Multicast Tables */
3297 MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
3298 MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
3299 }
3300
3301 /*
3302 * sysctl(9)
3303 */
3304 STATIC int
3305 sysctl_read_mib(SYSCTL_HANDLER_ARGS)
3306 {
3307 struct mvneta_sysctl_mib *arg;
3308 struct mvneta_softc *sc;
3309 uint64_t val;
3310
3311 arg = (struct mvneta_sysctl_mib *)arg1;
3312 if (arg == NULL)
3313 return (EINVAL);
3314
3315 sc = arg->sc;
3316 if (sc == NULL)
3317 return (EINVAL);
3318 if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER)
3319 return (EINVAL);
3320
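	/*
	 * Report the cached value; the hardware MIB registers are
	 * accumulated into sysctl_mib[] by mvneta_update_mib().
	 */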
3321 mvneta_sc_lock(sc);
3322 val = arg->counter;
3323 mvneta_sc_unlock(sc);
3324 	return (sysctl_handle_64(oidp, &val, 0, req));
3325 }
3326
3327
3328 STATIC int
3329 sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
3330 {
3331 struct mvneta_softc *sc;
3332 int err, val;
3333
3334 val = 0;
3335 sc = (struct mvneta_softc *)arg1;
3336 if (sc == NULL)
3337 return (EINVAL);
3338
3339 err = sysctl_handle_int(oidp, &val, 0, req);
3340 if (err != 0)
3341 return (err);
3342
3343 if (val < 0 || val > 1)
3344 return (EINVAL);
3345
3346 if (val == 1) {
3347 mvneta_sc_lock(sc);
3348 mvneta_clear_mib(sc);
3349 mvneta_sc_unlock(sc);
3350 }
3351
3352 return (0);
3353 }
3354
3355 STATIC int
3356 sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
3357 {
3358 struct mvneta_sysctl_queue *arg;
3359 struct mvneta_rx_ring *rx;
3360 struct mvneta_softc *sc;
3361 uint32_t reg, time_mvtclk;
3362 int err, time_us;
3363
3364 rx = NULL;
3365 arg = (struct mvneta_sysctl_queue *)arg1;
3366 if (arg == NULL)
3367 return (EINVAL);
3368 if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
3369 return (EINVAL);
3370 if (arg->rxtx != MVNETA_SYSCTL_RX)
3371 return (EINVAL);
3372
3373 sc = arg->sc;
3374 if (sc == NULL)
3375 return (EINVAL);
3376
3377 	/* read the current interrupt threshold time */
3378 mvneta_sc_lock(sc);
3379 mvneta_rx_lockq(sc, arg->queue);
3380 rx = MVNETA_RX_RING(sc, arg->queue);
3381 time_mvtclk = rx->queue_th_time;
3382 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq;
3383 mvneta_rx_unlockq(sc, arg->queue);
3384 mvneta_sc_unlock(sc);
3385
3386 err = sysctl_handle_int(oidp, &time_us, 0, req);
3387 if (err != 0)
3388 return (err);
3389
3390 mvneta_sc_lock(sc);
3391 mvneta_rx_lockq(sc, arg->queue);
3392
3393 	/* validate and update the threshold time (0 - 1 sec) */
3394 if (time_us < 0 || time_us > (1000 * 1000)) {
3395 mvneta_rx_unlockq(sc, arg->queue);
3396 mvneta_sc_unlock(sc);
3397 return (EINVAL);
3398 }
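	/* Convert microseconds to TCLK cycles: cycles = clk_freq * us / 10^6. */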
3399 time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL);
3400 rx->queue_th_time = time_mvtclk;
3401 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3402 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3403 mvneta_rx_unlockq(sc, arg->queue);
3404 mvneta_sc_unlock(sc);
3405
3406 return (0);
3407 }
3408
3409 STATIC void
3410 sysctl_mvneta_init(struct mvneta_softc *sc)
3411 {
3412 struct sysctl_ctx_list *ctx;
3413 struct sysctl_oid_list *children;
3414 struct sysctl_oid_list *rxchildren;
3415 struct sysctl_oid_list *qchildren, *mchildren;
3416 struct sysctl_oid *tree;
3417 int i, q;
3418 struct mvneta_sysctl_queue *rxarg;
3419 #define MVNETA_SYSCTL_NAME(num) "queue" # num
3420 static const char *sysctl_queue_names[] = {
3421 MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
3422 MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
3423 MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
3424 MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
3425 };
3426 #undef MVNETA_SYSCTL_NAME
3427
3428 #ifndef NO_SYSCTL_DESCR
3429 #define MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
3430 static const char *sysctl_queue_descrs[] = {
3431 MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
3432 MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
3433 MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
3434 MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
3435 };
3436 #undef MVNETA_SYSCTL_DESCR
3437 #endif
3438
3439
3440 ctx = device_get_sysctl_ctx(sc->dev);
3441 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3442
3443 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3444 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
3445 rxchildren = SYSCTL_CHILDREN(tree);
3446 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
3447 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA MIB");
3448 mchildren = SYSCTL_CHILDREN(tree);
3449
3450
3451 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
3452 CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3453 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
3454 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3455
3456 /*
3457 * MIB access
3458 */
3459 /* dev.mvneta.[unit].mib.<mibs> */
3460 for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
3461 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3462
3463 mib_arg->sc = sc;
3464 mib_arg->index = i;
3465 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO,
3466 mvneta_mib_list[i].sysctl_name,
3467 CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
3468 (void *)mib_arg, 0, sysctl_read_mib, "I",
3469 mvneta_mib_list[i].desc);
3470 }
3471 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
3472 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3473 SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
3474 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3475 SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
3476 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3477
3478 SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
3479 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3480 (void *)sc, 0, sysctl_clear_mib, "I", "Reset MIB counters");
3481
3482 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3483 rxarg = &sc->sysctl_rx_queue[q];
3484
3485 rxarg->sc = sc;
3486 rxarg->queue = q;
3487 rxarg->rxtx = MVNETA_SYSCTL_RX;
3488
3489 		/* dev.mvneta.[unit].rx.[queue] */
3490 tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
3491 sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
3492 sysctl_queue_descrs[q]);
3493 qchildren = SYSCTL_CHILDREN(tree);
3494
3495 		/* dev.mvneta.[unit].rx.[queue].threshold_timer_us */
3496 SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
3497 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, rxarg, 0,
3498 sysctl_set_queue_rxthtime, "I",
3499 "interrupt coalescing threshold timer [us]");
3500 }
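	/*
	 * Example (assuming unit 0): the RX interrupt coalescing timer for
	 * queue 0 can be tuned at runtime with
	 *   sysctl dev.mvneta.0.rx.queue0.threshold_timer_us=100
	 */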
3501 }
3502
3503 /*
3504 * MIB
3505 */
3506 STATIC uint64_t
3507 mvneta_read_mib(struct mvneta_softc *sc, int index)
3508 {
3509 struct mvneta_mib_def *mib;
3510 uint64_t val;
3511
3512 mib = &mvneta_mib_list[index];
3513 val = MVNETA_READ_MIB(sc, mib->regnum);
3514 if (mib->reg64)
3515 val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32;
3516 return (val);
3517 }
3518
3519 STATIC void
3520 mvneta_clear_mib(struct mvneta_softc *sc)
3521 {
3522 int i;
3523
3524 KASSERT_SC_MTX(sc);
3525
3526 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3527 (void)mvneta_read_mib(sc, i);
3528 sc->sysctl_mib[i].counter = 0;
3529 }
3530 MVNETA_READ(sc, MVNETA_PDFC);
3531 sc->counter_pdfc = 0;
3532 MVNETA_READ(sc, MVNETA_POFC);
3533 sc->counter_pofc = 0;
3534 sc->counter_watchdog = 0;
3535 }
3536
3537 STATIC void
3538 mvneta_update_mib(struct mvneta_softc *sc)
3539 {
3540 struct mvneta_tx_ring *tx;
3541 int i;
3542 uint64_t val;
3543 uint32_t reg;
3544
3545 for (i = 0; i < nitems(mvneta_mib_list); i++) {
3546
3547 val = mvneta_read_mib(sc, i);
3548 if (val == 0)
3549 continue;
3550
3551 sc->sysctl_mib[i].counter += val;
3552 switch (mvneta_mib_list[i].regnum) {
3553 case MVNETA_MIB_RX_GOOD_OCT:
3554 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3555 break;
3556 case MVNETA_MIB_RX_BAD_FRAME:
3557 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3558 break;
3559 case MVNETA_MIB_RX_GOOD_FRAME:
3560 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3561 break;
3562 case MVNETA_MIB_RX_MCAST_FRAME:
3563 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3564 break;
3565 case MVNETA_MIB_TX_GOOD_OCT:
3566 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3567 break;
3568 case MVNETA_MIB_TX_GOOD_FRAME:
3569 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3570 break;
3571 case MVNETA_MIB_TX_MCAST_FRAME:
3572 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3573 break;
3574 case MVNETA_MIB_MAC_COL:
3575 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3576 break;
3577 case MVNETA_MIB_TX_MAC_TRNS_ERR:
3578 case MVNETA_MIB_TX_EXCES_COL:
3579 case MVNETA_MIB_MAC_LATE_COL:
3580 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3581 break;
3582 }
3583 }
3584
3585 reg = MVNETA_READ(sc, MVNETA_PDFC);
3586 sc->counter_pdfc += reg;
3587 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3588 reg = MVNETA_READ(sc, MVNETA_POFC);
3589 sc->counter_pofc += reg;
3590 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3591
3592 /* TX watchdog. */
3593 if (sc->counter_watchdog_mib > 0) {
3594 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3595 sc->counter_watchdog_mib = 0;
3596 }
3597 /*
3598 * TX driver errors:
3599 	 * We do not take the queue locks so as not to disrupt the TX path.
3600 	 * We may miss at most one drv error, which will be picked up at the
3601 	 * next mib update. We may also clear the counter while the TX path
3602 	 * is incrementing it, but since we only clear it when it was non-zero
3603 	 * we can lose at most one error.
3604 */
3605 for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
3606 tx = MVNETA_TX_RING(sc, i);
3607
3608 if (tx->drv_error > 0) {
3609 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
3610 tx->drv_error = 0;
3611 }
3612 }
3613 }
3614