xref: /freebsd/sys/dev/dpaa/dpaa_eth.c (revision bac7bd5038e09d12dfdbf79a87b25443e02d0ba9)
1 /*-
2  * Copyright (c) 2026 Justin Hibbits
3  * Copyright (c) 2012 Semihalf.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/module.h>
32 #include <sys/bus.h>
33 #include <sys/rman.h>
34 #include <sys/malloc.h>
35 #include <sys/mbuf.h>
36 #include <sys/smp.h>
37 #include <sys/socket.h>
38 #include <sys/sockio.h>
39 #include <sys/sysctl.h>
40 
41 #include <net/ethernet.h>
42 #include <net/if.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 #include <net/if_types.h>
46 #include <net/if_arp.h>
47 #include <netinet/ip.h>
48 #include <netinet/ip6.h>
49 
50 #include <dev/mii/mii.h>
51 #include <dev/mii/miivar.h>
52 
53 #include <vm/vm.h>
54 #include <vm/pmap.h>
55 
56 #include "miibus_if.h"
57 
58 #include "bman.h"
59 #include "dpaa_common.h"
60 #include "dpaa_eth.h"
61 #include "fman.h"
62 #include "fman_parser.h"
63 #include "fman_port.h"
64 #include "fman_if.h"
65 #include "fman_port_if.h"
66 #include "if_dtsec.h"
67 #include "qman.h"
68 #include "qman_var.h"
69 #include "qman_portal_if.h"
70 
71 
72 #define DPAA_ETH_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
73 #define DPAA_ETH_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
74 #define DPAA_ETH_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
75 
76 /**
77  * @group dTSEC RM private defines.
78  * @{
79  */
80 #define	DTSEC_BPOOLS_USED	(1)
81 #define	DTSEC_MAX_TX_QUEUE_LEN	256
82 
83 struct dpaa_eth_frame_info {
84 	struct mbuf			*fi_mbuf;
85 	struct fman_internal_context	fi_ic;
86 	struct dpaa_sgte		fi_sgt[DPAA_NUM_OF_SG_TABLE_ENTRY];
87 };
88 
89 enum dpaa_eth_pool_params {
90 	DTSEC_RM_POOL_RX_LOW_MARK	= 16,
91 	DTSEC_RM_POOL_RX_HIGH_MARK	= 64,
92 	DTSEC_RM_POOL_RX_MAX_SIZE	= 256,
93 
94 	DTSEC_RM_POOL_FI_LOW_MARK	= 16,
95 	DTSEC_RM_POOL_FI_HIGH_MARK	= 64,
96 	DTSEC_RM_POOL_FI_MAX_SIZE	= 256,
97 };
98 
99 #define	DTSEC_RM_FQR_RX_CHANNEL		0x401
100 #define	DTSEC_RM_FQR_TX_CONF_CHANNEL	0
101 enum dpaa_eth_fq_params {
102 	DTSEC_RM_FQR_RX_WQ		= 1,
103 	DTSEC_RM_FQR_TX_WQ		= 1,
104 	DTSEC_RM_FQR_TX_CONF_WQ		= 1
105 };
106 /** @} */
107 
108 
109 /**
110  * @group dTSEC Frame Info routines.
111  * @{
112  */
113 void
114 dpaa_eth_fi_pool_free(struct dpaa_eth_softc *sc)
115 {
116 
117 	if (sc->sc_fi_zone != NULL)
118 		uma_zdestroy(sc->sc_fi_zone);
119 }
120 
121 int
122 dpaa_eth_fi_pool_init(struct dpaa_eth_softc *sc)
123 {
124 
125 	snprintf(sc->sc_fi_zname, sizeof(sc->sc_fi_zname), "%s: Frame Info",
126 	    device_get_nameunit(sc->sc_dev));
127 
128 	sc->sc_fi_zone = uma_zcreate(sc->sc_fi_zname,
129 	    sizeof(struct dpaa_eth_frame_info), NULL, NULL, NULL, NULL,
130 	    UMA_ALIGN_PTR, 0);
131 
132 	return (0);
133 }
134 
135 static struct dpaa_eth_frame_info *
136 dpaa_eth_fi_alloc(struct dpaa_eth_softc *sc)
137 {
138 	struct dpaa_eth_frame_info *fi;
139 
140 	fi = uma_zalloc(sc->sc_fi_zone, M_NOWAIT | M_ZERO);
141 
142 	return (fi);
143 }
144 
145 static void
146 dpaa_eth_fi_free(struct dpaa_eth_softc *sc, struct dpaa_eth_frame_info *fi)
147 {
148 
149 	uma_zfree(sc->sc_fi_zone, fi);
150 }
151 /** @} */
152 
153 
154 /**
155  * @group dTSEC FMan PORT routines.
156  * @{
157  */
158 int
159 dpaa_eth_fm_port_rx_init(struct dpaa_eth_softc *sc)
160 {
161 	struct fman_port_params params;
162 	int error;
163 
164 	params.dflt_fqid = sc->sc_rx_fqid;
165 	params.err_fqid = sc->sc_rx_fqid;
166 	params.rx_params.num_pools = 1;
167 	params.rx_params.bpools[0].bpid = bman_get_bpid(sc->sc_rx_pool);
168 	params.rx_params.bpools[0].size = MCLBYTES;
169 	error = FMAN_PORT_CONFIG(sc->sc_rx_port, &params);
170 	error = FMAN_PORT_INIT(sc->sc_rx_port);
171 	if (error != 0) {
172 		device_printf(sc->sc_dev, "couldn't initialize FM Port RX.\n");
173 		return (ENXIO);
174 	}
175 
176 	return (0);
177 }
178 
179 int
180 dpaa_eth_fm_port_tx_init(struct dpaa_eth_softc *sc)
181 {
182 	struct fman_port_params params;
183 	int error;
184 
185 	params.dflt_fqid = sc->sc_tx_conf_fqid;
186 	params.err_fqid = sc->sc_tx_conf_fqid;
187 
188 	error = FMAN_PORT_CONFIG(sc->sc_tx_port, &params);
189 	error = FMAN_PORT_INIT(sc->sc_tx_port);
190 	if (error != 0) {
191 		device_printf(sc->sc_dev, "couldn't initialize FM Port TX.\n");
192 		return (ENXIO);
193 	}
194 
195 	return (0);
196 }
197 /** @} */
198 
199 
200 /**
201  * @group dTSEC buffer pools routines.
202  * @{
203  */
204 static int
205 dpaa_eth_pool_rx_put_buffer(struct dpaa_eth_softc *sc, uint8_t *buffer,
206     void *context)
207 {
208 
209 	uma_zfree(sc->sc_rx_zone, buffer);
210 
211 	return (0);
212 }
213 
214 static int
215 dtsec_add_buffers(struct dpaa_eth_softc *sc, int count)
216 {
217 	struct bman_buffer bufs[8] = {};
218 	int err;
219 	int c;
220 
221 	while (count > 0) {
222 		c = min(8, count);
223 		for (int i = 0; i < c; i++) {
224 			void *b;
225 			vm_paddr_t pa;
226 
227 			b = uma_zalloc(sc->sc_rx_zone, M_NOWAIT);
228 			if (b == NULL)
229 				return (ENOMEM);
230 			pa = pmap_kextract((vm_offset_t)b);
231 			bufs[i].buf_hi = (pa >> 32);
232 			bufs[i].buf_lo = (pa & 0xffffffff);
233 		}
234 
235 		err = bman_put_buffers(sc->sc_rx_pool, bufs, c);
236 		if (err != 0)
237 			return (err);
238 		count -= c;
239 	}
240 
241 	return (0);
242 }
243 
/*
 * BMan depletion callback for the RX buffer pool.  'in' is true when
 * the pool enters the depleted state and false when it leaves it; only
 * entry triggers a refill.  Buffers are added until the pool count
 * rises above the high-water mark or an allocation fails.
 */
static void
dpaa_eth_pool_rx_depleted(void *h_App, bool in)
{
	struct dpaa_eth_softc *sc;
	unsigned int count;

	sc = h_App;

	if (!in)
		return;

	while (1) {
		count = bman_count(sc->sc_rx_pool);
		if (count > DTSEC_RM_POOL_RX_HIGH_MARK)
			return;

		/* Can only release 8 buffers at a time */
		/*
		 * NOTE(review): since count <= HIGH_MARK at this point,
		 * the expression below always evaluates to 8; the "+ 8"
		 * looks like it was meant to be min(HIGH_MARK - count, 8).
		 * Behavior (top up in batches of 8) is harmless — confirm
		 * intent before simplifying.
		 */
		count = min(DTSEC_RM_POOL_RX_HIGH_MARK - count + 8, 8);
		if (dtsec_add_buffers(sc, count) != 0)
			return;
	}
}
266 
267 void
268 dpaa_eth_pool_rx_free(struct dpaa_eth_softc *sc)
269 {
270 
271 	if (sc->sc_rx_pool != NULL)
272 		bman_pool_destroy(sc->sc_rx_pool);
273 
274 	if (sc->sc_rx_zone != NULL)
275 		uma_zdestroy(sc->sc_rx_zone);
276 }
277 
278 int
279 dpaa_eth_pool_rx_init(struct dpaa_eth_softc *sc)
280 {
281 
282 	/* MCLBYTES must be less than PAGE_SIZE */
283 	CTASSERT(MCLBYTES < PAGE_SIZE);
284 
285 	snprintf(sc->sc_rx_zname, sizeof(sc->sc_rx_zname), "%s: RX Buffers",
286 	    device_get_nameunit(sc->sc_dev));
287 
288 	sc->sc_rx_zone = uma_zcreate(sc->sc_rx_zname, MCLBYTES, NULL,
289 	    NULL, NULL, NULL, MCLBYTES - 1, 0);
290 
291 	sc->sc_rx_pool = bman_pool_create(&sc->sc_rx_bpid, MCLBYTES,
292 	    DTSEC_RM_POOL_RX_MAX_SIZE, DTSEC_RM_POOL_RX_LOW_MARK,
293 	    DTSEC_RM_POOL_RX_HIGH_MARK, 0, 0, dpaa_eth_pool_rx_depleted, sc);
294 	if (sc->sc_rx_pool == NULL) {
295 		device_printf(sc->sc_dev, "NULL rx pool  somehow\n");
296 		dpaa_eth_pool_rx_free(sc);
297 		return (EIO);
298 	}
299 
300 	dtsec_add_buffers(sc, DTSEC_RM_POOL_RX_HIGH_MARK);
301 
302 	return (0);
303 }
304 /** @} */
305 
306 
307 /**
308  * @group dTSEC Frame Queue Range routines.
309  * @{
310  */
311 static void
312 dpaa_eth_fq_mext_free(struct mbuf *m)
313 {
314 	struct dpaa_eth_softc *sc;
315 	void *buffer;
316 
317 	buffer = m->m_ext.ext_arg1;
318 	sc = m->m_ext.ext_arg2;
319 	if (bman_count(sc->sc_rx_pool) <= DTSEC_RM_POOL_RX_MAX_SIZE)
320 		bman_put_buffer(sc->sc_rx_pool,
321 		    pmap_kextract((vm_offset_t)buffer), sc->sc_rx_bpid);
322 	else
323 		dpaa_eth_pool_rx_put_buffer(sc, buffer, NULL);
324 }
325 
/*
 * Translate the FMan parser results for a received frame into mbuf
 * checksum flags (CSUM_L3_CALC/VALID, CSUM_L4_CALC/VALID).
 *
 * Always returns 0.
 */
static int
dpaa_eth_update_csum_flags(struct qman_fd *frame,
    struct fman_parse_result *prs, struct mbuf *m)
{
	/* l3r is stored big-endian by the hardware parser. */
	uint16_t l3r = be16toh(prs->l3r);

	/* TODO: nested protocols? */
	if ((l3r & L3R_FIRST_IP_M) != 0) {
		/* Parser recognized an IP header: L3 csum was checked. */
		m->m_pkthdr.csum_flags |= CSUM_L3_CALC;
		if ((l3r & L3R_FIRST_ERROR) == 0)
			m->m_pkthdr.csum_flags |= CSUM_L3_VALID;
	}
	if (frame->cmd_stat & DPAA_FD_RX_STATUS_L4CV) {
		/* Hardware computed the L4 checksum for this frame. */
		m->m_pkthdr.csum_flags |= CSUM_L4_CALC;
		m->m_pkthdr.csum_data = 0xffff;
		/* Valid only if a known L4 type parsed without error. */
		if ((prs->l4r & L4R_TYPE_M) != 0 &&
		    (prs->l4r & L4R_ERR) == 0)
			m->m_pkthdr.csum_flags |= CSUM_L4_VALID;
	}

	return (0);
}
348 
349 static int
350 dpaa_eth_fq_rx_callback(device_t portal, struct qman_fq *fq,
351     struct qman_fd *frame, void *app)
352 {
353 	struct dpaa_eth_softc *sc;
354 	struct mbuf *m;
355 	struct fman_internal_context *frame_ic;
356 	void *frame_va;
357 
358 	m = NULL;
359 	sc = app;
360 
361 	frame_va = DPAA_FD_GET_ADDR(frame);
362 	frame_ic = frame_va;	/* internal context at head of the frame */
363 	/* Only simple (single- or multi-) frames are supported. */
364 	KASSERT(frame->format == 0 || frame->format == 4,
365 	    ("%s(): Got unsupported frame format 0x%02X!", __func__,
366 	    frame->format));
367 
368 	if ((frame->cmd_stat & DPAA_FD_CMD_STAT_ERR_M) != 0) {
369 		device_printf(sc->sc_dev, "RX error: 0x%08X\n",
370 		    frame->cmd_stat);
371 		goto err;
372 	}
373 
374 	m = m_gethdr(M_NOWAIT, MT_HEADER);
375 	if (m == NULL)
376 		goto err;
377 
378 	if (frame->format == 0) {
379 		/* Single-frame format */
380 		m_extadd(m, (char *)frame_va + frame->offset, frame->length,
381 		    dpaa_eth_fq_mext_free, frame_va, sc, 0, EXT_NET_DRV);
382 	} else {
383 		struct dpaa_sgte *sgt =
384 		    (struct dpaa_sgte *)(char *)frame_va + frame->offset;
385 		/* Simple multi-frame format */
386 		for (int i = 0; i < DPAA_NUM_OF_SG_TABLE_ENTRY; i++) {
387 			if (sgt[i].length > 0)
388 				m_extadd(m, PHYS_TO_DMAP(sgt[i].addr),
389 				    sgt[i].length, dpaa_eth_fq_mext_free,
390 				    PHYS_TO_DMAP(sgt[i].addr), sc, 0,
391 				    EXT_NET_DRV);
392 			if (sgt[i].final)
393 				break;
394 		}
395 		/* Free the SGT buffer, it's no longer needed. */
396 		bman_put_buffer(sc->sc_rx_pool, frame->addr, sc->sc_rx_bpid);
397 	}
398 
399 	if (if_getcapenable(sc->sc_ifnet) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
400 		dpaa_eth_update_csum_flags(frame, &frame_ic->prs, m);
401 
402 	m->m_pkthdr.rcvif = sc->sc_ifnet;
403 	m->m_len = frame->length;
404 	m_fixhdr(m);
405 
406 	if_input(sc->sc_ifnet, m);
407 
408 	return (1);
409 
410 err:
411 	bman_put_buffer(sc->sc_rx_pool, frame->addr, sc->sc_rx_bpid);
412 	if (m != NULL)
413 		m_freem(m);
414 
415 	return (1);
416 }
417 
/*
 * QMan callback for TX confirmation frames.  Recovers the frame-info
 * record stashed in the first scatter-gather entry, frees the
 * transmitted mbuf chain, and restarts transmission if the start
 * routine had previously stalled on a full TX queue.  Always returns 1.
 */
static int
dpaa_eth_fq_tx_confirm_callback(device_t portal, struct qman_fq *fq,
    struct qman_fd *frame, void *app)
{
	struct dpaa_eth_frame_info *fi;
	struct dpaa_eth_softc *sc;
	unsigned int qlen;
	struct dpaa_sgte *sgt0;

	sc = app;

	/* Log transmit errors; the frame is reclaimed either way. */
	if ((frame->cmd_stat & DPAA_FD_TX_STAT_ERR_M) != 0)
		device_printf(sc->sc_dev, "TX error: 0x%08X\n",
		    frame->cmd_stat);

	/*
	 * We are storing struct dpaa_eth_frame_info in first entry
	 * of scatter-gather table.
	 */
	sgt0 = (struct dpaa_sgte *)PHYS_TO_DMAP(frame->addr + frame->offset);
	fi = (struct dpaa_eth_frame_info *)PHYS_TO_DMAP(sgt0->addr);

	/* Free transmitted frame */
	m_freem(fi->fi_mbuf);
	dpaa_eth_fi_free(sc, fi);

	qlen = qman_fq_get_counter(sc->sc_tx_conf_fq, QMAN_COUNTER_FRAME);

	/*
	 * All confirmations drained: if dpaa_eth_if_start_locked()
	 * previously set sc_tx_fq_full, clear it and kick TX again.
	 */
	if (qlen == 0) {
		DPAA_ETH_LOCK(sc);

		if (sc->sc_tx_fq_full) {
			sc->sc_tx_fq_full = 0;
			dpaa_eth_if_start_locked(sc);
		}

		DPAA_ETH_UNLOCK(sc);
	}

	return (1);
}
459 
460 void
461 dpaa_eth_fq_rx_free(struct dpaa_eth_softc *sc)
462 {
463 	int cpu;
464 
465 	if (sc->sc_rx_fq)
466 		qman_fq_free(sc->sc_rx_fq);
467 	if (sc->sc_rx_channel != 0) {
468 		CPU_FOREACH(cpu) {
469 			device_t portal = DPCPU_ID_GET(cpu, qman_affine_portal);
470 			QMAN_PORTAL_STATIC_DEQUEUE_RM_CHANNEL(portal,
471 			    sc->sc_rx_channel);
472 		}
473 		qman_free_channel(sc->sc_rx_channel);
474 	}
475 }
476 
/*
 * Create the default RX frame queue on a dedicated software channel,
 * subscribe every CPU's affine QMan portal to that channel, and
 * register the RX callback.
 *
 * Returns 0 on success, EIO on failure (resources released via
 * dpaa_eth_fq_rx_free() on the callback-registration path).
 */
int
dpaa_eth_fq_rx_init(struct dpaa_eth_softc *sc)
{
	void *fq;
	int error;
	int cpu;

	/* Default Frame Queue */
	/* The channel is shared with TX confirmation; allocate it once. */
	if (sc->sc_rx_channel == 0)
		sc->sc_rx_channel = qman_alloc_channel();
	fq = qman_fq_create(1, sc->sc_rx_channel, DTSEC_RM_FQR_RX_WQ,
	    false, 0, false, false, true, false, 0, 0, 0);
	if (fq == NULL) {
		device_printf(sc->sc_dev,
		    "could not create default RX queue\n");
		return (EIO);
	}

	/* Let every CPU's portal statically dequeue from our channel. */
	CPU_FOREACH(cpu) {
		device_t portal = DPCPU_ID_GET(cpu, qman_affine_portal);
		QMAN_PORTAL_STATIC_DEQUEUE_CHANNEL(portal, sc->sc_rx_channel);
	}

	sc->sc_rx_fq = fq;
	sc->sc_rx_fqid = qman_fq_get_fqid(fq);

	error = qman_fq_register_cb(fq, dpaa_eth_fq_rx_callback, sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not register RX callback\n");
		dpaa_eth_fq_rx_free(sc);
		return (EIO);
	}

	return (0);
}
512 
513 void
514 dpaa_eth_fq_tx_free(struct dpaa_eth_softc *sc)
515 {
516 
517 	if (sc->sc_tx_fq)
518 		qman_fq_free(sc->sc_tx_fq);
519 
520 	if (sc->sc_tx_conf_fq)
521 		qman_fq_free(sc->sc_tx_conf_fq);
522 }
523 
524 int
525 dpaa_eth_fq_tx_init(struct dpaa_eth_softc *sc)
526 {
527 	int error;
528 	void *fq;
529 
530 	/* TX Frame Queue */
531 	fq = qman_fq_create(1, sc->sc_port_tx_qman_chan,
532 	    DTSEC_RM_FQR_TX_WQ, false, 0, false, false, true, false, 0, 0, 0);
533 	if (fq == NULL) {
534 		device_printf(sc->sc_dev, "could not create default TX queue"
535 		    "\n");
536 		return (EIO);
537 	}
538 
539 	sc->sc_tx_fq = fq;
540 
541 	if (sc->sc_rx_channel == 0)
542 		sc->sc_rx_channel = qman_alloc_channel();
543 	/* TX Confirmation Frame Queue */
544 	fq = qman_fq_create(1, sc->sc_rx_channel,
545 	    DTSEC_RM_FQR_TX_CONF_WQ, false, 0, false, false, true, false, 0, 0,
546 	    0);
547 	if (fq == NULL) {
548 		device_printf(sc->sc_dev, "could not create TX confirmation "
549 		    "queue\n");
550 		dpaa_eth_fq_tx_free(sc);
551 		return (EIO);
552 	}
553 
554 	sc->sc_tx_conf_fq = fq;
555 	sc->sc_tx_conf_fqid = qman_fq_get_fqid(fq);
556 
557 	error = qman_fq_register_cb(fq, dpaa_eth_fq_tx_confirm_callback, sc);
558 	if (error != 0) {
559 		device_printf(sc->sc_dev, "could not register TX confirmation "
560 		    "callback\n");
561 		dpaa_eth_fq_tx_free(sc);
562 		return (EIO);
563 	}
564 
565 	return (0);
566 }
567 /** @} */
568 
569 /* Returns the cmd_stat field for the frame descriptor */
570 static uint32_t
571 dpaa_eth_tx_add_csum(struct dpaa_eth_frame_info *fi)
572 {
573 	struct mbuf *m = fi->fi_mbuf;
574 	struct fman_parse_result *prs = &fi->fi_ic.prs;
575 	uint32_t csum_flags = m->m_pkthdr.csum_flags;
576 	uint8_t ether_size = ETHER_HDR_LEN;
577 
578 	if ((csum_flags & CSUM_FLAGS_TX) == 0)
579 		return (0);
580 
581 	if (m->m_flags & M_VLANTAG)
582 		ether_size += ETHER_VLAN_ENCAP_LEN;
583 	if (csum_flags & CSUM_IP)
584 		prs->l3r = L3R_FIRST_IPV4;
585 	if (csum_flags & CSUM_IP_UDP) {
586 		prs->l4r = L4R_TYPE_UDP;
587 		prs->l4_off = ether_size + sizeof(struct ip);
588 	} else if (csum_flags & CSUM_IP_TCP) {
589 		prs->l4r = L4R_TYPE_TCP;
590 		prs->l4_off = ether_size + sizeof(struct ip);
591 	} else if (csum_flags & CSUM_IP6_UDP) {
592 		prs->l3r = L3R_FIRST_IPV6;
593 		prs->l4r = L4R_TYPE_UDP;
594 		prs->l4_off = ether_size + sizeof(struct ip6_hdr);
595 	} else if (csum_flags & CSUM_IP6_TCP) {
596 		prs->l3r = L3R_FIRST_IPV6;
597 		prs->l4r = L4R_TYPE_TCP;
598 		prs->l4_off = ether_size + sizeof(struct ip6_hdr);
599 	}
600 
601 	prs->ip_off[0] = ether_size;
602 
603 	return (DPAA_FD_TX_CMD_RPD | DPAA_FD_TX_CMD_DTC);
604 }
605 
606 /**
607  * @group dTSEC IFnet routines.
608  * @{
609  */
610 void
611 dpaa_eth_if_start_locked(struct dpaa_eth_softc *sc)
612 {
613 	vm_size_t dsize, psize, ssize;
614 	struct dpaa_eth_frame_info *fi;
615 	unsigned int qlen, i;
616 	struct mbuf *m0, *m;
617 	vm_offset_t vaddr;
618 	struct dpaa_fd fd;
619 
620 	DPAA_ETH_LOCK_ASSERT(sc);
621 	/* TODO: IFF_DRV_OACTIVE */
622 
623 	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
624 		return;
625 
626 	if ((if_getdrvflags(sc->sc_ifnet) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
627 		return;
628 
629 	while (!if_sendq_empty(sc->sc_ifnet)) {
630 		/* Check length of the TX queue */
631 		qlen = qman_fq_get_counter(sc->sc_tx_fq, QMAN_COUNTER_FRAME);
632 
633 		if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) {
634 			sc->sc_tx_fq_full = 1;
635 			return;
636 		}
637 
638 		fi = dpaa_eth_fi_alloc(sc);
639 		if (fi == NULL)
640 			return;
641 
642 		m0 = if_dequeue(sc->sc_ifnet);
643 		if (m0 == NULL) {
644 			dpaa_eth_fi_free(sc, fi);
645 			return;
646 		}
647 
648 		i = 0;
649 		m = m0;
650 		psize = 0;
651 		dsize = 0;
652 		fi->fi_mbuf = m0;
653 		while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
654 			if (m->m_len == 0)
655 				continue;
656 
657 			/*
658 			 * First entry in scatter-gather table is used to keep
659 			 * pointer to frame info structure.
660 			 */
661 			fi->fi_sgt[i].addr = pmap_kextract((vm_offset_t)fi);
662 			i++;
663 
664 			dsize = m->m_len;
665 			vaddr = (vm_offset_t)m->m_data;
666 			while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
667 				ssize = PAGE_SIZE - (vaddr & PAGE_MASK);
668 				if (m->m_len < ssize)
669 					ssize = m->m_len;
670 
671 				fi->fi_sgt[i].addr = pmap_kextract(vaddr);
672 				fi->fi_sgt[i].length = ssize;
673 
674 				fi->fi_sgt[i].extension = 0;
675 				fi->fi_sgt[i].final = 0;
676 				fi->fi_sgt[i].bpid = 0;
677 				fi->fi_sgt[i].offset = 0;
678 
679 				dsize -= ssize;
680 				vaddr += ssize;
681 				psize += ssize;
682 				i++;
683 			}
684 
685 			if (dsize > 0)
686 				break;
687 
688 			m = m->m_next;
689 		}
690 
691 		/* Check if SG table was constructed properly */
692 		if (m != NULL || dsize != 0) {
693 			dpaa_eth_fi_free(sc, fi);
694 			m_freem(m0);
695 			continue;
696 		}
697 
698 		fi->fi_sgt[i - 1].final = 1;
699 
700 		fd.addr = pmap_kextract((vm_offset_t)&fi->fi_ic);
701 		fd.length = psize;
702 		fd.format = DPAA_FD_FORMAT_SHORT_MBSF;
703 
704 		fd.liodn = 0;
705 		fd.bpid = 0;
706 		fd.eliodn = 0;
707 		fd.offset = offsetof(struct dpaa_eth_frame_info, fi_sgt) -
708 		    offsetof(struct dpaa_eth_frame_info, fi_ic);
709 		fd.cmd_stat = dpaa_eth_tx_add_csum(fi);
710 
711 		DPAA_ETH_UNLOCK(sc);
712 		if (qman_fq_enqueue(sc->sc_tx_fq, &fd) != 0) {
713 			dpaa_eth_fi_free(sc, fi);
714 			m_freem(m0);
715 		}
716 		DPAA_ETH_LOCK(sc);
717 	}
718 }
719 /** @} */
720