xref: /freebsd/sys/dev/dpaa/dpaa_eth.c (revision 6464974c5a56a9f899f8a673385711253f9fcbd0)
1 /*-
2  * Copyright (c) 2026 Justin Hibbits
3  * Copyright (c) 2012 Semihalf.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/module.h>
32 #include <sys/bus.h>
33 #include <sys/rman.h>
34 #include <sys/malloc.h>
35 #include <sys/mbuf.h>
36 #include <sys/smp.h>
37 #include <sys/socket.h>
38 #include <sys/sockio.h>
39 #include <sys/sysctl.h>
40 
41 #include <net/ethernet.h>
42 #include <net/if.h>
43 #include <net/if_dl.h>
44 #include <net/if_media.h>
45 #include <net/if_types.h>
46 #include <net/if_arp.h>
47 #include <netinet/ip.h>
48 #include <netinet/ip6.h>
49 
50 #include <dev/mii/mii.h>
51 #include <dev/mii/miivar.h>
52 
53 #include <vm/vm.h>
54 #include <vm/pmap.h>
55 
56 #include "miibus_if.h"
57 
58 #include "bman.h"
59 #include "dpaa_common.h"
60 #include "dpaa_eth.h"
61 #include "fman.h"
62 #include "fman_parser.h"
63 #include "fman_port.h"
64 #include "fman_if.h"
65 #include "fman_port_if.h"
66 #include "if_dtsec.h"
67 #include "qman.h"
68 #include "qman_var.h"
69 #include "qman_portal_if.h"
70 
71 
72 #define DPAA_ETH_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
73 #define DPAA_ETH_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
74 #define DPAA_ETH_LOCK_ASSERT(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
75 
76 /**
77  * @group dTSEC RM private defines.
78  * @{
79  */
80 #define	DTSEC_BPOOLS_USED	(1)
81 #define	DTSEC_MAX_TX_QUEUE_LEN	256
82 
83 struct dpaa_eth_frame_info {
84 	struct mbuf			*fi_mbuf;
85 	struct fman_internal_context	fi_ic;
86 	struct dpaa_sgte		fi_sgt[DPAA_NUM_OF_SG_TABLE_ENTRY];
87 };
88 
89 enum dpaa_eth_pool_params {
90 	DTSEC_RM_POOL_RX_LOW_MARK	= 16,
91 	DTSEC_RM_POOL_RX_HIGH_MARK	= 64,
92 	DTSEC_RM_POOL_RX_MAX_SIZE	= 256,
93 
94 	DTSEC_RM_POOL_FI_LOW_MARK	= 16,
95 	DTSEC_RM_POOL_FI_HIGH_MARK	= 64,
96 	DTSEC_RM_POOL_FI_MAX_SIZE	= 256,
97 };
98 
99 #define	DTSEC_RM_FQR_RX_CHANNEL		0x401
100 #define	DTSEC_RM_FQR_TX_CONF_CHANNEL	0
101 enum dpaa_eth_fq_params {
102 	DTSEC_RM_FQR_RX_WQ		= 1,
103 	DTSEC_RM_FQR_TX_WQ		= 1,
104 	DTSEC_RM_FQR_TX_CONF_WQ		= 1
105 };
106 /** @} */
107 
108 
109 /**
110  * @group dTSEC Frame Info routines.
111  * @{
112  */
113 void
114 dpaa_eth_fi_pool_free(struct dpaa_eth_softc *sc)
115 {
116 
117 	if (sc->sc_fi_zone != NULL)
118 		uma_zdestroy(sc->sc_fi_zone);
119 }
120 
121 int
122 dpaa_eth_fi_pool_init(struct dpaa_eth_softc *sc)
123 {
124 
125 	snprintf(sc->sc_fi_zname, sizeof(sc->sc_fi_zname), "%s: Frame Info",
126 	    device_get_nameunit(sc->sc_dev));
127 
128 	sc->sc_fi_zone = uma_zcreate(sc->sc_fi_zname,
129 	    sizeof(struct dpaa_eth_frame_info), NULL, NULL, NULL, NULL,
130 	    UMA_ALIGN_PTR, 0);
131 
132 	return (0);
133 }
134 
135 static struct dpaa_eth_frame_info *
136 dpaa_eth_fi_alloc(struct dpaa_eth_softc *sc)
137 {
138 	struct dpaa_eth_frame_info *fi;
139 
140 	fi = uma_zalloc(sc->sc_fi_zone, M_NOWAIT | M_ZERO);
141 
142 	return (fi);
143 }
144 
145 static void
146 dpaa_eth_fi_free(struct dpaa_eth_softc *sc, struct dpaa_eth_frame_info *fi)
147 {
148 
149 	uma_zfree(sc->sc_fi_zone, fi);
150 }
151 /** @} */
152 
153 
154 /**
155  * @group dTSEC FMan PORT routines.
156  * @{
157  */
158 int
159 dpaa_eth_fm_port_rx_init(struct dpaa_eth_softc *sc)
160 {
161 	struct fman_port_params params;
162 	int error;
163 
164 	params.dflt_fqid = sc->sc_rx_fqid;
165 	params.err_fqid = sc->sc_rx_fqid;
166 	params.rx_params.num_pools = 1;
167 	params.rx_params.bpools[0].bpid = bman_get_bpid(sc->sc_rx_pool);
168 	params.rx_params.bpools[0].size = MCLBYTES;
169 	error = FMAN_PORT_CONFIG(sc->sc_rx_port, &params);
170 	error = FMAN_PORT_INIT(sc->sc_rx_port);
171 	if (error != 0) {
172 		device_printf(sc->sc_dev, "couldn't initialize FM Port RX.\n");
173 		return (ENXIO);
174 	}
175 
176 	return (0);
177 }
178 
179 int
180 dpaa_eth_fm_port_tx_init(struct dpaa_eth_softc *sc)
181 {
182 	struct fman_port_params params;
183 	int error;
184 
185 	params.dflt_fqid = sc->sc_tx_conf_fqid;
186 	params.err_fqid = sc->sc_tx_conf_fqid;
187 
188 	error = FMAN_PORT_CONFIG(sc->sc_tx_port, &params);
189 	error = FMAN_PORT_INIT(sc->sc_tx_port);
190 	if (error != 0) {
191 		device_printf(sc->sc_dev, "couldn't initialize FM Port TX.\n");
192 		return (ENXIO);
193 	}
194 
195 	return (0);
196 }
197 /** @} */
198 
199 
200 /**
201  * @group dTSEC buffer pools routines.
202  * @{
203  */
204 static int
205 dpaa_eth_pool_rx_put_buffer(struct dpaa_eth_softc *sc, uint8_t *buffer,
206     void *context)
207 {
208 
209 	uma_zfree(sc->sc_rx_zone, buffer);
210 
211 	return (0);
212 }
213 
214 static int
215 dtsec_add_buffers(struct dpaa_eth_softc *sc, int count)
216 {
217 	struct bman_buffer bufs[8] = {};
218 	int err;
219 	int c;
220 
221 	while (count > 0) {
222 		c = min(8, count);
223 		for (int i = 0; i < c; i++) {
224 			void *b;
225 			vm_paddr_t pa;
226 
227 			b = uma_zalloc(sc->sc_rx_zone, M_NOWAIT);
228 			if (b == NULL)
229 				return (ENOMEM);
230 			pa = pmap_kextract((vm_offset_t)b);
231 			bufs[i].buf_hi = (pa >> 32);
232 			bufs[i].buf_lo = (pa & 0xffffffff);
233 		}
234 
235 		err = bman_put_buffers(sc->sc_rx_pool, bufs, c);
236 		if (err != 0)
237 			return (err);
238 		count -= c;
239 	}
240 
241 	return (0);
242 }
243 
244 static void
245 dpaa_eth_pool_rx_depleted(void *h_App, bool in)
246 {
247 	struct dpaa_eth_softc *sc;
248 	unsigned int count;
249 
250 	sc = h_App;
251 
252 	if (!in)
253 		return;
254 
255 	while (1) {
256 		count = bman_count(sc->sc_rx_pool);
257 		if (count > DTSEC_RM_POOL_RX_HIGH_MARK)
258 			return;
259 
260 		/* Can only release 8 buffers at a time */
261 		count = min(DTSEC_RM_POOL_RX_HIGH_MARK - count + 8, 8);
262 		if (dtsec_add_buffers(sc, count) != 0)
263 			return;
264 	}
265 }
266 
267 void
268 dpaa_eth_pool_rx_free(struct dpaa_eth_softc *sc)
269 {
270 
271 	if (sc->sc_rx_pool != NULL)
272 		bman_pool_destroy(sc->sc_rx_pool);
273 
274 	if (sc->sc_rx_zone != NULL)
275 		uma_zdestroy(sc->sc_rx_zone);
276 }
277 
278 int
279 dpaa_eth_pool_rx_init(struct dpaa_eth_softc *sc)
280 {
281 
282 	/* MCLBYTES must be less than PAGE_SIZE */
283 	CTASSERT(MCLBYTES < PAGE_SIZE);
284 
285 	snprintf(sc->sc_rx_zname, sizeof(sc->sc_rx_zname), "%s: RX Buffers",
286 	    device_get_nameunit(sc->sc_dev));
287 
288 	sc->sc_rx_zone = uma_zcreate(sc->sc_rx_zname, MCLBYTES, NULL,
289 	    NULL, NULL, NULL, MCLBYTES - 1, 0);
290 
291 	sc->sc_rx_pool = bman_pool_create(&sc->sc_rx_bpid, MCLBYTES,
292 	    DTSEC_RM_POOL_RX_MAX_SIZE, DTSEC_RM_POOL_RX_LOW_MARK,
293 	    DTSEC_RM_POOL_RX_HIGH_MARK, 0, 0, dpaa_eth_pool_rx_depleted, sc);
294 	if (sc->sc_rx_pool == NULL) {
295 		device_printf(sc->sc_dev, "NULL rx pool  somehow\n");
296 		dpaa_eth_pool_rx_free(sc);
297 		return (EIO);
298 	}
299 
300 	dtsec_add_buffers(sc, DTSEC_RM_POOL_RX_HIGH_MARK);
301 
302 	return (0);
303 }
304 /** @} */
305 
306 
307 /**
308  * @group dTSEC Frame Queue Range routines.
309  * @{
310  */
311 static void
312 dpaa_eth_fq_mext_free(struct mbuf *m)
313 {
314 	struct dpaa_eth_softc *sc;
315 	void *buffer;
316 
317 	buffer = m->m_ext.ext_arg1;
318 	sc = m->m_ext.ext_arg2;
319 	if (bman_count(sc->sc_rx_pool) <= DTSEC_RM_POOL_RX_MAX_SIZE)
320 		bman_put_buffer(sc->sc_rx_pool,
321 		    pmap_kextract((vm_offset_t)buffer), sc->sc_rx_bpid);
322 	else
323 		dpaa_eth_pool_rx_put_buffer(sc, buffer, NULL);
324 }
325 
326 static int
327 dpaa_eth_update_csum_flags(struct qman_fd *frame,
328     struct fman_parse_result *prs, struct mbuf *m)
329 {
330 	uint16_t l3r = be16toh(prs->l3r);
331 
332 	/* TODO: nested protocols? */
333 	if ((l3r & L3R_FIRST_IP_M) != 0) {
334 		m->m_pkthdr.csum_flags |= CSUM_L3_CALC;
335 		if ((l3r & L3R_FIRST_ERROR) == 0)
336 			m->m_pkthdr.csum_flags |= CSUM_L3_VALID;
337 	}
338 	if (frame->cmd_stat & DPAA_FD_RX_STATUS_L4CV) {
339 		m->m_pkthdr.csum_flags |= CSUM_L4_CALC;
340 		m->m_pkthdr.csum_data = 0xffff;
341 		if ((prs->l4r & L4R_TYPE_M) != 0 &&
342 		    (prs->l4r & L4R_ERR) == 0)
343 			m->m_pkthdr.csum_flags |= CSUM_L4_VALID;
344 	}
345 
346 	return (0);
347 }
348 
/*
 * QMan dequeue callback for the RX frame queue.
 *
 * Wraps the hardware-delivered buffer in a zero-copy external mbuf and
 * hands it to the stack; ownership of the buffer transfers to the mbuf
 * and is reclaimed by dpaa_eth_fq_mext_free().  On any error the buffer
 * is returned straight to the BMan pool.  Always returns 1 (consumed).
 */
static int
dpaa_eth_fq_rx_callback(device_t portal, struct qman_fq *fq,
    struct qman_fd *frame, void *app)
{
	struct dpaa_eth_softc *sc;
	struct mbuf *m;
	struct fman_internal_context *frame_ic;
	void *frame_va;

	m = NULL;
	sc = app;

	frame_va = DPAA_FD_GET_ADDR(frame);
	frame_ic = frame_va;	/* internal context at head of the frame */
	/* Only single-buffer frames (format 0) are handled here. */
	KASSERT(frame->format == 0,
	    ("%s(): Got unsupported frame format 0x%02X!", __func__,
	    frame->format));

	if ((frame->cmd_stat & DPAA_FD_CMD_STAT_ERR_M) != 0) {
		device_printf(sc->sc_dev, "RX error: 0x%08X\n",
		    frame->cmd_stat);
		goto err;
	}

	m = m_gethdr(M_NOWAIT, MT_HEADER);
	if (m == NULL)
		goto err;

	/*
	 * Attach the DMA buffer as external storage.  Packet data starts
	 * frame->offset bytes past the buffer head; the internal context
	 * (with parse results) lives in front of it.
	 */
	m_extadd(m, (char *)frame_va + frame->offset, frame->length,
	    dpaa_eth_fq_mext_free, frame_va, sc, 0, EXT_NET_DRV);

	/* Let hardware parse results populate the mbuf checksum flags. */
	if (if_getcapenable(sc->sc_ifnet) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
		dpaa_eth_update_csum_flags(frame, &frame_ic->prs, m);

	m->m_pkthdr.rcvif = sc->sc_ifnet;
	m->m_len = frame->length;
	m_fixhdr(m);

	if_input(sc->sc_ifnet, m);

	return (1);

err:
	/* Recycle the buffer; free the mbuf if allocated but never used. */
	bman_put_buffer(sc->sc_rx_pool, frame->addr, sc->sc_rx_bpid);
	if (m != NULL)
		m_freem(m);

	return (1);
}
398 
/*
 * QMan dequeue callback for the TX confirmation queue.
 *
 * Reclaims the mbuf chain and frame info of a transmitted frame and,
 * once the confirmation queue drains, restarts transmission if it had
 * stalled on a full TX queue.  Always returns 1 (frame consumed).
 */
static int
dpaa_eth_fq_tx_confirm_callback(device_t portal, struct qman_fq *fq,
    struct qman_fd *frame, void *app)
{
	struct dpaa_eth_frame_info *fi;
	struct dpaa_eth_softc *sc;
	unsigned int qlen;
	struct dpaa_sgte *sgt0;

	sc = app;

	/* TX errors are logged, but the frame is reclaimed regardless. */
	if ((frame->cmd_stat & DPAA_FD_TX_STAT_ERR_M) != 0)
		device_printf(sc->sc_dev, "TX error: 0x%08X\n",
		    frame->cmd_stat);

	/*
	 * We are storing struct dpaa_eth_frame_info in first entry
	 * of scatter-gather table.
	 * frame->addr + frame->offset is the physical address of the SG
	 * table; recover kernel pointers through the direct map.
	 */
	sgt0 = (struct dpaa_sgte *)PHYS_TO_DMAP(frame->addr + frame->offset);
	fi = (struct dpaa_eth_frame_info *)PHYS_TO_DMAP(sgt0->addr);

	/* Free transmitted frame */
	m_freem(fi->fi_mbuf);
	dpaa_eth_fi_free(sc, fi);

	qlen = qman_fq_get_counter(sc->sc_tx_conf_fq, QMAN_COUNTER_FRAME);

	if (qlen == 0) {
		DPAA_ETH_LOCK(sc);

		/* Kick the send queue if we stopped it when TX filled up. */
		if (sc->sc_tx_fq_full) {
			sc->sc_tx_fq_full = 0;
			dpaa_eth_if_start_locked(sc);
		}

		DPAA_ETH_UNLOCK(sc);
	}

	return (1);
}
440 
441 void
442 dpaa_eth_fq_rx_free(struct dpaa_eth_softc *sc)
443 {
444 	int cpu;
445 
446 	if (sc->sc_rx_fq)
447 		qman_fq_free(sc->sc_rx_fq);
448 	if (sc->sc_rx_channel != 0) {
449 		CPU_FOREACH(cpu) {
450 			device_t portal = DPCPU_ID_GET(cpu, qman_affine_portal);
451 			QMAN_PORTAL_STATIC_DEQUEUE_RM_CHANNEL(portal,
452 			    sc->sc_rx_channel);
453 		}
454 		qman_free_channel(sc->sc_rx_channel);
455 	}
456 }
457 
458 int
459 dpaa_eth_fq_rx_init(struct dpaa_eth_softc *sc)
460 {
461 	void *fq;
462 	int error;
463 	int cpu;
464 
465 	/* Default Frame Queue */
466 	if (sc->sc_rx_channel == 0)
467 		sc->sc_rx_channel = qman_alloc_channel();
468 	fq = qman_fq_create(1, sc->sc_rx_channel, DTSEC_RM_FQR_RX_WQ,
469 	    false, 0, false, false, true, false, 0, 0, 0);
470 	if (fq == NULL) {
471 		device_printf(sc->sc_dev,
472 		    "could not create default RX queue\n");
473 		return (EIO);
474 	}
475 
476 	CPU_FOREACH(cpu) {
477 		device_t portal = DPCPU_ID_GET(cpu, qman_affine_portal);
478 		QMAN_PORTAL_STATIC_DEQUEUE_CHANNEL(portal, sc->sc_rx_channel);
479 	}
480 
481 	sc->sc_rx_fq = fq;
482 	sc->sc_rx_fqid = qman_fq_get_fqid(fq);
483 
484 	error = qman_fq_register_cb(fq, dpaa_eth_fq_rx_callback, sc);
485 	if (error != 0) {
486 		device_printf(sc->sc_dev, "could not register RX callback\n");
487 		dpaa_eth_fq_rx_free(sc);
488 		return (EIO);
489 	}
490 
491 	return (0);
492 }
493 
494 void
495 dpaa_eth_fq_tx_free(struct dpaa_eth_softc *sc)
496 {
497 
498 	if (sc->sc_tx_fq)
499 		qman_fq_free(sc->sc_tx_fq);
500 
501 	if (sc->sc_tx_conf_fq)
502 		qman_fq_free(sc->sc_tx_conf_fq);
503 }
504 
505 int
506 dpaa_eth_fq_tx_init(struct dpaa_eth_softc *sc)
507 {
508 	int error;
509 	void *fq;
510 
511 	/* TX Frame Queue */
512 	fq = qman_fq_create(1, sc->sc_port_tx_qman_chan,
513 	    DTSEC_RM_FQR_TX_WQ, false, 0, false, false, true, false, 0, 0, 0);
514 	if (fq == NULL) {
515 		device_printf(sc->sc_dev, "could not create default TX queue"
516 		    "\n");
517 		return (EIO);
518 	}
519 
520 	sc->sc_tx_fq = fq;
521 
522 	if (sc->sc_rx_channel == 0)
523 		sc->sc_rx_channel = qman_alloc_channel();
524 	/* TX Confirmation Frame Queue */
525 	fq = qman_fq_create(1, sc->sc_rx_channel,
526 	    DTSEC_RM_FQR_TX_CONF_WQ, false, 0, false, false, true, false, 0, 0,
527 	    0);
528 	if (fq == NULL) {
529 		device_printf(sc->sc_dev, "could not create TX confirmation "
530 		    "queue\n");
531 		dpaa_eth_fq_tx_free(sc);
532 		return (EIO);
533 	}
534 
535 	sc->sc_tx_conf_fq = fq;
536 	sc->sc_tx_conf_fqid = qman_fq_get_fqid(fq);
537 
538 	error = qman_fq_register_cb(fq, dpaa_eth_fq_tx_confirm_callback, sc);
539 	if (error != 0) {
540 		device_printf(sc->sc_dev, "could not register TX confirmation "
541 		    "callback\n");
542 		dpaa_eth_fq_tx_free(sc);
543 		return (EIO);
544 	}
545 
546 	return (0);
547 }
548 /** @} */
549 
550 /* Returns the cmd_stat field for the frame descriptor */
551 static uint32_t
552 dpaa_eth_tx_add_csum(struct dpaa_eth_frame_info *fi)
553 {
554 	struct mbuf *m = fi->fi_mbuf;
555 	struct fman_parse_result *prs = &fi->fi_ic.prs;
556 	uint32_t csum_flags = m->m_pkthdr.csum_flags;
557 	uint8_t ether_size = ETHER_HDR_LEN;
558 
559 	if ((csum_flags & CSUM_FLAGS_TX) == 0)
560 		return (0);
561 
562 	if (m->m_flags & M_VLANTAG)
563 		ether_size += ETHER_VLAN_ENCAP_LEN;
564 	if (csum_flags & CSUM_IP)
565 		prs->l3r = L3R_FIRST_IPV4;
566 	if (csum_flags & CSUM_IP_UDP) {
567 		prs->l4r = L4R_TYPE_UDP;
568 		prs->l4_off = ether_size + sizeof(struct ip);
569 	} else if (csum_flags & CSUM_IP_TCP) {
570 		prs->l4r = L4R_TYPE_TCP;
571 		prs->l4_off = ether_size + sizeof(struct ip);
572 	} else if (csum_flags & CSUM_IP6_UDP) {
573 		prs->l3r = L3R_FIRST_IPV6;
574 		prs->l4r = L4R_TYPE_UDP;
575 		prs->l4_off = ether_size + sizeof(struct ip6_hdr);
576 	} else if (csum_flags & CSUM_IP6_TCP) {
577 		prs->l3r = L3R_FIRST_IPV6;
578 		prs->l4r = L4R_TYPE_TCP;
579 		prs->l4_off = ether_size + sizeof(struct ip6_hdr);
580 	}
581 
582 	prs->ip_off[0] = ether_size;
583 
584 	return (DPAA_FD_TX_CMD_RPD | DPAA_FD_TX_CMD_DTC);
585 }
586 
587 /**
588  * @group dTSEC IFnet routines.
589  * @{
590  */
591 void
592 dpaa_eth_if_start_locked(struct dpaa_eth_softc *sc)
593 {
594 	vm_size_t dsize, psize, ssize;
595 	struct dpaa_eth_frame_info *fi;
596 	unsigned int qlen, i;
597 	struct mbuf *m0, *m;
598 	vm_offset_t vaddr;
599 	struct dpaa_fd fd;
600 
601 	DPAA_ETH_LOCK_ASSERT(sc);
602 	/* TODO: IFF_DRV_OACTIVE */
603 
604 	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0)
605 		return;
606 
607 	if ((if_getdrvflags(sc->sc_ifnet) & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
608 		return;
609 
610 	while (!if_sendq_empty(sc->sc_ifnet)) {
611 		/* Check length of the TX queue */
612 		qlen = qman_fq_get_counter(sc->sc_tx_fq, QMAN_COUNTER_FRAME);
613 
614 		if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) {
615 			sc->sc_tx_fq_full = 1;
616 			return;
617 		}
618 
619 		fi = dpaa_eth_fi_alloc(sc);
620 		if (fi == NULL)
621 			return;
622 
623 		m0 = if_dequeue(sc->sc_ifnet);
624 		if (m0 == NULL) {
625 			dpaa_eth_fi_free(sc, fi);
626 			return;
627 		}
628 
629 		i = 0;
630 		m = m0;
631 		psize = 0;
632 		dsize = 0;
633 		fi->fi_mbuf = m0;
634 		while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
635 			if (m->m_len == 0)
636 				continue;
637 
638 			/*
639 			 * First entry in scatter-gather table is used to keep
640 			 * pointer to frame info structure.
641 			 */
642 			fi->fi_sgt[i].addr = pmap_kextract((vm_offset_t)fi);
643 			i++;
644 
645 			dsize = m->m_len;
646 			vaddr = (vm_offset_t)m->m_data;
647 			while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) {
648 				ssize = PAGE_SIZE - (vaddr & PAGE_MASK);
649 				if (m->m_len < ssize)
650 					ssize = m->m_len;
651 
652 				fi->fi_sgt[i].addr = pmap_kextract(vaddr);
653 				fi->fi_sgt[i].length = ssize;
654 
655 				fi->fi_sgt[i].extension = 0;
656 				fi->fi_sgt[i].final = 0;
657 				fi->fi_sgt[i].bpid = 0;
658 				fi->fi_sgt[i].offset = 0;
659 
660 				dsize -= ssize;
661 				vaddr += ssize;
662 				psize += ssize;
663 				i++;
664 			}
665 
666 			if (dsize > 0)
667 				break;
668 
669 			m = m->m_next;
670 		}
671 
672 		/* Check if SG table was constructed properly */
673 		if (m != NULL || dsize != 0) {
674 			dpaa_eth_fi_free(sc, fi);
675 			m_freem(m0);
676 			continue;
677 		}
678 
679 		fi->fi_sgt[i - 1].final = 1;
680 
681 		fd.addr = pmap_kextract((vm_offset_t)&fi->fi_ic);
682 		fd.length = psize;
683 		fd.format = DPAA_FD_FORMAT_SHORT_MBSF;
684 
685 		fd.liodn = 0;
686 		fd.bpid = 0;
687 		fd.eliodn = 0;
688 		fd.offset = offsetof(struct dpaa_eth_frame_info, fi_sgt) -
689 		    offsetof(struct dpaa_eth_frame_info, fi_ic);
690 		fd.cmd_stat = dpaa_eth_tx_add_csum(fi);
691 
692 		DPAA_ETH_UNLOCK(sc);
693 		if (qman_fq_enqueue(sc->sc_tx_fq, &fd) != 0) {
694 			dpaa_eth_fi_free(sc, fi);
695 			m_freem(m0);
696 		}
697 		DPAA_ETH_LOCK(sc);
698 	}
699 }
700 /** @} */
701