/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "lio_rxtx.h"

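/*
 * Transmit one mbuf chain on instruction queue iq.  Returns 0 on success,
 * ENOBUFS if the caller should requeue the mbuf and retry later (the mbuf
 * is not consumed), or another errno after the mbuf has been dropped and
 * freed.
 */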
int
lio_xmit(struct lio *lio, struct lio_instr_queue *iq,
	 struct mbuf **m_headp)
{
	struct lio_data_pkt		ndata;
	union lio_cmd_setup		cmdsetup;
	struct lio_mbuf_free_info	*finfo = NULL;
	struct octeon_device		*oct = iq->oct_dev;
	struct lio_iq_stats		*stats;
	struct octeon_instr_irh		*irh;
	struct lio_request_list		*tx_buf;
	union lio_tx_info		*tx_info;
	struct mbuf			*m_head;
	bus_dma_segment_t		segs[LIO_MAX_SG];
	bus_dmamap_t			map;
	uint64_t	dptr = 0;
	uint32_t	tag = 0;
	int		iq_no = 0;
	int		nsegs;
	int		status = 0;

	iq_no = iq->txpciq.s.q_no;
	tag = iq_no;
	stats = &oct->instr_queue[iq_no]->stats;
	tx_buf = iq->request_list + iq->host_write_index;

	/*
	 * Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up)) {
		lio_dev_info(oct, "Transmit failed: link_status %d\n",
			     lio->linfo.link.s.link_up);
		status = ENETDOWN;
		goto drop_packet;
	}

	if (lio_iq_is_full(oct, iq_no)) {
		/* Defer sending if queue is full */
		lio_dev_dbg(oct, "Transmit failed iq:%d full\n", iq_no);
		stats->tx_iq_busy++;
		return (ENOBUFS);
	}

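	/*
	 * DMA-map the mbuf chain.  If it has more segments than the tag
	 * allows (EFBIG), defragment it once and retry the mapping before
	 * giving up on the packet.
	 */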
	map = tx_buf->map;
	status = bus_dmamap_load_mbuf_sg(iq->txtag, map, *m_headp, segs, &nsegs,
					 BUS_DMA_NOWAIT);
	if (status == EFBIG) {
		struct mbuf	*m;

		m = m_defrag(*m_headp, M_NOWAIT);
		if (m == NULL) {
			stats->mbuf_defrag_failed++;
			goto drop_packet;
		}

		*m_headp = m;
		status = bus_dmamap_load_mbuf_sg(iq->txtag, map,
						 *m_headp, segs, &nsegs,
						 BUS_DMA_NOWAIT);
	}

	if (status == ENOMEM) {
		goto retry;
	} else if (status) {
		stats->tx_dmamap_fail++;
		lio_dev_dbg(oct, "bus_dmamap_load_mbuf_sg failed with error %d. iq:%d\n",
			    status, iq_no);
		goto drop_packet;
	}

	m_head = *m_headp;

	/* Info used to unmap and free the buffers. */
	finfo = &tx_buf->finfo;
	finfo->map = map;
	finfo->mb = m_head;

	/* Prepare the attributes for the data to be passed to OSI. */
	bzero(&ndata, sizeof(struct lio_data_pkt));

	ndata.buf = (void *)finfo;
	ndata.q_no = iq_no;
	ndata.datasize = m_head->m_pkthdr.len;

	cmdsetup.cmd_setup64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		cmdsetup.s.ip_csum = 1;

	if ((m_head->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) ||
	    (m_head->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)))
		cmdsetup.s.transport_csum = 1;

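	/*
	 * A single-segment packet is handed to the hardware by its DMA
	 * address alone; a multi-segment packet needs a gather list, taken
	 * from the per-queue free list and filled in below.
	 */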
	if (nsegs == 1) {
		cmdsetup.s.u.datasize = segs[0].ds_len;
		lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		dptr = segs[0].ds_addr;
		ndata.cmd.cmd3.dptr = dptr;
		ndata.reqtype = LIO_REQTYPE_NORESP_NET;

	} else {
		struct lio_gather	*g;
		int	i;

		mtx_lock(&lio->glist_lock[iq_no]);
		g = (struct lio_gather *)
			lio_delete_first_node(&lio->ghead[iq_no]);
		mtx_unlock(&lio->glist_lock[iq_no]);

		if (g == NULL) {
			lio_dev_err(oct,
				    "Transmit scatter gather: glist null!\n");
			goto retry;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = nsegs;
		lio_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		bzero(g->sg, g->sg_size);

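		/*
		 * Each gather-list entry holds four pointers plus their
		 * packed sizes, so i >> 2 selects the entry and i & 3 the
		 * slot within it.
		 */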
		i = 0;
		while (nsegs--) {
			g->sg[(i >> 2)].ptr[(i & 3)] = segs[i].ds_addr;
			lio_add_sg_size(&g->sg[(i >> 2)], segs[i].ds_len,
					(i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd3.dptr = dptr;
		finfo->g = g;

		ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
	tx_info = (union lio_tx_info *)&ndata.cmd.cmd3.ossp[0];

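	/*
	 * For TSO, pass the MSS and the expected segment count to the
	 * firmware in the per-packet metadata words.
	 */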
	if (m_head->m_pkthdr.csum_flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
		tx_info->s.gso_size = m_head->m_pkthdr.tso_segsz;
		tx_info->s.gso_segs = howmany(m_head->m_pkthdr.len,
					      m_head->m_pkthdr.tso_segsz);
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		irh->priority = m_head->m_pkthdr.ether_vtag >> 13;
		irh->vlan = m_head->m_pkthdr.ether_vtag & 0xfff;
	}

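	/*
	 * Post the command to the instruction queue.  If it could not be
	 * posted, take the retry path so the caller requeues the mbuf.
	 */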
	status = lio_send_data_pkt(oct, &ndata);
	if (status == LIO_IQ_SEND_FAILED)
		goto retry;

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;

	stats->tx_tot_bytes += ndata.datasize;

	return (0);

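/* The mbuf is not freed on this path; ownership stays with the caller. */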
retry:
	return (ENOBUFS);

drop_packet:
	stats->tx_dropped++;
	lio_dev_err(oct, "IQ%d Transmit dropped: %llu\n", iq_no,
		    LIO_CAST64(stats->tx_dropped));

	m_freem(*m_headp);
	*m_headp = NULL;

	return (status);
}

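/*
 * Drain the buf_ring of the given instruction queue, transmitting frames
 * until the ring is empty, a transmit fails, or the link goes down.  The
 * caller is expected to hold iq->enq_lock.
 */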
int
lio_mq_start_locked(if_t ifp, struct lio_instr_queue *iq)
{
	struct lio	*lio = if_getsoftc(ifp);
	struct mbuf	*next;
	int		err = 0;

	if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
	    (!lio->linfo.link.s.link_up))
		return (ENETDOWN);

	/* Process the queue */
	while ((next = drbr_peek(ifp, iq->br)) != NULL) {
		err = lio_xmit(lio, iq, &next);
		if (err) {
			if (next == NULL)
				drbr_advance(ifp, iq->br);
			else
				drbr_putback(ifp, iq->br, next);
			break;
		}
		drbr_advance(ifp, iq->br);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if (((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
		    (!lio->linfo.link.s.link_up))
			break;
	}

	return (err);
}

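/*
 * if_transmit handler.  Select an instruction queue from the mbuf's RSS
 * bucket or flow ID (falling back to the current CPU), enqueue the frame
 * on that queue's buf_ring, and drain the ring if the queue lock is
 * uncontested.
 */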
int
lio_mq_start(if_t ifp, struct mbuf *m)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_instr_queue	*iq;
	int	err = 0, i;
#ifdef RSS
	uint32_t	bucket_id;
#endif

	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid, M_HASHTYPE_GET(m),
				    &bucket_id) == 0) {
			i = bucket_id % oct->num_iqs;
			if (bucket_id >= oct->num_iqs)
				lio_dev_dbg(oct,
					    "bucket_id (%d) >= num_iqs (%d)\n",
					    bucket_id, oct->num_iqs);
		} else
#endif
			i = m->m_pkthdr.flowid % oct->num_iqs;
	} else
		i = curcpu % oct->num_iqs;

	iq = oct->instr_queue[i];

	err = drbr_enqueue(ifp, iq->br, m);
	if (err)
		return (err);

	if (mtx_trylock(&iq->enq_lock)) {
		lio_mq_start_locked(ifp, iq);
		mtx_unlock(&iq->enq_lock);
	}

	return (err);
}

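/*
 * if_qflush handler.  Free every frame still queued on the active
 * instruction queues' buf_rings, then flush the interface send queue.
 */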
void
lio_qflush(if_t ifp)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_instr_queue	*iq;
	struct mbuf		*m;
	int	i;

	for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;

		iq = oct->instr_queue[i];

		mtx_lock(&iq->enq_lock);
		while ((m = buf_ring_dequeue_sc(iq->br)) != NULL)
			m_freem(m);

		mtx_unlock(&iq->enq_lock);
	}

	if_qflush(ifp);
}