/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_isr.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp);
static void qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp);

/*
 * Name: qla_rx_intr
 * Function: Handles normal and jumbo ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
	struct lro_ctrl *lro)
{
	uint32_t idx, length, status, ring;
	qla_rx_buf_t *rxb;
	struct mbuf *mp;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;

	sdsp = &ha->hw.sds[sds_idx];

	ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
	idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
	length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
	status = (uint32_t)Q8_STAT_DESC_STATUS(data);

	if (ring == 0) {
		if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
			device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
				" len[0x%08x] invalid\n",
				__func__, ring, idx, length);
			return;
		}
	} else {
		if ((idx >= NUM_RX_JUMBO_DESCRIPTORS) ||
		    (length > MJUM9BYTES)) {
			device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
				" len[0x%08x] invalid\n",
				__func__, ring, idx, length);
			return;
		}
	}

	if (ring == 0)
		rxb = &ha->rx_buf[idx];
	else
		rxb = &ha->rx_jbuf[idx];

	QL_ASSERT((rxb != NULL),
		("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",
		 __func__, ring, idx, sds_idx));

	mp = rxb->m_head;

	QL_ASSERT((mp != NULL),
		("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",
		 __func__, ring, idx, rxb, sds_idx));

	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

	if (ring == 0) {
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;
	} else {
		rxb->m_head = NULL;
		rxb->next = sdsp->rxjb_free;
		sdsp->rxjb_free = rxb;
		sdsp->rxj_free++;
	}

	mp->m_len = length;
	mp->m_pkthdr.len = length;
	mp->m_pkthdr.rcvif = ifp;

	eh = mtod(mp, struct ether_vlan_header *);

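	/*
	 * If the frame carries an 802.1Q tag, record the tag in the mbuf
	 * header, shift the DA/SA forward four bytes over the in-line tag,
	 * and trim the encapsulation so the stack sees an untagged frame.
	 */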
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mp->m_flags |= M_VLANTAG;

		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mp, ETHER_VLAN_ENCAP_LEN);
	}

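	/* Propagate the hardware checksum verdict into the mbuf. */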
	if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
	} else {
		mp->m_pkthdr.csum_flags = 0;
	}

#if defined(INET) || defined(INET6)
	if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
		/* LRO packet has been successfully queued */
	} else
#endif
	{
		(*ifp->if_input)(ifp, mp);
	}

	if (sdsp->rx_free > std_replenish)
		qla_replenish_normal_rx(ha, sdsp);

	if (sdsp->rxj_free > jumbo_replenish)
		qla_replenish_jumbo_rx(ha, sdsp);

	return;
}

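/*
 * Name: qla_replenish_jumbo_rx
 * Function: Returns free jumbo receive buffers to the hardware. Uses
 *	mtx_trylock() so a concurrent caller simply defers to the lock holder.
 */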
static void
qla_replenish_jumbo_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
	qla_rx_buf_t *rxb;
	int count = jumbo_replenish;
	uint32_t rxj_next;

	if (!mtx_trylock(&ha->rxj_lock))
		return;

	rxj_next = ha->hw.rxj_next;

	while (count--) {
		rxb = sdsp->rxjb_free;

		if (rxb == NULL)
			break;

		sdsp->rxjb_free = rxb->next;
		sdsp->rxj_free--;

		if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_JUMBO) == 0) {
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO,
				ha->hw.rxj_in, rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
			ha->hw.rxj_in++;
			if (ha->hw.rxj_in == NUM_RX_JUMBO_DESCRIPTORS)
				ha->hw.rxj_in = 0;
			ha->hw.rxj_next++;
			if (ha->hw.rxj_next == NUM_RX_JUMBO_DESCRIPTORS)
				ha->hw.rxj_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [1,(%d),(%d)] failed\n",
				__func__, ha->hw.rxj_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxjb_free;
			sdsp->rxjb_free = rxb;
			sdsp->rxj_free++;

			break;
		}
	}

	if (rxj_next != ha->hw.rxj_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, 1, ha->hw.rxj_next);
	}
	mtx_unlock(&ha->rxj_lock);
}

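/*
 * Name: qla_replenish_normal_rx
 * Function: Returns free standard receive buffers to the hardware. Uses
 *	mtx_trylock() so a concurrent caller simply defers to the lock holder.
 */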
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp)
{
	qla_rx_buf_t *rxb;
	int count = std_replenish;
	uint32_t rx_next;

	if (!mtx_trylock(&ha->rx_lock))
		return;

	rx_next = ha->hw.rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (qla_get_mbuf(ha, rxb, NULL, RDS_RING_INDEX_NORMAL) == 0) {
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL,
				ha->hw.rx_in, rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
			ha->hw.rx_in++;
			if (ha->hw.rx_in == NUM_RX_DESCRIPTORS)
				ha->hw.rx_in = 0;
			ha->hw.rx_next++;
			if (ha->hw.rx_next == NUM_RX_DESCRIPTORS)
				ha->hw.rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [0,(%d),(%d)] failed\n",
				__func__, ha->hw.rx_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
	}

	if (rx_next != ha->hw.rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, 0, ha->hw.rx_next);
	}
	mtx_unlock(&ha->rx_lock);
}

/*
 * Name: qla_rcv_isr
 * Function: Processes receive completions for one status descriptor ring
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, desc_count;
	q80_stat_desc_t *sdesc;
	struct lro_ctrl *lro;
	uint32_t ret = 0;

	dev = ha->pci_dev;
	hw = &ha->hw;

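	/*
	 * Mark this ring busy so other contexts can observe that receive
	 * processing is in flight; bail out if receives are being stopped.
	 */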
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;
	lro = &hw->sds[sds_idx].lro;

	while (count--) {
		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
			Q8_STAT_DESC_OWNER_HOST) {
			QL_DPRINT2((dev, "%s:  data %p sdsr_next 0x%08x\n",
				__func__, (void *)sdesc->data[0], comp_idx));
			break;
		}

		desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));

		switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {
		case Q8_STAT_DESC_OPCODE_RCV_PKT:
		case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
			qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

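		/*
		 * Return the consumed status descriptor(s) to firmware
		 * ownership and advance the completion index past them.
		 */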
		while (desc_count--) {
			sdesc->data[0] =
				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}
	}

#if defined(INET) || defined(INET6)
	tcp_lro_flush_all(lro);
#endif

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
	}
	hw->sds[sds_idx].sdsr_next = comp_idx;

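	/*
	 * If the next descriptor on ring 0 is already host-owned, more work
	 * is pending; return nonzero so the caller reschedules processing.
	 */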
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
					Q8_STAT_DESC_OWNER_HOST)) {
		ret = -1;
	}

	hw->sds[sds_idx].rcv_active = 0;
	return (ret);
}

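/*
 * Name: qla_isr
 * Function: Main Interrupt Service Routine; invoked once per SDS ring
 *	interrupt vector.
 */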
void
qla_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t sds_idx;
	uint32_t ret;

	ha = ivec->ha;
	sds_idx = ivec->irq_rid - 1;

	if (sds_idx >= ha->hw.num_sds_rings) {
		device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n", __func__,
			sds_idx);

		return;
	}

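	/*
	 * Ring 0 also carries transmit completions, so kick the transmit
	 * task both before and after receive processing.
	 */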
	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	if (ret) {
		taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
			&ha->irq_vec[sds_idx].rcv_task);
	} else {
		QL_ENABLE_INTERRUPTS(ha, sds_idx);
	}
}

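/*
 * Name: qla_rcv
 * Function: Deferred receive task; drains the status ring until
 *	qla_rcv_isr() reports no further work, then re-enables interrupts.
 */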
void
qla_rcv(void *context, int pending)
{
	qla_ivec_t *ivec = context;
	qla_host_t *ha;
	qla_hw_t *hw;
	uint32_t sds_idx;
	uint32_t ret;
	struct ifnet *ifp;

	ha = ivec->ha;
	hw = &ha->hw;
	sds_idx = ivec->irq_rid - 1;
	ifp = ha->ifp;

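	/*
	 * On ring 0, requeue the transmit task whenever completions are
	 * outstanding or frames are still queued for transmit.
	 */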
	do {
		if (sds_idx == 0) {
			if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			} else if ((ifp->if_snd.ifq_head != NULL) &&
					QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
		}
		ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
	} while (ret);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	QL_ENABLE_INTERRUPTS(ha, sds_idx);
}
413