/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013-2014 Qlogic Corporation
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qls_isr.c
 * Author: David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qls_os.h"
#include "qls_hw.h"
#include "qls_def.h"
#include "qls_inline.h"
#include "qls_ver.h"
#include "qls_glbl.h"
#include "qls_dbg.h"

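/*
 * qls_tx_comp
 *	Handles one transmit completion entry: frees the DMA mapping and
 *	mbuf chain of the completed transmit buffer and advances the
 *	ring's done index.
 */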
static void
qls_tx_comp(qla_host_t *ha, uint32_t txr_idx, q81_tx_mac_comp_t *tx_comp)
{
	qla_tx_buf_t *txb;
	uint32_t tx_idx = tx_comp->tid_lo;

	/* An out-of-range transmit index indicates a corrupted completion. */
	if (tx_idx >= NUM_TX_DESCRIPTORS) {
		ha->qla_initiate_recovery = 1;
		return;
	}

	txb = &ha->tx_ring[txr_idx].tx_buf[tx_idx];

	if (txb->m_head) {
		if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);
		bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ha->tx_tag, txb->map);
		m_freem(txb->m_head);

		txb->m_head = NULL;
	}

	ha->tx_ring[txr_idx].txr_done++;

	if (ha->tx_ring[txr_idx].txr_done == NUM_TX_DESCRIPTORS)
		ha->tx_ring[txr_idx].txr_done = 0;
}

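/*
 * qls_replenish_rx
 *	Refills the small buffer queue (SBQ) of the given receive ring
 *	with fresh mbufs and posts the updated producer index to the
 *	hardware in batches of 16 entries.
 */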
static void
qls_replenish_rx(qla_host_t *ha, uint32_t r_idx)
{
	qla_rx_buf_t			*rxb;
	qla_rx_ring_t			*rxr;
	int				count;
	volatile q81_bq_addr_e_t	*sbq_e;

	rxr = &ha->rx_ring[r_idx];

	count = rxr->rx_free;
	sbq_e = rxr->sbq_vaddr;

	while (count--) {
		rxb = &rxr->rx_buf[rxr->sbq_next];

		if (rxb->m_head == NULL) {
			if (qls_get_mbuf(ha, rxb, NULL) != 0) {
				device_printf(ha->pci_dev,
					"%s: qls_get_mbuf [0,%d,%d] failed\n",
					__func__, rxr->sbq_next, r_idx);
				rxb->m_head = NULL;
				break;
			}
		}

		if (rxb->m_head != NULL) {
			sbq_e[rxr->sbq_next].addr_lo = (uint32_t)rxb->paddr;
			sbq_e[rxr->sbq_next].addr_hi =
				(uint32_t)(rxb->paddr >> 32);

			rxr->sbq_next++;
			if (rxr->sbq_next == NUM_RX_DESCRIPTORS)
				rxr->sbq_next = 0;

			rxr->sbq_free++;
			rxr->rx_free--;
		}

		/* Post the producer index to hardware 16 buffers at a time. */
		if (rxr->sbq_free == 16) {
			rxr->sbq_in += 16;
			rxr->sbq_in = rxr->sbq_in & (NUM_RX_DESCRIPTORS - 1);
			rxr->sbq_free = 0;

			Q81_WR_SBQ_PROD_IDX(r_idx, (rxr->sbq_in));
		}
	}
}

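/*
 * qls_rx_comp
 *	Processes one receive completion entry: validates it against the
 *	buffer that was posted, strips any 802.1Q tag, sets the RSS hash
 *	and checksum flags on the mbuf, and hands the frame to LRO or
 *	directly to the stack. Returns 0 on success, -1 if the entry is
 *	inconsistent with the posted buffer.
 */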
static int
qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
{
	qla_rx_buf_t	*rxb;
	qla_rx_ring_t	*rxr;
	device_t	dev = ha->pci_dev;
	struct mbuf	*mp = NULL;
	if_t ifp = ha->ifp;
#if defined(INET) || defined(INET6)
	struct lro_ctrl	*lro;
#endif
	struct ether_vlan_header *eh;

	rxr = &ha->rx_ring[rxr_idx];

#if defined(INET) || defined(INET6)
	lro = &rxr->lro;
#endif

	rxb = &rxr->rx_buf[rxr->rx_next];

	if (!(cq_e->flags1 & Q81_RX_FLAGS1_DS)) {
		device_printf(dev, "%s: DS bit not set\n", __func__);
		return (-1);
	}
	if (rxb->paddr != cq_e->b_paddr) {
		device_printf(dev,
			"%s: (rxb->paddr != cq_e->b_paddr)[%p, %p]\n",
			__func__, (void *)rxb->paddr, (void *)cq_e->b_paddr);

		Q81_SET_CQ_INVALID(cq_idx);

		ha->qla_initiate_recovery = 1;

		return (-1);
	}

	rxr->rx_int++;

	if ((cq_e->flags1 & Q81_RX_FLAGS1_ERR_MASK) == 0) {
		mp = rxb->m_head;
		rxb->m_head = NULL;

		if (mp == NULL) {
			device_printf(dev, "%s: mp == NULL\n", __func__);
		} else {
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = cq_e->length;
			mp->m_pkthdr.rcvif = ifp;
			mp->m_len = cq_e->length;

			eh = mtod(mp, struct ether_vlan_header *);

			if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
				uint32_t *data = (uint32_t *)eh;

				mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
				mp->m_flags |= M_VLANTAG;

				/*
				 * Shift the destination/source MAC addresses
				 * (the first 12 bytes) up by 4 bytes so they
				 * overwrite the 802.1Q tag, then trim the
				 * stale 4 bytes from the front of the mbuf.
				 */
				*(data + 3) = *(data + 2);
				*(data + 2) = *(data + 1);
				*(data + 1) = *data;

				m_adj(mp, ETHER_VLAN_ENCAP_LEN);
			}

			if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
				rxr->rss_int++;
				mp->m_pkthdr.flowid = cq_e->rss;
				M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE_HASH);
			}
			if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
				Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
				mp->m_pkthdr.csum_flags = 0;
			} else {
				mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
					CSUM_IP_VALID | CSUM_DATA_VALID |
					CSUM_PSEUDO_HDR;
				mp->m_pkthdr.csum_data = 0xFFFF;
			}
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

#if defined(INET) || defined(INET6)
			if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
				/* LRO packet has been successfully queued */
			} else
#endif
			{
				if_input(ifp, mp);
			}
		}
	} else {
		device_printf(dev, "%s: err [0x%08x]\n", __func__,
			cq_e->flags1);
	}

	rxr->rx_free++;
	rxr->rx_next++;

	if (rxr->rx_next == NUM_RX_DESCRIPTORS)
		rxr->rx_next = 0;

	if ((rxr->rx_free + rxr->sbq_free) >= 16)
		qls_replenish_rx(ha, rxr_idx);

	return (0);
}

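/*
 * qls_cq_isr
 *	Drains the completion queue identified by cq_idx: dispatches each
 *	entry to the transmit/receive completion handlers, kicks the
 *	transmit taskqueue when transmit completions were seen, flushes
 *	LRO and writes back the new consumer index.
 */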
static void
qls_cq_isr(qla_host_t *ha, uint32_t cq_idx)
{
	q81_cq_e_t *cq_e, *cq_b;
	uint32_t i, cq_comp_idx;
	int ret = 0, tx_comp_done = 0;
#if defined(INET) || defined(INET6)
	struct lro_ctrl	*lro = &ha->rx_ring[cq_idx].lro;
#endif

	cq_b = ha->rx_ring[cq_idx].cq_base_vaddr;

	cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);

	i = ha->rx_ring[cq_idx].cq_next;

	while (i != cq_comp_idx) {
		cq_e = &cq_b[i];

		switch (cq_e->opcode) {
		case Q81_IOCB_TX_MAC:
		case Q81_IOCB_TX_TSO:
			qls_tx_comp(ha, cq_idx, (q81_tx_mac_comp_t *)cq_e);
			tx_comp_done++;
			break;

		case Q81_IOCB_RX:
			ret = qls_rx_comp(ha, cq_idx, i, (q81_rx_t *)cq_e);
			break;

		case Q81_IOCB_MPI:
		case Q81_IOCB_SYS:
		default:
			device_printf(ha->pci_dev, "%s[%d %d 0x%x]: illegal\n",
				__func__, i, (*(ha->rx_ring[cq_idx].cqi_vaddr)),
				cq_e->opcode);
			qls_dump_buf32(ha, __func__, cq_e,
				(sizeof (q81_cq_e_t) >> 2));
			break;
		}

		i++;
		if (i == NUM_CQ_ENTRIES)
			i = 0;

		if (ret) {
			break;
		}

		/* Re-read the completion index in case more entries arrived. */
		if (i == cq_comp_idx) {
			cq_comp_idx = *(ha->rx_ring[cq_idx].cqi_vaddr);
		}

		if (tx_comp_done) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			tx_comp_done = 0;
		}
	}

#if defined(INET) || defined(INET6)
	tcp_lro_flush_all(lro);
#endif

	ha->rx_ring[cq_idx].cq_next = cq_comp_idx;

	if (!ret) {
		Q81_WR_CQ_CONS_IDX(cq_idx, (ha->rx_ring[cq_idx].cq_next));
	}
	if (tx_comp_done)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	return;
}

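/*
 * qls_mbx_isr
 *	Services a mailbox interrupt. Mailbox register 0 values of the
 *	form 0x4xxx are command completions, captured into ha->mbox[];
 *	values of the form 0x8xxx are asynchronous events (AENs),
 *	captured into ha->aen[] and decoded for link state changes.
 *	The RISC-to-host interrupt is cleared on exit.
 */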
static void
qls_mbx_isr(qla_host_t *ha)
{
	uint32_t data;
	int i;
	device_t dev = ha->pci_dev;

	if (qls_mbx_rd_reg(ha, 0, &data) == 0) {
		if ((data & 0xF000) == 0x4000) {
			/* Mailbox command completion. */
			ha->mbox[0] = data;
			for (i = 1; i < Q81_NUM_MBX_REGISTERS; i++) {
				if (qls_mbx_rd_reg(ha, i, &data))
					break;
				ha->mbox[i] = data;
			}
			ha->mbx_done = 1;
		} else if ((data & 0xF000) == 0x8000) {
			/* We have an AEN. */

			ha->aen[0] = data;
			for (i = 1; i < Q81_NUM_AEN_REGISTERS; i++) {
				if (qls_mbx_rd_reg(ha, i, &data))
					break;
				ha->aen[i] = data;
			}
			device_printf(dev, "%s: AEN "
				"[0x%08x 0x%08x 0x%08x 0x%08x 0x%08x"
				" 0x%08x 0x%08x 0x%08x 0x%08x]\n",
				__func__,
				ha->aen[0], ha->aen[1], ha->aen[2],
				ha->aen[3], ha->aen[4], ha->aen[5],
				ha->aen[6], ha->aen[7], ha->aen[8]);

			switch ((ha->aen[0] & 0xFFFF)) {
			case 0x8011:	/* link up */
				ha->link_up = 1;
				break;

			case 0x8012:	/* link down */
				ha->link_up = 0;
				break;

			case 0x8130:	/* link hardware info available */
				ha->link_hw_info = ha->aen[1];
				break;

			case 0x8131:	/* link hardware info cleared */
				ha->link_hw_info = 0;
				break;
			}
		}
	}
	WRITE_REG32(ha, Q81_CTL_HOST_CMD_STATUS, Q81_CTL_HCS_CMD_CLR_RTH_INTR);

	return;
}

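/*
 * qls_isr
 *	Per-vector interrupt handler. Checks for a fatal chip error,
 *	services mailbox/AEN events on vector 0, runs the completion
 *	queue associated with this vector and re-enables the interrupt.
 */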
void
qls_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t status;
	uint32_t cq_idx;
	device_t dev;

	ha = ivec->ha;
	cq_idx = ivec->cq_idx;
	dev = ha->pci_dev;

	status = READ_REG32(ha, Q81_CTL_STATUS);

	if (status & Q81_CTL_STATUS_FE) {
		device_printf(dev, "%s fatal error\n", __func__);
		return;
	}

	if ((cq_idx == 0) && (status & Q81_CTL_STATUS_PI)) {
		qls_mbx_isr(ha);
	}

	status = READ_REG32(ha, Q81_CTL_INTR_STATUS1);

	if (status & (0x1 << cq_idx))
		qls_cq_isr(ha, cq_idx);

	Q81_ENABLE_INTR(ha, cq_idx);

	return;
}