xref: /freebsd/sys/dev/qlxgbe/ql_isr.c (revision ae41709ab46305df80f7f35bb478a3c8ebf22ebb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013-2016 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * File: ql_isr.c
32  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "ql_os.h"
39 #include "ql_hw.h"
40 #include "ql_def.h"
41 #include "ql_inline.h"
42 #include "ql_ver.h"
43 #include "ql_glbl.h"
44 #include "ql_dbg.h"
45 
46 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
47 		uint32_t r_idx);
48 
/*
 * Fatal receive-path error handler: latch the stop_rcv flag so all
 * receive processing ceases, then initiate adapter recovery.
 */
static void
qla_rcv_error(qla_host_t *ha)
{
	ha->stop_rcv = 1;
	QL_INITIATE_RECOVERY(ha);
}
55 
56 /*
57  * Name: qla_rx_intr
58  * Function: Handles normal ethernet frames received
59  */
60 static void
61 qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
62 {
63 	qla_rx_buf_t		*rxb;
64 	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
65 	struct ifnet		*ifp = ha->ifp;
66 	qla_sds_t		*sdsp;
67 	struct ether_vlan_header *eh;
68 	uint32_t		i, rem_len = 0;
69 	uint32_t		r_idx = 0;
70 	qla_rx_ring_t		*rx_ring;
71 	struct lro_ctrl		*lro;
72 
73 	lro = &ha->hw.sds[sds_idx].lro;
74 
75 	if (ha->hw.num_rds_rings > 1)
76 		r_idx = sds_idx;
77 
78 	ha->hw.rds[r_idx].count++;
79 
80 	sdsp = &ha->hw.sds[sds_idx];
81 	rx_ring = &ha->rx_ring[r_idx];
82 
83 	for (i = 0; i < sgc->num_handles; i++) {
84 		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
85 
86 		QL_ASSERT(ha, (rxb != NULL),
87 			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
88 			sds_idx));
89 
90 		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
91 			/* log the error */
92 			device_printf(ha->pci_dev,
93 				"%s invalid rxb[%d, %d, 0x%04x]\n",
94 				__func__, sds_idx, i, sgc->handle[i]);
95 			qla_rcv_error(ha);
96 			return;
97 		}
98 
99 		mp = rxb->m_head;
100 		if (i == 0)
101 			mpf = mp;
102 
103 		QL_ASSERT(ha, (mp != NULL),
104 			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
105 			sds_idx));
106 
107 		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
108 
109 		rxb->m_head = NULL;
110 		rxb->next = sdsp->rxb_free;
111 		sdsp->rxb_free = rxb;
112 		sdsp->rx_free++;
113 
114 		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
115 			/* log the error */
116 			device_printf(ha->pci_dev,
117 				"%s mp  == NULL [%d, %d, 0x%04x]\n",
118 				__func__, sds_idx, i, sgc->handle[i]);
119 			qla_rcv_error(ha);
120 			return;
121 		}
122 
123 		if (i == 0) {
124 			mpl = mpf = mp;
125 			mp->m_flags |= M_PKTHDR;
126 			mp->m_pkthdr.len = sgc->pkt_length;
127 			mp->m_pkthdr.rcvif = ifp;
128 			rem_len = mp->m_pkthdr.len;
129 		} else {
130 			mp->m_flags &= ~M_PKTHDR;
131 			mpl->m_next = mp;
132 			mpl = mp;
133 			rem_len = rem_len - mp->m_len;
134 		}
135 	}
136 
137 	mpl->m_len = rem_len;
138 
139 	eh = mtod(mpf, struct ether_vlan_header *);
140 
141 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
142 		uint32_t *data = (uint32_t *)eh;
143 
144 		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
145 		mpf->m_flags |= M_VLANTAG;
146 
147 		*(data + 3) = *(data + 2);
148 		*(data + 2) = *(data + 1);
149 		*(data + 1) = *data;
150 
151 		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
152 	}
153 
154 	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
155 		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
156 			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
157 		mpf->m_pkthdr.csum_data = 0xFFFF;
158 	} else {
159 		mpf->m_pkthdr.csum_flags = 0;
160 	}
161 
162 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
163 
164 	mpf->m_pkthdr.flowid = sgc->rss_hash;
165 
166 #if __FreeBSD_version >= 1100000
167 	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
168 #else
169 #if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
170         M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
171 #else
172         M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
173 #endif
174 #endif /* #if __FreeBSD_version >= 1100000 */
175 
176 	if (ha->hw.enable_soft_lro) {
177 #if (__FreeBSD_version >= 1100101)
178 
179 		tcp_lro_queue_mbuf(lro, mpf);
180 
181 #else
182 		if (tcp_lro_rx(lro, mpf, 0))
183 			(*ifp->if_input)(ifp, mpf);
184 
185 #endif /* #if (__FreeBSD_version >= 1100101) */
186 
187 	} else {
188 		(*ifp->if_input)(ifp, mpf);
189 	}
190 
191 	if (sdsp->rx_free > ha->std_replenish)
192 		qla_replenish_normal_rx(ha, sdsp, r_idx);
193 
194 	return;
195 }
196 
197 #define QLA_TCP_HDR_SIZE        20
198 #define QLA_TCP_TS_OPTION_SIZE  12
199 
/*
 * Name: qla_lro_intr
 * Function: Handles normal ethernet frames received
 *
 * Handles a hardware-LRO completion: reconstructs the coalesced TCP
 * segment from the scatter/gather handles, rewrites the IP/IPv6 total
 * length to match the coalesced payload, and passes the packet up.
 * Returns 0 in all cases (errors trigger qla_rcv_error() recovery).
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* with multiple RDS rings, the RDS index tracks the SDS index */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->hw.rds[r_idx].lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/*
	 * Total packet length = L2..L4 headers + payload + the TCP
	 * header itself (plus timestamp option when present).
	 */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->hw.rds[r_idx].lro_bytes += pkt_length;

	/* chain the receive buffers into one mbuf chain */
	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* recycle the buffer onto the SDS free list */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* head of the chain carries the packet header */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* the last segment holds whatever length is left over */
	mpl->m_len = rem_len;

	/* locate the TCP header before any leading bytes are trimmed */
	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* drop any bytes preceding the ethernet header */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/*
		 * Shift the ethernet header 4 bytes forward in place to
		 * drop the VLAN tag, then trim the leading bytes.
		 */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		/*
		 * Rewrite ip_len to cover the coalesced segment:
		 * IP header + TCP header + aggregated payload.
		 */
		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

                ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		/* ip6_plen excludes the IPv6 header itself */
		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* neither IPv4 nor IPv6: drop the frame */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* hardware validated the checksums on LRO completions */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
368 
369 static int
370 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
371 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
372 {
373 	uint32_t i;
374 	uint16_t num_handles;
375 	q80_stat_desc_t *sdesc;
376 	uint32_t opcode;
377 
378 	*nhandles = 0;
379 	dcount--;
380 
381 	for (i = 0; i < dcount; i++) {
382 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
383 		sdesc = (q80_stat_desc_t *)
384 				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
385 
386 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
387 
388 		if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) {
389 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
390 				__func__, (void *)sdesc->data[0],
391 				(void *)sdesc->data[1]);
392 			return -1;
393 		}
394 
395 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
396 		if (!num_handles) {
397 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
398 				__func__, (void *)sdesc->data[0],
399 				(void *)sdesc->data[1]);
400 			return -1;
401 		}
402 
403 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
404 			num_handles = -1;
405 
406 		switch (num_handles) {
407 		case 1:
408 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
409 			break;
410 
411 		case 2:
412 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
413 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
414 			break;
415 
416 		case 3:
417 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
418 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
419 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
420 			break;
421 
422 		case 4:
423 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
424 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
425 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
427 			break;
428 
429 		case 5:
430 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
431 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
432 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
433 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
434 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
435 			break;
436 
437 		case 6:
438 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
439 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
440 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
441 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
442 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
443 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
444 			break;
445 
446 		case 7:
447 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
448 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
449 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
450 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
451 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
452 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
453 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
454 			break;
455 
456 		default:
457 			device_printf(ha->pci_dev,
458 				"%s: invalid num handles %p %p\n",
459 				__func__, (void *)sdesc->data[0],
460 				(void *)sdesc->data[1]);
461 
462 			QL_ASSERT(ha, (0),\
463 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
464 			__func__, "invalid num handles", sds_idx, num_handles,
465 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
466 
467 			qla_rcv_error(ha);
468 			return 0;
469 		}
470 		*nhandles = *nhandles + num_handles;
471 	}
472 	return 0;
473 }
474 
/*
 * Name: ql_rcv_isr
 * Function: Main Interrupt Service Routine
 *
 * Processes up to 'count' status descriptors on SDS ring 'sds_idx',
 * dispatching each completion to qla_rx_intr()/qla_lro_intr(), then
 * clears the consumed descriptors and advances the consumer index.
 * Returns non-zero if another valid descriptor is already pending
 * (i.e. the caller should re-run the ISR).
 */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	/* mark the ring busy; bail out early if receive is being stopped */
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->stop_rcv) {
		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		/* opcode 0 means the hardware has not written this slot yet */
		if (!opcode)
			break;

		switch (opcode) {
		case Q8_STAT_DESC_OPCODE_RCV_PKT:

			/* single-descriptor (single-buffer) receive */
			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:

			/* multi-buffer receive; may span continuation descs */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				/*
				 * Verify that the last descriptor of the
				 * group has been written (its opcode must
				 * be CONT) before consuming the group.
				 */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			/* pull remaining handles from continuation descs */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:

			/* hardware-coalesced (LRO) completion */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				/*
				 * As above: make sure the whole descriptor
				 * group is present before consuming it.
				 */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			/* pull remaining handles from continuation descs */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			desc_count = 0;
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0 signals an incomplete/invalid group */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;

		/* zero out the consumed descriptors and advance comp_idx */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* periodically publish the consumer index to hardware */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	/* flush anything queued in software LRO */
	if (ha->hw.enable_soft_lro) {
		struct lro_ctrl		*lro;

		lro = &ha->hw.sds[sds_idx].lro;

#if (__FreeBSD_version >= 1100101)

		tcp_lro_flush_all(lro);

#else
		struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}

#endif /* #if (__FreeBSD_version >= 1100101) */
	}

	if (ha->stop_rcv)
		goto ql_rcv_isr_exit;

	/* publish the final consumer index, or replenish if idle */
	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* non-zero return asks the caller to re-run: more work pending */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
776 
777 void
778 ql_mbx_isr(void *arg)
779 {
780 	qla_host_t *ha;
781 	uint32_t data;
782 	uint32_t prev_link_state;
783 
784 	ha = arg;
785 
786 	if (ha == NULL) {
787 		printf("%s: arg == NULL\n", __func__);
788 		return;
789 	}
790 
791 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
792 	if ((data & 0x3) != 0x1) {
793 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
794 		return;
795 	}
796 
797 	data = READ_REG32(ha, Q8_FW_MBOX0);
798 
799 	if ((data & 0xF000) != 0x8000)
800 		return;
801 
802 	data = data & 0xFFFF;
803 
804 	switch (data) {
805 	case 0x8001:  /* It's an AEN */
806 
807 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
808 
809 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
810 		ha->hw.cable_length = data & 0xFFFF;
811 
812 		data = data >> 16;
813 		ha->hw.link_speed = data & 0xFFF;
814 
815 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
816 
817 		prev_link_state =  ha->hw.link_up;
818 
819 		data = (((data & 0xFF) == 0) ? 0 : 1);
820 		atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data);
821 
822 		device_printf(ha->pci_dev,
823 			"%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n",
824 			__func__, data, prev_link_state);
825 
826 		if (prev_link_state !=  ha->hw.link_up) {
827 			if (ha->hw.link_up)
828 				if_link_state_change(ha->ifp, LINK_STATE_UP);
829 			else
830 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
831 		}
832 
833 		ha->hw.module_type = ((data >> 8) & 0xFF);
834 		ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
835 		ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
836 
837 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
838 		ha->hw.loopback_mode = data & 0x03;
839 
840 		ha->hw.link_faults = (data >> 3) & 0xFF;
841 
842 		break;
843 
844         case 0x8100:
845 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
846 		ha->hw.imd_compl=1;
847 		break;
848 
849         case 0x8101:
850                 ha->async_event = 1;
851                 ha->hw.aen_mb0 = 0x8101;
852                 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
853                 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
854                 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
855                 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
856 		device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0%08x 0x%08x]\n",
857 			__func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2,
858 			ha->hw.aen_mb3, ha->hw.aen_mb4);
859                 break;
860 
861         case 0x8110:
862                 /* for now just dump the registers */
863                 {
864                         uint32_t ombx[5];
865 
866                         ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
867                         ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
868                         ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
869                         ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
870                         ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
871 
872                         device_printf(ha->pci_dev, "%s: "
873                                 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
874                                 __func__, data, ombx[0], ombx[1], ombx[2],
875                                 ombx[3], ombx[4]);
876                 }
877 
878                 break;
879 
880         case 0x8130:
881                 /* sfp insertion aen */
882                 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
883                         __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
884                 break;
885 
886         case 0x8131:
887                 /* sfp removal aen */
888                 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
889                 break;
890 
891 	case 0x8140:
892 		{
893 			uint32_t ombx[3];
894 
895 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
896 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
897 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
898 
899 			device_printf(ha->pci_dev, "%s: "
900 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
901 				__func__, data, ombx[0], ombx[1], ombx[2]);
902 		}
903 		break;
904 
905 	default:
906 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
907 		break;
908 	}
909 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
910 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
911 	return;
912 }
913 
/*
 * Re-arms the RDS ring @r_idx with fresh mbufs taken from the SDS
 * free-buffer list.  The hardware producer index is published every
 * rds_pidx_thres buffers and once more at the end if anything was
 * posted.
 */
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	/* remember where we started to detect unpublished progress below */
	rx_next = rdesc->rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			/* post the new buffer to the hardware descriptor */
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			/* no mbuf available: put the buffer back and stop */
			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		/* publish the producer index every replenish_thresh buffers */
		if (replenish_thresh-- == 0) {
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	/* publish any remaining progress not yet seen by the hardware */
	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}
973 
974 void
975 ql_isr(void *arg)
976 {
977 	qla_ivec_t *ivec = arg;
978 	qla_host_t *ha ;
979 	int idx;
980 	qla_hw_t *hw;
981 	struct ifnet *ifp;
982 	qla_tx_fp_t *fp;
983 
984 	ha = ivec->ha;
985 	hw = &ha->hw;
986 	ifp = ha->ifp;
987 
988 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
989 		return;
990 
991 	fp = &ha->tx_fp[idx];
992 	hw->sds[idx].intr_count++;
993 
994 	if ((fp->fp_taskqueue != NULL) &&
995 		(ifp->if_drv_flags & IFF_DRV_RUNNING))
996 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
997 
998 	return;
999 }
1000