xref: /freebsd/sys/dev/qlxgbe/ql_isr.c (revision 4530e0c3e78d0616367d37273d6c1f47f627839b)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2013-2016 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * File: ql_isr.c
32  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include "ql_os.h"
39 #include "ql_hw.h"
40 #include "ql_def.h"
41 #include "ql_inline.h"
42 #include "ql_ver.h"
43 #include "ql_glbl.h"
44 #include "ql_dbg.h"
45 
46 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
47 		uint32_t r_idx);
48 
static void
qla_rcv_error(qla_host_t *ha)
{
	/*
	 * Fatal receive-path error: stop all further receive processing
	 * first, then initiate adapter recovery.  The ordering matters —
	 * stop_rcv gates the ISR loops before recovery tears state down.
	 */
	ha->stop_rcv = 1;
	QL_INITIATE_RECOVERY(ha);
}
55 
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 *
 * Reclaims the receive buffer(s) referenced by the completion's handle
 * list, chains the corresponding mbufs into a single packet, strips any
 * VLAN encapsulation present in the frame, and hands the packet to the
 * stack (or queues it for software LRO when enabled).  Any inconsistency
 * in the completion shuts the receive path down via qla_rcv_error().
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet		*ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;
#if defined(INET) || defined(INET6)
	struct lro_ctrl		*lro;

	lro = &ha->hw.sds[sds_idx].lro;
#endif

	/* with multiple RDS rings, status ring N services receive ring N */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	for (i = 0; i < sgc->num_handles; i++) {
		/* the low 15 bits of a handle index the rx buffer array */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* recycle the receive buffer onto this status ring's free list */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			/* head of the packet: initialize the pkthdr */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation buffer: append to the mbuf chain */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* the last mbuf in the chain carries whatever length remains */
	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/*
		 * Strip the 802.1Q header in place: slide the 12 bytes of
		 * MAC addresses forward over the 4-byte tag, then trim the
		 * now-duplicated leading 4 bytes off the mbuf.
		 */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		/* hardware validated both the IP and L4 checksums */
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	/* RSS hash from the status descriptor; hash type is opaque to us */
	mpf->m_pkthdr.flowid = sgc->rss_hash;

	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);

#if defined(INET) || defined(INET6)
	if (ha->hw.enable_soft_lro)
		tcp_lro_queue_mbuf(lro, mpf);
	else
#endif
		(*ifp->if_input)(ifp, mpf);

	/* top up the hardware receive ring if enough buffers were reclaimed */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
182 
#define QLA_TCP_HDR_SIZE        20
#define QLA_TCP_TS_OPTION_SIZE  12

/*
 * Name: qla_lro_intr
 * Function: Handles hardware LRO completions (aggregated TCP segments).
 *	NOTE(review): the original header said "normal ethernet frames",
 *	which appears to be a copy-paste from qla_rx_intr.
 *
 * Reassembles the aggregated payload from the handle list, fixes up the
 * TCP/IP headers (length fields, PUSH bit) to describe the coalesced
 * packet, and passes it to the stack.
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* with multiple RDS rings, status ring N services receive ring N */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->hw.rds[r_idx].lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/*
	 * Total packet length = headers up to L4 + TCP header (with or
	 * without the 12-byte timestamp option) + aggregated payload.
	 */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->hw.rds[r_idx].lro_bytes += pkt_length;

	for (i = 0; i < sgc->num_handles; i++) {
		/* the low 15 bits of a handle index the rx buffer array */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* recycle the receive buffer onto this status ring's free list */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* head of the packet: initialize the pkthdr */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation buffer: append to the mbuf chain */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* the last mbuf in the chain carries whatever length remains */
	mpl->m_len = rem_len;

	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	/* reflect the PUSH bit the hardware observed during aggregation */
	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* drop any hardware-prepended bytes ahead of the ethernet header */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/*
		 * Strip the 802.1Q header in place: slide the 12 bytes of
		 * MAC addresses forward over the 4-byte tag, then trim the
		 * now-duplicated leading 4 bytes off the mbuf.
		 */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		/* rewrite ip_len to cover the coalesced packet */
		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

                ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		/* ip6_plen excludes the fixed IPv6 header */
		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* neither IPv4 nor IPv6: drop the frame */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* hardware validated the checksums of the aggregated segments */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	/* top up the hardware receive ring if enough buffers were reclaimed */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
354 
355 static int
356 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
357 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
358 {
359 	uint32_t i;
360 	uint16_t num_handles;
361 	q80_stat_desc_t *sdesc;
362 	uint32_t opcode;
363 
364 	*nhandles = 0;
365 	dcount--;
366 
367 	for (i = 0; i < dcount; i++) {
368 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
369 		sdesc = (q80_stat_desc_t *)
370 				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
371 
372 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
373 
374 		if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) {
375 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
376 				__func__, (void *)sdesc->data[0],
377 				(void *)sdesc->data[1]);
378 			return -1;
379 		}
380 
381 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
382 		if (!num_handles) {
383 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
384 				__func__, (void *)sdesc->data[0],
385 				(void *)sdesc->data[1]);
386 			return -1;
387 		}
388 
389 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
390 			num_handles = -1;
391 
392 		switch (num_handles) {
393 		case 1:
394 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
395 			break;
396 
397 		case 2:
398 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
399 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
400 			break;
401 
402 		case 3:
403 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
404 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
405 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
406 			break;
407 
408 		case 4:
409 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
410 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
411 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
412 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
413 			break;
414 
415 		case 5:
416 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
417 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
418 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
419 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
420 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
421 			break;
422 
423 		case 6:
424 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
425 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
427 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
428 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
429 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
430 			break;
431 
432 		case 7:
433 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
434 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
435 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
436 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
437 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
438 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
439 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
440 			break;
441 
442 		default:
443 			device_printf(ha->pci_dev,
444 				"%s: invalid num handles %p %p\n",
445 				__func__, (void *)sdesc->data[0],
446 				(void *)sdesc->data[1]);
447 
448 			QL_ASSERT(ha, (0),\
449 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
450 			__func__, "invalid num handles", sds_idx, num_handles,
451 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
452 
453 			qla_rcv_error(ha);
454 			return 0;
455 		}
456 		*nhandles = *nhandles + num_handles;
457 	}
458 	return 0;
459 }
460 
/*
 * Name: ql_rcv_isr
 * Function: Main Interrupt Service Routine
 *
 * Walks up to "count" status descriptors on status ring sds_idx,
 * dispatching each completion (plain receive, scatter/gather receive,
 * or hardware LRO) to the appropriate handler, zeroing consumed
 * descriptors and periodically advancing the hardware consumer index.
 * Returns non-zero if more work remained when the budget was exhausted.
 */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	/* mark this ring busy; recovery paths poll rcv_active */
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->stop_rcv) {
		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		/* opcode 0 means the hardware has not written this slot yet */
		if (!opcode)
			break;

		switch (opcode) {
		case Q8_STAT_DESC_OPCODE_RCV_PKT:

			/* single-descriptor, single-buffer receive */
			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:

			/* multi-buffer receive spanning desc_count descriptors */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				/*
				 * Verify the last descriptor of the burst is a
				 * continuation before touching the handles;
				 * otherwise abort (desc_count = 0 exits below).
				 */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			/* gather the remaining handles from the continuations */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:

			/* hardware LRO completion spanning desc_count descriptors */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				/* same continuation sanity check as SGL_RCV */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			/* gather the remaining handles from the continuations */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				/* diagnostic dump only; processing continues */
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			desc_count = 0;
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0 signals a malformed completion: stop here */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;

		/* zero the consumed descriptors so opcode reads 0 next pass */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* batch consumer-index updates to limit register writes */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

#if defined(INET) || defined(INET6)
	if (ha->hw.enable_soft_lro) {
		struct lro_ctrl		*lro;

		lro = &ha->hw.sds[sds_idx].lro;
		tcp_lro_flush_all(lro);
	}
#endif

	if (ha->stop_rcv)
		goto ql_rcv_isr_exit;

	/* publish any consumer-index progress not yet written above */
	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		/* no progress this pass: opportunistically replenish */
		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* non-zero return tells the caller more completions are pending */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
750 
751 void
752 ql_mbx_isr(void *arg)
753 {
754 	qla_host_t *ha;
755 	uint32_t data;
756 	uint32_t prev_link_state;
757 
758 	ha = arg;
759 
760 	if (ha == NULL) {
761 		printf("%s: arg == NULL\n", __func__);
762 		return;
763 	}
764 
765 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
766 	if ((data & 0x3) != 0x1) {
767 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
768 		return;
769 	}
770 
771 	data = READ_REG32(ha, Q8_FW_MBOX0);
772 
773 	if ((data & 0xF000) != 0x8000)
774 		return;
775 
776 	data = data & 0xFFFF;
777 
778 	switch (data) {
779 	case 0x8001:  /* It's an AEN */
780 
781 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
782 
783 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
784 		ha->hw.cable_length = data & 0xFFFF;
785 
786 		data = data >> 16;
787 		ha->hw.link_speed = data & 0xFFF;
788 
789 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
790 
791 		prev_link_state =  ha->hw.link_up;
792 
793 		data = (((data & 0xFF) == 0) ? 0 : 1);
794 		atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data);
795 
796 		device_printf(ha->pci_dev,
797 			"%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n",
798 			__func__, data, prev_link_state);
799 
800 		if (prev_link_state !=  ha->hw.link_up) {
801 			if (ha->hw.link_up)
802 				if_link_state_change(ha->ifp, LINK_STATE_UP);
803 			else
804 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
805 		}
806 
807 		ha->hw.module_type = ((data >> 8) & 0xFF);
808 		ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
809 		ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
810 
811 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
812 		ha->hw.loopback_mode = data & 0x03;
813 
814 		ha->hw.link_faults = (data >> 3) & 0xFF;
815 
816 		break;
817 
818         case 0x8100:
819 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
820 		ha->hw.imd_compl=1;
821 		break;
822 
823         case 0x8101:
824                 ha->async_event = 1;
825                 ha->hw.aen_mb0 = 0x8101;
826                 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
827                 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
828                 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
829                 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
830 		device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0%08x 0x%08x]\n",
831 			__func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2,
832 			ha->hw.aen_mb3, ha->hw.aen_mb4);
833                 break;
834 
835         case 0x8110:
836                 /* for now just dump the registers */
837                 {
838                         uint32_t ombx[5];
839 
840                         ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
841                         ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
842                         ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
843                         ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
844                         ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
845 
846                         device_printf(ha->pci_dev, "%s: "
847                                 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
848                                 __func__, data, ombx[0], ombx[1], ombx[2],
849                                 ombx[3], ombx[4]);
850                 }
851 
852                 break;
853 
854         case 0x8130:
855                 /* sfp insertion aen */
856                 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
857                         __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
858                 break;
859 
860         case 0x8131:
861                 /* sfp removal aen */
862                 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
863                 break;
864 
865 	case 0x8140:
866 		{
867 			uint32_t ombx[3];
868 
869 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
870 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
871 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
872 
873 			device_printf(ha->pci_dev, "%s: "
874 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
875 				__func__, data, ombx[0], ombx[1], ombx[2]);
876 		}
877 		break;
878 
879 	default:
880 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
881 		break;
882 	}
883 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
884 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
885 	return;
886 }
887 
/*
 * Moves reclaimed receive buffers from the status ring's software free
 * list back onto hardware receive ring r_idx, attaching a fresh mbuf to
 * each, and advances the hardware producer index periodically (and once
 * more at the end for any remainder).
 */
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	/* rx_next tracks the position last published to the hardware */
	rx_next = rdesc->rx_next;

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			/* post the refilled buffer to the receive ring */
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			/* no mbuf available: put the buffer back and stop */
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		/* batch producer-index writes every rds_pidx_thres buffers */
		if (replenish_thresh-- == 0) {
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	/* publish any remainder not covered by the batched updates above */
	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}
947 
948 void
949 ql_isr(void *arg)
950 {
951 	qla_ivec_t *ivec = arg;
952 	qla_host_t *ha ;
953 	int idx;
954 	qla_hw_t *hw;
955 	struct ifnet *ifp;
956 	qla_tx_fp_t *fp;
957 
958 	ha = ivec->ha;
959 	hw = &ha->hw;
960 	ifp = ha->ifp;
961 
962 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
963 		return;
964 
965 	fp = &ha->tx_fp[idx];
966 	hw->sds[idx].intr_count++;
967 
968 	if ((fp->fp_taskqueue != NULL) &&
969 		(ifp->if_drv_flags & IFF_DRV_RUNNING))
970 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
971 
972 	return;
973 }
974