xref: /freebsd/sys/dev/qlxgbe/ql_isr.c (revision 396c556d77189a5c474d35cec6f44a762e310b7d)
1 /*
2  * Copyright (c) 2013-2016 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: ql_isr.c
30  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 
45 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
46 		uint32_t r_idx);
47 
/*
 * Name: qla_rcv_error
 * Function: Flags a fatal receive-path error: stops further receive
 *	processing (stop_rcv is checked by ql_rcv_isr) and requests that
 *	the watchdog/recovery path reinitialize the adapter.
 */
static void
qla_rcv_error(qla_host_t *ha)
{
	ha->stop_rcv = 1;
	ha->qla_initiate_recovery = 1;
}
54 
55 
56 /*
57  * Name: qla_rx_intr
58  * Function: Handles normal ethernet frames received
59  */
60 static void
61 qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
62 {
63 	qla_rx_buf_t		*rxb;
64 	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
65 	struct ifnet		*ifp = ha->ifp;
66 	qla_sds_t		*sdsp;
67 	struct ether_vlan_header *eh;
68 	uint32_t		i, rem_len = 0;
69 	uint32_t		r_idx = 0;
70 	qla_rx_ring_t		*rx_ring;
71 	struct lro_ctrl		*lro;
72 
73 	lro = &ha->hw.sds[sds_idx].lro;
74 
75 	if (ha->hw.num_rds_rings > 1)
76 		r_idx = sds_idx;
77 
78 	ha->hw.rds[r_idx].count++;
79 
80 	sdsp = &ha->hw.sds[sds_idx];
81 	rx_ring = &ha->rx_ring[r_idx];
82 
83 	for (i = 0; i < sgc->num_handles; i++) {
84 		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];
85 
86 		QL_ASSERT(ha, (rxb != NULL),
87 			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
88 			sds_idx));
89 
90 		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
91 			/* log the error */
92 			device_printf(ha->pci_dev,
93 				"%s invalid rxb[%d, %d, 0x%04x]\n",
94 				__func__, sds_idx, i, sgc->handle[i]);
95 			qla_rcv_error(ha);
96 			return;
97 		}
98 
99 		mp = rxb->m_head;
100 		if (i == 0)
101 			mpf = mp;
102 
103 		QL_ASSERT(ha, (mp != NULL),
104 			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
105 			sds_idx));
106 
107 		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);
108 
109 		rxb->m_head = NULL;
110 		rxb->next = sdsp->rxb_free;
111 		sdsp->rxb_free = rxb;
112 		sdsp->rx_free++;
113 
114 		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
115 			/* log the error */
116 			device_printf(ha->pci_dev,
117 				"%s mp  == NULL [%d, %d, 0x%04x]\n",
118 				__func__, sds_idx, i, sgc->handle[i]);
119 			qla_rcv_error(ha);
120 			return;
121 		}
122 
123 		if (i == 0) {
124 			mpl = mpf = mp;
125 			mp->m_flags |= M_PKTHDR;
126 			mp->m_pkthdr.len = sgc->pkt_length;
127 			mp->m_pkthdr.rcvif = ifp;
128 			rem_len = mp->m_pkthdr.len;
129 		} else {
130 			mp->m_flags &= ~M_PKTHDR;
131 			mpl->m_next = mp;
132 			mpl = mp;
133 			rem_len = rem_len - mp->m_len;
134 		}
135 	}
136 
137 	mpl->m_len = rem_len;
138 
139 	eh = mtod(mpf, struct ether_vlan_header *);
140 
141 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
142 		uint32_t *data = (uint32_t *)eh;
143 
144 		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
145 		mpf->m_flags |= M_VLANTAG;
146 
147 		*(data + 3) = *(data + 2);
148 		*(data + 2) = *(data + 1);
149 		*(data + 1) = *data;
150 
151 		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
152 	}
153 
154 	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
155 		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
156 			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
157 		mpf->m_pkthdr.csum_data = 0xFFFF;
158 	} else {
159 		mpf->m_pkthdr.csum_flags = 0;
160 	}
161 
162 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
163 
164 	mpf->m_pkthdr.flowid = sgc->rss_hash;
165 
166 #if __FreeBSD_version >= 1100000
167 	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
168 #else
169 #if (__FreeBSD_version >= 903511 && __FreeBSD_version < 1100000)
170         M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
171 #else
172         M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
173 #endif
174 #endif /* #if __FreeBSD_version >= 1100000 */
175 
176 	if (ha->hw.enable_soft_lro) {
177 
178 #if (__FreeBSD_version >= 1100101)
179 
180 		tcp_lro_queue_mbuf(lro, mpf);
181 
182 #else
183 		if (tcp_lro_rx(lro, mpf, 0))
184 			(*ifp->if_input)(ifp, mpf);
185 
186 #endif /* #if (__FreeBSD_version >= 1100101) */
187 
188 
189 	} else {
190 		(*ifp->if_input)(ifp, mpf);
191 	}
192 
193 	if (sdsp->rx_free > ha->std_replenish)
194 		qla_replenish_normal_rx(ha, sdsp, r_idx);
195 
196 	return;
197 }
198 
#define QLA_TCP_HDR_SIZE        20
#define QLA_TCP_TS_OPTION_SIZE  12

/*
 * Name: qla_lro_intr
 * Function: Handles a hardware LRO (large receive offload) completion:
 *	rebuilds the coalesced frame's mbuf chain, fixes up the TCP and
 *	IP/IPv6 headers to match the coalesced payload length, and passes
 *	the frame to the stack.
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* with multiple RDS rings, ring index tracks the status ring */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->hw.rds[r_idx].lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/*
	 * Total frame length = L2..L4 headers + payload + TCP header
	 * (the descriptor's payload_length excludes the TCP header, and
	 * the timestamp option adds 12 bytes when present).
	 */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->hw.rds[r_idx].lro_bytes += pkt_length;

	/* walk the handles and link the buffers into a single mbuf chain */
	for (i = 0; i < sgc->num_handles; i++) {
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* return the receive buffer slot to the per-ring free list */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* head of chain: carries the packet header */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* last segment holds whatever length remains */
	mpl->m_len = rem_len;

	/* locate TCP header before trimming the L2 prefix */
	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* drop any pre-L2 padding reported by the hardware */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/*
		 * Strip the 802.1Q tag in place: shift the 12-byte MAC
		 * addresses forward by 4 bytes, then trim the front.
		 */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	/* rewrite the IP length field to cover the coalesced payload */
	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

                ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* neither IPv4 nor IPv6: drop the frame */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* LRO completions imply the hardware verified the checksums */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	/* top up the receive ring if enough buffers have been consumed */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
370 
371 static int
372 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
373 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
374 {
375 	uint32_t i;
376 	uint16_t num_handles;
377 	q80_stat_desc_t *sdesc;
378 	uint32_t opcode;
379 
380 	*nhandles = 0;
381 	dcount--;
382 
383 	for (i = 0; i < dcount; i++) {
384 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
385 		sdesc = (q80_stat_desc_t *)
386 				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
387 
388 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
389 
390 		if (!opcode) {
391 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
392 				__func__, (void *)sdesc->data[0],
393 				(void *)sdesc->data[1]);
394 			return -1;
395 		}
396 
397 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
398 		if (!num_handles) {
399 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
400 				__func__, (void *)sdesc->data[0],
401 				(void *)sdesc->data[1]);
402 			return -1;
403 		}
404 
405 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
406 			num_handles = -1;
407 
408 		switch (num_handles) {
409 
410 		case 1:
411 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
412 			break;
413 
414 		case 2:
415 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
416 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
417 			break;
418 
419 		case 3:
420 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
421 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
422 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
423 			break;
424 
425 		case 4:
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
427 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
428 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
429 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
430 			break;
431 
432 		case 5:
433 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
434 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
435 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
436 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
437 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
438 			break;
439 
440 		case 6:
441 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
442 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
443 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
444 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
445 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
446 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
447 			break;
448 
449 		case 7:
450 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
451 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
452 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
453 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
454 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
455 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
456 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
457 			break;
458 
459 		default:
460 			device_printf(ha->pci_dev,
461 				"%s: invalid num handles %p %p\n",
462 				__func__, (void *)sdesc->data[0],
463 				(void *)sdesc->data[1]);
464 
465 			QL_ASSERT(ha, (0),\
466 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
467 			__func__, "invalid num handles", sds_idx, num_handles,
468 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
469 
470 			qla_rcv_error(ha);
471 			return 0;
472 		}
473 		*nhandles = *nhandles + num_handles;
474 	}
475 	return 0;
476 }
477 
/*
 * Name: ql_rcv_isr
 * Function: Main receive-side interrupt service routine for one status
 *	ring. Processes up to 'count' completed status descriptors,
 *	dispatching each to the plain receive, SGL receive or SGL LRO
 *	handler, then clears the consumed descriptors and advances the
 *	hardware consumer index.
 *
 * Returns 0 when the ring was drained, -1 (as uint32_t) when another
 * completed descriptor is already pending so the caller should re-run.
 */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	/* mark this ring active; bail out if a shutdown is in progress */
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->stop_rcv) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		/* opcode 0 means the descriptor has not completed yet */
		if (!opcode)
			break;

		switch (opcode) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:
			/* single-descriptor receive: one buffer handle */

			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:
			/* scatter/gather receive spanning desc_count descriptors */

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				/*
				 * Make sure the last descriptor of the group
				 * has already completed (CONT opcode) before
				 * touching the continuation descriptors.
				 */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			/* pull remaining handles from continuation descriptors */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:
			/* hardware LRO completion spanning desc_count descriptors */

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				/* verify the whole group has completed */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			/* pull remaining handles from continuation descriptors */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				/* dump diagnostic state on LRO failure */
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0: group incomplete or error; retry later */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;


		/* zero the consumed descriptors and advance past the group */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* periodically publish the consumer index to the hardware */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	/* flush anything soft LRO accumulated during this pass */
	if (ha->hw.enable_soft_lro) {
		struct lro_ctrl		*lro;

		lro = &ha->hw.sds[sds_idx].lro;

#if (__FreeBSD_version >= 1100101)

		tcp_lro_flush_all(lro);

#else
		struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}

#endif /* #if (__FreeBSD_version >= 1100101) */

	}

	if (ha->stop_rcv)
		goto ql_rcv_isr_exit;

	/* publish the final consumer index, or replenish if nothing moved */
	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* if the next descriptor already completed, ask caller to re-run */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
780 
781 void
782 ql_mbx_isr(void *arg)
783 {
784 	qla_host_t *ha;
785 	uint32_t data;
786 	uint32_t prev_link_state;
787 
788 	ha = arg;
789 
790 	if (ha == NULL) {
791 		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
792 		return;
793 	}
794 
795 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
796 	if ((data & 0x3) != 0x1) {
797 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
798 		return;
799 	}
800 
801 	data = READ_REG32(ha, Q8_FW_MBOX0);
802 
803 	if ((data & 0xF000) != 0x8000)
804 		return;
805 
806 	data = data & 0xFFFF;
807 
808 	switch (data) {
809 
810 	case 0x8001:  /* It's an AEN */
811 
812 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
813 
814 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
815 		ha->hw.cable_length = data & 0xFFFF;
816 
817 		data = data >> 16;
818 		ha->hw.link_speed = data & 0xFFF;
819 
820 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
821 
822 		prev_link_state =  ha->hw.link_up;
823 		ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
824 
825 		if (prev_link_state !=  ha->hw.link_up) {
826 			if (ha->hw.link_up)
827 				if_link_state_change(ha->ifp, LINK_STATE_UP);
828 			else
829 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
830 		}
831 
832 
833 		ha->hw.module_type = ((data >> 8) & 0xFF);
834 		ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
835 		ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
836 
837 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
838 		ha->hw.flags.loopback_mode = data & 0x03;
839 
840 		ha->hw.link_faults = (data >> 3) & 0xFF;
841 
842 		break;
843 
844         case 0x8100:
845 		ha->hw.imd_compl=1;
846 		break;
847 
848         case 0x8101:
849                 ha->async_event = 1;
850                 ha->hw.aen_mb0 = 0x8101;
851                 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
852                 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
853                 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
854                 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
855                 break;
856 
857         case 0x8110:
858                 /* for now just dump the registers */
859                 {
860                         uint32_t ombx[5];
861 
862                         ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
863                         ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
864                         ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
865                         ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
866                         ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
867 
868                         device_printf(ha->pci_dev, "%s: "
869                                 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
870                                 __func__, data, ombx[0], ombx[1], ombx[2],
871                                 ombx[3], ombx[4]);
872                 }
873 
874                 break;
875 
876         case 0x8130:
877                 /* sfp insertion aen */
878                 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
879                         __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
880                 break;
881 
882         case 0x8131:
883                 /* sfp removal aen */
884                 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
885                 break;
886 
887 	case 0x8140:
888 		{
889 			uint32_t ombx[3];
890 
891 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
892 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
893 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
894 
895 			device_printf(ha->pci_dev, "%s: "
896 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
897 				__func__, data, ombx[0], ombx[1], ombx[2]);
898 		}
899 		break;
900 
901 	default:
902 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
903 		break;
904 	}
905 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
906 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
907 	return;
908 }
909 
910 
911 static void
912 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
913 {
914 	qla_rx_buf_t *rxb;
915 	int count = sdsp->rx_free;
916 	uint32_t rx_next;
917 	qla_rdesc_t *rdesc;
918 
919 	/* we can play with this value via a sysctl */
920 	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;
921 
922 	rdesc = &ha->hw.rds[r_idx];
923 
924 	rx_next = rdesc->rx_next;
925 
926 	while (count--) {
927 		rxb = sdsp->rxb_free;
928 
929 		if (rxb == NULL)
930 			break;
931 
932 		sdsp->rxb_free = rxb->next;
933 		sdsp->rx_free--;
934 
935 		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
936 			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
937 				rxb->handle,
938 				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
939 			rdesc->rx_in++;
940 			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
941 				rdesc->rx_in = 0;
942 			rdesc->rx_next++;
943 			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
944 				rdesc->rx_next = 0;
945 		} else {
946 			device_printf(ha->pci_dev,
947 				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
948 				__func__, r_idx, rdesc->rx_in, rxb->handle);
949 
950 			rxb->m_head = NULL;
951 			rxb->next = sdsp->rxb_free;
952 			sdsp->rxb_free = rxb;
953 			sdsp->rx_free++;
954 
955 			break;
956 		}
957 		if (replenish_thresh-- == 0) {
958 			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
959 				rdesc->rx_next);
960 			rx_next = rdesc->rx_next;
961 			replenish_thresh = ha->hw.rds_pidx_thres;
962 		}
963 	}
964 
965 	if (rx_next != rdesc->rx_next) {
966 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
967 			rdesc->rx_next);
968 	}
969 }
970 
971 void
972 ql_isr(void *arg)
973 {
974 	qla_ivec_t *ivec = arg;
975 	qla_host_t *ha ;
976 	int idx;
977 	qla_hw_t *hw;
978 	struct ifnet *ifp;
979 	qla_tx_fp_t *fp;
980 
981 	ha = ivec->ha;
982 	hw = &ha->hw;
983 	ifp = ha->ifp;
984 
985 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
986 		return;
987 
988 	fp = &ha->tx_fp[idx];
989 	hw->sds[idx].intr_count++;
990 
991 	if ((fp->fp_taskqueue != NULL) &&
992 		(ifp->if_drv_flags & IFF_DRV_RUNNING))
993 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
994 
995 	return;
996 }
997 
998