xref: /freebsd/sys/dev/qlxgbe/ql_isr.c (revision 8aac90f18aef7c9eea906c3ff9a001ca7b94f375)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2013-2016 Qlogic Corporation
5  * All rights reserved.
6  *
7  *  Redistribution and use in source and binary forms, with or without
8  *  modification, are permitted provided that the following conditions
9  *  are met:
10  *
11  *  1. Redistributions of source code must retain the above copyright
12  *     notice, this list of conditions and the following disclaimer.
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  *  POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 /*
31  * File: ql_isr.c
32  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
33  */
34 
35 #include <sys/cdefs.h>
36 #include "ql_os.h"
37 #include "ql_hw.h"
38 #include "ql_def.h"
39 #include "ql_inline.h"
40 #include "ql_ver.h"
41 #include "ql_glbl.h"
42 #include "ql_dbg.h"
43 
44 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
45 		uint32_t r_idx);
46 
static void
qla_rcv_error(qla_host_t *ha)
{
	/*
	 * Fatal receive-path error: stop all further receive processing
	 * and initiate adapter recovery.  stop_rcv must be set first --
	 * ql_rcv_isr() checks it while draining the status ring.
	 */
	ha->stop_rcv = 1;
	QL_INITIATE_RECOVERY(ha);
}
53 
54 /*
55  * Name: qla_rx_intr
56  * Function: Handles normal ethernet frames received
57  */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
	if_t ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;
#if defined(INET) || defined(INET6)
	struct lro_ctrl		*lro;

	lro = &ha->hw.sds[sds_idx].lro;
#endif

	/* With multiple RDS rings the RDS ring index tracks the SDS ring. */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	/*
	 * Walk the handle list and chain the corresponding receive
	 * buffers into a single mbuf chain (mpf = first, mpl = last).
	 */
	for (i = 0; i < sgc->num_handles; i++) {
		/* Low 15 bits of the handle index the rx buffer array. */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/*
		 * Recycle the buffer descriptor onto the SDS free list
		 * before validating mp, so it is not leaked on error.
		 */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			/* head of chain: set up the packet header */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation buffer: link it and track remainder */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* Last buffer holds only the remaining bytes of the frame. */
	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/*
		 * Strip the 802.1Q tag in place: shift the 12 bytes of
		 * DA/SA up by ETHER_VLAN_ENCAP_LEN, then trim the front.
		 */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	/* Propagate the hardware checksum verdict to the stack. */
	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);

#if defined(INET) || defined(INET6)
	/* Queue into software LRO when enabled, else hand to the stack. */
	if (ha->hw.enable_soft_lro)
		tcp_lro_queue_mbuf(lro, mpf);
	else
#endif
		if_input(ifp, mpf);

	/* Replenish the RDS ring if enough buffers have been freed. */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
180 
181 #define QLA_TCP_HDR_SIZE        20
182 #define QLA_TCP_TS_OPTION_SIZE  12
183 
/*
 * Name: qla_lro_intr
 * Function: Handles Large Receive Offload (LRO) completions
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	if_t ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* With multiple RDS rings the RDS ring index tracks the SDS ring. */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->hw.rds[r_idx].lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/*
	 * Reconstruct the total frame length: TCP payload plus all
	 * headers up to and including the TCP header (the hardware
	 * reports payload_length and the L4 header offset separately).
	 */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		/* TCP header plus the 12-byte timestamp option */
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->hw.rds[r_idx].lro_bytes += pkt_length;

	/* Chain the receive buffers named by the handles into one mbuf. */
	for (i = 0; i < sgc->num_handles; i++) {
		/* Low 15 bits of the handle index the rx buffer array. */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/*
		 * Recycle the buffer descriptor onto the SDS free list
		 * before validating mp, so it is not leaked on error.
		 */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* head of chain: set up the packet header */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation buffer: link it and track remainder */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* Last buffer holds only the remaining bytes of the frame. */
	mpl->m_len = rem_len;

	/* Locate the TCP header before any leading bytes are trimmed. */
	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* Drop any padding in front of the Ethernet header. */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/*
		 * Strip the 802.1Q tag in place: shift the 12 bytes of
		 * DA/SA up by ETHER_VLAN_ENCAP_LEN, then trim the front.
		 */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	/*
	 * Rewrite the IP total/payload length for the aggregated frame;
	 * the hardware coalesced several segments into one.
	 */
	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

                ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* Neither IPv4 nor IPv6: drop the frame and replenish. */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* LRO completions are only generated for checksum-valid frames. */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if_input(ifp, mpf);

	/* Replenish the RDS ring if enough buffers have been freed. */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
352 
353 static int
354 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
355 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
356 {
357 	uint32_t i;
358 	uint16_t num_handles;
359 	q80_stat_desc_t *sdesc;
360 	uint32_t opcode;
361 
362 	*nhandles = 0;
363 	dcount--;
364 
365 	for (i = 0; i < dcount; i++) {
366 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
367 		sdesc = (q80_stat_desc_t *)
368 				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
369 
370 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
371 
372 		if (!opcode || QL_ERR_INJECT(ha, INJCT_INV_CONT_OPCODE)) {
373 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
374 				__func__, (void *)sdesc->data[0],
375 				(void *)sdesc->data[1]);
376 			return -1;
377 		}
378 
379 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
380 		if (!num_handles) {
381 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
382 				__func__, (void *)sdesc->data[0],
383 				(void *)sdesc->data[1]);
384 			return -1;
385 		}
386 
387 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
388 			num_handles = -1;
389 
390 		switch (num_handles) {
391 		case 1:
392 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
393 			break;
394 
395 		case 2:
396 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
397 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
398 			break;
399 
400 		case 3:
401 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
402 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
403 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
404 			break;
405 
406 		case 4:
407 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
408 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
409 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
410 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
411 			break;
412 
413 		case 5:
414 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
415 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
416 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
417 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
418 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
419 			break;
420 
421 		case 6:
422 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
423 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
424 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
425 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
427 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
428 			break;
429 
430 		case 7:
431 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
432 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
433 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
434 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
435 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
436 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
437 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
438 			break;
439 
440 		default:
441 			device_printf(ha->pci_dev,
442 				"%s: invalid num handles %p %p\n",
443 				__func__, (void *)sdesc->data[0],
444 				(void *)sdesc->data[1]);
445 
446 			QL_ASSERT(ha, (0),\
447 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
448 			__func__, "invalid num handles", sds_idx, num_handles,
449 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
450 
451 			qla_rcv_error(ha);
452 			return 0;
453 		}
454 		*nhandles = *nhandles + num_handles;
455 	}
456 	return 0;
457 }
458 
459 /*
460  * Name: ql_rcv_isr
461  * Function: Main Interrupt Service Routine
462  */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	/* Mark this ring active; bail out if receive is being torn down. */
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	/* Drain at most 'count' status descriptors from the ring. */
	while (count-- && !ha->stop_rcv) {
		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		/* Opcode 0: descriptor not yet written by hardware. */
		if (!opcode)
			break;

		switch (opcode) {
		case Q8_STAT_DESC_OPCODE_RCV_PKT:
			/* Single-buffer receive completion. */

			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:
			/* Multi-buffer (scatter-gather) receive completion. */

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				/*
				 * Verify the last descriptor of the group is a
				 * continuation before touching any of them.
				 */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_RCV_INV_DESC_COUNT)) {
					/* desc_count = 0 stops the drain loop */
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			/* Gather remaining handles from the continuations. */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:
			/* Hardware LRO (coalesced TCP) completion. */

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				/*
				 * Verify the last descriptor of the group is a
				 * continuation before touching any of them.
				 */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if ((Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) ||
				QL_ERR_INJECT(ha, INJCT_SGL_LRO_INV_DESC_COUNT)) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			/* Gather remaining handles from the continuations. */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				/* diagnostic dump of the failed completion */
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			/* Unknown opcode: log it and stop processing. */
			desc_count = 0;
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0 signals an error above: stop draining. */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;

		/* Zero the consumed descriptors and advance comp_idx. */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* Periodically publish the consumer index to the hardware. */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

#if defined(INET) || defined(INET6)
	/* Flush anything accumulated by software LRO this pass. */
	if (ha->hw.enable_soft_lro) {
		struct lro_ctrl		*lro;

		lro = &ha->hw.sds[sds_idx].lro;
		tcp_lro_flush_all(lro);
	}
#endif

	if (ha->stop_rcv)
		goto ql_rcv_isr_exit;

	/* Final consumer-index update, or replenish if nothing was consumed. */
	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* Non-zero return asks the caller to reschedule: more work pending. */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
748 
749 void
750 ql_mbx_isr(void *arg)
751 {
752 	qla_host_t *ha;
753 	uint32_t data;
754 	uint32_t prev_link_state;
755 
756 	ha = arg;
757 
758 	if (ha == NULL) {
759 		printf("%s: arg == NULL\n", __func__);
760 		return;
761 	}
762 
763 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
764 	if ((data & 0x3) != 0x1) {
765 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
766 		return;
767 	}
768 
769 	data = READ_REG32(ha, Q8_FW_MBOX0);
770 
771 	if ((data & 0xF000) != 0x8000)
772 		return;
773 
774 	data = data & 0xFFFF;
775 
776 	switch (data) {
777 	case 0x8001:  /* It's an AEN */
778 
779 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
780 
781 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
782 		ha->hw.cable_length = data & 0xFFFF;
783 
784 		data = data >> 16;
785 		ha->hw.link_speed = data & 0xFFF;
786 
787 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
788 
789 		prev_link_state =  ha->hw.link_up;
790 
791 		data = (((data & 0xFF) == 0) ? 0 : 1);
792 		atomic_store_rel_8(&ha->hw.link_up, (uint8_t)data);
793 
794 		device_printf(ha->pci_dev,
795 			"%s: AEN[0x8001] data = 0x%08x, prev_link_state = 0x%08x\n",
796 			__func__, data, prev_link_state);
797 
798 		if (prev_link_state !=  ha->hw.link_up) {
799 			if (ha->hw.link_up)
800 				if_link_state_change(ha->ifp, LINK_STATE_UP);
801 			else
802 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
803 		}
804 
805 		ha->hw.module_type = ((data >> 8) & 0xFF);
806 		ha->hw.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
807 		ha->hw.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
808 
809 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
810 		ha->hw.loopback_mode = data & 0x03;
811 
812 		ha->hw.link_faults = (data >> 3) & 0xFF;
813 
814 		break;
815 
816         case 0x8100:
817 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
818 		ha->hw.imd_compl=1;
819 		break;
820 
821         case 0x8101:
822                 ha->async_event = 1;
823                 ha->hw.aen_mb0 = 0x8101;
824                 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
825                 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
826                 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
827                 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
828 		device_printf(ha->pci_dev, "%s: AEN[0x%08x 0x%08x 0x%08x 0%08x 0x%08x]\n",
829 			__func__, data, ha->hw.aen_mb1, ha->hw.aen_mb2,
830 			ha->hw.aen_mb3, ha->hw.aen_mb4);
831                 break;
832 
833         case 0x8110:
834                 /* for now just dump the registers */
835                 {
836                         uint32_t ombx[5];
837 
838                         ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
839                         ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
840                         ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
841                         ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
842                         ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
843 
844                         device_printf(ha->pci_dev, "%s: "
845                                 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
846                                 __func__, data, ombx[0], ombx[1], ombx[2],
847                                 ombx[3], ombx[4]);
848                 }
849 
850                 break;
851 
852         case 0x8130:
853                 /* sfp insertion aen */
854                 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
855                         __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
856                 break;
857 
858         case 0x8131:
859                 /* sfp removal aen */
860                 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
861                 break;
862 
863 	case 0x8140:
864 		{
865 			uint32_t ombx[3];
866 
867 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
868 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
869 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
870 
871 			device_printf(ha->pci_dev, "%s: "
872 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
873 				__func__, data, ombx[0], ombx[1], ombx[2]);
874 		}
875 		break;
876 
877 	default:
878 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
879 		break;
880 	}
881 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
882 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
883 	return;
884 }
885 
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	/* Remember the producer index last published to the hardware. */
	rx_next = rdesc->rx_next;

	/* Refill up to rx_free descriptors from the SDS free list. */
	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			/* New mbuf attached: post it to the receive ring. */
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			/* Allocation failed: put rxb back and stop refilling. */
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		/* Publish the producer index in batches of rds_pidx_thres. */
		if (replenish_thresh-- == 0) {
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	/* Publish any final advance that was not covered by a batch. */
	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}
945 
946 void
947 ql_isr(void *arg)
948 {
949 	qla_ivec_t *ivec = arg;
950 	qla_host_t *ha ;
951 	int idx;
952 	qla_hw_t *hw;
953 	if_t ifp;
954 	qla_tx_fp_t *fp;
955 
956 	ha = ivec->ha;
957 	hw = &ha->hw;
958 	ifp = ha->ifp;
959 
960 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
961 		return;
962 
963 	fp = &ha->tx_fp[idx];
964 	hw->sds[idx].intr_count++;
965 
966 	if ((fp->fp_taskqueue != NULL) &&
967 		(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
968 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
969 
970 	return;
971 }
972