xref: /freebsd/sys/dev/qlxgbe/ql_isr.c (revision 076ad2f836d5f49dc1375f1677335a48fe0d4b82)
1 /*
2  * Copyright (c) 2013-2016 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: ql_isr.c
30  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 
45 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
46 		uint32_t r_idx);
47 
/*
 * Flag a fatal receive-path error: stop all further receive processing
 * (checked by ql_rcv_isr and the rx handlers) and request adapter
 * recovery.  NOTE(review): recovery flag is presumably acted on by the
 * watchdog/error-recovery path outside interrupt context — confirm.
 */
static void
qla_rcv_error(qla_host_t *ha)
{
	ha->flags.stop_rcv = 1;
	ha->qla_initiate_recovery = 1;
}
54 
55 
56 /*
57  * Name: qla_rx_intr
58  * Function: Handles normal ethernet frames received
59  */
/*
 * Reassemble the frame described by the receive completion "sgc" into an
 * mbuf chain, recycle the hardware receive buffers onto the SDS free
 * list, attach VLAN/checksum/RSS metadata and pass the packet to the
 * network stack via if_input.
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL; /* cur/first/last */
	struct ifnet		*ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;

	/* With multiple RDS rings the receive ring is paired 1:1 with the
	 * status (SDS) ring; otherwise everything uses ring 0. */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	/* Walk the buffer handles that make up this frame. */
	for (i = 0; i < sgc->num_handles; i++) {
		/* Low 15 bits of the handle index into the rx_buf array. */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp; /* also set below; this covers error paths */

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* Return the receive buffer to the SDS free list so it can
		 * later be replenished with a fresh mbuf. */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			/* First buffer carries the packet header. */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* Chain continuation buffers; rem_len tracks the
			 * bytes of the packet not yet accounted for. */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* The last buffer holds whatever is left of the packet.
	 * NOTE(review): assumes sgc->num_handles >= 1, otherwise mpl is
	 * NULL here — presumably guaranteed by the completion format. */
	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/* Shift the 12-byte DA/SA portion of the header forward
		 * over the 4-byte 802.1Q tag, then trim the now-duplicate
		 * leading 4 bytes. */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		/* Hardware verified both the IP and L4 checksums. */
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	mpf->m_pkthdr.flowid = sgc->rss_hash;

#if __FreeBSD_version >= 1100000
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);
#else
	M_HASHTYPE_SET(mpf, M_HASHTYPE_NONE);
#endif /* #if __FreeBSD_version >= 1100000 */

	(*ifp->if_input)(ifp, mpf);

	/* Replenish receive descriptors once enough buffers are free. */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
176 
177 #define QLA_TCP_HDR_SIZE        20
178 #define QLA_TCP_TS_OPTION_SIZE  12
179 
/*
 * Name: qla_lro_intr
 * Function: Handles hardware-coalesced (LRO) TCP frames received
 */
/*
 * Reassemble a hardware-coalesced (LRO) TCP burst described by "sgc" into
 * an mbuf chain, patch the IP/IPv6 total-length field to cover the
 * coalesced payload, attach checksum/RSS metadata and hand the result to
 * the stack via if_input.  Returns 0 in all cases (errors are reported
 * through qla_rcv_error()).
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL; /* cur/first/last */
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* RDS ring paired 1:1 with the SDS ring when multiple rings exist. */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/* Total length = headers up to L4 + TCP header (+ timestamp option
	 * when the firmware indicates it) + coalesced payload. */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->lro_bytes += pkt_length;

	/* Walk the buffer handles that make up this coalesced frame. */
	for (i = 0; i < sgc->num_handles; i++) {
		/* Low 15 bits of the handle index into the rx_buf array. */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp; /* also set below; this covers error paths */

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* Return the receive buffer to the SDS free list so it can
		 * later be replenished with a fresh mbuf. */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* First buffer carries the packet header. */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* Chain continuation buffers; rem_len tracks the
			 * bytes of the packet not yet accounted for. */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* Last buffer holds whatever is left.  NOTE(review): assumes
	 * sgc->num_handles >= 1, otherwise mpl is NULL here. */
	mpl->m_len = rem_len;

	/* l4_offset is relative to the start of the DMA buffer (before the
	 * l2_offset trim below). */
	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	/* Propagate a coalesced PUSH into the rebuilt TCP header. */
	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* Strip any bytes preceding the Ethernet header. */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/* Shift DA/SA forward over the 4-byte 802.1Q tag and trim
		 * the now-duplicate leading 4 bytes. */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		/* Rewrite ip_len to span the full coalesced segment:
		 * IP header + TCP header (incl. options) + payload. */
		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

		ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		/* ip6_plen excludes the IPv6 header itself. */
		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* Not IPv4/IPv6: drop the frame (buffers were already
		 * recycled above) and replenish if needed. */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* LRO completions imply hardware-verified checksums. */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	/* Replenish receive descriptors once enough buffers are free. */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
348 
349 static int
350 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
351 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
352 {
353 	uint32_t i;
354 	uint16_t num_handles;
355 	q80_stat_desc_t *sdesc;
356 	uint32_t opcode;
357 
358 	*nhandles = 0;
359 	dcount--;
360 
361 	for (i = 0; i < dcount; i++) {
362 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
363 		sdesc = (q80_stat_desc_t *)
364 				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
365 
366 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
367 
368 		if (!opcode) {
369 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
370 				__func__, (void *)sdesc->data[0],
371 				(void *)sdesc->data[1]);
372 			return -1;
373 		}
374 
375 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
376 		if (!num_handles) {
377 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
378 				__func__, (void *)sdesc->data[0],
379 				(void *)sdesc->data[1]);
380 			return -1;
381 		}
382 
383 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
384 			num_handles = -1;
385 
386 		switch (num_handles) {
387 
388 		case 1:
389 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
390 			break;
391 
392 		case 2:
393 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
394 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
395 			break;
396 
397 		case 3:
398 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
399 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
400 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
401 			break;
402 
403 		case 4:
404 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
405 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
406 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
407 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
408 			break;
409 
410 		case 5:
411 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
412 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
413 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
414 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
415 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
416 			break;
417 
418 		case 6:
419 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
420 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
421 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
422 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
423 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
424 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
425 			break;
426 
427 		case 7:
428 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
429 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
430 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
431 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
432 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
433 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
434 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
435 			break;
436 
437 		default:
438 			device_printf(ha->pci_dev,
439 				"%s: invalid num handles %p %p\n",
440 				__func__, (void *)sdesc->data[0],
441 				(void *)sdesc->data[1]);
442 
443 			QL_ASSERT(ha, (0),\
444 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
445 			__func__, "invalid num handles", sds_idx, num_handles,
446 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
447 
448 			qla_rcv_error(ha);
449 			return 0;
450 		}
451 		*nhandles = *nhandles + num_handles;
452 	}
453 	return 0;
454 }
455 
/*
 * Name: ql_rcv_isr
 * Function: Receive-path service routine; drains the status descriptor
 *	ring for one SDS ring (the interrupt entry point itself is ql_isr)
 */
/*
 * Drain up to "count" completed status descriptors from SDS ring
 * "sds_idx", dispatching each completion to the normal-receive or LRO
 * handler, then publish the updated consumer index to the hardware.
 * Returns 0 if the ring was drained, -1 if another completion is already
 * pending (caller should reschedule).
 */
uint32_t
ql_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	/* Mark this ring as actively being serviced so teardown/recovery
	 * can wait for us to finish. */
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->flags.stop_rcv) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		/* Opcode 0 means the firmware has not written this
		 * descriptor yet — the ring is drained. */
		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		if (!opcode)
			break;

		hw->sds[sds_idx].intr_count++;
		switch (opcode) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:
			/* Single-descriptor (single-buffer) receive. */
			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:
			/* Multi-buffer (scatter/gather) receive. */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			/* Make sure the last continuation descriptor has
			 * actually been written before parsing the set;
			 * desc_count = 0 aborts the loop so we retry later
			 * without consuming anything. */
			if (desc_count > 1) {
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			/* Gather remaining handles from the continuation
			 * descriptors. */
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:
			/* Hardware-coalesced (LRO) completion. */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			/* As above: verify the full descriptor set has
			 * arrived before parsing. */
			if (desc_count > 1) {
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0 means an incomplete/garbled descriptor
		 * set; leave the ring as-is and retry on the next pass. */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;

		/* Zero the consumed descriptors (so opcode reads as 0 until
		 * the firmware rewrites them) and advance. */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* Periodically publish the consumer index so the firmware
		 * can reuse descriptors while we keep draining. */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	if (ha->flags.stop_rcv)
		goto ql_rcv_isr_exit;

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		/* Publish any final progress. */
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		/* No descriptors consumed: count it as spurious and use the
		 * opportunity to replenish rx buffers if enough are free. */
		hw->sds[sds_idx].spurious_intr_count++;

		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* If the next descriptor already has a valid opcode, more work is
	 * pending — tell the caller to reschedule. */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

ql_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
739 
740 void
741 ql_mbx_isr(void *arg)
742 {
743 	qla_host_t *ha;
744 	uint32_t data;
745 	uint32_t prev_link_state;
746 
747 	ha = arg;
748 
749 	if (ha == NULL) {
750 		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
751 		return;
752 	}
753 
754 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
755 	if ((data & 0x3) != 0x1) {
756 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
757 		return;
758 	}
759 
760 	data = READ_REG32(ha, Q8_FW_MBOX0);
761 
762 	if ((data & 0xF000) != 0x8000)
763 		return;
764 
765 	data = data & 0xFFFF;
766 
767 	switch (data) {
768 
769 	case 0x8001:  /* It's an AEN */
770 
771 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
772 
773 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
774 		ha->hw.cable_length = data & 0xFFFF;
775 
776 		data = data >> 16;
777 		ha->hw.link_speed = data & 0xFFF;
778 
779 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
780 
781 		prev_link_state =  ha->hw.link_up;
782 		ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
783 
784 		if (prev_link_state !=  ha->hw.link_up) {
785 			if (ha->hw.link_up)
786 				if_link_state_change(ha->ifp, LINK_STATE_UP);
787 			else
788 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
789 		}
790 
791 
792 		ha->hw.module_type = ((data >> 8) & 0xFF);
793 		ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
794 		ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
795 
796 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
797 		ha->hw.flags.loopback_mode = data & 0x03;
798 
799 		ha->hw.link_faults = (data >> 3) & 0xFF;
800 
801 		break;
802 
803         case 0x8100:
804 		ha->hw.imd_compl=1;
805 		break;
806 
807         case 0x8101:
808                 ha->async_event = 1;
809                 ha->hw.aen_mb0 = 0x8101;
810                 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
811                 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
812                 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
813                 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
814                 break;
815 
816         case 0x8110:
817                 /* for now just dump the registers */
818                 {
819                         uint32_t ombx[5];
820 
821                         ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
822                         ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
823                         ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
824                         ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
825                         ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
826 
827                         device_printf(ha->pci_dev, "%s: "
828                                 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
829                                 __func__, data, ombx[0], ombx[1], ombx[2],
830                                 ombx[3], ombx[4]);
831                 }
832 
833                 break;
834 
835         case 0x8130:
836                 /* sfp insertion aen */
837                 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
838                         __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
839                 break;
840 
841         case 0x8131:
842                 /* sfp removal aen */
843                 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
844                 break;
845 
846 	case 0x8140:
847 		{
848 			uint32_t ombx[3];
849 
850 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
851 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
852 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
853 
854 			device_printf(ha->pci_dev, "%s: "
855 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
856 				__func__, data, ombx[0], ombx[1], ombx[2]);
857 		}
858 		break;
859 
860 	default:
861 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
862 		break;
863 	}
864 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
865 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
866 	return;
867 }
868 
869 
/*
 * Refill receive descriptors on RDS ring "r_idx" from the free receive
 * buffers queued on sdsp->rxb_free, posting the producer index to the
 * hardware every rds_pidx_thres buffers and once more at the end.
 */
static void
qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
{
	qla_rx_buf_t *rxb;
	int count = sdsp->rx_free;
	uint32_t rx_next;
	qla_rdesc_t *rdesc;

	/* we can play with this value via a sysctl */
	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;

	rdesc = &ha->hw.rds[r_idx];

	rx_next = rdesc->rx_next;	/* last producer index posted to hw */

	while (count--) {
		rxb = sdsp->rxb_free;

		if (rxb == NULL)
			break;

		/* Unlink from the free list. */
		sdsp->rxb_free = rxb->next;
		sdsp->rx_free--;

		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
			/* Fresh mbuf attached: hand the buffer back to the
			 * hardware and advance the ring indices (both wrap
			 * at NUM_RX_DESCRIPTORS). */
			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
				rxb->handle,
				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
			rdesc->rx_in++;
			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
				rdesc->rx_in = 0;
			rdesc->rx_next++;
			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
				rdesc->rx_next = 0;
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
				__func__, r_idx, rdesc->rx_in, rxb->handle);

			/* No mbuf available: put the buffer back on the
			 * free list and stop replenishing for now. */
			rxb->m_head = NULL;
			rxb->next = sdsp->rxb_free;
			sdsp->rxb_free = rxb;
			sdsp->rx_free++;

			break;
		}
		if (replenish_thresh-- == 0) {
			/* Periodically publish progress to the hardware. */
			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
				rdesc->rx_next);
			rx_next = rdesc->rx_next;
			replenish_thresh = ha->hw.rds_pidx_thres;
		}
	}

	/* Publish any remaining progress. */
	if (rx_next != rdesc->rx_next) {
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
			rdesc->rx_next);
	}
}
929 
930 void
931 ql_isr(void *arg)
932 {
933 	qla_ivec_t *ivec = arg;
934 	qla_host_t *ha ;
935 	int idx;
936 	qla_hw_t *hw;
937 	struct ifnet *ifp;
938 	qla_tx_fp_t *fp;
939 
940 	ha = ivec->ha;
941 	hw = &ha->hw;
942 	ifp = ha->ifp;
943 
944 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
945 		return;
946 
947 
948 	fp = &ha->tx_fp[idx];
949 
950 	if (fp->fp_taskqueue != NULL)
951 		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
952 
953 	return;
954 }
955 
956