xref: /freebsd/sys/dev/qlxgbe/ql_isr.c (revision 792bbaba989533a1fc93823df1720c8c4aaf0442)
1 /*
2  * Copyright (c) 2013-2016 Qlogic Corporation
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 /*
29  * File: ql_isr.c
30  * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 
37 #include "ql_os.h"
38 #include "ql_hw.h"
39 #include "ql_def.h"
40 #include "ql_inline.h"
41 #include "ql_ver.h"
42 #include "ql_glbl.h"
43 #include "ql_dbg.h"
44 
45 static void qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp,
46 		uint32_t r_idx);
47 
/*
 * qla_rcv_error: flag a fatal receive-path error.
 * Stops all further receive processing (checked throughout the rx path)
 * and asks the watchdog/recovery path to reinitialize the adapter.
 */
static void
qla_rcv_error(qla_host_t *ha)
{
	ha->flags.stop_rcv = 1;
	ha->qla_initiate_recovery = 1;
}
54 
55 
56 /*
57  * Name: qla_rx_intr
58  * Function: Handles normal ethernet frames received
59  */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t		*rxb;
	struct mbuf		*mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet		*ifp = ha->ifp;
	qla_sds_t		*sdsp;
	struct ether_vlan_header *eh;
	uint32_t		i, rem_len = 0;
	uint32_t		r_idx = 0;
	qla_rx_ring_t		*rx_ring;

	/* with multiple rds rings, rds ring index tracks the sds ring index */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	sdsp = &ha->hw.sds[sds_idx];
	rx_ring = &ha->rx_ring[r_idx];

	/*
	 * Chain the receive buffers named by the completion handles into a
	 * single mbuf chain (mpf = first/pkthdr, mpl = last).
	 * NOTE(review): assumes sgc->num_handles >= 1; with zero handles,
	 * mpl below would be NULL — presumably guaranteed by the hardware
	 * completion format; confirm against the firmware spec.
	 */
	for (i = 0; i < sgc->num_handles; i++) {
		/* low 15 bits of the handle index into the rx buffer array */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* return the descriptor slot to the per-sds free list so it
		 * can be replenished with a fresh mbuf */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return;
		}

		if (i == 0) {
			/* head mbuf carries the packet header and total len */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = sgc->pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation buffers are full; track what remains
			 * so the tail mbuf's m_len can be trimmed below */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* the last buffer is only partially filled */
	mpl->m_len = rem_len;

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/* strip the 4-byte 802.1Q tag in place: shift the 12 bytes of
		 * dst/src MAC addresses forward by one 32-bit word each */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
	}

	if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
		/* hardware validated both IP and L4 checksums */
		mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
			CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		mpf->m_pkthdr.csum_data = 0xFFFF;
	} else {
		mpf->m_pkthdr.csum_flags = 0;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	/* hash value is opaque to the stack; only used for queue affinity */
	mpf->m_pkthdr.flowid = sgc->rss_hash;
	M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE_HASH);

	(*ifp->if_input)(ifp, mpf);

	/* top up the receive ring once enough buffers have been freed */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return;
}
171 
172 #define QLA_TCP_HDR_SIZE        20
173 #define QLA_TCP_TS_OPTION_SIZE  12
174 
/*
 * Name: qla_lro_intr
 * Function: Handles LRO (Large Receive Offload) completions
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
	qla_rx_buf_t *rxb;
	struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
	struct ifnet *ifp = ha->ifp;
	qla_sds_t *sdsp;
	struct ether_vlan_header *eh;
	uint32_t i, rem_len = 0, pkt_length, iplen;
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	uint16_t etype;
	uint32_t r_idx = 0;
	qla_rx_ring_t *rx_ring;

	/* with multiple rds rings, rds ring index tracks the sds ring index */
	if (ha->hw.num_rds_rings > 1)
		r_idx = sds_idx;

	ha->hw.rds[r_idx].count++;

	rx_ring = &ha->rx_ring[r_idx];

	ha->lro_pkt_count++;

	sdsp = &ha->hw.sds[sds_idx];

	/*
	 * Reconstruct the total frame length: L2+L3 headers (l4_offset)
	 * plus TCP payload, plus a 20-byte TCP header (32 bytes when the
	 * firmware flags a timestamp option).
	 */
	pkt_length = sgc->payload_length + sgc->l4_offset;

	if (sgc->flags & Q8_LRO_COMP_TS) {
		pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
	} else {
		pkt_length += QLA_TCP_HDR_SIZE;
	}
	ha->lro_bytes += pkt_length;

	/*
	 * Chain the receive buffers named by the completion handles into one
	 * mbuf chain (mpf = first/pkthdr, mpl = last).
	 * NOTE(review): assumes sgc->num_handles >= 1; with zero handles,
	 * mpl below would be NULL — presumably guaranteed by the hardware
	 * completion format; confirm against the firmware spec.
	 */
	for (i = 0; i < sgc->num_handles; i++) {
		/* low 15 bits of the handle index into the rx buffer array */
		rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

		QL_ASSERT(ha, (rxb != NULL),
			("%s: [sds_idx]=[%d] rxb != NULL\n", __func__,\
			sds_idx));

		if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s invalid rxb[%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		mp = rxb->m_head;
		if (i == 0)
			mpf = mp;

		QL_ASSERT(ha, (mp != NULL),
			("%s: [sds_idx]=[%d] mp != NULL\n", __func__,\
			sds_idx));

		bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

		/* return the descriptor slot to the per-sds free list so it
		 * can be replenished with a fresh mbuf */
		rxb->m_head = NULL;
		rxb->next = sdsp->rxb_free;
		sdsp->rxb_free = rxb;
		sdsp->rx_free++;

		if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
			/* log the error */
			device_printf(ha->pci_dev,
				"%s mp  == NULL [%d, %d, 0x%04x]\n",
				__func__, sds_idx, i, sgc->handle[i]);
			qla_rcv_error(ha);
			return (0);
		}

		if (i == 0) {
			/* head mbuf carries the packet header and total len */
			mpl = mpf = mp;
			mp->m_flags |= M_PKTHDR;
			mp->m_pkthdr.len = pkt_length;
			mp->m_pkthdr.rcvif = ifp;
			rem_len = mp->m_pkthdr.len;
		} else {
			/* continuation buffers are full; track what remains
			 * so the tail mbuf's m_len can be trimmed below */
			mp->m_flags &= ~M_PKTHDR;
			mpl->m_next = mp;
			mpl = mp;
			rem_len = rem_len - mp->m_len;
		}
	}

	/* the last buffer is only partially filled */
	mpl->m_len = rem_len;

	th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

	/* firmware coalesced a segment that carried PSH; restore the flag */
	if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
		th->th_flags |= TH_PUSH;

	/* drop any firmware-prepended bytes ahead of the ethernet header */
	m_adj(mpf, sgc->l2_offset);

	eh = mtod(mpf, struct ether_vlan_header *);

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		uint32_t *data = (uint32_t *)eh;

		mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
		mpf->m_flags |= M_VLANTAG;

		/* strip the 4-byte 802.1Q tag in place: shift the 12 bytes of
		 * dst/src MAC addresses forward by one 32-bit word each */
		*(data + 3) = *(data + 2);
		*(data + 2) = *(data + 1);
		*(data + 1) = *data;

		m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

		etype = ntohs(eh->evl_proto);
	} else {
		etype = ntohs(eh->evl_encap_proto);
	}

	/*
	 * The firmware leaves the stale per-segment length in the IP header;
	 * rewrite it to cover the whole coalesced packet.
	 */
	if (etype == ETHERTYPE_IP) {
		ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

		iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
				sgc->payload_length;

                ip->ip_len = htons(iplen);

		ha->ipv4_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV4);

	} else if (etype == ETHERTYPE_IPV6) {
		ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

		/* ip6_plen excludes the fixed IPv6 header */
		iplen = (th->th_off << 2) + sgc->payload_length;

		ip6->ip6_plen = htons(iplen);

		ha->ipv6_lro++;

		M_HASHTYPE_SET(mpf, M_HASHTYPE_RSS_TCP_IPV6);

	} else {
		/* neither IPv4 nor IPv6: drop the frame */
		m_freem(mpf);

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
		return 0;
	}

	/* LRO frames were checksum-verified by hardware before coalescing */
	mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	mpf->m_pkthdr.csum_data = 0xFFFF;

	mpf->m_pkthdr.flowid = sgc->rss_hash;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	(*ifp->if_input)(ifp, mpf);

	/* top up the receive ring once enough buffers have been freed */
	if (sdsp->rx_free > ha->std_replenish)
		qla_replenish_normal_rx(ha, sdsp, r_idx);

	return (0);
}
343 
344 static int
345 qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
346 	uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
347 {
348 	uint32_t i;
349 	uint16_t num_handles;
350 	q80_stat_desc_t *sdesc;
351 	uint32_t opcode;
352 
353 	*nhandles = 0;
354 	dcount--;
355 
356 	for (i = 0; i < dcount; i++) {
357 		comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
358 		sdesc = (q80_stat_desc_t *)
359 				&ha->hw.sds[sds_idx].sds_ring_base[comp_idx];
360 
361 		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));
362 
363 		if (!opcode) {
364 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
365 				__func__, (void *)sdesc->data[0],
366 				(void *)sdesc->data[1]);
367 			return -1;
368 		}
369 
370 		num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
371 		if (!num_handles) {
372 			device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
373 				__func__, (void *)sdesc->data[0],
374 				(void *)sdesc->data[1]);
375 			return -1;
376 		}
377 
378 		if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
379 			num_handles = -1;
380 
381 		switch (num_handles) {
382 
383 		case 1:
384 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
385 			break;
386 
387 		case 2:
388 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
389 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
390 			break;
391 
392 		case 3:
393 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
394 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
395 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
396 			break;
397 
398 		case 4:
399 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
400 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
401 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
402 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
403 			break;
404 
405 		case 5:
406 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
407 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
408 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
409 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
410 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
411 			break;
412 
413 		case 6:
414 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
415 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
416 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
417 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
418 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
419 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
420 			break;
421 
422 		case 7:
423 			*handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
424 			*handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
425 			*handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
426 			*handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
427 			*handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
428 			*handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
429 			*handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
430 			break;
431 
432 		default:
433 			device_printf(ha->pci_dev,
434 				"%s: invalid num handles %p %p\n",
435 				__func__, (void *)sdesc->data[0],
436 				(void *)sdesc->data[1]);
437 
438 			QL_ASSERT(ha, (0),\
439 			("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
440 			__func__, "invalid num handles", sds_idx, num_handles,
441 			(void *)sdesc->data[0],(void *)sdesc->data[1]));
442 
443 			qla_rcv_error(ha);
444 			return 0;
445 		}
446 		*nhandles = *nhandles + num_handles;
447 	}
448 	return 0;
449 }
450 
/*
 * Name: qla_rcv_isr
 * Function: Processes receive completions on one Status Descriptor Ring
 *	(invoked from ql_isr, the registered interrupt handler)
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;
	uint32_t r_idx = 0;
	qla_sds_t *sdsp;

	dev = ha->pci_dev;
	hw = &ha->hw;

	/* mark this ring as being serviced; bail out if rx is shut down */
	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * receive interrupts
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	/* callers pass count = -1 (uint32_t) to drain until the ring is
	 * empty; the post-decrement makes that effectively unbounded */
	while (count-- && !ha->flags.stop_rcv) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		/* opcode == 0 means the firmware has not written this slot
		 * yet (consumed descriptors are zeroed below) */
		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		if (!opcode)
			break;

		hw->sds[sds_idx].intr_count++;
		switch (opcode) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:

			/* single-descriptor, single-buffer receive */
			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:

			/* scatter/gather receive spanning desc_count
			 * descriptors; extra handles live in continuation
			 * descriptors gathered by qla_rcv_cont_sds() */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			if (desc_count > 1) {
				/* verify the last descriptor of the burst has
				 * been written; if not, retry on next pass */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(\
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2) ,\
				("%s: [sds_idx, data0, data1]="\
				"%d, %p, %p]\n", __func__, sds_idx,\
				(void *)sdesc->data[0],\
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);

			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:

			/* LRO completion: coalesced TCP segments spanning
			 * desc_count descriptors */
			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				/* verify the last descriptor of the burst has
				 * been written; if not, retry on next pass */
				c_idx = (comp_idx + desc_count -1) &
						(NUM_STATUS_DESCRIPTORS-1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
			Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7) ,\
				("%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]));

			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc->data[0],\
				(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
				"%s: [comp_idx, c_idx, dcount, nhndls]="\
				 "[%d, %d, %d, %d]\n",\
				__func__, comp_idx, c_idx, desc_count,
				sgc.lro.num_handles);
				if (desc_count > 1) {
				device_printf(dev,
				"%s: [sds_idx, data0, data1]="\
				 "[%d, 0x%llx, 0x%llx]\n",\
				__func__, sds_idx,\
				(long long unsigned int)sdesc0->data[0],\
				(long long unsigned int)sdesc0->data[1]);
				}
			}

			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/* desc_count == 0 means an incomplete burst or an error;
		 * leave the ring as-is and retry on the next interrupt */
		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;


		/* zero out consumed descriptors so !opcode detects them as
		 * empty on the next pass, then advance past the burst */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/* batch consumer-index updates to limit register writes */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,\
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	if (ha->flags.stop_rcv)
		goto qla_rcv_isr_exit;

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		/* publish the final consumer index to the hardware */
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
		hw->sds[sds_idx].sdsr_next = comp_idx;
	} else {
		/* nothing was consumed this invocation */
		hw->sds[sds_idx].spurious_intr_count++;

		if (ha->hw.num_rds_rings > 1)
			r_idx = sds_idx;

		sdsp = &ha->hw.sds[sds_idx];

		if (sdsp->rx_free > ha->std_replenish)
			qla_replenish_normal_rx(ha, sdsp, r_idx);
	}

	/* a non-zero opcode at the new head means more work is pending;
	 * tell the caller via a non-zero return */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

qla_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
734 
735 void
736 ql_mbx_isr(void *arg)
737 {
738 	qla_host_t *ha;
739 	uint32_t data;
740 	uint32_t prev_link_state;
741 
742 	ha = arg;
743 
744 	if (ha == NULL) {
745 		device_printf(ha->pci_dev, "%s: arg == NULL\n", __func__);
746 		return;
747 	}
748 
749 	data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);
750 	if ((data & 0x3) != 0x1) {
751 		WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0);
752 		return;
753 	}
754 
755 	data = READ_REG32(ha, Q8_FW_MBOX0);
756 
757 	if ((data & 0xF000) != 0x8000)
758 		return;
759 
760 	data = data & 0xFFFF;
761 
762 	switch (data) {
763 
764 	case 0x8001:  /* It's an AEN */
765 
766 		ha->hw.cable_oui = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
767 
768 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
769 		ha->hw.cable_length = data & 0xFFFF;
770 
771 		data = data >> 16;
772 		ha->hw.link_speed = data & 0xFFF;
773 
774 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
775 
776 		prev_link_state =  ha->hw.link_up;
777 		ha->hw.link_up = (((data & 0xFF) == 0) ? 0 : 1);
778 
779 		if (prev_link_state !=  ha->hw.link_up) {
780 			if (ha->hw.link_up)
781 				if_link_state_change(ha->ifp, LINK_STATE_UP);
782 			else
783 				if_link_state_change(ha->ifp, LINK_STATE_DOWN);
784 		}
785 
786 
787 		ha->hw.module_type = ((data >> 8) & 0xFF);
788 		ha->hw.flags.fduplex = (((data & 0xFF0000) == 0) ? 0 : 1);
789 		ha->hw.flags.autoneg = (((data & 0xFF000000) == 0) ? 0 : 1);
790 
791 		data = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
792 		ha->hw.flags.loopback_mode = data & 0x03;
793 
794 		ha->hw.link_faults = (data >> 3) & 0xFF;
795 
796 		break;
797 
798         case 0x8100:
799 		ha->hw.imd_compl=1;
800 		break;
801 
802         case 0x8101:
803                 ha->async_event = 1;
804                 ha->hw.aen_mb0 = 0x8101;
805                 ha->hw.aen_mb1 = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
806                 ha->hw.aen_mb2 = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
807                 ha->hw.aen_mb3 = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
808                 ha->hw.aen_mb4 = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
809                 break;
810 
811         case 0x8110:
812                 /* for now just dump the registers */
813                 {
814                         uint32_t ombx[5];
815 
816                         ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
817                         ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
818                         ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
819                         ombx[3] = READ_REG32(ha, (Q8_FW_MBOX0 + 16));
820                         ombx[4] = READ_REG32(ha, (Q8_FW_MBOX0 + 20));
821 
822                         device_printf(ha->pci_dev, "%s: "
823                                 "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
824                                 __func__, data, ombx[0], ombx[1], ombx[2],
825                                 ombx[3], ombx[4]);
826                 }
827 
828                 break;
829 
830         case 0x8130:
831                 /* sfp insertion aen */
832                 device_printf(ha->pci_dev, "%s: sfp inserted [0x%08x]\n",
833                         __func__, READ_REG32(ha, (Q8_FW_MBOX0 + 4)));
834                 break;
835 
836         case 0x8131:
837                 /* sfp removal aen */
838                 device_printf(ha->pci_dev, "%s: sfp removed]\n", __func__);
839                 break;
840 
841 	case 0x8140:
842 		{
843 			uint32_t ombx[3];
844 
845 			ombx[0] = READ_REG32(ha, (Q8_FW_MBOX0 + 4));
846 			ombx[1] = READ_REG32(ha, (Q8_FW_MBOX0 + 8));
847 			ombx[2] = READ_REG32(ha, (Q8_FW_MBOX0 + 12));
848 
849 			device_printf(ha->pci_dev, "%s: "
850 				"0x%08x 0x%08x 0x%08x 0x%08x \n",
851 				__func__, data, ombx[0], ombx[1], ombx[2]);
852 		}
853 		break;
854 
855 	default:
856 		device_printf(ha->pci_dev, "%s: AEN[0x%08x]\n", __func__, data);
857 		break;
858 	}
859 	WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
860 	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);
861 	return;
862 }
863 
864 
865 static void
866 qla_replenish_normal_rx(qla_host_t *ha, qla_sds_t *sdsp, uint32_t r_idx)
867 {
868 	qla_rx_buf_t *rxb;
869 	int count = sdsp->rx_free;
870 	uint32_t rx_next;
871 	qla_rdesc_t *rdesc;
872 
873 	/* we can play with this value via a sysctl */
874 	uint32_t replenish_thresh = ha->hw.rds_pidx_thres;
875 
876 	rdesc = &ha->hw.rds[r_idx];
877 
878 	rx_next = rdesc->rx_next;
879 
880 	while (count--) {
881 		rxb = sdsp->rxb_free;
882 
883 		if (rxb == NULL)
884 			break;
885 
886 		sdsp->rxb_free = rxb->next;
887 		sdsp->rx_free--;
888 
889 		if (ql_get_mbuf(ha, rxb, NULL) == 0) {
890 			qla_set_hw_rcv_desc(ha, r_idx, rdesc->rx_in,
891 				rxb->handle,
892 				rxb->paddr, (rxb->m_head)->m_pkthdr.len);
893 			rdesc->rx_in++;
894 			if (rdesc->rx_in == NUM_RX_DESCRIPTORS)
895 				rdesc->rx_in = 0;
896 			rdesc->rx_next++;
897 			if (rdesc->rx_next == NUM_RX_DESCRIPTORS)
898 				rdesc->rx_next = 0;
899 		} else {
900 			device_printf(ha->pci_dev,
901 				"%s: qla_get_mbuf [(%d),(%d),(%d)] failed\n",
902 				__func__, r_idx, rdesc->rx_in, rxb->handle);
903 
904 			rxb->m_head = NULL;
905 			rxb->next = sdsp->rxb_free;
906 			sdsp->rxb_free = rxb;
907 			sdsp->rx_free++;
908 
909 			break;
910 		}
911 		if (replenish_thresh-- == 0) {
912 			QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
913 				rdesc->rx_next);
914 			rx_next = rdesc->rx_next;
915 			replenish_thresh = ha->hw.rds_pidx_thres;
916 		}
917 	}
918 
919 	if (rx_next != rdesc->rx_next) {
920 		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,
921 			rdesc->rx_next);
922 	}
923 }
924 
925 void
926 ql_isr(void *arg)
927 {
928 	qla_ivec_t *ivec = arg;
929 	qla_host_t *ha ;
930 	int idx;
931 	qla_hw_t *hw;
932 	struct ifnet *ifp;
933 	uint32_t ret = 0;
934 
935 	ha = ivec->ha;
936 	hw = &ha->hw;
937 	ifp = ha->ifp;
938 
939 	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
940 		return;
941 
942 	if (idx == 0)
943 		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
944 
945 	ret = qla_rcv_isr(ha, idx, -1);
946 
947 	if (idx == 0)
948 		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
949 
950 	if (!ha->flags.stop_rcv) {
951 		QL_ENABLE_INTERRUPTS(ha, idx);
952 	}
953 	return;
954 }
955 
956