xref: /freebsd/sys/dev/sfxge/common/ef10_ev.c (revision 3fc36ee018bb836bd1796067cf4ef8683f166ebc)
1 /*-
2  * Copyright (c) 2012-2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright notice,
11  *    this list of conditions and the following disclaimer in the documentation
12  *    and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are
27  * those of the authors and should not be interpreted as representing official
28  * policies, either expressed or implied, of the FreeBSD Project.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "efx.h"
35 #include "efx_impl.h"
36 #if EFSYS_OPT_MON_STATS
37 #include "mcdi_mon.h"
38 #endif
39 
40 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
41 
#if EFSYS_OPT_QSTATS
/*
 * Increment per-event-queue statistic _stat on queue _eep.
 * Expands to nothing when queue statistics are compiled out, so the
 * macro must never be used for its side effects beyond counting.
 */
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
51 
52 
/*
 * Forward declarations for the per-event-type handlers that
 * ef10_ev_qcreate() installs into the event queue dispatch table
 * (eep->ee_rx, ee_tx, ee_driver, ee_drv_gen, ee_mcdi).
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
87 
88 
89 static	__checkReturn	efx_rc_t
90 efx_mcdi_set_evq_tmr(
91 	__in		efx_nic_t *enp,
92 	__in		uint32_t instance,
93 	__in		uint32_t mode,
94 	__in		uint32_t timer_ns)
95 {
96 	efx_mcdi_req_t req;
97 	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
98 			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
99 	efx_rc_t rc;
100 
101 	(void) memset(payload, 0, sizeof (payload));
102 	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
103 	req.emr_in_buf = payload;
104 	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
105 	req.emr_out_buf = payload;
106 	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
107 
108 	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
109 	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
110 	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
111 	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
112 
113 	efx_mcdi_execute(enp, &req);
114 
115 	if (req.emr_rc != 0) {
116 		rc = req.emr_rc;
117 		goto fail1;
118 	}
119 
120 	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
121 		rc = EMSGSIZE;
122 		goto fail2;
123 	}
124 
125 	return (0);
126 
127 fail2:
128 	EFSYS_PROBE(fail2);
129 fail1:
130 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
131 
132 	return (rc);
133 }
134 
/*
 * Build and issue MC_CMD_INIT_EVQ (v1) to create a hardware event queue.
 *
 * instance:	function-relative event queue index
 * esmp:	host memory backing the event ring
 * nevs:	number of event descriptors (determines the buffer count)
 * irq:		function-relative interrupt vector number
 * us:		interrupt moderation period in microseconds; 0 disables
 *		the timer
 * low_latency:	request event cut-through, trading RX event batching for
 *		lower latency (see comment below)
 *
 * Returns 0 on success or an errno value on failure.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	/* Sized for the largest possible request (max buffer count). */
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int ev_cut_through;
	efx_rc_t rc;

	/* The request length grows with the number of DMA buffers. */
	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington RX and TX event batching can only be requested together
	 * (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut through is enabled no RX batching will occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	ev_cut_through = low_latency ? 1 : 0;
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		/* Convert the moderation period into timer ticks. */
		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail2;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	/*
	 * Fill in the table of buffer bus addresses, one entry per
	 * EFX_BUF_SIZE chunk of the backing memory.
	 */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail4;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
251 
252 
/*
 * Build and issue the v2 form of MC_CMD_INIT_EVQ to create a hardware
 * event queue.  Unlike the v1 request (efx_mcdi_init_evq), the batching
 * and cut-through configuration is delegated to the firmware via
 * FLAG_TYPE_AUTO, so no low_latency parameter is needed.
 *
 * instance:	function-relative event queue index
 * esmp:	host memory backing the event ring
 * nevs:	number of event descriptors (determines the buffer count)
 * irq:		function-relative interrupt vector number
 * us:		interrupt moderation period in microseconds; 0 disables
 *		the timer
 *
 * Returns 0 on success or an errno value on failure.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us)
{
	efx_mcdi_req_t req;
	/* Sized for the largest possible request (max buffer count). */
	uint8_t payload[
		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	/* The request length grows with the number of DMA buffers. */
	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	/* Let the firmware choose the batching/cut-through settings. */
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		/* Convert the moderation period into timer ticks. */
		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail2;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	/*
	 * Fill in the table of buffer bus addresses, one entry per
	 * EFX_BUF_SIZE chunk of the backing memory.
	 */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail4;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	/* Trace the flag settings the firmware actually chose. */
	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
359 
360 static	__checkReturn	efx_rc_t
361 efx_mcdi_fini_evq(
362 	__in		efx_nic_t *enp,
363 	__in		uint32_t instance)
364 {
365 	efx_mcdi_req_t req;
366 	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
367 			    MC_CMD_FINI_EVQ_OUT_LEN)];
368 	efx_rc_t rc;
369 
370 	(void) memset(payload, 0, sizeof (payload));
371 	req.emr_cmd = MC_CMD_FINI_EVQ;
372 	req.emr_in_buf = payload;
373 	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
374 	req.emr_out_buf = payload;
375 	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
376 
377 	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
378 
379 	efx_mcdi_execute_quiet(enp, &req);
380 
381 	if (req.emr_rc != 0) {
382 		rc = req.emr_rc;
383 		goto fail1;
384 	}
385 
386 	return (0);
387 
388 fail1:
389 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
390 
391 	return (rc);
392 }
393 
394 
395 
/*
 * EF10 event module initialization hook.  There is no EF10-specific
 * event state to set up, so this always succeeds.
 */
	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}
403 
/*
 * EF10 event module teardown hook.  Nothing was allocated in
 * ef10_ev_init(), so there is nothing to release.
 */
			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}
410 
411 	__checkReturn	efx_rc_t
412 ef10_ev_qcreate(
413 	__in		efx_nic_t *enp,
414 	__in		unsigned int index,
415 	__in		efsys_mem_t *esmp,
416 	__in		size_t n,
417 	__in		uint32_t id,
418 	__in		uint32_t us,
419 	__in		efx_evq_t *eep)
420 {
421 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
422 	uint32_t irq;
423 	efx_rc_t rc;
424 
425 	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
426 	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
427 	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
428 
429 	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
430 		rc = EINVAL;
431 		goto fail1;
432 	}
433 
434 	if (index >= encp->enc_evq_limit) {
435 		rc = EINVAL;
436 		goto fail2;
437 	}
438 
439 	if (us > encp->enc_evq_timer_max_us) {
440 		rc = EINVAL;
441 		goto fail3;
442 	}
443 
444 	/* Set up the handler table */
445 	eep->ee_rx	= ef10_ev_rx;
446 	eep->ee_tx	= ef10_ev_tx;
447 	eep->ee_driver	= ef10_ev_driver;
448 	eep->ee_drv_gen	= ef10_ev_drv_gen;
449 	eep->ee_mcdi	= ef10_ev_mcdi;
450 
451 	/* Set up the event queue */
452 	irq = index;	/* INIT_EVQ expects function-relative vector number */
453 
454 	/*
455 	 * Interrupts may be raised for events immediately after the queue is
456 	 * created. See bug58606.
457 	 */
458 
459 	if (encp->enc_init_evq_v2_supported) {
460 		/*
461 		 * On Medford the low latency license is required to enable RX
462 		 * and event cut through and to disable RX batching.  We let the
463 		 * firmware decide the settings to use. If the adapter has a low
464 		 * latency license, it will choose the best settings for low
465 		 * latency, otherwise it choose the best settings for
466 		 * throughput.
467 		 */
468 		rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us);
469 		if (rc != 0)
470 			goto fail4;
471 	} else {
472 		/*
473 		 * On Huntington we need to specify the settings to use. We
474 		 * favour latency if the adapter is running low-latency firmware
475 		 * and throughput otherwise, and assume not support RX batching
476 		 * implies the adapter is running low-latency firmware.  (This
477 		 * is how it's been done since Huntington GA. It doesn't make
478 		 * much sense with hindsight as the 'low-latency' firmware
479 		 * variant is also best for throughput, and does now support RX
480 		 * batching).
481 		 */
482 		boolean_t low_latency = encp->enc_rx_batching_enabled ? 0 : 1;
483 		rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us,
484 				    low_latency);
485 		if (rc != 0)
486 			goto fail5;
487 	}
488 
489 	return (0);
490 
491 fail5:
492 	EFSYS_PROBE(fail5);
493 fail4:
494 	EFSYS_PROBE(fail4);
495 fail3:
496 	EFSYS_PROBE(fail3);
497 fail2:
498 	EFSYS_PROBE(fail2);
499 fail1:
500 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
501 
502 	return (rc);
503 }
504 
505 			void
506 ef10_ev_qdestroy(
507 	__in		efx_evq_t *eep)
508 {
509 	efx_nic_t *enp = eep->ee_enp;
510 
511 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
512 	    enp->en_family == EFX_FAMILY_MEDFORD);
513 
514 	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
515 }
516 
/*
 * Write the event queue read pointer (derived from the count of events
 * the driver has consumed) back to the hardware.
 *
 * Always returns 0.
 */
	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	/* Wrap the count to a ring index. */
	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		/*
		 * Workaround path: the indirect register only takes
		 * ERF_DD_EVQ_IND_RPTR_WIDTH bits at a time, so the pointer
		 * is written in two halves, high bits first then low bits.
		 * NOTE(review): presumably the LOW write commits the update,
		 * hence the ordering — confirm against the bug35388 notes.
		 * The static asserts check that two writes of RPTR_WIDTH
		 * bits can represent every legal queue size.
		 */
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		/* Normal path: single write of the full read pointer. */
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
557 
558 static	__checkReturn	efx_rc_t
559 efx_mcdi_driver_event(
560 	__in		efx_nic_t *enp,
561 	__in		uint32_t evq,
562 	__in		efx_qword_t data)
563 {
564 	efx_mcdi_req_t req;
565 	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
566 			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
567 	efx_rc_t rc;
568 
569 	req.emr_cmd = MC_CMD_DRIVER_EVENT;
570 	req.emr_in_buf = payload;
571 	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
572 	req.emr_out_buf = payload;
573 	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
574 
575 	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
576 
577 	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
578 	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
579 	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
580 	    EFX_QWORD_FIELD(data, EFX_DWORD_1));
581 
582 	efx_mcdi_execute(enp, &req);
583 
584 	if (req.emr_rc != 0) {
585 		rc = req.emr_rc;
586 		goto fail1;
587 	}
588 
589 	return (0);
590 
591 fail1:
592 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
593 
594 	return (rc);
595 }
596 
597 			void
598 ef10_ev_qpost(
599 	__in	efx_evq_t *eep,
600 	__in	uint16_t data)
601 {
602 	efx_nic_t *enp = eep->ee_enp;
603 	efx_qword_t event;
604 
605 	EFX_POPULATE_QWORD_3(event,
606 	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
607 	    ESF_DZ_DRV_SUB_CODE, 0,
608 	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
609 
610 	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
611 }
612 
613 	__checkReturn	efx_rc_t
614 ef10_ev_qmoderate(
615 	__in		efx_evq_t *eep,
616 	__in		unsigned int us)
617 {
618 	efx_nic_t *enp = eep->ee_enp;
619 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
620 	efx_dword_t dword;
621 	uint32_t mode;
622 	efx_rc_t rc;
623 
624 	/* Check that hardware and MCDI use the same timer MODE values */
625 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
626 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
627 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
628 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
629 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
630 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
631 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
632 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
633 
634 	if (us > encp->enc_evq_timer_max_us) {
635 		rc = EINVAL;
636 		goto fail1;
637 	}
638 
639 	/* If the value is zero then disable the timer */
640 	if (us == 0) {
641 		mode = FFE_CZ_TIMER_MODE_DIS;
642 	} else {
643 		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
644 	}
645 
646 	if (encp->enc_bug61265_workaround) {
647 		uint32_t ns = us * 1000;
648 
649 		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
650 		if (rc != 0)
651 			goto fail2;
652 	} else {
653 		unsigned int ticks;
654 
655 		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
656 			goto fail3;
657 
658 		if (encp->enc_bug35388_workaround) {
659 			EFX_POPULATE_DWORD_3(dword,
660 			    ERF_DD_EVQ_IND_TIMER_FLAGS,
661 			    EFE_DD_EVQ_IND_TIMER_FLAGS,
662 			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
663 			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
664 			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
665 			    eep->ee_index, &dword, 0);
666 		} else {
667 			EFX_POPULATE_DWORD_2(dword,
668 			    ERF_DZ_TC_TIMER_MODE, mode,
669 			    ERF_DZ_TC_TIMER_VAL, ticks);
670 			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
671 			    eep->ee_index, &dword, 0);
672 		}
673 	}
674 
675 	return (0);
676 
677 fail3:
678 	EFSYS_PROBE(fail3);
679 fail2:
680 	EFSYS_PROBE(fail2);
681 fail1:
682 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
683 
684 	return (rc);
685 }
686 
687 
#if EFSYS_OPT_QSTATS
/*
 * Fold this queue's accumulated event statistics into the caller's
 * stat array and reset the per-queue counters to zero.
 */
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int idx;

	for (idx = 0; idx < EV_NQSTATS; idx++) {
		EFSYS_STAT_INCR(&stat[idx], eep->ee_stat[idx]);
		eep->ee_stat[idx] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */
704 
705 
/*
 * Handle an RX completion event: decode the packet classification
 * fields, build the EFX_PKT_*/EFX_CKSUM_*/EFX_DISCARD flags, advance
 * the per-RXQ descriptor read pointer, and invoke the client's RX
 * callback.
 *
 * Returns the callback's should_abort indication (B_TRUE aborts the
 * event poll loop), or B_FALSE if the event is discarded.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/*
	 * Increment the count of descriptors read.  Only the low bits of
	 * the descriptor pointer are in the event, so compute the delta
	 * modulo the field width.
	 */
	eersp = &eep->ee_rxq_state[label];
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	/* Map the L3/L4 classification onto packet and checksum flags. */
	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}
871 
872 static	__checkReturn	boolean_t
873 ef10_ev_tx(
874 	__in		efx_evq_t *eep,
875 	__in		efx_qword_t *eqp,
876 	__in		const efx_ev_callbacks_t *eecp,
877 	__in_opt	void *arg)
878 {
879 	efx_nic_t *enp = eep->ee_enp;
880 	uint32_t id;
881 	uint32_t label;
882 	boolean_t should_abort;
883 
884 	EFX_EV_QSTAT_INCR(eep, EV_TX);
885 
886 	/* Discard events after RXQ/TXQ errors */
887 	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
888 		return (B_FALSE);
889 
890 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
891 		/* Drop this event */
892 		return (B_FALSE);
893 	}
894 
895 	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
896 	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
897 	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
898 
899 	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
900 
901 	EFSYS_ASSERT(eecp->eec_tx != NULL);
902 	should_abort = eecp->eec_tx(arg, label, id);
903 
904 	return (should_abort);
905 }
906 
907 static	__checkReturn	boolean_t
908 ef10_ev_driver(
909 	__in		efx_evq_t *eep,
910 	__in		efx_qword_t *eqp,
911 	__in		const efx_ev_callbacks_t *eecp,
912 	__in_opt	void *arg)
913 {
914 	unsigned int code;
915 	boolean_t should_abort;
916 
917 	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
918 	should_abort = B_FALSE;
919 
920 	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
921 	switch (code) {
922 	case ESE_DZ_DRV_TIMER_EV: {
923 		uint32_t id;
924 
925 		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
926 
927 		EFSYS_ASSERT(eecp->eec_timer != NULL);
928 		should_abort = eecp->eec_timer(arg, id);
929 		break;
930 	}
931 
932 	case ESE_DZ_DRV_WAKE_UP_EV: {
933 		uint32_t id;
934 
935 		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
936 
937 		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
938 		should_abort = eecp->eec_wake_up(arg, id);
939 		break;
940 	}
941 
942 	case ESE_DZ_DRV_START_UP_EV:
943 		EFSYS_ASSERT(eecp->eec_initialized != NULL);
944 		should_abort = eecp->eec_initialized(arg);
945 		break;
946 
947 	default:
948 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
949 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
950 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
951 		break;
952 	}
953 
954 	return (should_abort);
955 }
956 
957 static	__checkReturn	boolean_t
958 ef10_ev_drv_gen(
959 	__in		efx_evq_t *eep,
960 	__in		efx_qword_t *eqp,
961 	__in		const efx_ev_callbacks_t *eecp,
962 	__in_opt	void *arg)
963 {
964 	uint32_t data;
965 	boolean_t should_abort;
966 
967 	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
968 	should_abort = B_FALSE;
969 
970 	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
971 	if (data >= ((uint32_t)1 << 16)) {
972 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
973 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
974 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
975 
976 		return (B_TRUE);
977 	}
978 
979 	EFSYS_ASSERT(eecp->eec_software != NULL);
980 	should_abort = eecp->eec_software(arg, (uint16_t)data);
981 
982 	return (should_abort);
983 }
984 
985 static	__checkReturn	boolean_t
986 ef10_ev_mcdi(
987 	__in		efx_evq_t *eep,
988 	__in		efx_qword_t *eqp,
989 	__in		const efx_ev_callbacks_t *eecp,
990 	__in_opt	void *arg)
991 {
992 	efx_nic_t *enp = eep->ee_enp;
993 	unsigned code;
994 	boolean_t should_abort = B_FALSE;
995 
996 	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
997 
998 	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
999 	switch (code) {
1000 	case MCDI_EVENT_CODE_BADSSERT:
1001 		efx_mcdi_ev_death(enp, EINTR);
1002 		break;
1003 
1004 	case MCDI_EVENT_CODE_CMDDONE:
1005 		efx_mcdi_ev_cpl(enp,
1006 		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
1007 		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
1008 		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
1009 		break;
1010 
1011 #if EFSYS_OPT_MCDI_PROXY_AUTH
1012 	case MCDI_EVENT_CODE_PROXY_RESPONSE:
1013 		/*
1014 		 * This event notifies a function that an authorization request
1015 		 * has been processed. If the request was authorized then the
1016 		 * function can now re-send the original MCDI request.
1017 		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
1018 		 */
1019 		efx_mcdi_ev_proxy_response(enp,
1020 		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
1021 		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
1022 		break;
1023 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
1024 
1025 	case MCDI_EVENT_CODE_LINKCHANGE: {
1026 		efx_link_mode_t link_mode;
1027 
1028 		ef10_phy_link_ev(enp, eqp, &link_mode);
1029 		should_abort = eecp->eec_link_change(arg, link_mode);
1030 		break;
1031 	}
1032 
1033 	case MCDI_EVENT_CODE_SENSOREVT: {
1034 #if EFSYS_OPT_MON_STATS
1035 		efx_mon_stat_t id;
1036 		efx_mon_stat_value_t value;
1037 		efx_rc_t rc;
1038 
1039 		/* Decode monitor stat for MCDI sensor (if supported) */
1040 		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
1041 			/* Report monitor stat change */
1042 			should_abort = eecp->eec_monitor(arg, id, value);
1043 		} else if (rc == ENOTSUP) {
1044 			should_abort = eecp->eec_exception(arg,
1045 				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
1046 				MCDI_EV_FIELD(eqp, DATA));
1047 		} else {
1048 			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
1049 		}
1050 #endif
1051 		break;
1052 	}
1053 
1054 	case MCDI_EVENT_CODE_SCHEDERR:
1055 		/* Informational only */
1056 		break;
1057 
1058 	case MCDI_EVENT_CODE_REBOOT:
1059 		/* Falcon/Siena only (should not been seen with Huntington). */
1060 		efx_mcdi_ev_death(enp, EIO);
1061 		break;
1062 
1063 	case MCDI_EVENT_CODE_MC_REBOOT:
1064 		/* MC_REBOOT event is used for Huntington (EF10) and later. */
1065 		efx_mcdi_ev_death(enp, EIO);
1066 		break;
1067 
1068 	case MCDI_EVENT_CODE_MAC_STATS_DMA:
1069 #if EFSYS_OPT_MAC_STATS
1070 		if (eecp->eec_mac_stats != NULL) {
1071 			eecp->eec_mac_stats(arg,
1072 			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
1073 		}
1074 #endif
1075 		break;
1076 
1077 	case MCDI_EVENT_CODE_FWALERT: {
1078 		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
1079 
1080 		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
1081 			should_abort = eecp->eec_exception(arg,
1082 				EFX_EXCEPTION_FWALERT_SRAM,
1083 				MCDI_EV_FIELD(eqp, FWALERT_DATA));
1084 		else
1085 			should_abort = eecp->eec_exception(arg,
1086 				EFX_EXCEPTION_UNKNOWN_FWALERT,
1087 				MCDI_EV_FIELD(eqp, DATA));
1088 		break;
1089 	}
1090 
1091 	case MCDI_EVENT_CODE_TX_ERR: {
1092 		/*
1093 		 * After a TXQ error is detected, firmware sends a TX_ERR event.
1094 		 * This may be followed by TX completions (which we discard),
1095 		 * and then finally by a TX_FLUSH event. Firmware destroys the
1096 		 * TXQ automatically after sending the TX_FLUSH event.
1097 		 */
1098 		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
1099 
1100 		EFSYS_PROBE2(tx_descq_err,
1101 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1102 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1103 
1104 		/* Inform the driver that a reset is required. */
1105 		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
1106 		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
1107 		break;
1108 	}
1109 
1110 	case MCDI_EVENT_CODE_TX_FLUSH: {
1111 		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
1112 
1113 		/*
1114 		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
1115 		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
1116 		 * We want to wait for all completions, so ignore the events
1117 		 * with TX_FLUSH_TO_DRIVER.
1118 		 */
1119 		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
1120 			should_abort = B_FALSE;
1121 			break;
1122 		}
1123 
1124 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
1125 
1126 		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
1127 
1128 		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
1129 		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
1130 		break;
1131 	}
1132 
1133 	case MCDI_EVENT_CODE_RX_ERR: {
1134 		/*
1135 		 * After an RXQ error is detected, firmware sends an RX_ERR
1136 		 * event. This may be followed by RX events (which we discard),
1137 		 * and then finally by an RX_FLUSH event. Firmware destroys the
1138 		 * RXQ automatically after sending the RX_FLUSH event.
1139 		 */
1140 		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
1141 
1142 		EFSYS_PROBE2(rx_descq_err,
1143 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1144 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1145 
1146 		/* Inform the driver that a reset is required. */
1147 		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
1148 		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
1149 		break;
1150 	}
1151 
1152 	case MCDI_EVENT_CODE_RX_FLUSH: {
1153 		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
1154 
1155 		/*
1156 		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
1157 		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
1158 		 * We want to wait for all completions, so ignore the events
1159 		 * with RX_FLUSH_TO_DRIVER.
1160 		 */
1161 		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
1162 			should_abort = B_FALSE;
1163 			break;
1164 		}
1165 
1166 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
1167 
1168 		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
1169 
1170 		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
1171 		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
1172 		break;
1173 	}
1174 
1175 	default:
1176 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1177 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1178 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1179 		break;
1180 	}
1181 
1182 	return (should_abort);
1183 }
1184 
1185 		void
1186 ef10_ev_rxlabel_init(
1187 	__in		efx_evq_t *eep,
1188 	__in		efx_rxq_t *erp,
1189 	__in		unsigned int label)
1190 {
1191 	efx_evq_rxq_state_t *eersp;
1192 
1193 	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1194 	eersp = &eep->ee_rxq_state[label];
1195 
1196 	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
1197 
1198 	eersp->eers_rx_read_ptr = 0;
1199 	eersp->eers_rx_mask = erp->er_mask;
1200 }
1201 
1202 		void
1203 ef10_ev_rxlabel_fini(
1204 	__in		efx_evq_t *eep,
1205 	__in		unsigned int label)
1206 {
1207 	efx_evq_rxq_state_t *eersp;
1208 
1209 	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1210 	eersp = &eep->ee_rxq_state[label];
1211 
1212 	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
1213 
1214 	eersp->eers_rx_read_ptr = 0;
1215 	eersp->eers_rx_mask = 0;
1216 }
1217 
1218 #endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
1219