/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD

#if EFSYS_OPT_QSTATS
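/*
 * Note: the do { ... } while (B_FALSE) wrapper (with the lint annotation
 * silencing the constant-condition warning) makes the macro expand to a
 * single statement, so it composes safely with if/else without braces.
 */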
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)
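/*
 * See ef10_ev_qcreate() below: wake-up events for queues created without
 * interrupts are directed at the event queue with this index.
 */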

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


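/*
 * The MCDI helpers below share a pattern: a single payload buffer sized with
 * MAX() so it can hold either the request or the response, populated via the
 * MCDI_IN_* macros, executed synchronously, and then checked first for a
 * firmware error (emr_rc) and then for a response that is too short.
 */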
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington, RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut-through is enabled, no RX batching will
	 * occur.
	 *
	 * So always enable RX and TX event batching, and enable event
	 * cut-through if we want low-latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

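	/*
	 * Pass the 64-bit DMA address of each event queue buffer page to the
	 * firmware, split into low and high dwords, one entry per page of
	 * EFX_BUF_SIZE bytes.
	 */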
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


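/*
 * INIT_EVQ V2 replaces the explicit CUT_THRU/RX_MERGE/TX_MERGE flags with a
 * queue TYPE, letting the firmware pick the settings itself; the flags it
 * actually selected are returned in the response and logged below via the
 * mcdi_evq_flags probe.
 */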
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	uint8_t payload[
		MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		    MC_CMD_INIT_EVQ_V2_OUT_LEN)];
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
		    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	/*
	 * EALREADY is not an error, but indicates that the MC has rebooted and
	 * that the EVQ has already been destroyed.
	 */
	if (rc != EALREADY)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}



	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(ndescs) ||
	    (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut-through and to disable RX batching.  If the
		 * event queue type in flags is auto, we let the firmware
		 * decide the settings to use. If the adapter has a low
		 * latency license, it will choose the best settings for low
		 * latency, otherwise it will choose the best settings for
		 * throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
		    flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If the event queue type in flags is auto, we favour
		 * throughput if the adapter is running virtualization
		 * supporting firmware (i.e. the full featured firmware
		 * variant) and latency otherwise. The Ethernet Virtual
		 * Bridging capability is used to make this decision. (Note
		 * though that the low latency firmware variant is also best
		 * for throughput, and the corresponding type should be
		 * specified to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ?
		    B_FALSE : B_TRUE;
		rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us,
		    flags, low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
	    enp->en_family == EFX_FAMILY_MEDFORD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

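	/*
	 * With the bug35388 workaround the EVQ read pointer is posted via an
	 * indirect register whose RPTR field is narrower than the pointer
	 * itself, so it is written in two parts: high bits first, then low
	 * bits. The static asserts below check that two writes always
	 * suffice for valid queue sizes.
	 */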
	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

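/*
 * Post a software event on an EVQ. On EF10 this goes through the MCDI
 * DRIVER_EVENT command, so the 16-bit payload travels in the low dword of
 * the event, and any failure to post is silently discarded here.
 */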
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, B_FALSE);
		} else {
			EFX_POPULATE_DWORD_2(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks);
			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, B_FALSE);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM

static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS has the least significant bits of the global
	 * (not per-buffer) packet counter. It is guaranteed that the
	 * maximum number of completed packets fits into the lbits mask,
	 * so modulo arithmetic over that mask yields the packet counter
	 * increment.
	 */
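	/*
	 * For example, with an 8-bit lbits field, a previous count of 250 and
	 * a new value of 3 gives (3 - 250) & 0xff = 9 newly completed packets.
	 */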
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
		eersp->eers_rx_packed_stream_credits++;
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}

#endif /* EFSYS_OPT_RX_PACKED_STREAM */

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * Packed stream events are very different,
	 * so handle them separately
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this is a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
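	/*
	 * The payload originates from ef10_ev_qpost(), which only posts
	 * 16-bit values; anything wider indicates a corrupt event.
	 */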
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
#endif

	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event carries the new
	 * buffer flag, so the read pointer is incremented on that event and
	 * ends up with the correct value. That makes for simpler code than
	 * trying to detect the start-of-the-world condition in the event
	 * handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream;
	if (packed_stream) {
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW BUFFER flag set, but it must still be
		 * accounted for here so that the credit count cannot
		 * accidentally wrap past the maximum.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#endif
}

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */