xref: /freebsd/sys/dev/sfxge/common/ef10_ev.c (revision e27abb6689c5733dd08ce240d5402a0de3a42254)
1 /*-
2  * Copyright (c) 2012-2016 Solarflare Communications Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright notice,
11  *    this list of conditions and the following disclaimer in the documentation
12  *    and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are
27  * those of the authors and should not be interpreted as representing official
28  * policies, either expressed or implied, of the FreeBSD Project.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include "efx.h"
35 #include "efx_impl.h"
36 #if EFSYS_OPT_MON_STATS
37 #include "mcdi_mon.h"
38 #endif
39 
40 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
41 
#if EFSYS_OPT_QSTATS
/*
 * Bump a per-event-queue statistic counter; compiled out to nothing when
 * queue statistics support is disabled.
 */
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
51 
52 
/*
 * Forward declarations of the per-event-type decode handlers installed
 * into the EVQ handler table by ef10_ev_qcreate().  Each returns B_TRUE
 * if event processing should be aborted.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
87 
88 
/*
 * Issue an MC_CMD_SET_EVQ_TMR MCDI request to reprogram the interrupt
 * moderation timer for event queue 'instance'.  Both the initial load and
 * the reload values are set to the same timer_ns interval.
 * Returns 0 on success or an efx_rc_t error code.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	/* Single buffer is shared for request and response. */
	uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
			    MC_CMD_SET_EVQ_TMR_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* A short response indicates a firmware/driver mismatch. */
	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
134 
/*
 * Issue an MC_CMD_INIT_EVQ MCDI request to create an event queue in
 * firmware.
 *
 *	instance	function-relative EVQ index
 *	esmp		DMA memory backing the queue (EFX_BUF_SIZE pages)
 *	nevs		number of event descriptors (determines page count)
 *	irq		function-relative interrupt vector number
 *	us		interrupt moderation interval in microseconds
 *			(0 disables the moderation timer)
 *
 * Returns 0 on success or an efx_rc_t error code.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	/*
	 * Request length is variable (one DMA address per buffer page);
	 * size the buffer for the largest supported queue.
	 */
	uint8_t payload[
	    MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN)];
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	int supports_rx_batching;
	efx_rc_t rc;

	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/*
	 * On Huntington RX and TX event batching can only be requested
	 * together (even if the datapath firmware doesn't actually support RX
	 * batching).
	 * Cut through is incompatible with RX batching and so enabling cut
	 * through disables RX batching (but it does not affect TX batching).
	 *
	 * So always enable RX and TX event batching, and enable cut through
	 * if RX event batching isn't supported (i.e. on low latency firmware).
	 */
	supports_rx_batching = enp->en_nic_cfg.enc_rx_batching_enabled ? 1 : 0;
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_batching,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		uint32_t timer_val;

		/* Calculate the timer value in quanta */
		timer_val = us * 1000 / encp->enc_evq_timer_quantum_ns;

		/* Moderation value is base 0 so we need to deduct 1 */
		if (timer_val > 0)
			timer_val--;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, timer_val);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, timer_val);
	}

	/* Event counting (for moderation by event count) is not used. */
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	/*
	 * Fill in the DMA address of each EFX_BUF_SIZE page backing the
	 * queue; the backing memory is physically contiguous from the
	 * starting address of esmp.
	 */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
254 
/*
 * Issue an MC_CMD_FINI_EVQ MCDI request to tear down event queue
 * 'instance' in firmware.  Returns 0 on success or an efx_rc_t error code.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_fini_evq(
	__in		efx_nic_t *enp,
	__in		uint32_t instance)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
			    MC_CMD_FINI_EVQ_OUT_LEN)];
	efx_rc_t rc;

	(void) memset(payload, 0, sizeof (payload));
	req.emr_cmd = MC_CMD_FINI_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

	/*
	 * The quiet variant is used here; presumably to avoid logging
	 * errors that are expected during teardown — confirm against
	 * efx_mcdi_execute_quiet()'s contract.
	 */
	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
288 
289 
290 
	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	/* No EF10-specific event module initialisation is required. */
	_NOTE(ARGUNUSED(enp))
	return (0);
}
298 
			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	/* No EF10-specific event module teardown is required. */
	_NOTE(ARGUNUSED(enp))
}
305 
/*
 * Create and initialise an event queue: validate parameters, install the
 * event decode handler table, and create the queue in firmware via
 * MC_CMD_INIT_EVQ.
 *
 *	index	function-relative EVQ index (also used as the IRQ vector)
 *	esmp	DMA memory backing the queue
 *	n	number of event descriptors (must be a power of 2 within
 *		[EFX_EVQ_MINNEVS, EFX_EVQ_MAXNEVS])
 *	id	buffer table id — unused; buftbl is managed by the MC
 *	us	interrupt moderation interval in microseconds
 *
 * Returns 0 on success or an efx_rc_t error code.
 */
	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	irq = index;	/* INIT_EVQ expects function-relative vector number */

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	if ((rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us)) != 0)
		goto fail4;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
369 
370 			void
371 ef10_ev_qdestroy(
372 	__in		efx_evq_t *eep)
373 {
374 	efx_nic_t *enp = eep->ee_enp;
375 
376 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
377 	    enp->en_family == EFX_FAMILY_MEDFORD);
378 
379 	(void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
380 }
381 
/*
 * Re-arm (prime) the event queue by writing the new read pointer to the
 * EVQ RPTR register, allowing further interrupts to be raised.
 *
 * When the bug35388 workaround is active the full-width RPTR register is
 * not usable, so the pointer is written via the indirect access register
 * in two halves (high bits first, then low bits).
 *
 * Always returns 0.
 */
	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	/* Wrap the count to a valid queue index */
	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		/*
		 * The two half-writes below can only cover queue sizes
		 * representable in 2 * ERF_DD_EVQ_IND_RPTR_WIDTH bits.
		 */
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		/* High half of the read pointer */
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		/* Low half of the read pointer */
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		/* Single direct write of the read pointer */
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
422 
423 static	__checkReturn	efx_rc_t
424 efx_mcdi_driver_event(
425 	__in		efx_nic_t *enp,
426 	__in		uint32_t evq,
427 	__in		efx_qword_t data)
428 {
429 	efx_mcdi_req_t req;
430 	uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
431 			    MC_CMD_DRIVER_EVENT_OUT_LEN)];
432 	efx_rc_t rc;
433 
434 	req.emr_cmd = MC_CMD_DRIVER_EVENT;
435 	req.emr_in_buf = payload;
436 	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
437 	req.emr_out_buf = payload;
438 	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
439 
440 	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
441 
442 	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
443 	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
444 	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
445 	    EFX_QWORD_FIELD(data, EFX_DWORD_1));
446 
447 	efx_mcdi_execute(enp, &req);
448 
449 	if (req.emr_rc != 0) {
450 		rc = req.emr_rc;
451 		goto fail1;
452 	}
453 
454 	return (0);
455 
456 fail1:
457 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
458 
459 	return (rc);
460 }
461 
/*
 * Post a driver-generated software event (carrying 16-bit 'data') onto
 * this event queue.  On EF10 this is done via an MCDI request rather than
 * a direct register write; the MCDI status is ignored as the caller has
 * no way to report it.
 */
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
477 
478 	__checkReturn	efx_rc_t
479 ef10_ev_qmoderate(
480 	__in		efx_evq_t *eep,
481 	__in		unsigned int us)
482 {
483 	efx_nic_t *enp = eep->ee_enp;
484 	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
485 	efx_dword_t dword;
486 	uint32_t timer_ns, timer_val, mode;
487 	efx_rc_t rc;
488 
489 	/* Check that hardware and MCDI use the same timer MODE values */
490 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
491 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
492 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
493 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
494 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
495 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
496 	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
497 	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
498 
499 	if (us > encp->enc_evq_timer_max_us) {
500 		rc = EINVAL;
501 		goto fail1;
502 	}
503 
504 	/* If the value is zero then disable the timer */
505 	if (us == 0) {
506 		timer_ns = 0;
507 		mode = FFE_CZ_TIMER_MODE_DIS;
508 	} else {
509 		timer_ns = us * 1000u;
510 		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
511 	}
512 
513 	if (encp->enc_bug61265_workaround) {
514 		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, timer_ns);
515 		if (rc != 0)
516 			goto fail2;
517 	} else {
518 		/* Calculate the timer value in quanta */
519 		timer_val = timer_ns / encp->enc_evq_timer_quantum_ns;
520 
521 		/* Moderation value is base 0 so we need to deduct 1 */
522 		if (timer_val > 0)
523 			timer_val--;
524 
525 		if (encp->enc_bug35388_workaround) {
526 			EFX_POPULATE_DWORD_3(dword,
527 			    ERF_DD_EVQ_IND_TIMER_FLAGS,
528 			    EFE_DD_EVQ_IND_TIMER_FLAGS,
529 			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
530 			    ERF_DD_EVQ_IND_TIMER_VAL, timer_val);
531 			EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
532 			    eep->ee_index, &dword, 0);
533 		} else {
534 			EFX_POPULATE_DWORD_2(dword,
535 			    ERF_DZ_TC_TIMER_MODE, mode,
536 			    ERF_DZ_TC_TIMER_VAL, timer_val);
537 			EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
538 			    eep->ee_index, &dword, 0);
539 		}
540 	}
541 
542 	return (0);
543 
544 fail2:
545 	EFSYS_PROBE(fail2);
546 fail1:
547 	EFSYS_PROBE1(fail1, efx_rc_t, rc);
548 
549 	return (rc);
550 }
551 
552 
553 #if EFSYS_OPT_QSTATS
554 			void
555 ef10_ev_qstats_update(
556 	__in				efx_evq_t *eep,
557 	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
558 {
559 	unsigned int id;
560 
561 	for (id = 0; id < EV_NQSTATS; id++) {
562 		efsys_stat_t *essp = &stat[id];
563 
564 		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
565 		eep->ee_stat[id] = 0;
566 	}
567 }
568 #endif /* EFSYS_OPT_QSTATS */
569 
570 
/*
 * Decode an RX completion event and deliver it to the driver's eec_rx
 * callback.  Extracts packet size, queue label and protocol class fields,
 * translates error/class bits into EFX_PKT_* / EFX_CKSUM_* / EFX_DISCARD
 * flags, and advances the per-label descriptor read pointer.
 * Returns B_TRUE if the callback requested that event processing abort.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors */
	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
		return (B_FALSE);

	/* Basic packet information */
	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
	l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/* Increment the count of descriptors read */
	eersp = &eep->ee_rxq_state[label];
	/*
	 * The event carries only the low bits of the new descriptor pointer;
	 * the delta from the cached read pointer (modulo the field width)
	 * gives the number of descriptors consumed by this completion.
	 */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
		/* RX frame truncated (error flag is misnamed) */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	/* Map the hardware L3/L4 classification onto packet flags */
	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		if (l4_class == ESE_DZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	/* TCP/UDP checksum is only valid for recognised L4 classes */
	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}
736 
737 static	__checkReturn	boolean_t
738 ef10_ev_tx(
739 	__in		efx_evq_t *eep,
740 	__in		efx_qword_t *eqp,
741 	__in		const efx_ev_callbacks_t *eecp,
742 	__in_opt	void *arg)
743 {
744 	efx_nic_t *enp = eep->ee_enp;
745 	uint32_t id;
746 	uint32_t label;
747 	boolean_t should_abort;
748 
749 	EFX_EV_QSTAT_INCR(eep, EV_TX);
750 
751 	/* Discard events after RXQ/TXQ errors */
752 	if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
753 		return (B_FALSE);
754 
755 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
756 		/* Drop this event */
757 		return (B_FALSE);
758 	}
759 
760 	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
761 	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
762 	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
763 
764 	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
765 
766 	EFSYS_ASSERT(eecp->eec_tx != NULL);
767 	should_abort = eecp->eec_tx(arg, label, id);
768 
769 	return (should_abort);
770 }
771 
/*
 * Decode a driver event (timer expiry, wake-up, or start-up notification)
 * and dispatch it to the appropriate driver callback.  Unknown sub-codes
 * are logged via EFSYS_PROBE3 and otherwise ignored.
 * Returns B_TRUE if the callback requested that event processing abort.
 */
static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		/* Unrecognised sub-code: record and discard */
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
821 
822 static	__checkReturn	boolean_t
823 ef10_ev_drv_gen(
824 	__in		efx_evq_t *eep,
825 	__in		efx_qword_t *eqp,
826 	__in		const efx_ev_callbacks_t *eecp,
827 	__in_opt	void *arg)
828 {
829 	uint32_t data;
830 	boolean_t should_abort;
831 
832 	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
833 	should_abort = B_FALSE;
834 
835 	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
836 	if (data >= ((uint32_t)1 << 16)) {
837 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
838 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
839 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
840 
841 		return (B_TRUE);
842 	}
843 
844 	EFSYS_ASSERT(eecp->eec_software != NULL);
845 	should_abort = eecp->eec_software(arg, (uint16_t)data);
846 
847 	return (should_abort);
848 }
849 
/*
 * Decode an MCDI event from firmware and dispatch it: MCDI command
 * completions and MC death are routed to the MCDI module, while
 * link-change, sensor, MAC-stats, firmware-alert and RXQ/TXQ error/flush
 * notifications are routed to the driver's callbacks.
 * Returns B_TRUE if a callback requested that event processing abort.
 */
static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		/* Firmware has asserted; treat the MC as dead */
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of an asynchronous MCDI command */
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		/* Decode the new link mode and notify the driver */
		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not been seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		/* MAC statistics DMA complete; callback is optional */
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		/* Unrecognised event code: record and discard */
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
1049 
1050 		void
1051 ef10_ev_rxlabel_init(
1052 	__in		efx_evq_t *eep,
1053 	__in		efx_rxq_t *erp,
1054 	__in		unsigned int label)
1055 {
1056 	efx_evq_rxq_state_t *eersp;
1057 
1058 	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1059 	eersp = &eep->ee_rxq_state[label];
1060 
1061 	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
1062 
1063 	eersp->eers_rx_read_ptr = 0;
1064 	eersp->eers_rx_mask = erp->er_mask;
1065 }
1066 
1067 		void
1068 ef10_ev_rxlabel_fini(
1069 	__in		efx_evq_t *eep,
1070 	__in		unsigned int label)
1071 {
1072 	efx_evq_rxq_state_t *eersp;
1073 
1074 	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1075 	eersp = &eep->ee_rxq_state[label];
1076 
1077 	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
1078 
1079 	eersp->eers_rx_read_ptr = 0;
1080 	eersp->eers_rx_mask = 0;
1081 }
1082 
1083 #endif	/* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
1084