1f6078949SAndrew Rybchenko /*-
2929c7febSAndrew Rybchenko * Copyright (c) 2012-2016 Solarflare Communications Inc.
3f6078949SAndrew Rybchenko * All rights reserved.
4f6078949SAndrew Rybchenko *
5f6078949SAndrew Rybchenko * Redistribution and use in source and binary forms, with or without
6f6078949SAndrew Rybchenko * modification, are permitted provided that the following conditions are met:
7f6078949SAndrew Rybchenko *
8f6078949SAndrew Rybchenko * 1. Redistributions of source code must retain the above copyright notice,
9f6078949SAndrew Rybchenko * this list of conditions and the following disclaimer.
10f6078949SAndrew Rybchenko * 2. Redistributions in binary form must reproduce the above copyright notice,
11f6078949SAndrew Rybchenko * this list of conditions and the following disclaimer in the documentation
12f6078949SAndrew Rybchenko * and/or other materials provided with the distribution.
13f6078949SAndrew Rybchenko *
14f6078949SAndrew Rybchenko * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15f6078949SAndrew Rybchenko * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16f6078949SAndrew Rybchenko * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17f6078949SAndrew Rybchenko * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18f6078949SAndrew Rybchenko * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19f6078949SAndrew Rybchenko * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20f6078949SAndrew Rybchenko * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21f6078949SAndrew Rybchenko * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22f6078949SAndrew Rybchenko * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23f6078949SAndrew Rybchenko * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24f6078949SAndrew Rybchenko * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25f6078949SAndrew Rybchenko *
26f6078949SAndrew Rybchenko * The views and conclusions contained in the software and documentation are
27f6078949SAndrew Rybchenko * those of the authors and should not be interpreted as representing official
28f6078949SAndrew Rybchenko * policies, either expressed or implied, of the FreeBSD Project.
29f6078949SAndrew Rybchenko */
30f6078949SAndrew Rybchenko
31f6078949SAndrew Rybchenko #include <sys/cdefs.h>
32f6078949SAndrew Rybchenko #include "efx.h"
33f6078949SAndrew Rybchenko #include "efx_impl.h"
34f6078949SAndrew Rybchenko #if EFSYS_OPT_MON_STATS
35f6078949SAndrew Rybchenko #include "mcdi_mon.h"
36f6078949SAndrew Rybchenko #endif
37f6078949SAndrew Rybchenko
385095efd4SAndrew Rybchenko #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
39f6078949SAndrew Rybchenko
#if EFSYS_OPT_QSTATS
/*
 * Increment per-event-queue statistic _stat on event queue _eep.
 * Compiled out entirely when queue statistics are not enabled.
 */
#define	EFX_EV_QSTAT_INCR(_eep, _stat)					\
	do {								\
		(_eep)->ee_stat[_stat]++;				\
	_NOTE(CONSTANTCONDITION)					\
	} while (B_FALSE)
#else
#define	EFX_EV_QSTAT_INCR(_eep, _stat)
#endif
49f6078949SAndrew Rybchenko
/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
5582d2a148SAndrew Rybchenko #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
56f6078949SAndrew Rybchenko
/*
 * Forward declarations of the per-event-type handlers that
 * ef10_ev_qcreate() installs in the event queue's handler table.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
91f6078949SAndrew Rybchenko
92f6078949SAndrew Rybchenko static __checkReturn efx_rc_t
efx_mcdi_set_evq_tmr(__in efx_nic_t * enp,__in uint32_t instance,__in uint32_t mode,__in uint32_t timer_ns)93e26f5dacSAndrew Rybchenko efx_mcdi_set_evq_tmr(
94e26f5dacSAndrew Rybchenko __in efx_nic_t *enp,
95e26f5dacSAndrew Rybchenko __in uint32_t instance,
96e26f5dacSAndrew Rybchenko __in uint32_t mode,
97e26f5dacSAndrew Rybchenko __in uint32_t timer_ns)
98e26f5dacSAndrew Rybchenko {
99e26f5dacSAndrew Rybchenko efx_mcdi_req_t req;
100315bbbaaSAndrew Rybchenko EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
101315bbbaaSAndrew Rybchenko MC_CMD_SET_EVQ_TMR_OUT_LEN);
102e26f5dacSAndrew Rybchenko efx_rc_t rc;
103e26f5dacSAndrew Rybchenko
104e26f5dacSAndrew Rybchenko req.emr_cmd = MC_CMD_SET_EVQ_TMR;
105e26f5dacSAndrew Rybchenko req.emr_in_buf = payload;
106e26f5dacSAndrew Rybchenko req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
107e26f5dacSAndrew Rybchenko req.emr_out_buf = payload;
108e26f5dacSAndrew Rybchenko req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
109e26f5dacSAndrew Rybchenko
110e26f5dacSAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
111e26f5dacSAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
112e26f5dacSAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
113e26f5dacSAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
114e26f5dacSAndrew Rybchenko
115e26f5dacSAndrew Rybchenko efx_mcdi_execute(enp, &req);
116e26f5dacSAndrew Rybchenko
117e26f5dacSAndrew Rybchenko if (req.emr_rc != 0) {
118e26f5dacSAndrew Rybchenko rc = req.emr_rc;
119e26f5dacSAndrew Rybchenko goto fail1;
120e26f5dacSAndrew Rybchenko }
121e26f5dacSAndrew Rybchenko
122e26f5dacSAndrew Rybchenko if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
123e26f5dacSAndrew Rybchenko rc = EMSGSIZE;
124e26f5dacSAndrew Rybchenko goto fail2;
125e26f5dacSAndrew Rybchenko }
126e26f5dacSAndrew Rybchenko
127e26f5dacSAndrew Rybchenko return (0);
128e26f5dacSAndrew Rybchenko
129e26f5dacSAndrew Rybchenko fail2:
130e26f5dacSAndrew Rybchenko EFSYS_PROBE(fail2);
131e26f5dacSAndrew Rybchenko fail1:
132e26f5dacSAndrew Rybchenko EFSYS_PROBE1(fail1, efx_rc_t, rc);
133e26f5dacSAndrew Rybchenko
134e26f5dacSAndrew Rybchenko return (rc);
135e26f5dacSAndrew Rybchenko }
136e26f5dacSAndrew Rybchenko
/*
 * Create an event queue using the original (v1) INIT_EVQ MCDI request.
 *
 * esmp describes the host memory backing the queue of 'nevs' entries,
 * 'irq' is the function-relative interrupt vector, 'us' is the moderation
 * timer period in microseconds (0 disables the timer), 'flags' are
 * EFX_EVQ_FLAGS_* values, and 'low_latency' selects event cut-through when
 * the queue type is AUTO.  Returns 0 on success, else an errno value.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_OUT_LEN);
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	boolean_t interrupting;
	int ev_cut_through;
	efx_rc_t rc;

	/* Reject queue sizes whose DMA address list would overflow the
	 * maximum INIT_EVQ request length. */
	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

	/* Only queues created with NOTIFY_INTERRUPT raise interrupts. */
	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/*
	 * On Huntington RX and TX event batching can only be requested together
	 * (even if the datapath firmware doesn't actually support RX
	 * batching). If event cut through is enabled no RX batching will occur.
	 *
	 * So always enable RX and TX event batching, and enable event cut
	 * through if we want low latency operation.
	 */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		/* Caller decides via low_latency on this request version. */
		ev_cut_through = low_latency ? 1 : 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		ev_cut_through = 0;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		ev_cut_through = 1;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
	    INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
	    INIT_EVQ_IN_FLAG_RX_MERGE, 1,
	    INIT_EVQ_IN_FLAG_TX_MERGE, 1);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		/* Convert the moderation period to hardware timer ticks. */
		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
	}

	/* Event-count based moderation is not used. */
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	/* List the DMA addresses of the EFX_BUF_SIZE pages backing the EVQ. */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
272f6078949SAndrew Rybchenko
/*
 * Create an event queue using the v2 INIT_EVQ MCDI request.
 *
 * Unlike the v1 request, the queue type (AUTO/THROUGHPUT/LOW_LATENCY) is
 * passed through to the firmware, which selects the best merge/cut-through
 * settings itself.  Parameters match efx_mcdi_init_evq() except that no
 * 'low_latency' hint is needed.  Returns 0 on success, else an errno value.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_init_evq_v2(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload,
		MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
		MC_CMD_INIT_EVQ_V2_OUT_LEN);
	boolean_t interrupting;
	unsigned int evq_type;
	efx_qword_t *dma_addr;
	uint64_t addr;
	int npages;
	int i;
	efx_rc_t rc;

	/* Reject queue sizes whose DMA address list would overflow the
	 * maximum INIT_EVQ v2 request length. */
	npages = EFX_EVQ_NBUFS(nevs);
	if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_INIT_EVQ;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

	/* Only queues created with NOTIFY_INTERRUPT raise interrupts. */
	interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

	/* Map the generic queue type onto the v2 request's TYPE field. */
	switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
	case EFX_EVQ_FLAGS_TYPE_AUTO:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
		break;
	case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
		break;
	case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
		evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}
	MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
	    INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
	    INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
	    INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
	    INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
	} else {
		unsigned int ticks;

		/* Convert the moderation period to hardware timer ticks. */
		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
	}

	/* Event-count based moderation is not used. */
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
	    MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
	MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

	/* List the DMA addresses of the EFX_BUF_SIZE pages backing the EVQ. */
	dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
	addr = EFSYS_MEM_ADDR(esmp);

	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}

	if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail5;
	}

	/* NOTE: ignore the returned IRQ param as firmware does not set it. */

	/* Record the settings the firmware actually chose. */
	EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
	    MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
400995a3bf4SAndrew Rybchenko
401f6078949SAndrew Rybchenko static __checkReturn efx_rc_t
efx_mcdi_fini_evq(__in efx_nic_t * enp,__in uint32_t instance)402f6078949SAndrew Rybchenko efx_mcdi_fini_evq(
403f6078949SAndrew Rybchenko __in efx_nic_t *enp,
404f6078949SAndrew Rybchenko __in uint32_t instance)
405f6078949SAndrew Rybchenko {
406f6078949SAndrew Rybchenko efx_mcdi_req_t req;
407315bbbaaSAndrew Rybchenko EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
408315bbbaaSAndrew Rybchenko MC_CMD_FINI_EVQ_OUT_LEN);
409f6078949SAndrew Rybchenko efx_rc_t rc;
410f6078949SAndrew Rybchenko
411f6078949SAndrew Rybchenko req.emr_cmd = MC_CMD_FINI_EVQ;
412f6078949SAndrew Rybchenko req.emr_in_buf = payload;
413f6078949SAndrew Rybchenko req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
414f6078949SAndrew Rybchenko req.emr_out_buf = payload;
415f6078949SAndrew Rybchenko req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
416f6078949SAndrew Rybchenko
417f6078949SAndrew Rybchenko MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
418f6078949SAndrew Rybchenko
419bba8dcbcSAndrew Rybchenko efx_mcdi_execute_quiet(enp, &req);
420f6078949SAndrew Rybchenko
421f6078949SAndrew Rybchenko if (req.emr_rc != 0) {
422f6078949SAndrew Rybchenko rc = req.emr_rc;
423f6078949SAndrew Rybchenko goto fail1;
424f6078949SAndrew Rybchenko }
425f6078949SAndrew Rybchenko
426f6078949SAndrew Rybchenko return (0);
427f6078949SAndrew Rybchenko
428f6078949SAndrew Rybchenko fail1:
4297e1059e8SAndrew Rybchenko /*
4307e1059e8SAndrew Rybchenko * EALREADY is not an error, but indicates that the MC has rebooted and
4317e1059e8SAndrew Rybchenko * that the EVQ has already been destroyed.
4327e1059e8SAndrew Rybchenko */
4337e1059e8SAndrew Rybchenko if (rc != EALREADY)
434f6078949SAndrew Rybchenko EFSYS_PROBE1(fail1, efx_rc_t, rc);
435f6078949SAndrew Rybchenko
436f6078949SAndrew Rybchenko return (rc);
437f6078949SAndrew Rybchenko }
438f6078949SAndrew Rybchenko
439f6078949SAndrew Rybchenko __checkReturn efx_rc_t
ef10_ev_init(__in efx_nic_t * enp)440f6078949SAndrew Rybchenko ef10_ev_init(
441f6078949SAndrew Rybchenko __in efx_nic_t *enp)
442f6078949SAndrew Rybchenko {
443f6078949SAndrew Rybchenko _NOTE(ARGUNUSED(enp))
444f6078949SAndrew Rybchenko return (0);
445f6078949SAndrew Rybchenko }
446f6078949SAndrew Rybchenko
447f6078949SAndrew Rybchenko void
ef10_ev_fini(__in efx_nic_t * enp)448f6078949SAndrew Rybchenko ef10_ev_fini(
449f6078949SAndrew Rybchenko __in efx_nic_t *enp)
450f6078949SAndrew Rybchenko {
451f6078949SAndrew Rybchenko _NOTE(ARGUNUSED(enp))
452f6078949SAndrew Rybchenko }
453f6078949SAndrew Rybchenko
/*
 * Allocate and initialise event queue 'index' with 'ndescs' entries backed
 * by the DMA memory in esmp, and fill in the handler table in *eep.
 * 'us' is the moderation timer period in microseconds (0 = disabled) and
 * 'flags' are EFX_EVQ_FLAGS_* values.  'id' is unused because the MC
 * manages buffer tables on EF10.  Returns 0 on success, else an errno.
 */
	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
	EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

	/* The queue size must be a power of two within the supported range. */
	if (!ISP2(ndescs) ||
	    (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
		rc = EINVAL;
		goto fail1;
	}

	if (index >= encp->enc_evq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail3;
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		/* Interrupting queues use their own vector. */
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		/*
		 * Queue 0 must always be created interrupting so that
		 * non-interrupting queues have somewhere to direct
		 * wake-up events (see the comment on the constant).
		 */
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		/* Non-interrupting queues refer to the always-interrupting
		 * queue for wake-ups. */
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	if (encp->enc_init_evq_v2_supported) {
		/*
		 * On Medford the low latency license is required to enable RX
		 * and event cut through and to disable RX batching. If event
		 * queue type in flags is auto, we let the firmware decide the
		 * settings to use. If the adapter has a low latency license,
		 * it will choose the best settings for low latency, otherwise
		 * it will choose the best settings for throughput.
		 */
		rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
		    flags);
		if (rc != 0)
			goto fail4;
	} else {
		/*
		 * On Huntington we need to specify the settings to use.
		 * If event queue type in flags is auto, we favour throughput
		 * if the adapter is running virtualization supporting firmware
		 * (i.e. the full featured firmware variant)
		 * and latency otherwise. The Ethernet Virtual Bridging
		 * capability is used to make this decision. (Note though that
		 * the low latency firmware variant is also best for
		 * throughput and corresponding type should be specified
		 * to choose it.)
		 */
		boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
		rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
		    low_latency);
		if (rc != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
561f6078949SAndrew Rybchenko
562f6078949SAndrew Rybchenko void
ef10_ev_qdestroy(__in efx_evq_t * eep)563f6078949SAndrew Rybchenko ef10_ev_qdestroy(
564f6078949SAndrew Rybchenko __in efx_evq_t *eep)
565f6078949SAndrew Rybchenko {
566f6078949SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
567f6078949SAndrew Rybchenko
568f6078949SAndrew Rybchenko EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
5695095efd4SAndrew Rybchenko enp->en_family == EFX_FAMILY_MEDFORD ||
5705095efd4SAndrew Rybchenko enp->en_family == EFX_FAMILY_MEDFORD2);
571f6078949SAndrew Rybchenko
572cc8d2b23SAndrew Rybchenko (void) efx_mcdi_fini_evq(enp, eep->ee_index);
573f6078949SAndrew Rybchenko }
574f6078949SAndrew Rybchenko
/*
 * Re-prime the event queue: acknowledge events up to 'count' by writing
 * the read pointer to hardware, re-arming the queue for notifications.
 * Always returns 0.
 */
	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	/* The read pointer wraps at the queue size (ee_mask = size - 1). */
	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		/*
		 * Workaround for bug 35388: write the read pointer via the
		 * indirect EVQ register in two halves, high bits first then
		 * low bits.  The static asserts check that the pointer fits
		 * in exactly two RPTR-width fields.
		 */
		EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		/* Upper half of the read pointer. */
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		/* Lower half of the read pointer. */
		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		/* Write the read pointer directly to the EVQ RPTR register. */
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
615f6078949SAndrew Rybchenko
/*
 * Ask the MC (via MC_CMD_DRIVER_EVENT) to generate an event on event
 * queue `evq` carrying the supplied 64-bit payload.  Used by
 * ef10_ev_qpost() to post driver-generated events.
 *
 * Returns 0 on success or the MCDI error code on failure.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
	    MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	/* The 64-bit event payload is split across two 32-bit fields. */
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
654f6078949SAndrew Rybchenko
/*
 * Post a driver-generated event carrying a 16-bit payload to the event
 * queue.  The event is constructed with the DRV_GEN event code and
 * delivered by asking the MC to generate it (efx_mcdi_driver_event).
 * Any MCDI error is deliberately ignored since qpost returns void.
 */
	void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
670f6078949SAndrew Rybchenko
/*
 * Set the event queue timer (interrupt holdoff / moderation) to `us`
 * microseconds.  A value of zero disables the timer.
 *
 * Returns 0 on success, EINVAL if `us` exceeds the NIC's supported
 * maximum, or an error from the MCDI / tick-conversion paths.
 */
	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		/*
		 * bug61265 workaround: program the timer via MCDI, which
		 * takes the period in nanoseconds rather than ticks.
		 */
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		/* Direct register write: convert microseconds to ticks. */
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			/* bug35388: timer must be set via the indirect register */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/*
			 * NOTE: The TMR_REL field introduced in Medford2 is
			 * ignored on earlier EF10 controllers. See bug66418
			 * comment 9 for details.
			 */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks,
			    ERF_FZ_TC_TMR_REL_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
750f6078949SAndrew Rybchenko
751f6078949SAndrew Rybchenko #if EFSYS_OPT_QSTATS
752f6078949SAndrew Rybchenko void
ef10_ev_qstats_update(__in efx_evq_t * eep,__inout_ecount (EV_NQSTATS)efsys_stat_t * stat)753f6078949SAndrew Rybchenko ef10_ev_qstats_update(
754f6078949SAndrew Rybchenko __in efx_evq_t *eep,
755f6078949SAndrew Rybchenko __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
756f6078949SAndrew Rybchenko {
757f6078949SAndrew Rybchenko unsigned int id;
758f6078949SAndrew Rybchenko
759f6078949SAndrew Rybchenko for (id = 0; id < EV_NQSTATS; id++) {
760f6078949SAndrew Rybchenko efsys_stat_t *essp = &stat[id];
761f6078949SAndrew Rybchenko
762f6078949SAndrew Rybchenko EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
763f6078949SAndrew Rybchenko eep->ee_stat[id] = 0;
764f6078949SAndrew Rybchenko }
765f6078949SAndrew Rybchenko }
766f6078949SAndrew Rybchenko #endif /* EFSYS_OPT_QSTATS */
767f6078949SAndrew Rybchenko
76804381b5eSAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
7698e0c4827SAndrew Rybchenko
7708e0c4827SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_rx_packed_stream(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)7718e0c4827SAndrew Rybchenko ef10_ev_rx_packed_stream(
7728e0c4827SAndrew Rybchenko __in efx_evq_t *eep,
7738e0c4827SAndrew Rybchenko __in efx_qword_t *eqp,
7748e0c4827SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
7758e0c4827SAndrew Rybchenko __in_opt void *arg)
7768e0c4827SAndrew Rybchenko {
7778e0c4827SAndrew Rybchenko uint32_t label;
7788e0c4827SAndrew Rybchenko uint32_t pkt_count_lbits;
7798e0c4827SAndrew Rybchenko uint16_t flags;
7808e0c4827SAndrew Rybchenko boolean_t should_abort;
7818e0c4827SAndrew Rybchenko efx_evq_rxq_state_t *eersp;
7828e0c4827SAndrew Rybchenko unsigned int pkt_count;
7838e0c4827SAndrew Rybchenko unsigned int current_id;
7848e0c4827SAndrew Rybchenko boolean_t new_buffer;
7858e0c4827SAndrew Rybchenko
7868e0c4827SAndrew Rybchenko pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
7878e0c4827SAndrew Rybchenko label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
7888e0c4827SAndrew Rybchenko new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
7898e0c4827SAndrew Rybchenko
7908e0c4827SAndrew Rybchenko flags = 0;
7918e0c4827SAndrew Rybchenko
7928e0c4827SAndrew Rybchenko eersp = &eep->ee_rxq_state[label];
7938e0c4827SAndrew Rybchenko
7948e0c4827SAndrew Rybchenko /*
7958e0c4827SAndrew Rybchenko * RX_DSC_PTR_LBITS has least significant bits of the global
7968e0c4827SAndrew Rybchenko * (not per-buffer) packet counter. It is guaranteed that
7978e0c4827SAndrew Rybchenko * maximum number of completed packets fits in lbits-mask.
7988e0c4827SAndrew Rybchenko * So, modulo lbits-mask arithmetic should be used to calculate
7998e0c4827SAndrew Rybchenko * packet counter increment.
8008e0c4827SAndrew Rybchenko */
8018e0c4827SAndrew Rybchenko pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
8028e0c4827SAndrew Rybchenko EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
8038e0c4827SAndrew Rybchenko eersp->eers_rx_stream_npackets += pkt_count;
8048e0c4827SAndrew Rybchenko
8058e0c4827SAndrew Rybchenko if (new_buffer) {
8068e0c4827SAndrew Rybchenko flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
80704381b5eSAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
80804381b5eSAndrew Rybchenko /*
80904381b5eSAndrew Rybchenko * If both packed stream and equal stride super-buffer
81004381b5eSAndrew Rybchenko * modes are compiled in, in theory credits should be
81104381b5eSAndrew Rybchenko * be maintained for packed stream only, but right now
81204381b5eSAndrew Rybchenko * these modes are not distinguished in the event queue
81304381b5eSAndrew Rybchenko * Rx queue state and it is OK to increment the counter
81404381b5eSAndrew Rybchenko * regardless (it might be event cheaper than branching
81504381b5eSAndrew Rybchenko * since neighbour structure member are updated as well).
81604381b5eSAndrew Rybchenko */
8178e0c4827SAndrew Rybchenko eersp->eers_rx_packed_stream_credits++;
81804381b5eSAndrew Rybchenko #endif
8198e0c4827SAndrew Rybchenko eersp->eers_rx_read_ptr++;
8208e0c4827SAndrew Rybchenko }
8218e0c4827SAndrew Rybchenko current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
8228e0c4827SAndrew Rybchenko
8238e0c4827SAndrew Rybchenko /* Check for errors that invalidate checksum and L3/L4 fields */
8248bff5a20SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
8258bff5a20SAndrew Rybchenko /* RX frame truncated */
8268e0c4827SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
8278e0c4827SAndrew Rybchenko flags |= EFX_DISCARD;
8288e0c4827SAndrew Rybchenko goto deliver;
8298e0c4827SAndrew Rybchenko }
8308e0c4827SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
8318e0c4827SAndrew Rybchenko /* Bad Ethernet frame CRC */
8328e0c4827SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
8338e0c4827SAndrew Rybchenko flags |= EFX_DISCARD;
8348e0c4827SAndrew Rybchenko goto deliver;
8358e0c4827SAndrew Rybchenko }
8368e0c4827SAndrew Rybchenko
8378e0c4827SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
8388e0c4827SAndrew Rybchenko flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
8398e0c4827SAndrew Rybchenko goto deliver;
8408e0c4827SAndrew Rybchenko }
8418e0c4827SAndrew Rybchenko
8428e0c4827SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
8438e0c4827SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
8448e0c4827SAndrew Rybchenko
8458e0c4827SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
8468e0c4827SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
8478e0c4827SAndrew Rybchenko
8488e0c4827SAndrew Rybchenko deliver:
8498e0c4827SAndrew Rybchenko /* If we're not discarding the packet then it is ok */
8508e0c4827SAndrew Rybchenko if (~flags & EFX_DISCARD)
8518e0c4827SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
8528e0c4827SAndrew Rybchenko
8538e0c4827SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
8548e0c4827SAndrew Rybchenko should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
8558e0c4827SAndrew Rybchenko flags);
8568e0c4827SAndrew Rybchenko
8578e0c4827SAndrew Rybchenko return (should_abort);
8588e0c4827SAndrew Rybchenko }
8598e0c4827SAndrew Rybchenko
86004381b5eSAndrew Rybchenko #endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
861f6078949SAndrew Rybchenko
/*
 * Handle an EF10 RX completion event: decode size, queue label,
 * protocol classes and error bits from the event, update the per-queue
 * descriptor read pointer, and report the completion (with EFX_PKT_*
 * / EFX_CKSUM_* / EFX_DISCARD flags) to the client's eec_rx callback.
 * Returns the callback's abort indication, or B_FALSE when the event
 * is discarded.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	/*
	 * Packed stream events are very different,
	 * so handle them separately
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

	/*
	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
	 * and values for all EF10 controllers.
	 */
	EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

	l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/*
	 * Increment the count of descriptors read.  The event carries only
	 * the low bits of the descriptor pointer, so the delta is computed
	 * modulo the lbits mask.
	 */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/*
	 * FIXME: add error checking to make sure this a batched event.
	 * This could also be an aborted scatter, see Bug36629.
	 */
	if (desc_count > 1) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		/* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	/* Classify L3/L4 and record checksum validity in flags. */
	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}
1073f6078949SAndrew Rybchenko
1074f6078949SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_tx(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)1075f6078949SAndrew Rybchenko ef10_ev_tx(
1076f6078949SAndrew Rybchenko __in efx_evq_t *eep,
1077f6078949SAndrew Rybchenko __in efx_qword_t *eqp,
1078f6078949SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
1079f6078949SAndrew Rybchenko __in_opt void *arg)
1080f6078949SAndrew Rybchenko {
1081f6078949SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
1082f6078949SAndrew Rybchenko uint32_t id;
1083f6078949SAndrew Rybchenko uint32_t label;
1084f6078949SAndrew Rybchenko boolean_t should_abort;
1085f6078949SAndrew Rybchenko
1086f6078949SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_TX);
1087f6078949SAndrew Rybchenko
1088*c6d5e85dSAndrew Rybchenko /* Discard events after RXQ/TXQ errors, or hardware not available */
1089*c6d5e85dSAndrew Rybchenko if (enp->en_reset_flags &
1090*c6d5e85dSAndrew Rybchenko (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
1091f6078949SAndrew Rybchenko return (B_FALSE);
1092f6078949SAndrew Rybchenko
1093f6078949SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
1094f6078949SAndrew Rybchenko /* Drop this event */
1095f6078949SAndrew Rybchenko return (B_FALSE);
1096f6078949SAndrew Rybchenko }
1097f6078949SAndrew Rybchenko
1098f6078949SAndrew Rybchenko /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
1099f6078949SAndrew Rybchenko id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
1100f6078949SAndrew Rybchenko label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
1101f6078949SAndrew Rybchenko
1102f6078949SAndrew Rybchenko EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
1103f6078949SAndrew Rybchenko
1104f6078949SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_tx != NULL);
1105f6078949SAndrew Rybchenko should_abort = eecp->eec_tx(arg, label, id);
1106f6078949SAndrew Rybchenko
1107f6078949SAndrew Rybchenko return (should_abort);
1108f6078949SAndrew Rybchenko }
1109f6078949SAndrew Rybchenko
1110f6078949SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_driver(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)1111f6078949SAndrew Rybchenko ef10_ev_driver(
1112f6078949SAndrew Rybchenko __in efx_evq_t *eep,
1113f6078949SAndrew Rybchenko __in efx_qword_t *eqp,
1114f6078949SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
1115f6078949SAndrew Rybchenko __in_opt void *arg)
1116f6078949SAndrew Rybchenko {
1117f6078949SAndrew Rybchenko unsigned int code;
1118f6078949SAndrew Rybchenko boolean_t should_abort;
1119f6078949SAndrew Rybchenko
1120f6078949SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
1121f6078949SAndrew Rybchenko should_abort = B_FALSE;
1122f6078949SAndrew Rybchenko
1123f6078949SAndrew Rybchenko code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
1124f6078949SAndrew Rybchenko switch (code) {
1125f6078949SAndrew Rybchenko case ESE_DZ_DRV_TIMER_EV: {
1126f6078949SAndrew Rybchenko uint32_t id;
1127f6078949SAndrew Rybchenko
1128f6078949SAndrew Rybchenko id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
1129f6078949SAndrew Rybchenko
1130f6078949SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_timer != NULL);
1131f6078949SAndrew Rybchenko should_abort = eecp->eec_timer(arg, id);
1132f6078949SAndrew Rybchenko break;
1133f6078949SAndrew Rybchenko }
1134f6078949SAndrew Rybchenko
1135f6078949SAndrew Rybchenko case ESE_DZ_DRV_WAKE_UP_EV: {
1136f6078949SAndrew Rybchenko uint32_t id;
1137f6078949SAndrew Rybchenko
1138f6078949SAndrew Rybchenko id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
1139f6078949SAndrew Rybchenko
1140f6078949SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_wake_up != NULL);
1141f6078949SAndrew Rybchenko should_abort = eecp->eec_wake_up(arg, id);
1142f6078949SAndrew Rybchenko break;
1143f6078949SAndrew Rybchenko }
1144f6078949SAndrew Rybchenko
1145f6078949SAndrew Rybchenko case ESE_DZ_DRV_START_UP_EV:
1146f6078949SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_initialized != NULL);
1147f6078949SAndrew Rybchenko should_abort = eecp->eec_initialized(arg);
1148f6078949SAndrew Rybchenko break;
1149f6078949SAndrew Rybchenko
1150f6078949SAndrew Rybchenko default:
1151f6078949SAndrew Rybchenko EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1152f6078949SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1153f6078949SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1154f6078949SAndrew Rybchenko break;
1155f6078949SAndrew Rybchenko }
1156f6078949SAndrew Rybchenko
1157f6078949SAndrew Rybchenko return (should_abort);
1158f6078949SAndrew Rybchenko }
1159f6078949SAndrew Rybchenko
1160f6078949SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_drv_gen(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)1161f6078949SAndrew Rybchenko ef10_ev_drv_gen(
1162f6078949SAndrew Rybchenko __in efx_evq_t *eep,
1163f6078949SAndrew Rybchenko __in efx_qword_t *eqp,
1164f6078949SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
1165f6078949SAndrew Rybchenko __in_opt void *arg)
1166f6078949SAndrew Rybchenko {
1167f6078949SAndrew Rybchenko uint32_t data;
1168f6078949SAndrew Rybchenko boolean_t should_abort;
1169f6078949SAndrew Rybchenko
1170f6078949SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
1171f6078949SAndrew Rybchenko should_abort = B_FALSE;
1172f6078949SAndrew Rybchenko
1173f6078949SAndrew Rybchenko data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
1174f6078949SAndrew Rybchenko if (data >= ((uint32_t)1 << 16)) {
1175f6078949SAndrew Rybchenko EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
1176f6078949SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
1177f6078949SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
1178f6078949SAndrew Rybchenko
1179f6078949SAndrew Rybchenko return (B_TRUE);
1180f6078949SAndrew Rybchenko }
1181f6078949SAndrew Rybchenko
1182f6078949SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_software != NULL);
1183f6078949SAndrew Rybchenko should_abort = eecp->eec_software(arg, (uint16_t)data);
1184f6078949SAndrew Rybchenko
1185f6078949SAndrew Rybchenko return (should_abort);
1186f6078949SAndrew Rybchenko }
1187f6078949SAndrew Rybchenko
/*
 * Handle an MCDI event: a notification from the management controller
 * delivered via the event queue (command completion, link change, sensor
 * reading, MC reboot, TX/RX queue error and flush events, etc.).
 *
 * Dispatches on MCDI_EVENT_CODE and invokes the matching driver callback
 * from eecp.  Returns B_TRUE if event queue processing should be aborted.
 */
static	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		/* MC firmware has asserted; mark the MCDI channel dead. */
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of an asynchronous MCDI command. */
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		/* Decode the new PHY link mode and notify the driver. */
		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			/* Unrecognised sensor: surface as an exception. */
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		/* Without monitor stats support the event is ignored. */
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		/* MAC statistics DMA completed; callback is optional. */
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		/* Firmware alert; only SRAM-access alerts are recognised. */
		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		/* Unrecognised MCDI event: record it and carry on. */
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
1387f6078949SAndrew Rybchenko
/*
 * Bind an RX queue to an event queue label: initialise the per-EVQ RX
 * queue state (read pointer, descriptor mask and, when compiled in,
 * packed-stream bookkeeping) used when demultiplexing RX events.
 */
void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

	/* 'type' is only consulted when packed-stream support is built in. */
	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	/* The label slot must not already be bound to an RX queue. */
	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will
	 * have a new buffer flag set, so it will be incremented,
	 * yielding the correct pointer. That results in a simpler
	 * code than trying to detect start-of-the-world condition
	 * in the event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	if (packed_stream) {
		/* Initial credit budget derived from the event queue size. */
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is started.
		 * It is immediately spent by the first packet which has NEW
		 * BUFFER flag set, though, but still we shall take into
		 * account, as to not wrap around the maximum number of credits
		 * accidentally
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#endif
}
1443f6078949SAndrew Rybchenko
1444f6078949SAndrew Rybchenko void
ef10_ev_rxlabel_fini(__in efx_evq_t * eep,__in unsigned int label)1445f6078949SAndrew Rybchenko ef10_ev_rxlabel_fini(
1446f6078949SAndrew Rybchenko __in efx_evq_t *eep,
1447f6078949SAndrew Rybchenko __in unsigned int label)
1448f6078949SAndrew Rybchenko {
1449f6078949SAndrew Rybchenko efx_evq_rxq_state_t *eersp;
1450f6078949SAndrew Rybchenko
1451f6078949SAndrew Rybchenko EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
1452f6078949SAndrew Rybchenko eersp = &eep->ee_rxq_state[label];
1453f6078949SAndrew Rybchenko
1454f6078949SAndrew Rybchenko EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
1455f6078949SAndrew Rybchenko
1456f6078949SAndrew Rybchenko eersp->eers_rx_read_ptr = 0;
1457f6078949SAndrew Rybchenko eersp->eers_rx_mask = 0;
145804381b5eSAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
14598e0c4827SAndrew Rybchenko eersp->eers_rx_stream_npackets = 0;
14608e0c4827SAndrew Rybchenko eersp->eers_rx_packed_stream = B_FALSE;
146104381b5eSAndrew Rybchenko #endif
146204381b5eSAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
14638e0c4827SAndrew Rybchenko eersp->eers_rx_packed_stream_credits = 0;
14648e0c4827SAndrew Rybchenko #endif
1465f6078949SAndrew Rybchenko }
1466f6078949SAndrew Rybchenko
14675095efd4SAndrew Rybchenko #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
1468