/*-
 * Copyright (c) 2012-2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are
 * those of the authors and should not be interpreted as representing official
 * policies, either expressed or implied, of the FreeBSD Project.
 */

#include <sys/cdefs.h>
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2

#if EFSYS_OPT_QSTATS
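/*
 * Statement-safe increment: the do { } while (B_FALSE) wrapper (with a
 * CONSTANTCONDITION annotation for lint) lets the macro be used anywhere a
 * single statement is expected, e.g. in an unbraced if/else.
 */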
#define EFX_EV_QSTAT_INCR(_eep, _stat)          \
    do {                                        \
        (_eep)->ee_stat[_stat]++;               \
        _NOTE(CONSTANTCONDITION)                \
    } while (B_FALSE)
#else
#define EFX_EV_QSTAT_INCR(_eep, _stat)
#endif

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * The referenced queue may even be unallocated.
 */
#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)

static __checkReturn boolean_t
ef10_ev_rx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_tx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_driver(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_drv_gen(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn boolean_t
ef10_ev_mcdi(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

static __checkReturn efx_rc_t
efx_mcdi_set_evq_tmr(
    __in efx_nic_t *enp,
    __in uint32_t instance,
    __in uint32_t mode,
    __in uint32_t timer_ns)
{
    efx_mcdi_req_t req;
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
        MC_CMD_SET_EVQ_TMR_OUT_LEN);
    efx_rc_t rc;

    req.emr_cmd = MC_CMD_SET_EVQ_TMR;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
    MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail1;
    }

    if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
        rc = EMSGSIZE;
        goto fail2;
    }

    return (0);

fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

static __checkReturn efx_rc_t
efx_mcdi_init_evq(
    __in efx_nic_t *enp,
    __in unsigned int instance,
    __in efsys_mem_t *esmp,
    __in size_t nevs,
    __in uint32_t irq,
    __in uint32_t us,
    __in uint32_t flags,
    __in boolean_t low_latency)
{
    efx_mcdi_req_t req;
    EFX_MCDI_DECLARE_BUF(payload,
        MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
        MC_CMD_INIT_EVQ_OUT_LEN);
    efx_qword_t *dma_addr;
    uint64_t addr;
    int npages;
    int i;
    boolean_t interrupting;
    int ev_cut_through;
    efx_rc_t rc;

    npages = EFX_EVQ_NBUFS(nevs);
    if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
        rc = EINVAL;
        goto fail1;
    }

    req.emr_cmd = MC_CMD_INIT_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;

    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);

    interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

    /*
     * On Huntington, RX and TX event batching can only be requested
     * together (even if the datapath firmware doesn't actually support RX
     * batching). If event cut-through is enabled, no RX batching will
     * occur.
     *
     * So always enable RX and TX event batching, and enable event
     * cut-through if we want low latency operation.
     */
    switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
    case EFX_EVQ_FLAGS_TYPE_AUTO:
        ev_cut_through = low_latency ? 1 : 0;
        break;
    case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
        ev_cut_through = 0;
        break;
    case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
        ev_cut_through = 1;
        break;
    default:
        rc = EINVAL;
        goto fail2;
    }
    MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
        INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
        INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
        INIT_EVQ_IN_FLAG_INT_ARMD, 0,
        INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
        INIT_EVQ_IN_FLAG_RX_MERGE, 1,
        INIT_EVQ_IN_FLAG_TX_MERGE, 1);

    /* If the value is zero then disable the timer */
    if (us == 0) {
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail3;

        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
    }

    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
        MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);

    dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
    addr = EFSYS_MEM_ADDR(esmp);

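    /*
     * Build the DMA address table for the event queue memory: one 64-bit
     * address per EFX_BUF_SIZE chunk, with each address split into low
     * and high dwords in the layout the MC expects.
     */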
    for (i = 0; i < npages; i++) {
        EFX_POPULATE_QWORD_2(*dma_addr,
            EFX_DWORD_1, (uint32_t)(addr >> 32),
            EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

        dma_addr++;
        addr += EFX_BUF_SIZE;
    }

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail4;
    }

    if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
        rc = EMSGSIZE;
        goto fail5;
    }

    /* NOTE: ignore the returned IRQ param as firmware does not set it. */

    return (0);

fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

static __checkReturn efx_rc_t
efx_mcdi_init_evq_v2(
    __in efx_nic_t *enp,
    __in unsigned int instance,
    __in efsys_mem_t *esmp,
    __in size_t nevs,
    __in uint32_t irq,
    __in uint32_t us,
    __in uint32_t flags)
{
    efx_mcdi_req_t req;
    EFX_MCDI_DECLARE_BUF(payload,
        MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
        MC_CMD_INIT_EVQ_V2_OUT_LEN);
    boolean_t interrupting;
    unsigned int evq_type;
    efx_qword_t *dma_addr;
    uint64_t addr;
    int npages;
    int i;
    efx_rc_t rc;

    npages = EFX_EVQ_NBUFS(nevs);
    if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
        rc = EINVAL;
        goto fail1;
    }

    req.emr_cmd = MC_CMD_INIT_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;

    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);

    interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

    switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
    case EFX_EVQ_FLAGS_TYPE_AUTO:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
        break;
    case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
        break;
    case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
        evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
        break;
    default:
        rc = EINVAL;
        goto fail2;
    }
    MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
        INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
        INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
        INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
        INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);

    /* If the value is zero then disable the timer */
    if (us == 0) {
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail3;

        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
            MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
        MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
    }

    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
        MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
    MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);

    dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
    addr = EFSYS_MEM_ADDR(esmp);

    for (i = 0; i < npages; i++) {
        EFX_POPULATE_QWORD_2(*dma_addr,
            EFX_DWORD_1, (uint32_t)(addr >> 32),
            EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

        dma_addr++;
        addr += EFX_BUF_SIZE;
    }

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail4;
    }

    if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
        rc = EMSGSIZE;
        goto fail5;
    }

    /* NOTE: ignore the returned IRQ param as firmware does not set it. */

    EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
        MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));

    return (0);

fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

static __checkReturn efx_rc_t
efx_mcdi_fini_evq(
    __in efx_nic_t *enp,
    __in uint32_t instance)
{
    efx_mcdi_req_t req;
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
        MC_CMD_FINI_EVQ_OUT_LEN);
    efx_rc_t rc;

    req.emr_cmd = MC_CMD_FINI_EVQ;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;

    MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail1;
    }

    return (0);

fail1:
    /*
     * EALREADY is not an error, but indicates that the MC has rebooted
     * and that the EVQ has already been destroyed.
     */
    if (rc != EALREADY)
        EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

__checkReturn efx_rc_t
ef10_ev_init(
    __in efx_nic_t *enp)
{
    _NOTE(ARGUNUSED(enp))
    return (0);
}

void
ef10_ev_fini(
    __in efx_nic_t *enp)
{
    _NOTE(ARGUNUSED(enp))
}

__checkReturn efx_rc_t
ef10_ev_qcreate(
    __in efx_nic_t *enp,
    __in unsigned int index,
    __in efsys_mem_t *esmp,
    __in size_t ndescs,
    __in uint32_t id,
    __in uint32_t us,
    __in uint32_t flags,
    __in efx_evq_t *eep)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    uint32_t irq;
    efx_rc_t rc;

    _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
    EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
    EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));

    if (!ISP2(ndescs) ||
        (ndescs < EFX_EVQ_MINNEVS) || (ndescs > EFX_EVQ_MAXNEVS)) {
        rc = EINVAL;
        goto fail1;
    }

    if (index >= encp->enc_evq_limit) {
        rc = EINVAL;
        goto fail2;
    }

    if (us > encp->enc_evq_timer_max_us) {
        rc = EINVAL;
        goto fail3;
    }

    /* Set up the handler table */
    eep->ee_rx = ef10_ev_rx;
    eep->ee_tx = ef10_ev_tx;
    eep->ee_driver = ef10_ev_driver;
    eep->ee_drv_gen = ef10_ev_drv_gen;
    eep->ee_mcdi = ef10_ev_mcdi;

    /* Set up the event queue */
    /* INIT_EVQ expects function-relative vector number */
    if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
        irq = index;
    } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
        irq = index;
        flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
            EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
    } else {
        irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
    }

    /*
     * Interrupts may be raised for events immediately after the queue is
     * created. See bug58606.
     */

    if (encp->enc_init_evq_v2_supported) {
        /*
         * On Medford the low latency license is required to enable RX
         * and event cut-through and to disable RX batching. If the
         * event queue type in flags is auto, we let the firmware
         * decide the settings to use. If the adapter has a low latency
         * license, it will choose the best settings for low latency,
         * otherwise it will choose the best settings for throughput.
         */
        rc = efx_mcdi_init_evq_v2(enp, index, esmp, ndescs, irq, us,
            flags);
        if (rc != 0)
            goto fail4;
    } else {
        /*
         * On Huntington we need to specify the settings to use.
         * If the event queue type in flags is auto, we favour
         * throughput if the adapter is running virtualization-
         * supporting firmware (i.e. the full-featured firmware
         * variant), and low latency otherwise. The Ethernet Virtual
         * Bridging capability is used to make this decision. (Note
         * though that the low latency firmware variant is also best
         * for throughput, and the corresponding type should be
         * specified to choose it.)
         */
        boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
        rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
            low_latency);
        if (rc != 0)
            goto fail5;
    }

    return (0);

fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

void
ef10_ev_qdestroy(
    __in efx_evq_t *eep)
{
    efx_nic_t *enp = eep->ee_enp;

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    (void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

__checkReturn efx_rc_t
ef10_ev_qprime(
    __in efx_evq_t *eep,
    __in unsigned int count)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t rptr;
    efx_dword_t dword;

    rptr = count & eep->ee_mask;

    if (enp->en_nic_cfg.enc_bug35388_workaround) {
        EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
            (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
        EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
            (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

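        /*
         * The indirect register accepts only ERF_DD_EVQ_IND_RPTR_WIDTH
         * bits of read pointer per write, so post the pointer in two
         * halves: high bits first, then low bits.
         */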
        EFX_POPULATE_DWORD_2(dword,
            ERF_DD_EVQ_IND_RPTR_FLAGS,
            EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
            ERF_DD_EVQ_IND_RPTR,
            (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
        EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
            &dword, B_FALSE);

        EFX_POPULATE_DWORD_2(dword,
            ERF_DD_EVQ_IND_RPTR_FLAGS,
            EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
            ERF_DD_EVQ_IND_RPTR,
            rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
        EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
            &dword, B_FALSE);
    } else {
        EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
        EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
            &dword, B_FALSE);
    }

    return (0);
}

static __checkReturn efx_rc_t
efx_mcdi_driver_event(
    __in efx_nic_t *enp,
    __in uint32_t evq,
    __in efx_qword_t data)
{
    efx_mcdi_req_t req;
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
        MC_CMD_DRIVER_EVENT_OUT_LEN);
    efx_rc_t rc;

    req.emr_cmd = MC_CMD_DRIVER_EVENT;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
        EFX_QWORD_FIELD(data, EFX_DWORD_0));
    MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
        EFX_QWORD_FIELD(data, EFX_DWORD_1));

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {
        rc = req.emr_rc;
        goto fail1;
    }

    return (0);

fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

void
ef10_ev_qpost(
    __in efx_evq_t *eep,
    __in uint16_t data)
{
    efx_nic_t *enp = eep->ee_enp;
    efx_qword_t event;

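    /*
     * Software ("driver generated") events are posted via the MC on
     * EF10: build the event here and have the MC deliver it to this
     * queue with MC_CMD_DRIVER_EVENT.
     */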
    EFX_POPULATE_QWORD_3(event,
        ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
        ESF_DZ_DRV_SUB_CODE, 0,
        ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

    (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

__checkReturn efx_rc_t
ef10_ev_qmoderate(
    __in efx_evq_t *eep,
    __in unsigned int us)
{
    efx_nic_t *enp = eep->ee_enp;
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    efx_dword_t dword;
    uint32_t mode;
    efx_rc_t rc;

    /* Check that hardware and MCDI use the same timer MODE values */
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
    EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
        MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

    if (us > encp->enc_evq_timer_max_us) {
        rc = EINVAL;
        goto fail1;
    }

    /* If the value is zero then disable the timer */
    if (us == 0) {
        mode = FFE_CZ_TIMER_MODE_DIS;
    } else {
        mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
    }

    if (encp->enc_bug61265_workaround) {
        uint32_t ns = us * 1000;

        rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
        if (rc != 0)
            goto fail2;
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail3;

        if (encp->enc_bug35388_workaround) {
            EFX_POPULATE_DWORD_3(dword,
                ERF_DD_EVQ_IND_TIMER_FLAGS,
                EFE_DD_EVQ_IND_TIMER_FLAGS,
                ERF_DD_EVQ_IND_TIMER_MODE, mode,
                ERF_DD_EVQ_IND_TIMER_VAL, ticks);
            EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
                eep->ee_index, &dword, 0);
        } else {
            /*
             * NOTE: The TMR_REL field introduced in Medford2 is
             * ignored on earlier EF10 controllers. See bug66418
             * comment 9 for details.
             */
            EFX_POPULATE_DWORD_3(dword,
                ERF_DZ_TC_TIMER_MODE, mode,
                ERF_DZ_TC_TIMER_VAL, ticks,
                ERF_FZ_TC_TMR_REL_VAL, ticks);
            EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
                eep->ee_index, &dword, 0);
        }
    }

    return (0);

fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

#if EFSYS_OPT_QSTATS
void
ef10_ev_qstats_update(
    __in efx_evq_t *eep,
    __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{
    unsigned int id;

    for (id = 0; id < EV_NQSTATS; id++) {
        efsys_stat_t *essp = &stat[id];

        EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
        eep->ee_stat[id] = 0;
    }
}
#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER

static __checkReturn boolean_t
ef10_ev_rx_packed_stream(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    uint32_t label;
    uint32_t pkt_count_lbits;
    uint16_t flags;
    boolean_t should_abort;
    efx_evq_rxq_state_t *eersp;
    unsigned int pkt_count;
    unsigned int current_id;
    boolean_t new_buffer;

    pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
    new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

    flags = 0;

    eersp = &eep->ee_rxq_state[label];

    /*
     * RX_DSC_PTR_LBITS has the least significant bits of the global
     * (not per-buffer) packet counter. It is guaranteed that the
     * maximum number of completed packets fits in the lbits mask, so
     * modulo arithmetic over that mask is used to calculate the packet
     * counter increment.
     */
    pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
    eersp->eers_rx_stream_npackets += pkt_count;

    if (new_buffer) {
        flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
        /*
         * If both packed stream and equal stride super-buffer modes
         * are compiled in, in theory credits should be maintained for
         * packed stream only, but right now these modes are not
         * distinguished in the event queue Rx queue state and it is OK
         * to increment the counter regardless (it might be even
         * cheaper than branching, since neighbouring structure members
         * are updated as well).
         */
        eersp->eers_rx_packed_stream_credits++;
#endif
        eersp->eers_rx_read_ptr++;
    }
    current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

    /* Check for errors that invalidate checksum and L3/L4 fields */
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
        /* RX frame truncated */
        EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
        flags |= EFX_DISCARD;
        goto deliver;
    }
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
        /* Bad Ethernet frame CRC */
        EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
        flags |= EFX_DISCARD;
        goto deliver;
    }

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
        flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
        goto deliver;
    }

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
        EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
        EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
    /* If we're not discarding the packet then it is ok */
    if (~flags & EFX_DISCARD)
        EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

    EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
    should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
        flags);

    return (should_abort);
}

#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */

static __checkReturn boolean_t
ef10_ev_rx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t size;
    uint32_t label;
    uint32_t mac_class;
    uint32_t eth_tag_class;
    uint32_t l3_class;
    uint32_t l4_class;
    uint32_t next_read_lbits;
    uint16_t flags;
    boolean_t cont;
    boolean_t should_abort;
    efx_evq_rxq_state_t *eersp;
    unsigned int desc_count;
    unsigned int last_used_id;

    EFX_EV_QSTAT_INCR(eep, EV_RX);

    /* Discard events after RXQ/TXQ errors, or hardware not available */
    if (enp->en_reset_flags &
        (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
        return (B_FALSE);

    /* Basic packet information */
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
    eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    /*
     * Packed stream events are very different,
     * so handle them separately
     */
    if (eersp->eers_rx_packed_stream)
        return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

    size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
    cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
    next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
    eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
    mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
    l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

    /*
     * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
     * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
     * and values for all EF10 controllers.
     */
    EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
    EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
    EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
    EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

    l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
        /* Drop this event */
        return (B_FALSE);
    }
    flags = 0;

    if (cont != 0) {
        /*
         * This may be part of a scattered frame, or it may be a
         * truncated frame if scatter is disabled on this RXQ.
         * Overlength frames can be received if e.g. a VF is configured
         * for 1500 MTU but connected to a port set to 9000 MTU
         * (see bug56567).
         * FIXME: There is not yet any driver that supports scatter on
         * Huntington. Scatter support is required for OSX.
         */
        flags |= EFX_PKT_CONT;
    }

    if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
        flags |= EFX_PKT_UNICAST;

    /* Increment the count of descriptors read */
    desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
        EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
    eersp->eers_rx_read_ptr += desc_count;

    /*
     * FIXME: add error checking to make sure this is a batched event.
     * This could also be an aborted scatter, see Bug36629.
     */
    if (desc_count > 1) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
        flags |= EFX_PKT_PREFIX_LEN;
    }

    /* Calculate the index of the last descriptor consumed */
    last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

    /* Check for errors that invalidate checksum and L3/L4 fields */
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
        /* RX frame truncated */
        EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
        flags |= EFX_DISCARD;
        goto deliver;
    }
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
        /* Bad Ethernet frame CRC */
        EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
        flags |= EFX_DISCARD;
        goto deliver;
    }
    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
        /*
         * Hardware parse failed, due to malformed headers
         * or headers that are too long for the parser.
         * Headers and checksums must be validated by the host.
         */
        /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
        goto deliver;
    }

    if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
        (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
        flags |= EFX_PKT_VLAN_TAGGED;
    }

    switch (l3_class) {
    case ESE_DZ_L3_CLASS_IP4:
    case ESE_DZ_L3_CLASS_IP4_FRAG:
        flags |= EFX_PKT_IPV4;
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
        } else {
            flags |= EFX_CKSUM_IPV4;
        }

        /*
         * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
         * only 2 bits wide on Medford2. Check it is safe to use the
         * Medford2 field and values for all EF10 controllers.
         */
        EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
            ESF_DE_RX_L4_CLASS_LBN);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
            ESE_DE_L4_CLASS_UNKNOWN);

        if (l4_class == ESE_FZ_L4_CLASS_TCP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
            flags |= EFX_PKT_TCP;
        } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
            flags |= EFX_PKT_UDP;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
        }
        break;

    case ESE_DZ_L3_CLASS_IP6:
    case ESE_DZ_L3_CLASS_IP6_FRAG:
        flags |= EFX_PKT_IPV6;

        /*
         * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
         * only 2 bits wide on Medford2. Check it is safe to use the
         * Medford2 field and values for all EF10 controllers.
         */
        EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
            ESF_DE_RX_L4_CLASS_LBN);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
        EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
            ESE_DE_L4_CLASS_UNKNOWN);

        if (l4_class == ESE_FZ_L4_CLASS_TCP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
            flags |= EFX_PKT_TCP;
        } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
            flags |= EFX_PKT_UDP;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
        }
        break;

    default:
        EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
        break;
    }

    if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
        if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
        } else {
            flags |= EFX_CKSUM_TCPUDP;
        }
    }

deliver:
    /* If we're not discarding the packet then it is ok */
    if (~flags & EFX_DISCARD)
        EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

    EFSYS_ASSERT(eecp->eec_rx != NULL);
    should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

    return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_tx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t id;
    uint32_t label;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_TX);

    /* Discard events after RXQ/TXQ errors, or hardware not available */
    if (enp->en_reset_flags &
        (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
        return (B_FALSE);

    if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
        /* Drop this event */
        return (B_FALSE);
    }

    /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
    id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
    label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

    EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

    EFSYS_ASSERT(eecp->eec_tx != NULL);
    should_abort = eecp->eec_tx(arg, label, id);

    return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_driver(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    unsigned int code;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
    should_abort = B_FALSE;

    code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
    switch (code) {
    case ESE_DZ_DRV_TIMER_EV: {
        uint32_t id;

        id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

        EFSYS_ASSERT(eecp->eec_timer != NULL);
        should_abort = eecp->eec_timer(arg, id);
        break;
    }

    case ESE_DZ_DRV_WAKE_UP_EV: {
        uint32_t id;

        id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

        EFSYS_ASSERT(eecp->eec_wake_up != NULL);
        should_abort = eecp->eec_wake_up(arg, id);
        break;
    }

    case ESE_DZ_DRV_START_UP_EV:
        EFSYS_ASSERT(eecp->eec_initialized != NULL);
        should_abort = eecp->eec_initialized(arg);
        break;

    default:
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
        break;
    }

    return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_drv_gen(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    uint32_t data;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
    should_abort = B_FALSE;

    data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
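    /*
     * The event datum must fit in 16 bits (ef10_ev_qpost posts a
     * uint16_t); anything larger indicates a corrupt event.
     */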
    if (data >= ((uint32_t)1 << 16)) {
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        return (B_TRUE);
    }

    EFSYS_ASSERT(eecp->eec_software != NULL);
    should_abort = eecp->eec_software(arg, (uint16_t)data);

    return (should_abort);
}

static __checkReturn boolean_t
ef10_ev_mcdi(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    unsigned int code;
    boolean_t should_abort = B_FALSE;

    EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

    code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
    switch (code) {
    case MCDI_EVENT_CODE_BADSSERT:
        efx_mcdi_ev_death(enp, EINTR);
        break;

    case MCDI_EVENT_CODE_CMDDONE:
        efx_mcdi_ev_cpl(enp,
            MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
            MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
            MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
        break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
    case MCDI_EVENT_CODE_PROXY_RESPONSE:
        /*
         * This event notifies a function that an authorization request
         * has been processed. If the request was authorized then the
         * function can now re-send the original MCDI request.
         * See SF-113652-SW "SR-IOV Proxied Network Access Control".
         */
        efx_mcdi_ev_proxy_response(enp,
            MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
            MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
        break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

    case MCDI_EVENT_CODE_LINKCHANGE: {
        efx_link_mode_t link_mode;

        ef10_phy_link_ev(enp, eqp, &link_mode);
        should_abort = eecp->eec_link_change(arg, link_mode);
        break;
    }

    case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
        efx_mon_stat_t id;
        efx_mon_stat_value_t value;
        efx_rc_t rc;

        /* Decode monitor stat for MCDI sensor (if supported) */
        if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
            /* Report monitor stat change */
            should_abort = eecp->eec_monitor(arg, id, value);
        } else if (rc == ENOTSUP) {
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_SENSOREVT,
                MCDI_EV_FIELD(eqp, DATA));
        } else {
            EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
        }
#endif
        break;
    }

    case MCDI_EVENT_CODE_SCHEDERR:
        /* Informational only */
        break;

    case MCDI_EVENT_CODE_REBOOT:
        /* Falcon/Siena only (should not be seen with Huntington). */
        efx_mcdi_ev_death(enp, EIO);
        break;

    case MCDI_EVENT_CODE_MC_REBOOT:
        /* MC_REBOOT event is used for Huntington (EF10) and later. */
        efx_mcdi_ev_death(enp, EIO);
        break;

    case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
        if (eecp->eec_mac_stats != NULL) {
            eecp->eec_mac_stats(arg,
                MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
        }
#endif
        break;

    case MCDI_EVENT_CODE_FWALERT: {
        uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

        if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_FWALERT_SRAM,
                MCDI_EV_FIELD(eqp, FWALERT_DATA));
        else
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_FWALERT,
                MCDI_EV_FIELD(eqp, DATA));
        break;
    }

    case MCDI_EVENT_CODE_TX_ERR: {
        /*
         * After a TXQ error is detected, firmware sends a TX_ERR event.
         * This may be followed by TX completions (which we discard),
         * and then finally by a TX_FLUSH event. Firmware destroys the
         * TXQ automatically after sending the TX_FLUSH event.
         */
        enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

        EFSYS_PROBE2(tx_descq_err,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        /* Inform the driver that a reset is required. */
        eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
            MCDI_EV_FIELD(eqp, TX_ERR_DATA));
        break;
    }

    case MCDI_EVENT_CODE_TX_FLUSH: {
        uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

        /*
         * EF10 firmware sends two TX_FLUSH events: one to the txq's
         * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
         * We want to wait for all completions, so ignore the events
         * with TX_FLUSH_TO_DRIVER.
         */
        if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
            should_abort = B_FALSE;
            break;
        }

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

        EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

        EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
        should_abort = eecp->eec_txq_flush_done(arg, txq_index);
        break;
    }

    case MCDI_EVENT_CODE_RX_ERR: {
        /*
         * After an RXQ error is detected, firmware sends an RX_ERR
         * event. This may be followed by RX events (which we discard),
         * and then finally by an RX_FLUSH event. Firmware destroys the
         * RXQ automatically after sending the RX_FLUSH event.
         */
        enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

        EFSYS_PROBE2(rx_descq_err,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

        /* Inform the driver that a reset is required. */
        eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
            MCDI_EV_FIELD(eqp, RX_ERR_DATA));
        break;
    }

    case MCDI_EVENT_CODE_RX_FLUSH: {
        uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

        /*
         * EF10 firmware sends two RX_FLUSH events: one to the rxq's
         * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
         * We want to wait for all completions, so ignore the events
         * with RX_FLUSH_TO_DRIVER.
         */
        if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
            should_abort = B_FALSE;
            break;
        }

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

        EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

        EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
        should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
        break;
    }

    default:
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
        break;
    }

    return (should_abort);
}

void
ef10_ev_rxlabel_init(
    __in efx_evq_t *eep,
    __in efx_rxq_t *erp,
    __in unsigned int label,
    __in efx_rxq_type_t type)
{
    efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
    boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

    _NOTE(ARGUNUSED(type))
    EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
    eersp = &eep->ee_rxq_state[label];

    EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
    /*
     * For packed stream modes, the very first event will have the new
     * buffer flag set, so the read pointer will be incremented then,
     * yielding the correct pointer. This results in simpler code than
     * trying to detect a start-of-the-world condition in the event
     * handler.
     */
    eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
    eersp->eers_rx_read_ptr = 0;
#endif
    eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    eersp->eers_rx_stream_npackets = 0;
    eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
    if (packed_stream) {
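        /*
         * Size the initial credit count from the EVQ depth: each credit
         * covers EFX_RX_PACKED_STREAM_MEM_PER_CREDIT bytes of buffer, so
         * with minimally spaced packets this presumably bounds the
         * number of RX events that outstanding credits can raise,
         * keeping the EVQ from overflowing.
         */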
        eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
            EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
            EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
        EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
        /*
         * A single credit is allocated to the queue when it is started.
         * It is immediately spent by the first packet, which has the
         * NEW BUFFER flag set. Account for that credit here so as not
         * to wrap around the maximum number of credits accidentally.
         */
        eersp->eers_rx_packed_stream_credits--;
        EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
            EFX_RX_PACKED_STREAM_MAX_CREDITS);
    }
#endif
}

void
ef10_ev_rxlabel_fini(
    __in efx_evq_t *eep,
    __in unsigned int label)
{
    efx_evq_rxq_state_t *eersp;

    EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
    eersp = &eep->ee_rxq_state[label];

    EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

    eersp->eers_rx_read_ptr = 0;
    eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
    eersp->eers_rx_stream_npackets = 0;
    eersp->eers_rx_packed_stream = B_FALSE;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
    eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */