1 /*
2 * Copyright (C) 2015 Cavium Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27 #include <sys/cdefs.h>
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bitset.h>
34 #include <sys/bitstring.h>
35 #include <sys/buf_ring.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/rman.h>
42 #include <sys/pciio.h>
43 #include <sys/pcpu.h>
44 #include <sys/proc.h>
45 #include <sys/sockio.h>
46 #include <sys/socket.h>
47 #include <sys/stdatomic.h>
48 #include <sys/cpuset.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/smp.h>
52 #include <sys/taskqueue.h>
53
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56
57 #include <machine/bus.h>
58 #include <machine/vmparam.h>
59
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/if_media.h>
63 #include <net/ifq.h>
64 #include <net/bpf.h>
65 #include <net/ethernet.h>
66
67 #include <netinet/in_systm.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip6.h>
72 #include <netinet/sctp.h>
73 #include <netinet/tcp.h>
74 #include <netinet/tcp_lro.h>
75 #include <netinet/udp.h>
76
77 #include <netinet6/ip6_var.h>
78
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81
82 #include "thunder_bgx.h"
83 #include "nic_reg.h"
84 #include "nic.h"
85 #include "q_struct.h"
86 #include "nicvf_queues.h"
87
88 #define DEBUG
89 #undef DEBUG
90
91 #ifdef DEBUG
92 #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__)
93 #else
94 #define dprintf(dev, fmt, ...)
95 #endif
96
97 MALLOC_DECLARE(M_NICVF);
98
99 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
100 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
101 static void nicvf_sq_disable(struct nicvf *, int);
102 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
103 static void nicvf_put_sq_desc(struct snd_queue *, int);
104 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
105 boolean_t);
106 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
107
108 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
109
110 static void nicvf_rbdr_task(void *, int);
111 static void nicvf_rbdr_task_nowait(void *, int);
112
113 struct rbuf_info {
114 bus_dma_tag_t dmat;
115 bus_dmamap_t dmap;
116 struct mbuf * mbuf;
117 };
118
119 #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES))
120
121 /* Poll a register for a specific value */
122 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
123 uint64_t reg, int bit_pos, int bits, int val)
124 {
125 uint64_t bit_mask;
126 uint64_t reg_val;
127 int timeout = 10;
128
129 bit_mask = (1UL << bits) - 1;
130 bit_mask = (bit_mask << bit_pos);
131
132 while (timeout) {
133 reg_val = nicvf_queue_reg_read(nic, reg, qidx);
134 if (((reg_val & bit_mask) >> bit_pos) == val)
135 return (0);
136
137 DELAY(1000);
138 timeout--;
139 }
140 device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
141 return (ETIMEDOUT);
142 }
143
144 /* Callback for bus_dmamap_load() */
145 static void
146 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
147 {
148 bus_addr_t *paddr;
149
150 KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
151 paddr = arg;
152 *paddr = segs->ds_addr;
153 }
154
155 /* Allocate memory for a queue's descriptors */
156 static int
157 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
158 int q_len, int desc_size, int align_bytes)
159 {
160 int err, err_dmat __diagused;
161
162 /* Create DMA tag first */
163 err = bus_dma_tag_create(
164 bus_get_dma_tag(nic->dev), /* parent tag */
165 align_bytes, /* alignment */
166 0, /* boundary */
167 BUS_SPACE_MAXADDR, /* lowaddr */
168 BUS_SPACE_MAXADDR, /* highaddr */
169 NULL, NULL, /* filtfunc, filtfuncarg */
170 (q_len * desc_size), /* maxsize */
171 1, /* nsegments */
172 (q_len * desc_size), /* maxsegsize */
173 0, /* flags */
174 NULL, NULL, /* lockfunc, lockfuncarg */
175 &dmem->dmat); /* dmat */
176
177 if (err != 0) {
178 device_printf(nic->dev,
179 "Failed to create busdma tag for descriptors ring\n");
180 return (err);
181 }
182
183 /* Allocate a segment of contiguous, DMA-safe memory */
184 err = bus_dmamem_alloc(
185 dmem->dmat, /* DMA tag */
186 &dmem->base, /* virtual address */
187 (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */
188 &dmem->dmap); /* DMA map */
189 if (err != 0) {
190 device_printf(nic->dev, "Failed to allocate DMA safe memory for "
191 "descriptors ring\n");
192 goto dmamem_fail;
193 }
194
195 err = bus_dmamap_load(
196 dmem->dmat,
197 dmem->dmap,
198 dmem->base,
199 (q_len * desc_size), /* allocation size */
200 nicvf_dmamap_q_cb, /* map to DMA address cb. */
201 &dmem->phys_base, /* physical address */
202 BUS_DMA_NOWAIT);
203 if (err != 0) {
204 device_printf(nic->dev,
205 "Cannot load DMA map of descriptors ring\n");
206 goto dmamap_fail;
207 }
208
209 dmem->q_len = q_len;
210 dmem->size = (desc_size * q_len);
211
212 return (0);
213
214 dmamap_fail:
215 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
216 dmem->phys_base = 0;
217 dmamem_fail:
218 err_dmat = bus_dma_tag_destroy(dmem->dmat);
219 dmem->base = NULL;
220 KASSERT(err_dmat == 0,
221 ("%s: Trying to destroy BUSY DMA tag", __func__));
222
223 return (err);
224 }
225
226 /* Free queue's descriptor memory */
227 static void
228 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
229 {
230 int err __diagused;
231
232 if ((dmem == NULL) || (dmem->base == NULL))
233 return;
234
235 /* Unload a map */
236 bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
237 bus_dmamap_unload(dmem->dmat, dmem->dmap);
238 /* Free DMA memory */
239 bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
240 /* Destroy DMA tag */
241 err = bus_dma_tag_destroy(dmem->dmat);
242
243 KASSERT(err == 0,
244 ("%s: Trying to destroy BUSY DMA tag", __func__));
245
246 dmem->phys_base = 0;
247 dmem->base = NULL;
248 }
249
250 /*
251 * Allocate buffer for packet reception
252 * The HW returns the memory address to which the packet was DMA'ed, not
253 * a pointer into the RBDR ring, so save the buffer address at the start
254 * of the fragment and align the start address to a cache-aligned address.
255 */
256 static __inline int
257 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
258 bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
259 {
260 struct mbuf *mbuf;
261 struct rbuf_info *rinfo;
262 bus_dma_segment_t segs[1];
263 int nsegs;
264 int err;
265
266 mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
267 if (mbuf == NULL)
268 return (ENOMEM);
269
270 /*
271 * The length is equal to the actual length plus one 128-byte cache
272 * line used as room for the rbuf_info structure.
273 */
274 mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
275
276 err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
277 &nsegs, BUS_DMA_NOWAIT);
278 if (err != 0) {
279 device_printf(nic->dev,
280 "Failed to map mbuf into DMA visible memory, err: %d\n",
281 err);
282 m_freem(mbuf);
283 bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
284 return (err);
285 }
286 if (nsegs != 1)
287 panic("Unexpected number of DMA segments for RB: %d", nsegs);
288 /*
289 * Now use the room for rbuf_info structure
290 * and adjust mbuf data and length.
291 */
292 rinfo = (struct rbuf_info *)mbuf->m_data;
293 m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
294
295 rinfo->dmat = rbdr->rbdr_buff_dmat;
296 rinfo->dmap = dmap;
297 rinfo->mbuf = mbuf;
298
299 *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
300
301 return (0);
302 }
303
304 /* Retrieve mbuf for received packet */
305 static struct mbuf *
306 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
307 {
308 struct mbuf *mbuf;
309 struct rbuf_info *rinfo;
310
311 /* Get buffer start address and alignment offset */
312 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
313
314 /* Now retrieve mbuf to give to stack */
315 mbuf = rinfo->mbuf;
316 if (__predict_false(mbuf == NULL)) {
317 panic("%s: Received packet fragment with NULL mbuf",
318 device_get_nameunit(nic->dev));
319 }
320 /*
321 * Clear the mbuf in the descriptor to indicate
322 * that this slot is processed and free to use.
323 */
324 rinfo->mbuf = NULL;
325
326 bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
327 bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
328
329 return (mbuf);
330 }
331
332 /* Allocate RBDR ring and populate receive buffers */
333 static int
334 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
335 int buf_size, int qidx)
336 {
337 bus_dmamap_t dmap;
338 bus_addr_t rbuf;
339 struct rbdr_entry_t *desc;
340 int idx;
341 int err;
342
343 /* Allocate rbdr descriptors ring */
344 err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
345 sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
346 if (err != 0) {
347 device_printf(nic->dev,
348 "Failed to create RBDR descriptors ring\n");
349 return (err);
350 }
351
352 rbdr->desc = rbdr->dmem.base;
353 /*
354 * Buffer size has to be in multiples of 128 bytes.
355 * Make room for metadata the size of one cache line (128 bytes).
356 */
357 rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
358 rbdr->enable = TRUE;
359 rbdr->thresh = RBDR_THRESH;
360 rbdr->nic = nic;
361 rbdr->idx = qidx;
362
363 /*
364 * Create DMA tag for Rx buffers.
365 * Each map created using this tag is intended to store Rx payload for
366 * one fragment and one header structure containing rbuf_info (hence
367 * the additional 128-byte line, since an RB must be a multiple of the
368 * 128-byte cache line).
369 */
370 if (buf_size > MCLBYTES) {
371 device_printf(nic->dev,
372 "Buffer size to large for mbuf cluster\n");
373 return (EINVAL);
374 }
375 err = bus_dma_tag_create(
376 bus_get_dma_tag(nic->dev), /* parent tag */
377 NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */
378 0, /* boundary */
379 DMAP_MAX_PHYSADDR, /* lowaddr */
380 DMAP_MIN_PHYSADDR, /* highaddr */
381 NULL, NULL, /* filtfunc, filtfuncarg */
382 roundup2(buf_size, MCLBYTES), /* maxsize */
383 1, /* nsegments */
384 roundup2(buf_size, MCLBYTES), /* maxsegsize */
385 0, /* flags */
386 NULL, NULL, /* lockfunc, lockfuncarg */
387 &rbdr->rbdr_buff_dmat); /* dmat */
388
389 if (err != 0) {
390 device_printf(nic->dev,
391 "Failed to create busdma tag for RBDR buffers\n");
392 return (err);
393 }
394
395 rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
396 ring_len, M_NICVF, (M_WAITOK | M_ZERO));
397
398 for (idx = 0; idx < ring_len; idx++) {
399 err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
400 if (err != 0) {
401 device_printf(nic->dev,
402 "Failed to create DMA map for RB\n");
403 return (err);
404 }
405 rbdr->rbdr_buff_dmaps[idx] = dmap;
406
407 err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
408 DMA_BUFFER_LEN, &rbuf);
409 if (err != 0)
410 return (err);
411
412 desc = GET_RBDR_DESC(rbdr, idx);
413 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
414 }
415
416 /* Allocate taskqueue */
417 TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
418 TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
419 rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
420 taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
421 taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
422 device_get_nameunit(nic->dev));
423
424 return (0);
425 }
426
427 /* Free RBDR ring and its receive buffers */
428 static void
429 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
430 {
431 struct mbuf *mbuf;
432 struct queue_set *qs;
433 struct rbdr_entry_t *desc;
434 struct rbuf_info *rinfo;
435 bus_addr_t buf_addr;
436 int head, tail, idx;
437 int err __diagused;
438
439 qs = nic->qs;
440
441 if ((qs == NULL) || (rbdr == NULL))
442 return;
443
444 rbdr->enable = FALSE;
445 if (rbdr->rbdr_taskq != NULL) {
446 /* Remove tasks */
447 while (taskqueue_cancel(rbdr->rbdr_taskq,
448 &rbdr->rbdr_task_nowait, NULL) != 0) {
449 /* Finish the nowait task first */
450 taskqueue_drain(rbdr->rbdr_taskq,
451 &rbdr->rbdr_task_nowait);
452 }
453 taskqueue_free(rbdr->rbdr_taskq);
454 rbdr->rbdr_taskq = NULL;
455
456 while (taskqueue_cancel(taskqueue_thread,
457 &rbdr->rbdr_task, NULL) != 0) {
458 /* Now finish the sleepable task */
459 taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
460 }
461 }
462
463 /*
464 * Free all of the memory under the RB descriptors.
465 * There are assumptions here:
466 * 1. Corresponding RBDR is disabled
467 * - it is safe to operate using head and tail indexes
468 * 2. All buffers that were received are properly freed by
469 * the receive handler
470 * - there is no need to unload the DMA map and free the mbuf for
471 * descriptors other than the unused ones
472 */
473 if (rbdr->rbdr_buff_dmat != NULL) {
474 head = rbdr->head;
475 tail = rbdr->tail;
476 while (head != tail) {
477 desc = GET_RBDR_DESC(rbdr, head);
478 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
479 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
480 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
481 mbuf = rinfo->mbuf;
482 /* This will destroy everything including rinfo! */
483 m_freem(mbuf);
484 head++;
485 head &= (rbdr->dmem.q_len - 1);
486 }
487 /* Free tail descriptor */
488 desc = GET_RBDR_DESC(rbdr, tail);
489 buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
490 rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
491 bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
492 mbuf = rinfo->mbuf;
493 /* This will destroy everything including rinfo! */
494 m_freem(mbuf);
495
496 /* Destroy DMA maps */
497 for (idx = 0; idx < qs->rbdr_len; idx++) {
498 if (rbdr->rbdr_buff_dmaps[idx] == NULL)
499 continue;
500 err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
501 rbdr->rbdr_buff_dmaps[idx]);
502 KASSERT(err == 0,
503 ("%s: Could not destroy DMA map for RB, desc: %d",
504 __func__, idx));
505 rbdr->rbdr_buff_dmaps[idx] = NULL;
506 }
507
508 /* Now destroy the tag */
509 err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
510 KASSERT(err == 0,
511 ("%s: Trying to destroy BUSY DMA tag", __func__));
512
513 rbdr->head = 0;
514 rbdr->tail = 0;
515 }
516
517 /* Free RBDR ring */
518 nicvf_free_q_desc_mem(nic, &rbdr->dmem);
519 }
520
521 /*
522 * Refill receive buffer descriptors with new buffers.
523 */
524 static int
525 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
526 {
527 struct nicvf *nic;
528 struct queue_set *qs;
529 int rbdr_idx;
530 int tail, qcount;
531 int refill_rb_cnt;
532 struct rbdr_entry_t *desc;
533 bus_dmamap_t dmap;
534 bus_addr_t rbuf;
535 boolean_t rb_alloc_fail;
536 int new_rb;
537
538 rb_alloc_fail = TRUE;
539 new_rb = 0;
540 nic = rbdr->nic;
541 qs = nic->qs;
542 rbdr_idx = rbdr->idx;
543
544 /* Check if it's enabled */
545 if (!rbdr->enable)
546 return (0);
547
548 /* Get the number of descriptors to be refilled */
549 qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
550 qcount &= 0x7FFFF;
551 /* Doorbell can be rung with at most ring size minus 1 */
552 if (qcount >= (qs->rbdr_len - 1)) {
553 rb_alloc_fail = FALSE;
554 goto out;
555 } else
556 refill_rb_cnt = qs->rbdr_len - qcount - 1;
557
558 /* Start filling descs from tail */
559 tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
560 while (refill_rb_cnt) {
561 tail++;
562 tail &= (rbdr->dmem.q_len - 1);
563
564 dmap = rbdr->rbdr_buff_dmaps[tail];
565 if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
566 DMA_BUFFER_LEN, &rbuf)) {
567 /* Something went wrong. Give up */
568 break;
569 }
570 desc = GET_RBDR_DESC(rbdr, tail);
571 desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
572 refill_rb_cnt--;
573 new_rb++;
574 }
575
576 /* make sure all memory stores are done before ringing doorbell */
577 wmb();
578
579 /* Check if buffer allocation failed */
580 if (refill_rb_cnt == 0)
581 rb_alloc_fail = FALSE;
582
583 /* Notify HW */
584 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
585 rbdr_idx, new_rb);
586 out:
587 if (!rb_alloc_fail) {
588 /*
589 * Re-enable RBDR interrupts only
590 * if buffer allocation succeeded.
591 */
592 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
593
594 return (0);
595 }
596
597 return (ENOMEM);
598 }
599
600 /* Refill RBs even if sleep is needed to reclaim memory */
601 static void
602 nicvf_rbdr_task(void *arg, int pending)
603 {
604 struct rbdr *rbdr;
605 int err;
606
607 rbdr = (struct rbdr *)arg;
608
609 err = nicvf_refill_rbdr(rbdr, M_WAITOK);
610 if (__predict_false(err != 0)) {
611 panic("%s: Failed to refill RBs even when sleep enabled",
612 __func__);
613 }
614 }
615
616 /* Refill RBs as soon as possible without waiting */
617 static void
618 nicvf_rbdr_task_nowait(void *arg, int pending)
619 {
620 struct rbdr *rbdr;
621 int err;
622
623 rbdr = (struct rbdr *)arg;
624
625 err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
626 if (err != 0) {
627 /*
628 * Schedule another, sleepable kernel thread
629 * that is guaranteed to refill the buffers.
630 */
631 taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
632 }
633 }
634
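/*
 * Handle a received-packet CQE: check for Rx errors, reassemble the mbuf
 * and either feed it to LRO or stash it on the per-CQ buf_ring so it can
 * be pushed to the stack once the completion lock is dropped.
 */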
635 static int
636 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
637 struct cqe_rx_t *cqe_rx, int cqe_type)
638 {
639 struct mbuf *mbuf;
640 struct rcv_queue *rq;
641 int rq_idx;
642 int err = 0;
643
644 rq_idx = cqe_rx->rq_idx;
645 rq = &nic->qs->rq[rq_idx];
646
647 /* Check for errors */
648 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
649 if (err && !cqe_rx->rb_cnt)
650 return (0);
651
652 mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
653 if (mbuf == NULL) {
654 dprintf(nic->dev, "Packet not received\n");
655 return (0);
656 }
657
658 /* If error packet */
659 if (err != 0) {
660 m_freem(mbuf);
661 return (0);
662 }
663
664 if (rq->lro_enabled &&
665 ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
666 (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
667 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
668 /*
669 * At this point it is known that there are no errors in the
670 * packet. Attempt to LRO enqueue. Send to stack if no resources
671 * or enqueue error.
672 */
673 if ((rq->lro.lro_cnt != 0) &&
674 (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
675 return (0);
676 }
677 /*
678 * Push this packet to the stack later to avoid
679 * unlocking completion task in the middle of work.
680 */
681 err = buf_ring_enqueue(cq->rx_br, mbuf);
682 if (err != 0) {
683 /*
684 * Failed to enqueue this mbuf.
685 * We don't drop it, just schedule another task.
686 */
687 return (err);
688 }
689
690 return (0);
691 }
692
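/*
 * Handle a send-completion CQE: unload the DMA map, free the transmitted
 * mbuf and return its subdescriptors to the send queue.
 */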
693 static void
694 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
695 struct cqe_send_t *cqe_tx, int cqe_type)
696 {
697 bus_dmamap_t dmap;
698 struct mbuf *mbuf;
699 struct snd_queue *sq;
700 struct sq_hdr_subdesc *hdr;
701
702 mbuf = NULL;
703 sq = &nic->qs->sq[cqe_tx->sq_idx];
704
705 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
706 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
707 return;
708
709 dprintf(nic->dev,
710 "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
711 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
712 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
713
714 dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
715 bus_dmamap_unload(sq->snd_buff_dmat, dmap);
716
717 mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
718 if (mbuf != NULL) {
719 m_freem(mbuf);
720 sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
721 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
722 }
723
724 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
725 }
726
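/*
 * Process pending CQEs of a completion queue under the CQ lock, ring the
 * doorbell to release them back to the HW, restart the TX queue if it was
 * stopped, flush LRO and finally push the queued mbufs to the stack.
 * Returns non-zero if the CQ could not be fully processed.
 */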
727 static int
728 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
729 {
730 struct mbuf *mbuf;
731 if_t ifp;
732 int processed_cqe, tx_done = 0;
733 #ifdef DEBUG
734 int work_done = 0;
735 #endif
736 int cqe_count, cqe_head;
737 struct queue_set *qs = nic->qs;
738 struct cmp_queue *cq = &qs->cq[cq_idx];
739 struct snd_queue *sq = &qs->sq[cq_idx];
740 struct rcv_queue *rq;
741 struct cqe_rx_t *cq_desc;
742 struct lro_ctrl *lro;
743 int rq_idx;
744 int cmp_err;
745
746 NICVF_CMP_LOCK(cq);
747 cmp_err = 0;
748 processed_cqe = 0;
749 /* Get the number of valid CQ entries to process */
750 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
751 cqe_count &= CQ_CQE_COUNT;
752 if (cqe_count == 0)
753 goto out;
754
755 /* Get head of the valid CQ entries */
756 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
757 cqe_head &= 0xFFFF;
758
759 dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
760 __func__, cq_idx, cqe_count, cqe_head);
761 while (processed_cqe < cqe_count) {
762 /* Get the CQ descriptor */
763 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
764 cqe_head++;
765 cqe_head &= (cq->dmem.q_len - 1);
766 /* Prefetch next CQ descriptor */
767 __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
768
769 dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
770 cq_desc->cqe_type);
771 switch (cq_desc->cqe_type) {
772 case CQE_TYPE_RX:
773 cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
774 CQE_TYPE_RX);
775 if (__predict_false(cmp_err != 0)) {
776 /*
777 * Oops. Cannot finish now.
778 * Let's try again later.
779 */
780 goto done;
781 }
782 #ifdef DEBUG
783 work_done++;
784 #endif
785 break;
786 case CQE_TYPE_SEND:
787 nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
788 CQE_TYPE_SEND);
789 tx_done++;
790 break;
791 case CQE_TYPE_INVALID:
792 case CQE_TYPE_RX_SPLIT:
793 case CQE_TYPE_RX_TCP:
794 case CQE_TYPE_SEND_PTP:
795 /* Ignore for now */
796 break;
797 }
798 processed_cqe++;
799 }
800 done:
801 dprintf(nic->dev,
802 "%s CQ%d processed_cqe %d work_done %d\n",
803 __func__, cq_idx, processed_cqe, work_done);
804
805 /* Ring doorbell to inform H/W to reuse processed CQEs */
806 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
807
808 if ((tx_done > 0) &&
809 ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
810 /* Reenable TXQ if it was stopped earlier due to SQ full */
811 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
812 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
813 }
814 out:
815 /*
816 * Flush any outstanding LRO work
817 */
818 rq_idx = cq_idx;
819 rq = &nic->qs->rq[rq_idx];
820 lro = &rq->lro;
821 tcp_lro_flush_all(lro);
822
823 NICVF_CMP_UNLOCK(cq);
824
825 ifp = nic->ifp;
826 /* Push received MBUFs to the stack */
827 while (!buf_ring_empty(cq->rx_br)) {
828 mbuf = buf_ring_dequeue_mc(cq->rx_br);
829 if (__predict_true(mbuf != NULL))
830 if_input(ifp, mbuf);
831 }
832
833 return (cmp_err);
834 }
835
836 /*
837 * Qset error interrupt handler
838 *
839 * As of now only CQ errors are handled
840 */
841 static void
842 nicvf_qs_err_task(void *arg, int pending)
843 {
844 struct nicvf *nic;
845 struct queue_set *qs;
846 int qidx;
847 uint64_t status;
848 boolean_t enable = TRUE;
849
850 nic = (struct nicvf *)arg;
851 qs = nic->qs;
852
853 /* Deactivate network interface */
854 if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
855
856 /* Check if it is CQ err */
857 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
858 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
859 qidx);
860 if ((status & CQ_ERR_MASK) == 0)
861 continue;
862 /* Process already queued CQEs and reconfig CQ */
863 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
864 nicvf_sq_disable(nic, qidx);
865 (void)nicvf_cq_intr_handler(nic, qidx);
866 nicvf_cmp_queue_config(nic, qs, qidx, enable);
867 nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
868 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
869 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
870 }
871
872 if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
873 /* Re-enable Qset error interrupt */
874 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
875 }
876
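/*
 * Completion taskqueue handler: process the CQ, reschedule itself if the
 * CQ could not be fully drained, then clear and re-enable the interrupt.
 */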
877 static void
878 nicvf_cmp_task(void *arg, int pending)
879 {
880 struct cmp_queue *cq;
881 struct nicvf *nic;
882 int cmp_err;
883
884 cq = (struct cmp_queue *)arg;
885 nic = cq->nic;
886
887 /* Handle CQ descriptors */
888 cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
889 if (__predict_false(cmp_err != 0)) {
890 /*
891 * Schedule another thread here since we did not
892 * process the entire CQ due to Tx or Rx CQ parse error.
893 */
894 taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
895 }
896
897 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
898 /* Reenable interrupt (previously disabled in nicvf_intr_handler()) */
899 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
900
901 }
902
903 /* Initialize completion queue */
904 static int
905 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
906 int qidx)
907 {
908 int err;
909
910 /* Initialize lock */
911 snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
912 device_get_nameunit(nic->dev), qidx);
913 mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
914
915 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
916 NICVF_CQ_BASE_ALIGN_BYTES);
917
918 if (err != 0) {
919 device_printf(nic->dev,
920 "Could not allocate DMA memory for CQ\n");
921 return (err);
922 }
923
924 cq->desc = cq->dmem.base;
925 cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
926 cq->nic = nic;
927 cq->idx = qidx;
928 nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
929
930 cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
931 &cq->mtx);
932
933 /* Allocate taskqueue */
934 NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
935 cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
936 taskqueue_thread_enqueue, &cq->cmp_taskq);
937 taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
938 device_get_nameunit(nic->dev), qidx);
939
940 return (0);
941 }
942
943 static void
944 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
945 {
946
947 if (cq == NULL)
948 return;
949 /*
950 * The completion queue itself should be disabled by now
951 * (ref. nicvf_snd_queue_config()).
952 * Ensure that it is safe to disable it or panic.
953 */
954 if (cq->enable)
955 panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
956
957 if (cq->cmp_taskq != NULL) {
958 /* Remove task */
959 while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
960 taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
961
962 taskqueue_free(cq->cmp_taskq);
963 cq->cmp_taskq = NULL;
964 }
965 /*
966 * The completion task may possibly re-enable interrupts,
967 * so disable them now that the completion task has finished.
968 * It is safe to do so since the corresponding CQ
969 * was already disabled.
970 */
971 nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
972 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
973
974 NICVF_CMP_LOCK(cq);
975 nicvf_free_q_desc_mem(nic, &cq->dmem);
976 drbr_free(cq->rx_br, M_DEVBUF);
977 NICVF_CMP_UNLOCK(cq);
978 mtx_destroy(&cq->mtx);
979 memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
980 }
981
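/* Drain the buf_ring and transmit its frames; called with the SQ lock held. */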
982 int
983 nicvf_xmit_locked(struct snd_queue *sq)
984 {
985 struct nicvf *nic;
986 if_t ifp;
987 struct mbuf *next;
988 int err;
989
990 NICVF_TX_LOCK_ASSERT(sq);
991
992 nic = sq->nic;
993 ifp = nic->ifp;
994 err = 0;
995
996 while ((next = drbr_peek(ifp, sq->br)) != NULL) {
997 /* Send a copy of the frame to the BPF listener */
998 ETHER_BPF_MTAP(ifp, next);
999
1000 err = nicvf_tx_mbuf_locked(sq, &next);
1001 if (err != 0) {
1002 if (next == NULL)
1003 drbr_advance(ifp, sq->br);
1004 else
1005 drbr_putback(ifp, sq->br, next);
1006
1007 break;
1008 }
1009 drbr_advance(ifp, sq->br);
1010 }
1011 return (err);
1012 }
1013
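/*
 * Transmit taskqueue handler: drain the buf_ring unless the interface is
 * stopped, the SQ is marked full or the link is down; reschedule on error.
 */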
1014 static void
1015 nicvf_snd_task(void *arg, int pending)
1016 {
1017 struct snd_queue *sq = (struct snd_queue *)arg;
1018 struct nicvf *nic;
1019 if_t ifp;
1020 int err;
1021
1022 nic = sq->nic;
1023 ifp = nic->ifp;
1024
1025 /*
1026 * Skip sending anything if the driver is not running,
1027 * the SQ is full, or the link is down.
1028 */
1029 if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1030 IFF_DRV_RUNNING) || !nic->link_up)
1031 return;
1032
1033 NICVF_TX_LOCK(sq);
1034 err = nicvf_xmit_locked(sq);
1035 NICVF_TX_UNLOCK(sq);
1036 /* Try again */
1037 if (err != 0)
1038 taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1039 }
1040
1041 /* Initialize transmit queue */
1042 static int
1043 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1044 int qidx)
1045 {
1046 size_t i;
1047 int err;
1048
1049 /* Initialize TX lock for this queue */
1050 snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1051 device_get_nameunit(nic->dev), qidx);
1052 mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1053
1054 NICVF_TX_LOCK(sq);
1055 /* Allocate buffer ring */
1056 sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1057 M_NOWAIT, &sq->mtx);
1058 if (sq->br == NULL) {
1059 device_printf(nic->dev,
1060 "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1061 err = ENOMEM;
1062 goto error;
1063 }
1064
1065 /* Allocate DMA memory for Tx descriptors */
1066 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1067 NICVF_SQ_BASE_ALIGN_BYTES);
1068 if (err != 0) {
1069 device_printf(nic->dev,
1070 "Could not allocate DMA memory for SQ\n");
1071 goto error;
1072 }
1073
1074 sq->desc = sq->dmem.base;
1075 sq->head = sq->tail = 0;
1076 atomic_store_rel_int(&sq->free_cnt, q_len - 1);
1077 sq->thresh = SND_QUEUE_THRESH;
1078 sq->idx = qidx;
1079 sq->nic = nic;
1080
1081 /*
1082 * Allocate DMA maps for Tx buffers
1083 */
1084
1085 /* Create DMA tag first */
1086 err = bus_dma_tag_create(
1087 bus_get_dma_tag(nic->dev), /* parent tag */
1088 1, /* alignment */
1089 0, /* boundary */
1090 BUS_SPACE_MAXADDR, /* lowaddr */
1091 BUS_SPACE_MAXADDR, /* highaddr */
1092 NULL, NULL, /* filtfunc, filtfuncarg */
1093 NICVF_TSO_MAXSIZE, /* maxsize */
1094 NICVF_TSO_NSEGS, /* nsegments */
1095 MCLBYTES, /* maxsegsize */
1096 0, /* flags */
1097 NULL, NULL, /* lockfunc, lockfuncarg */
1098 &sq->snd_buff_dmat); /* dmat */
1099
1100 if (err != 0) {
1101 device_printf(nic->dev,
1102 "Failed to create busdma tag for Tx buffers\n");
1103 goto error;
1104 }
1105
1106 /* Allocate send buffers array */
1107 sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1108 (M_NOWAIT | M_ZERO));
1109 if (sq->snd_buff == NULL) {
1110 device_printf(nic->dev,
1111 "Could not allocate memory for Tx buffers array\n");
1112 err = ENOMEM;
1113 goto error;
1114 }
1115
1116 /* Now populate maps */
1117 for (i = 0; i < q_len; i++) {
1118 err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1119 &sq->snd_buff[i].dmap);
1120 if (err != 0) {
1121 device_printf(nic->dev,
1122 "Failed to create DMA maps for Tx buffers\n");
1123 goto error;
1124 }
1125 }
1126 NICVF_TX_UNLOCK(sq);
1127
1128 /* Allocate taskqueue */
1129 TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1130 sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1131 taskqueue_thread_enqueue, &sq->snd_taskq);
1132 taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1133 device_get_nameunit(nic->dev), qidx);
1134
1135 return (0);
1136 error:
1137 NICVF_TX_UNLOCK(sq);
1138 return (err);
1139 }
1140
1141 static void
1142 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1143 {
1144 struct queue_set *qs = nic->qs;
1145 size_t i;
1146 int err __diagused;
1147
1148 if (sq == NULL)
1149 return;
1150
1151 if (sq->snd_taskq != NULL) {
1152 /* Remove task */
1153 while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1154 taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1155
1156 taskqueue_free(sq->snd_taskq);
1157 sq->snd_taskq = NULL;
1158 }
1159
1160 NICVF_TX_LOCK(sq);
1161 if (sq->snd_buff_dmat != NULL) {
1162 if (sq->snd_buff != NULL) {
1163 for (i = 0; i < qs->sq_len; i++) {
1164 m_freem(sq->snd_buff[i].mbuf);
1165 sq->snd_buff[i].mbuf = NULL;
1166
1167 bus_dmamap_unload(sq->snd_buff_dmat,
1168 sq->snd_buff[i].dmap);
1169 err = bus_dmamap_destroy(sq->snd_buff_dmat,
1170 sq->snd_buff[i].dmap);
1171 /*
1172 * If bus_dmamap_destroy fails it can cause
1173 * a random panic later if the tag is also
1174 * destroyed in the process.
1175 */
1176 KASSERT(err == 0,
1177 ("%s: Could not destroy DMA map for SQ",
1178 __func__));
1179 }
1180 }
1181
1182 free(sq->snd_buff, M_NICVF);
1183
1184 err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1185 KASSERT(err == 0,
1186 ("%s: Trying to destroy BUSY DMA tag", __func__));
1187 }
1188
1189 /* Free private driver ring for this send queue */
1190 if (sq->br != NULL)
1191 drbr_free(sq->br, M_DEVBUF);
1192
1193 if (sq->dmem.base != NULL)
1194 nicvf_free_q_desc_mem(nic, &sq->dmem);
1195
1196 NICVF_TX_UNLOCK(sq);
1197 /* Destroy Tx lock */
1198 mtx_destroy(&sq->mtx);
1199 memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1200 }
1201
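/* Disable the send queue, wait for it to stop and then reset it. */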
1202 static void
1203 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1204 {
1205
1206 /* Disable send queue */
1207 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1208 /* Check if SQ is stopped */
1209 if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1210 return;
1211 /* Reset send queue */
1212 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1213 }
1214
1215 static void
1216 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1217 {
1218 union nic_mbx mbx = {};
1219
1220 /* Make sure all packets in the pipeline are written back into mem */
1221 mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1222 nicvf_send_msg_to_pf(nic, &mbx);
1223 }
1224
1225 static void
1226 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1227 {
1228
1229 /* Disable timer threshold (doesn't get reset upon CQ reset) */
1230 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1231 /* Disable completion queue */
1232 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1233 /* Reset completion queue */
1234 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1235 }
1236
1237 static void
1238 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1239 {
1240 uint64_t tmp, fifo_state;
1241 int timeout = 10;
1242
1243 /* Save head and tail pointers for freeing up buffers */
1244 rbdr->head =
1245 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1246 rbdr->tail =
1247 nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1248
1249 /*
1250 * If RBDR FIFO is in 'FAIL' state then do a reset first
1251 * before reclaiming.
1252 */
1253 fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1254 if (((fifo_state >> 62) & 0x03) == 0x3) {
1255 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1256 qidx, NICVF_RBDR_RESET);
1257 }
1258
1259 /* Disable RBDR */
1260 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1261 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1262 return;
1263 while (1) {
1264 tmp = nicvf_queue_reg_read(nic,
1265 NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1266 if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1267 break;
1268
1269 DELAY(1000);
1270 timeout--;
1271 if (!timeout) {
1272 device_printf(nic->dev,
1273 "Failed polling on prefetch status\n");
1274 return;
1275 }
1276 }
1277 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1278 NICVF_RBDR_RESET);
1279
1280 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1281 return;
1282 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1283 if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1284 return;
1285 }
1286
1287 /* Configures receive queue */
1288 static void
1289 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1290 int qidx, bool enable)
1291 {
1292 union nic_mbx mbx = {};
1293 struct rcv_queue *rq;
1294 struct rq_cfg rq_cfg;
1295 if_t ifp;
1296 struct lro_ctrl *lro;
1297
1298 ifp = nic->ifp;
1299
1300 rq = &qs->rq[qidx];
1301 rq->enable = enable;
1302
1303 lro = &rq->lro;
1304
1305 /* Disable receive queue */
1306 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1307
1308 if (!rq->enable) {
1309 nicvf_reclaim_rcv_queue(nic, qs, qidx);
1310 /* Free LRO memory */
1311 tcp_lro_free(lro);
1312 rq->lro_enabled = FALSE;
1313 return;
1314 }
1315
1316 /* Configure LRO if enabled */
1317 rq->lro_enabled = FALSE;
1318 if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1319 if (tcp_lro_init(lro) != 0) {
1320 device_printf(nic->dev,
1321 "Failed to initialize LRO for RXQ%d\n", qidx);
1322 } else {
1323 rq->lro_enabled = TRUE;
1324 lro->ifp = nic->ifp;
1325 }
1326 }
1327
1328 rq->cq_qs = qs->vnic_id;
1329 rq->cq_idx = qidx;
1330 rq->start_rbdr_qs = qs->vnic_id;
1331 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1332 rq->cont_rbdr_qs = qs->vnic_id;
1333 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1334 /* All writes of RBDR data are to be loaded into the L2 cache as well */
1335 rq->caching = 1;
1336
1337 /* Send a mailbox msg to PF to config RQ */
1338 mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1339 mbx.rq.qs_num = qs->vnic_id;
1340 mbx.rq.rq_num = qidx;
1341 mbx.rq.cfg = ((uint64_t)rq->caching << 26) | (rq->cq_qs << 19) |
1342 (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1343 (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1344 (rq->start_qs_rbdr_idx);
1345 nicvf_send_msg_to_pf(nic, &mbx);
1346
1347 mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1348 mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1349 nicvf_send_msg_to_pf(nic, &mbx);
1350
1351 /*
1352 * RQ drop config
1353 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1354 */
1355 mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1356 mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1357 nicvf_send_msg_to_pf(nic, &mbx);
1358
1359 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1360
1361 /* Enable Receive queue */
1362 rq_cfg.ena = 1;
1363 rq_cfg.tcp_ena = 0;
1364 nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1365 *(uint64_t *)&rq_cfg);
1366 }
1367
1368 /* Configures completion queue */
1369 static void
1370 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1371 int qidx, boolean_t enable)
1372 {
1373 struct cmp_queue *cq;
1374 struct cq_cfg cq_cfg;
1375
1376 cq = &qs->cq[qidx];
1377 cq->enable = enable;
1378
1379 if (!cq->enable) {
1380 nicvf_reclaim_cmp_queue(nic, qs, qidx);
1381 return;
1382 }
1383
1384 /* Reset completion queue */
1385 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1386
1387 /* Set completion queue base address */
1388 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1389 (uint64_t)(cq->dmem.phys_base));
1390
1391 /* Enable Completion queue */
1392 cq_cfg.ena = 1;
1393 cq_cfg.reset = 0;
1394 cq_cfg.caching = 0;
1395 cq_cfg.qsize = CMP_QSIZE;
1396 cq_cfg.avg_con = 0;
1397 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1398
1399 /* Set threshold value for interrupt generation */
1400 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1401 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1402 nic->cq_coalesce_usecs);
1403 }
1404
1405 /* Configures transmit queue */
1406 static void
1407 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1408 boolean_t enable)
1409 {
1410 union nic_mbx mbx = {};
1411 struct snd_queue *sq;
1412 struct sq_cfg sq_cfg;
1413
1414 sq = &qs->sq[qidx];
1415 sq->enable = enable;
1416
1417 if (!sq->enable) {
1418 nicvf_reclaim_snd_queue(nic, qs, qidx);
1419 return;
1420 }
1421
1422 /* Reset send queue */
1423 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1424
1425 sq->cq_qs = qs->vnic_id;
1426 sq->cq_idx = qidx;
1427
1428 /* Send a mailbox msg to PF to config SQ */
1429 mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1430 mbx.sq.qs_num = qs->vnic_id;
1431 mbx.sq.sq_num = qidx;
1432 mbx.sq.sqs_mode = nic->sqs_mode;
1433 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1434 nicvf_send_msg_to_pf(nic, &mbx);
1435
1436 /* Set queue base address */
1437 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1438 (uint64_t)(sq->dmem.phys_base));
1439
1440 /* Enable send queue & set queue size */
1441 sq_cfg.ena = 1;
1442 sq_cfg.reset = 0;
1443 sq_cfg.ldwb = 0;
1444 sq_cfg.qsize = SND_QSIZE;
1445 sq_cfg.tstmp_bgx_intf = 0;
1446 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1447
1448 /* Set threshold value for interrupt generation */
1449 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1450 }
1451
1452 /* Configures receive buffer descriptor ring */
1453 static void
1454 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1455 boolean_t enable)
1456 {
1457 struct rbdr *rbdr;
1458 struct rbdr_cfg rbdr_cfg;
1459
1460 rbdr = &qs->rbdr[qidx];
1461 nicvf_reclaim_rbdr(nic, rbdr, qidx);
1462 if (!enable)
1463 return;
1464
1465 /* Set descriptor base address */
1466 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1467 (uint64_t)(rbdr->dmem.phys_base));
1468
1469 /* Enable RBDR & set queue size */
1470 /* Buffer size should be in multiples of 128 bytes */
1471 rbdr_cfg.ena = 1;
1472 rbdr_cfg.reset = 0;
1473 rbdr_cfg.ldwb = 0;
1474 rbdr_cfg.qsize = RBDR_SIZE;
1475 rbdr_cfg.avg_con = 0;
1476 rbdr_cfg.lines = rbdr->dma_size / 128;
1477 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1478 *(uint64_t *)&rbdr_cfg);
1479
1480 /* Notify HW */
1481 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1482 qs->rbdr_len - 1);
1483
1484 /* Set threshold value for interrupt generation */
1485 nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1486 rbdr->thresh - 1);
1487 }
1488
1489 /* Requests PF to assign and enable Qset */
1490 void
1491 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
1492 {
1493 union nic_mbx mbx = {};
1494 struct queue_set *qs;
1495 struct qs_cfg *qs_cfg;
1496
1497 qs = nic->qs;
1498 if (qs == NULL) {
1499 device_printf(nic->dev,
1500 "Qset is still not allocated, don't init queues\n");
1501 return;
1502 }
1503
1504 qs->enable = enable;
1505 qs->vnic_id = nic->vf_id;
1506
1507 /* Send a mailbox msg to PF to config Qset */
1508 mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1509 mbx.qs.num = qs->vnic_id;
1510
1511 mbx.qs.cfg = 0;
1512 qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1513 if (qs->enable) {
1514 qs_cfg->ena = 1;
1515 qs_cfg->vnic = qs->vnic_id;
1516 }
1517 nicvf_send_msg_to_pf(nic, &mbx);
1518 }
1519
1520 static void
1521 nicvf_free_resources(struct nicvf *nic)
1522 {
1523 int qidx;
1524 struct queue_set *qs;
1525
1526 qs = nic->qs;
1527 /*
1528 * Remove QS error task first since it has to be dead
1529 * to safely free completion queue tasks.
1530 */
1531 if (qs->qs_err_taskq != NULL) {
1532 /* Shut down QS error tasks */
1533 while (taskqueue_cancel(qs->qs_err_taskq,
1534 &qs->qs_err_task, NULL) != 0) {
1535 taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1536 }
1537 taskqueue_free(qs->qs_err_taskq);
1538 qs->qs_err_taskq = NULL;
1539 }
1540 /* Free receive buffer descriptor ring */
1541 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1542 nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1543
1544 /* Free completion queue */
1545 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1546 nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1547
1548 /* Free send queue */
1549 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1550 nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1551 }
1552
1553 static int
1554 nicvf_alloc_resources(struct nicvf *nic)
1555 {
1556 struct queue_set *qs = nic->qs;
1557 int qidx;
1558
1559 /* Alloc receive buffer descriptor ring */
1560 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1561 if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1562 DMA_BUFFER_LEN, qidx))
1563 goto alloc_fail;
1564 }
1565
1566 /* Alloc send queue */
1567 for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1568 if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1569 goto alloc_fail;
1570 }
1571
1572 /* Alloc completion queue */
1573 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1574 if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1575 goto alloc_fail;
1576 }
1577
1578 /* Allocate QS error taskqueue */
1579 NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1580 qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1581 taskqueue_thread_enqueue, &qs->qs_err_taskq);
1582 taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1583 device_get_nameunit(nic->dev));
1584
1585 return (0);
1586 alloc_fail:
1587 nicvf_free_resources(nic);
1588 return (ENOMEM);
1589 }
1590
1591 int
1592 nicvf_set_qset_resources(struct nicvf *nic)
1593 {
1594 struct queue_set *qs;
1595
1596 qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1597 nic->qs = qs;
1598
1599 /* Set count of each queue */
1600 qs->rbdr_cnt = RBDR_CNT;
1601 qs->rq_cnt = RCV_QUEUE_CNT;
1602
1603 qs->sq_cnt = SND_QUEUE_CNT;
1604 qs->cq_cnt = CMP_QUEUE_CNT;
1605
1606 /* Set queue lengths */
1607 qs->rbdr_len = RCV_BUF_COUNT;
1608 qs->sq_len = SND_QUEUE_LEN;
1609 qs->cq_len = CMP_QUEUE_LEN;
1610
1611 nic->rx_queues = qs->rq_cnt;
1612 nic->tx_queues = qs->sq_cnt;
1613
1614 return (0);
1615 }
1616
1617 int
1618 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1619 {
1620 boolean_t disable = FALSE;
1621 struct queue_set *qs;
1622 int qidx;
1623
1624 qs = nic->qs;
1625 if (qs == NULL)
1626 return (0);
1627
1628 if (enable) {
1629 if (nicvf_alloc_resources(nic) != 0)
1630 return (ENOMEM);
1631
1632 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1633 nicvf_snd_queue_config(nic, qs, qidx, enable);
1634 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1635 nicvf_cmp_queue_config(nic, qs, qidx, enable);
1636 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1637 nicvf_rbdr_config(nic, qs, qidx, enable);
1638 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1639 nicvf_rcv_queue_config(nic, qs, qidx, enable);
1640 } else {
1641 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1642 nicvf_rcv_queue_config(nic, qs, qidx, disable);
1643 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1644 nicvf_rbdr_config(nic, qs, qidx, disable);
1645 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1646 nicvf_snd_queue_config(nic, qs, qidx, disable);
1647 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1648 nicvf_cmp_queue_config(nic, qs, qidx, disable);
1649
1650 nicvf_free_resources(nic);
1651 }
1652
1653 return (0);
1654 }
1655
1656 /*
1657 * Get free descriptors from the SQ
1658 * and return the index of the first one
1659 */
1660 static __inline int
1661 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1662 {
1663 int qentry;
1664
1665 qentry = sq->tail;
1666 atomic_subtract_int(&sq->free_cnt, desc_cnt);
1667 sq->tail += desc_cnt;
1668 sq->tail &= (sq->dmem.q_len - 1);
1669
1670 return (qentry);
1671 }
1672
1673 /* Free descriptor back to SQ for future use */
1674 static void
1675 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1676 {
1677
1678 atomic_add_int(&sq->free_cnt, desc_cnt);
1679 sq->head += desc_cnt;
1680 sq->head &= (sq->dmem.q_len - 1);
1681 }
1682
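/* Return the index of the next SQ entry, wrapping at the ring size. */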
1683 static __inline int
1684 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1685 {
1686 qentry++;
1687 qentry &= (sq->dmem.q_len - 1);
1688 return (qentry);
1689 }
1690
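/* Set the enable bit in the SQ configuration and ring the doorbell. */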
1691 static void
1692 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1693 {
1694 uint64_t sq_cfg;
1695
1696 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1697 sq_cfg |= NICVF_SQ_EN;
1698 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1699 /* Ring doorbell so that H/W restarts processing SQEs */
1700 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1701 }
1702
1703 static void
1704 nicvf_sq_disable(struct nicvf *nic, int qidx)
1705 {
1706 uint64_t sq_cfg;
1707
1708 sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1709 sq_cfg &= ~NICVF_SQ_EN;
1710 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1711 }
1712
1713 static void
1714 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1715 {
1716 uint64_t head;
1717 struct snd_buff *snd_buff;
1718 struct sq_hdr_subdesc *hdr;
1719
1720 NICVF_TX_LOCK(sq);
1721 head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1722 while (sq->head != head) {
1723 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1724 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1725 nicvf_put_sq_desc(sq, 1);
1726 continue;
1727 }
1728 snd_buff = &sq->snd_buff[sq->head];
1729 if (snd_buff->mbuf != NULL) {
1730 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1731 m_freem(snd_buff->mbuf);
1732 sq->snd_buff[sq->head].mbuf = NULL;
1733 }
1734 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1735 }
1736 NICVF_TX_UNLOCK(sq);
1737 }
1738
1739 /*
1740 * Add SQ HEADER subdescriptor.
1741 * First subdescriptor for every send descriptor.
1742 */
1743 static __inline int
1744 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1745 int subdesc_cnt, struct mbuf *mbuf, int len)
1746 {
1747 struct nicvf *nic;
1748 struct sq_hdr_subdesc *hdr;
1749 struct ether_vlan_header *eh;
1750 #ifdef INET
1751 struct ip *ip;
1752 #endif
1753 #if defined(INET6) || defined(INET)
1754 struct tcphdr *th;
1755 #endif
1756 #ifdef INET
1757 int iphlen;
1758 #endif
1759 int ehdrlen, poff, proto;
1760 uint16_t etype;
1761
1762 nic = sq->nic;
1763
1764 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1765 sq->snd_buff[qentry].mbuf = mbuf;
1766
1767 memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1768 hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1769 /* Enable notification via CQE after processing SQE */
1770 hdr->post_cqe = 1;
1771 /* Number of subdescriptors following this one */
1772 hdr->subdesc_cnt = subdesc_cnt;
1773 hdr->tot_len = len;
1774
1775 eh = mtod(mbuf, struct ether_vlan_header *);
1776 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1777 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1778 etype = ntohs(eh->evl_proto);
1779 } else {
1780 ehdrlen = ETHER_HDR_LEN;
1781 etype = ntohs(eh->evl_encap_proto);
1782 }
1783
1784 poff = proto = -1;
1785 switch (etype) {
1786 #ifdef INET6
1787 case ETHERTYPE_IPV6:
1788 if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
1789 mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip6_hdr));
1790 sq->snd_buff[qentry].mbuf = NULL;
1791 if (mbuf == NULL)
1792 return (ENOBUFS);
1793 }
1794 poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
1795 if (poff < 0)
1796 return (ENOBUFS);
1797 poff += ehdrlen;
1798 break;
1799 #endif
1800 #ifdef INET
1801 case ETHERTYPE_IP:
1802 if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1803 mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1804 sq->snd_buff[qentry].mbuf = mbuf;
1805 if (mbuf == NULL)
1806 return (ENOBUFS);
1807 }
1808 if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
1809 hdr->csum_l3 = 1; /* Enable IP csum calculation */
1810
1811 ip = (struct ip *)(mbuf->m_data + ehdrlen);
1812 iphlen = ip->ip_hl << 2;
1813 poff = ehdrlen + iphlen;
1814 proto = ip->ip_p;
1815 break;
1816 #endif
1817 }
1818
1819 #if defined(INET6) || defined(INET)
1820 if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
1821 switch (proto) {
1822 case IPPROTO_TCP:
1823 if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1824 break;
1825
1826 if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1827 mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1828 sq->snd_buff[qentry].mbuf = mbuf;
1829 if (mbuf == NULL)
1830 return (ENOBUFS);
1831 }
1832 hdr->csum_l4 = SEND_L4_CSUM_TCP;
1833 break;
1834 case IPPROTO_UDP:
1835 if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1836 break;
1837
1838 if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1839 mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1840 sq->snd_buff[qentry].mbuf = mbuf;
1841 if (mbuf == NULL)
1842 return (ENOBUFS);
1843 }
1844 hdr->csum_l4 = SEND_L4_CSUM_UDP;
1845 break;
1846 case IPPROTO_SCTP:
1847 if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1848 break;
1849
1850 if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1851 mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1852 sq->snd_buff[qentry].mbuf = mbuf;
1853 if (mbuf == NULL)
1854 return (ENOBUFS);
1855 }
1856 hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1857 break;
1858 default:
1859 break;
1860 }
1861 hdr->l3_offset = ehdrlen;
1862 hdr->l4_offset = poff;
1863 }
1864
1865 if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1866 th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
1867
1868 hdr->tso = 1;
1869 hdr->tso_start = poff + (th->th_off * 4);
1870 hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
1871 hdr->inner_l3_offset = ehdrlen - 2;
1872 nic->drv_stats.tx_tso++;
1873 }
1874 #endif
1875
1876 return (0);
1877 }
1878
1879 /*
1880 * SQ GATHER subdescriptor
1881 * Must follow HDR descriptor
1882 */
1883 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1884 int size, uint64_t data)
1885 {
1886 struct sq_gather_subdesc *gather;
1887
1888 qentry &= (sq->dmem.q_len - 1);
1889 gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1890
1891 memset(gather, 0, SND_QUEUE_DESC_SIZE);
1892 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1893 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1894 gather->size = size;
1895 gather->addr = data;
1896 }
1897
1898 /* Put an mbuf onto an SQ for transmission. */
1899 static int
1900 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1901 {
1902 bus_dma_segment_t segs[256];
1903 struct snd_buff *snd_buff;
1904 size_t seg;
1905 int nsegs, qentry;
1906 int subdesc_cnt;
1907 int err;
1908
1909 NICVF_TX_LOCK_ASSERT(sq);
1910
1911 if (sq->free_cnt == 0)
1912 return (ENOBUFS);
1913
1914 snd_buff = &sq->snd_buff[sq->tail];
1915
1916 err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1917 *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1918 if (__predict_false(err != 0)) {
1919 /* ARM64TODO: Add mbuf defragmenting if we lack maps */
1920 m_freem(*mbufp);
1921 *mbufp = NULL;
1922 return (err);
1923 }
1924
1925 /* Compute how many subdescriptors are required: one header plus one gather per DMA segment */
1926 subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
1927 if (subdesc_cnt > sq->free_cnt) {
1928 /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1929 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1930 return (ENOBUFS);
1931 }
1932
1933 qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1934
1935 /* Add SQ header subdesc */
1936 err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1937 (*mbufp)->m_pkthdr.len);
1938 if (err != 0) {
1939 nicvf_put_sq_desc(sq, subdesc_cnt);
1940 bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1941 if (err == ENOBUFS) {
1942 m_freem(*mbufp);
1943 *mbufp = NULL;
1944 }
1945 return (err);
1946 }
1947
1948 /* Add SQ gather subdescs */
1949 for (seg = 0; seg < nsegs; seg++) {
1950 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1951 nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1952 segs[seg].ds_addr);
1953 }
1954
1955 /* make sure all memory stores are done before ringing doorbell */
1956 bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1957
1958 dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1959 __func__, sq->idx, subdesc_cnt);
1960 /* Inform HW to xmit new packet */
1961 nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1962 sq->idx, subdesc_cnt);
1963 return (0);
1964 }
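/*
 * Illustrative example (not from the original sources): an mbuf that maps to
 * three DMA segments consumes four SQ entries, a HDR subdescriptor with
 * subdesc_cnt = 3 and tot_len = m_pkthdr.len followed by three GATHER
 * subdescriptors, and the doorbell is then rung with a count of 4.
 */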
1965
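/*
 * The per-fragment receive-buffer lengths are 16-bit fields packed four to a
 * 64-bit word in the CQE, so on big-endian hosts the index within each group
 * of four is reversed to pick up the right length.
 */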
1966 static __inline u_int
1967 frag_num(u_int i)
1968 {
1969 #if BYTE_ORDER == BIG_ENDIAN
1970 return ((i & ~3) + 3 - (i & 3));
1971 #else
1972 return (i);
1973 #endif
1974 }
1975
1976 /* Returns MBUF for a received packet */
1977 struct mbuf *
1978 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1979 {
1980 int frag;
1981 int payload_len = 0;
1982 struct mbuf *mbuf;
1983 struct mbuf *mbuf_frag;
1984 uint16_t *rb_lens = NULL;
1985 uint64_t *rb_ptrs = NULL;
1986
1987 mbuf = NULL;
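/* Receive-buffer lengths start at 64-bit word 3 of the CQE; the buffer pointers start at word 6. */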
1988 rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1989 rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1990
1991 dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1992 __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1993
1994 for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1995 payload_len = rb_lens[frag_num(frag)];
1996 if (frag == 0) {
1997 /* First fragment */
1998 mbuf = nicvf_rb_ptr_to_mbuf(nic,
1999 (*rb_ptrs - cqe_rx->align_pad));
2000 mbuf->m_len = payload_len;
2001 mbuf->m_data += cqe_rx->align_pad;
2002 if_setrcvif(mbuf, nic->ifp);
2003 } else {
2004 /* Add fragments */
2005 mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
2006 m_append(mbuf, payload_len, mbuf_frag->m_data);
2007 m_freem(mbuf_frag);
2008 }
2009 /* Next buffer pointer */
2010 rb_ptrs++;
2011 }
2012
2013 if (__predict_true(mbuf != NULL)) {
2014 m_fixhdr(mbuf);
2015 mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2016 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2017 if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2018 /*
2019 * HW by default verifies IP & TCP/UDP/SCTP checksums
2020 */
2021 if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2022 mbuf->m_pkthdr.csum_flags =
2023 (CSUM_IP_CHECKED | CSUM_IP_VALID);
2024 }
2025
2026 switch (cqe_rx->l4_type) {
2027 case L4TYPE_UDP:
2028 case L4TYPE_TCP: /* fall through */
2029 mbuf->m_pkthdr.csum_flags |=
2030 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2031 mbuf->m_pkthdr.csum_data = 0xffff;
2032 break;
2033 case L4TYPE_SCTP:
2034 mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2035 break;
2036 default:
2037 break;
2038 }
2039 }
2040 }
2041
2042 return (mbuf);
2043 }
2044
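/*
 * The register names suggest the usual write-1-to-set/write-1-to-clear scheme:
 * setting a bit in NIC_VF_ENA_W1S enables an interrupt source, setting the
 * same bit in NIC_VF_ENA_W1C disables it, and writing it to NIC_VF_INT
 * acknowledges a pending interrupt.
 */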
2045 /* Enable interrupt */
2046 void
2047 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2048 {
2049 uint64_t reg_val;
2050
2051 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2052
2053 switch (int_type) {
2054 case NICVF_INTR_CQ:
2055 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2056 break;
2057 case NICVF_INTR_SQ:
2058 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2059 break;
2060 case NICVF_INTR_RBDR:
2061 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2062 break;
2063 case NICVF_INTR_PKT_DROP:
2064 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2065 break;
2066 case NICVF_INTR_TCP_TIMER:
2067 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2068 break;
2069 case NICVF_INTR_MBOX:
2070 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2071 break;
2072 case NICVF_INTR_QS_ERR:
2073 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2074 break;
2075 default:
2076 device_printf(nic->dev,
2077 "Failed to enable interrupt: unknown type\n");
2078 break;
2079 }
2080
2081 nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
2082 }
2083
2084 /* Disable interrupt */
2085 void
2086 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2087 {
2088 uint64_t reg_val = 0;
2089
2090 switch (int_type) {
2091 case NICVF_INTR_CQ:
2092 reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2093 break;
2094 case NICVF_INTR_SQ:
2095 reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2096 break;
2097 case NICVF_INTR_RBDR:
2098 reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2099 break;
2100 case NICVF_INTR_PKT_DROP:
2101 reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2102 break;
2103 case NICVF_INTR_TCP_TIMER:
2104 reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2105 break;
2106 case NICVF_INTR_MBOX:
2107 reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2108 break;
2109 case NICVF_INTR_QS_ERR:
2110 reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2111 break;
2112 default:
2113 device_printf(nic->dev,
2114 "Failed to disable interrupt: unknown type\n");
2115 break;
2116 }
2117
2118 nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2119 }
2120
2121 /* Clear interrupt */
2122 void
2123 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2124 {
2125 uint64_t reg_val = 0;
2126
2127 switch (int_type) {
2128 case NICVF_INTR_CQ:
2129 reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2130 break;
2131 case NICVF_INTR_SQ:
2132 reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2133 break;
2134 case NICVF_INTR_RBDR:
2135 reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2136 break;
2137 case NICVF_INTR_PKT_DROP:
2138 reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2139 break;
2140 case NICVF_INTR_TCP_TIMER:
2141 reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2142 break;
2143 case NICVF_INTR_MBOX:
2144 reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2145 break;
2146 case NICVF_INTR_QS_ERR:
2147 reg_val = (1UL << NICVF_INTR_QS_ERR_SHIFT);
2148 break;
2149 default:
2150 device_printf(nic->dev,
2151 "Failed to clear interrupt: unknown type\n");
2152 break;
2153 }
2154
2155 nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2156 }
2157
2158 /* Check if interrupt is enabled */
2159 int
2160 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2161 {
2162 uint64_t reg_val;
2163 uint64_t mask = 0xff;
2164
2165 reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2166
2167 switch (int_type) {
2168 case NICVF_INTR_CQ:
2169 mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2170 break;
2171 case NICVF_INTR_SQ:
2172 mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2173 break;
2174 case NICVF_INTR_RBDR:
2175 mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2176 break;
2177 case NICVF_INTR_PKT_DROP:
2178 mask = NICVF_INTR_PKT_DROP_MASK;
2179 break;
2180 case NICVF_INTR_TCP_TIMER:
2181 mask = NICVF_INTR_TCP_TIMER_MASK;
2182 break;
2183 case NICVF_INTR_MBOX:
2184 mask = NICVF_INTR_MBOX_MASK;
2185 break;
2186 case NICVF_INTR_QS_ERR:
2187 mask = NICVF_INTR_QS_ERR_MASK;
2188 break;
2189 default:
2190 device_printf(nic->dev,
2191 "Failed to check interrupt enable: unknown type\n");
2192 break;
2193 }
2194
2195 return (reg_val & mask);
2196 }
2197
2198 void
2199 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2200 {
2201 struct rcv_queue *rq;
2202
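/*
 * Per-queue statistics registers are read by folding the queue index and the
 * statistic selector into the register offset.
 */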
2203 #define GET_RQ_STATS(reg) \
2204 nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2205 (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2206
2207 rq = &nic->qs->rq[rq_idx];
2208 rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2209 rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2210 }
2211
2212 void
2213 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2214 {
2215 struct snd_queue *sq;
2216
2217 #define GET_SQ_STATS(reg) \
2218 nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2219 (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2220
2221 sq = &nic->qs->sq[sq_idx];
2222 sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2223 sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
2224 }
2225
2226 /* Check for errors in a receive completion queue entry */
2227 int
2228 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2229 struct cqe_rx_t *cqe_rx)
2230 {
2231 struct nicvf_hw_stats *stats = &nic->hw_stats;
2232 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2233
2234 if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2235 drv_stats->rx_frames_ok++;
2236 return (0);
2237 }
2238
2239 switch (cqe_rx->err_opcode) {
2240 case CQ_RX_ERROP_RE_PARTIAL:
2241 stats->rx_bgx_truncated_pkts++;
2242 break;
2243 case CQ_RX_ERROP_RE_JABBER:
2244 stats->rx_jabber_errs++;
2245 break;
2246 case CQ_RX_ERROP_RE_FCS:
2247 stats->rx_fcs_errs++;
2248 break;
2249 case CQ_RX_ERROP_RE_RX_CTL:
2250 stats->rx_bgx_errs++;
2251 break;
2252 case CQ_RX_ERROP_PREL2_ERR:
2253 stats->rx_prel2_errs++;
2254 break;
2255 case CQ_RX_ERROP_L2_MAL:
2256 stats->rx_l2_hdr_malformed++;
2257 break;
2258 case CQ_RX_ERROP_L2_OVERSIZE:
2259 stats->rx_oversize++;
2260 break;
2261 case CQ_RX_ERROP_L2_UNDERSIZE:
2262 stats->rx_undersize++;
2263 break;
2264 case CQ_RX_ERROP_L2_LENMISM:
2265 stats->rx_l2_len_mismatch++;
2266 break;
2267 case CQ_RX_ERROP_L2_PCLP:
2268 stats->rx_l2_pclp++;
2269 break;
2270 case CQ_RX_ERROP_IP_NOT:
2271 stats->rx_ip_ver_errs++;
2272 break;
2273 case CQ_RX_ERROP_IP_CSUM_ERR:
2274 stats->rx_ip_csum_errs++;
2275 break;
2276 case CQ_RX_ERROP_IP_MAL:
2277 stats->rx_ip_hdr_malformed++;
2278 break;
2279 case CQ_RX_ERROP_IP_MALD:
2280 stats->rx_ip_payload_malformed++;
2281 break;
2282 case CQ_RX_ERROP_IP_HOP:
2283 stats->rx_ip_ttl_errs++;
2284 break;
2285 case CQ_RX_ERROP_L3_PCLP:
2286 stats->rx_l3_pclp++;
2287 break;
2288 case CQ_RX_ERROP_L4_MAL:
2289 stats->rx_l4_malformed++;
2290 break;
2291 case CQ_RX_ERROP_L4_CHK:
2292 stats->rx_l4_csum_errs++;
2293 break;
2294 case CQ_RX_ERROP_UDP_LEN:
2295 stats->rx_udp_len_errs++;
2296 break;
2297 case CQ_RX_ERROP_L4_PORT:
2298 stats->rx_l4_port_errs++;
2299 break;
2300 case CQ_RX_ERROP_TCP_FLAG:
2301 stats->rx_tcp_flag_errs++;
2302 break;
2303 case CQ_RX_ERROP_TCP_OFFSET:
2304 stats->rx_tcp_offset_errs++;
2305 break;
2306 case CQ_RX_ERROP_L4_PCLP:
2307 stats->rx_l4_pclp++;
2308 break;
2309 case CQ_RX_ERROP_RBDR_TRUNC:
2310 stats->rx_truncated_pkts++;
2311 break;
2312 }
2313
2314 return (1);
2315 }
2316
2317 /* Check for errors in a send completion queue entry */
2318 int
2319 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2320 struct cqe_send_t *cqe_tx)
2321 {
2322 struct cmp_queue_stats *stats = &cq->stats;
2323
2324 switch (cqe_tx->send_status) {
2325 case CQ_TX_ERROP_GOOD:
2326 stats->tx.good++;
2327 return (0);
2328 case CQ_TX_ERROP_DESC_FAULT:
2329 stats->tx.desc_fault++;
2330 break;
2331 case CQ_TX_ERROP_HDR_CONS_ERR:
2332 stats->tx.hdr_cons_err++;
2333 break;
2334 case CQ_TX_ERROP_SUBDC_ERR:
2335 stats->tx.subdesc_err++;
2336 break;
2337 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2338 stats->tx.imm_size_oflow++;
2339 break;
2340 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2341 stats->tx.data_seq_err++;
2342 break;
2343 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2344 stats->tx.mem_seq_err++;
2345 break;
2346 case CQ_TX_ERROP_LOCK_VIOL:
2347 stats->tx.lock_viol++;
2348 break;
2349 case CQ_TX_ERROP_DATA_FAULT:
2350 stats->tx.data_fault++;
2351 break;
2352 case CQ_TX_ERROP_TSTMP_CONFLICT:
2353 stats->tx.tstmp_conflict++;
2354 break;
2355 case CQ_TX_ERROP_TSTMP_TIMEOUT:
2356 stats->tx.tstmp_timeout++;
2357 break;
2358 case CQ_TX_ERROP_MEM_FAULT:
2359 stats->tx.mem_fault++;
2360 break;
2361 case CQ_TX_ERROP_CK_OVERLAP:
2362 stats->tx.csum_overlap++;
2363 break;
2364 case CQ_TX_ERROP_CK_OFLOW:
2365 stats->tx.csum_overflow++;
2366 break;
2367 }
2368
2369 return (1);
2370 }
2371