xref: /freebsd/sys/dev/vnic/nicvf_queues.c (revision fe3e92e6868dce2ed94c98428b8df1f27ed3ef63)
1 /*
2  * Copyright (C) 2015 Cavium Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 #include <sys/cdefs.h>
28 #include "opt_inet.h"
29 #include "opt_inet6.h"
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bitset.h>
34 #include <sys/bitstring.h>
35 #include <sys/buf_ring.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/rman.h>
42 #include <sys/pciio.h>
43 #include <sys/pcpu.h>
44 #include <sys/proc.h>
45 #include <sys/sockio.h>
46 #include <sys/socket.h>
47 #include <sys/stdatomic.h>
48 #include <sys/cpuset.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/smp.h>
52 #include <sys/taskqueue.h>
53 
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 
57 #include <machine/bus.h>
58 #include <machine/vmparam.h>
59 
60 #include <net/if.h>
61 #include <net/if_var.h>
62 #include <net/if_media.h>
63 #include <net/ifq.h>
64 #include <net/bpf.h>
65 #include <net/ethernet.h>
66 
67 #include <netinet/in_systm.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <netinet/ip.h>
71 #include <netinet/ip6.h>
72 #include <netinet/sctp.h>
73 #include <netinet/tcp.h>
74 #include <netinet/tcp_lro.h>
75 #include <netinet/udp.h>
76 
77 #include <netinet6/ip6_var.h>
78 
79 #include <dev/pci/pcireg.h>
80 #include <dev/pci/pcivar.h>
81 
82 #include "thunder_bgx.h"
83 #include "nic_reg.h"
84 #include "nic.h"
85 #include "q_struct.h"
86 #include "nicvf_queues.h"
87 
88 #define	DEBUG
89 #undef DEBUG
90 
91 #ifdef DEBUG
92 #define	dprintf(dev, fmt, ...)	device_printf(dev, fmt, ##__VA_ARGS__)
93 #else
94 #define	dprintf(dev, fmt, ...)
95 #endif
96 
97 MALLOC_DECLARE(M_NICVF);
98 
99 static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *);
100 static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *);
101 static void nicvf_sq_disable(struct nicvf *, int);
102 static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int);
103 static void nicvf_put_sq_desc(struct snd_queue *, int);
104 static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int,
105     boolean_t);
106 static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int);
107 
108 static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **);
109 
110 static void nicvf_rbdr_task(void *, int);
111 static void nicvf_rbdr_task_nowait(void *, int);
112 
113 struct rbuf_info {
114 	bus_dma_tag_t	dmat;
115 	bus_dmamap_t	dmap;
116 	struct mbuf *	mbuf;
117 };
118 
119 #define GET_RBUF_INFO(x)						\
120 	((struct rbuf_info *)((char *)(x) - NICVF_RCV_BUF_ALIGN_BYTES))
121 
122 /* Poll a register for a specific value */
nicvf_poll_reg(struct nicvf * nic,int qidx,uint64_t reg,int bit_pos,int bits,int val)123 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
124 			  uint64_t reg, int bit_pos, int bits, int val)
125 {
126 	uint64_t bit_mask;
127 	uint64_t reg_val;
128 	int timeout = 10;
129 
130 	bit_mask = (1UL << bits) - 1;
131 	bit_mask = (bit_mask << bit_pos);
132 
133 	while (timeout) {
134 		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
135 		if (((reg_val & bit_mask) >> bit_pos) == val)
136 			return (0);
137 
138 		DELAY(1000);
139 		timeout--;
140 	}
141 	device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg);
142 	return (ETIMEDOUT);
143 }
144 
145 /* Callback for bus_dmamap_load() */
146 static void
nicvf_dmamap_q_cb(void * arg,bus_dma_segment_t * segs,int nseg,int error)147 nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
148 {
149 	bus_addr_t *paddr;
150 
151 	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
152 	paddr = arg;
153 	*paddr = segs->ds_addr;
154 }
155 
156 /* Allocate memory for a queue's descriptors */
157 static int
nicvf_alloc_q_desc_mem(struct nicvf * nic,struct q_desc_mem * dmem,int q_len,int desc_size,int align_bytes)158 nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
159     int q_len, int desc_size, int align_bytes)
160 {
161 	int err, err_dmat __diagused;
162 
163 	/* Create DMA tag first */
164 	err = bus_dma_tag_create(
165 	    bus_get_dma_tag(nic->dev),		/* parent tag */
166 	    align_bytes,			/* alignment */
167 	    0,					/* boundary */
168 	    BUS_SPACE_MAXADDR,			/* lowaddr */
169 	    BUS_SPACE_MAXADDR,			/* highaddr */
170 	    NULL, NULL,				/* filtfunc, filtfuncarg */
171 	    (q_len * desc_size),		/* maxsize */
172 	    1,					/* nsegments */
173 	    (q_len * desc_size),		/* maxsegsize */
174 	    0,					/* flags */
175 	    NULL, NULL,				/* lockfunc, lockfuncarg */
176 	    &dmem->dmat);			/* dmat */
177 
178 	if (err != 0) {
179 		device_printf(nic->dev,
180 		    "Failed to create busdma tag for descriptors ring\n");
181 		return (err);
182 	}
183 
184 	/* Allocate segment of continuous DMA safe memory */
185 	err = bus_dmamem_alloc(
186 	    dmem->dmat,				/* DMA tag */
187 	    &dmem->base,			/* virtual address */
188 	    (BUS_DMA_NOWAIT | BUS_DMA_ZERO),	/* flags */
189 	    &dmem->dmap);			/* DMA map */
190 	if (err != 0) {
191 		device_printf(nic->dev, "Failed to allocate DMA safe memory for"
192 		    "descriptors ring\n");
193 		goto dmamem_fail;
194 	}
195 
196 	err = bus_dmamap_load(
197 	    dmem->dmat,
198 	    dmem->dmap,
199 	    dmem->base,
200 	    (q_len * desc_size),		/* allocation size */
201 	    nicvf_dmamap_q_cb,			/* map to DMA address cb. */
202 	    &dmem->phys_base,			/* physical address */
203 	    BUS_DMA_NOWAIT);
204 	if (err != 0) {
205 		device_printf(nic->dev,
206 		    "Cannot load DMA map of descriptors ring\n");
207 		goto dmamap_fail;
208 	}
209 
210 	dmem->q_len = q_len;
211 	dmem->size = (desc_size * q_len);
212 
213 	return (0);
214 
215 dmamap_fail:
216 	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
217 	dmem->phys_base = 0;
218 dmamem_fail:
219 	err_dmat = bus_dma_tag_destroy(dmem->dmat);
220 	dmem->base = NULL;
221 	KASSERT(err_dmat == 0,
222 	    ("%s: Trying to destroy BUSY DMA tag", __func__));
223 
224 	return (err);
225 }
226 
227 /* Free queue's descriptor memory */
228 static void
nicvf_free_q_desc_mem(struct nicvf * nic,struct q_desc_mem * dmem)229 nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
230 {
231 	int err __diagused;
232 
233 	if ((dmem == NULL) || (dmem->base == NULL))
234 		return;
235 
236 	/* Unload a map */
237 	bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD);
238 	bus_dmamap_unload(dmem->dmat, dmem->dmap);
239 	/* Free DMA memory */
240 	bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap);
241 	/* Destroy DMA tag */
242 	err = bus_dma_tag_destroy(dmem->dmat);
243 
244 	KASSERT(err == 0,
245 	    ("%s: Trying to destroy BUSY DMA tag", __func__));
246 
247 	dmem->phys_base = 0;
248 	dmem->base = NULL;
249 }
250 
251 /*
252  * Allocate buffer for packet reception
253  * HW returns memory address where packet is DMA'ed but not a pointer
254  * into RBDR ring, so save buffer address at the start of fragment and
255  * align the start address to a cache aligned address
256  */
257 static __inline int
nicvf_alloc_rcv_buffer(struct nicvf * nic,struct rbdr * rbdr,bus_dmamap_t dmap,int mflags,uint32_t buf_len,bus_addr_t * rbuf)258 nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
259     bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf)
260 {
261 	struct mbuf *mbuf;
262 	struct rbuf_info *rinfo;
263 	bus_dma_segment_t segs[1];
264 	int nsegs;
265 	int err;
266 
267 	mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES);
268 	if (mbuf == NULL)
269 		return (ENOMEM);
270 
271 	/*
272 	 * The length is equal to the actual length + one 128b line
273 	 * used as a room for rbuf_info structure.
274 	 */
275 	mbuf->m_len = mbuf->m_pkthdr.len = buf_len;
276 
277 	err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs,
278 	    &nsegs, BUS_DMA_NOWAIT);
279 	if (err != 0) {
280 		device_printf(nic->dev,
281 		    "Failed to map mbuf into DMA visible memory, err: %d\n",
282 		    err);
283 		m_freem(mbuf);
284 		bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap);
285 		return (err);
286 	}
287 	if (nsegs != 1)
288 		panic("Unexpected number of DMA segments for RB: %d", nsegs);
289 	/*
290 	 * Now use the room for rbuf_info structure
291 	 * and adjust mbuf data and length.
292 	 */
293 	rinfo = (struct rbuf_info *)mbuf->m_data;
294 	m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES);
295 
296 	rinfo->dmat = rbdr->rbdr_buff_dmat;
297 	rinfo->dmap = dmap;
298 	rinfo->mbuf = mbuf;
299 
300 	*rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES;
301 
302 	return (0);
303 }
304 
305 /* Retrieve mbuf for received packet */
306 static struct mbuf *
nicvf_rb_ptr_to_mbuf(struct nicvf * nic,bus_addr_t rb_ptr)307 nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr)
308 {
309 	struct mbuf *mbuf;
310 	struct rbuf_info *rinfo;
311 
312 	/* Get buffer start address and alignment offset */
313 	rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr));
314 
315 	/* Now retrieve mbuf to give to stack */
316 	mbuf = rinfo->mbuf;
317 	if (__predict_false(mbuf == NULL)) {
318 		panic("%s: Received packet fragment with NULL mbuf",
319 		    device_get_nameunit(nic->dev));
320 	}
321 	/*
322 	 * Clear the mbuf in the descriptor to indicate
323 	 * that this slot is processed and free to use.
324 	 */
325 	rinfo->mbuf = NULL;
326 
327 	bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD);
328 	bus_dmamap_unload(rinfo->dmat, rinfo->dmap);
329 
330 	return (mbuf);
331 }
332 
333 /* Allocate RBDR ring and populate receive buffers */
334 static int
nicvf_init_rbdr(struct nicvf * nic,struct rbdr * rbdr,int ring_len,int buf_size,int qidx)335 nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len,
336     int buf_size, int qidx)
337 {
338 	bus_dmamap_t dmap;
339 	bus_addr_t rbuf;
340 	struct rbdr_entry_t *desc;
341 	int idx;
342 	int err;
343 
344 	/* Allocate rbdr descriptors ring */
345 	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
346 	    sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES);
347 	if (err != 0) {
348 		device_printf(nic->dev,
349 		    "Failed to create RBDR descriptors ring\n");
350 		return (err);
351 	}
352 
353 	rbdr->desc = rbdr->dmem.base;
354 	/*
355 	 * Buffer size has to be in multiples of 128 bytes.
356 	 * Make room for metadata of size of one line (128 bytes).
357 	 */
358 	rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES;
359 	rbdr->enable = TRUE;
360 	rbdr->thresh = RBDR_THRESH;
361 	rbdr->nic = nic;
362 	rbdr->idx = qidx;
363 
364 	/*
365 	 * Create DMA tag for Rx buffers.
366 	 * Each map created using this tag is intended to store Rx payload for
367 	 * one fragment and one header structure containing rbuf_info (thus
368 	 * additional 128 byte line since RB must be a multiple of 128 byte
369 	 * cache line).
370 	 */
371 	if (buf_size > MCLBYTES) {
372 		device_printf(nic->dev,
373 		    "Buffer size to large for mbuf cluster\n");
374 		return (EINVAL);
375 	}
376 	err = bus_dma_tag_create(
377 	    bus_get_dma_tag(nic->dev),		/* parent tag */
378 	    NICVF_RCV_BUF_ALIGN_BYTES,		/* alignment */
379 	    0,					/* boundary */
380 	    DMAP_MAX_PHYSADDR,			/* lowaddr */
381 	    DMAP_MIN_PHYSADDR,			/* highaddr */
382 	    NULL, NULL,				/* filtfunc, filtfuncarg */
383 	    roundup2(buf_size, MCLBYTES),	/* maxsize */
384 	    1,					/* nsegments */
385 	    roundup2(buf_size, MCLBYTES),	/* maxsegsize */
386 	    0,					/* flags */
387 	    NULL, NULL,				/* lockfunc, lockfuncarg */
388 	    &rbdr->rbdr_buff_dmat);		/* dmat */
389 
390 	if (err != 0) {
391 		device_printf(nic->dev,
392 		    "Failed to create busdma tag for RBDR buffers\n");
393 		return (err);
394 	}
395 
396 	rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) *
397 	    ring_len, M_NICVF, (M_WAITOK | M_ZERO));
398 
399 	for (idx = 0; idx < ring_len; idx++) {
400 		err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap);
401 		if (err != 0) {
402 			device_printf(nic->dev,
403 			    "Failed to create DMA map for RB\n");
404 			return (err);
405 		}
406 		rbdr->rbdr_buff_dmaps[idx] = dmap;
407 
408 		err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK,
409 		    DMA_BUFFER_LEN, &rbuf);
410 		if (err != 0)
411 			return (err);
412 
413 		desc = GET_RBDR_DESC(rbdr, idx);
414 		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
415 	}
416 
417 	/* Allocate taskqueue */
418 	TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr);
419 	TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr);
420 	rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK,
421 	    taskqueue_thread_enqueue, &rbdr->rbdr_taskq);
422 	taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq",
423 	    device_get_nameunit(nic->dev));
424 
425 	return (0);
426 }
427 
428 /* Free RBDR ring and its receive buffers */
429 static void
nicvf_free_rbdr(struct nicvf * nic,struct rbdr * rbdr)430 nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
431 {
432 	struct mbuf *mbuf;
433 	struct queue_set *qs;
434 	struct rbdr_entry_t *desc;
435 	struct rbuf_info *rinfo;
436 	bus_addr_t buf_addr;
437 	int head, tail, idx;
438 	int err __diagused;
439 
440 	qs = nic->qs;
441 
442 	if ((qs == NULL) || (rbdr == NULL))
443 		return;
444 
445 	rbdr->enable = FALSE;
446 	if (rbdr->rbdr_taskq != NULL) {
447 		/* Remove tasks */
448 		while (taskqueue_cancel(rbdr->rbdr_taskq,
449 		    &rbdr->rbdr_task_nowait, NULL) != 0) {
450 			/* Finish the nowait task first */
451 			taskqueue_drain(rbdr->rbdr_taskq,
452 			    &rbdr->rbdr_task_nowait);
453 		}
454 		taskqueue_free(rbdr->rbdr_taskq);
455 		rbdr->rbdr_taskq = NULL;
456 
457 		while (taskqueue_cancel(taskqueue_thread,
458 		    &rbdr->rbdr_task, NULL) != 0) {
459 			/* Now finish the sleepable task */
460 			taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task);
461 		}
462 	}
463 
464 	/*
465 	 * Free all of the memory under the RB descriptors.
466 	 * There are assumptions here:
467 	 * 1. Corresponding RBDR is disabled
468 	 *    - it is safe to operate using head and tail indexes
469 	 * 2. All bffers that were received are properly freed by
470 	 *    the receive handler
471 	 *    - there is no need to unload DMA map and free MBUF for other
472 	 *      descriptors than unused ones
473 	 */
474 	if (rbdr->rbdr_buff_dmat != NULL) {
475 		head = rbdr->head;
476 		tail = rbdr->tail;
477 		while (head != tail) {
478 			desc = GET_RBDR_DESC(rbdr, head);
479 			buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
480 			rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
481 			bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
482 			mbuf = rinfo->mbuf;
483 			/* This will destroy everything including rinfo! */
484 			m_freem(mbuf);
485 			head++;
486 			head &= (rbdr->dmem.q_len - 1);
487 		}
488 		/* Free tail descriptor */
489 		desc = GET_RBDR_DESC(rbdr, tail);
490 		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
491 		rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr));
492 		bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap);
493 		mbuf = rinfo->mbuf;
494 		/* This will destroy everything including rinfo! */
495 		m_freem(mbuf);
496 
497 		/* Destroy DMA maps */
498 		for (idx = 0; idx < qs->rbdr_len; idx++) {
499 			if (rbdr->rbdr_buff_dmaps[idx] == NULL)
500 				continue;
501 			err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat,
502 			    rbdr->rbdr_buff_dmaps[idx]);
503 			KASSERT(err == 0,
504 			    ("%s: Could not destroy DMA map for RB, desc: %d",
505 			    __func__, idx));
506 			rbdr->rbdr_buff_dmaps[idx] = NULL;
507 		}
508 
509 		/* Now destroy the tag */
510 		err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat);
511 		KASSERT(err == 0,
512 		    ("%s: Trying to destroy BUSY DMA tag", __func__));
513 
514 		rbdr->head = 0;
515 		rbdr->tail = 0;
516 	}
517 
518 	/* Free RBDR ring */
519 	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
520 }
521 
522 /*
523  * Refill receive buffer descriptors with new buffers.
524  */
525 static int
nicvf_refill_rbdr(struct rbdr * rbdr,int mflags)526 nicvf_refill_rbdr(struct rbdr *rbdr, int mflags)
527 {
528 	struct nicvf *nic;
529 	struct queue_set *qs;
530 	int rbdr_idx;
531 	int tail, qcount;
532 	int refill_rb_cnt;
533 	struct rbdr_entry_t *desc;
534 	bus_dmamap_t dmap;
535 	bus_addr_t rbuf;
536 	boolean_t rb_alloc_fail;
537 	int new_rb;
538 
539 	rb_alloc_fail = TRUE;
540 	new_rb = 0;
541 	nic = rbdr->nic;
542 	qs = nic->qs;
543 	rbdr_idx = rbdr->idx;
544 
545 	/* Check if it's enabled */
546 	if (!rbdr->enable)
547 		return (0);
548 
549 	/* Get no of desc's to be refilled */
550 	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
551 	qcount &= 0x7FFFF;
552 	/* Doorbell can be ringed with a max of ring size minus 1 */
553 	if (qcount >= (qs->rbdr_len - 1)) {
554 		rb_alloc_fail = FALSE;
555 		goto out;
556 	} else
557 		refill_rb_cnt = qs->rbdr_len - qcount - 1;
558 
559 	/* Start filling descs from tail */
560 	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
561 	while (refill_rb_cnt) {
562 		tail++;
563 		tail &= (rbdr->dmem.q_len - 1);
564 
565 		dmap = rbdr->rbdr_buff_dmaps[tail];
566 		if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags,
567 		    DMA_BUFFER_LEN, &rbuf)) {
568 			/* Something went wrong. Resign */
569 			break;
570 		}
571 		desc = GET_RBDR_DESC(rbdr, tail);
572 		desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN);
573 		refill_rb_cnt--;
574 		new_rb++;
575 	}
576 
577 	/* make sure all memory stores are done before ringing doorbell */
578 	wmb();
579 
580 	/* Check if buffer allocation failed */
581 	if (refill_rb_cnt == 0)
582 		rb_alloc_fail = FALSE;
583 
584 	/* Notify HW */
585 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
586 			      rbdr_idx, new_rb);
587 out:
588 	if (!rb_alloc_fail) {
589 		/*
590 		 * Re-enable RBDR interrupts only
591 		 * if buffer allocation is success.
592 		 */
593 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
594 
595 		return (0);
596 	}
597 
598 	return (ENOMEM);
599 }
600 
601 /* Refill RBs even if sleep is needed to reclaim memory */
602 static void
nicvf_rbdr_task(void * arg,int pending)603 nicvf_rbdr_task(void *arg, int pending)
604 {
605 	struct rbdr *rbdr;
606 	int err;
607 
608 	rbdr = (struct rbdr *)arg;
609 
610 	err = nicvf_refill_rbdr(rbdr, M_WAITOK);
611 	if (__predict_false(err != 0)) {
612 		panic("%s: Failed to refill RBs even when sleep enabled",
613 		    __func__);
614 	}
615 }
616 
617 /* Refill RBs as soon as possible without waiting */
618 static void
nicvf_rbdr_task_nowait(void * arg,int pending)619 nicvf_rbdr_task_nowait(void *arg, int pending)
620 {
621 	struct rbdr *rbdr;
622 	int err;
623 
624 	rbdr = (struct rbdr *)arg;
625 
626 	err = nicvf_refill_rbdr(rbdr, M_NOWAIT);
627 	if (err != 0) {
628 		/*
629 		 * Schedule another, sleepable kernel thread
630 		 * that will for sure refill the buffers.
631 		 */
632 		taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task);
633 	}
634 }
635 
636 static int
nicvf_rcv_pkt_handler(struct nicvf * nic,struct cmp_queue * cq,struct cqe_rx_t * cqe_rx,int cqe_type)637 nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
638     struct cqe_rx_t *cqe_rx, int cqe_type)
639 {
640 	struct mbuf *mbuf;
641 	struct rcv_queue *rq;
642 	int rq_idx;
643 	int err = 0;
644 
645 	rq_idx = cqe_rx->rq_idx;
646 	rq = &nic->qs->rq[rq_idx];
647 
648 	/* Check for errors */
649 	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
650 	if (err && !cqe_rx->rb_cnt)
651 		return (0);
652 
653 	mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx);
654 	if (mbuf == NULL) {
655 		dprintf(nic->dev, "Packet not received\n");
656 		return (0);
657 	}
658 
659 	/* If error packet */
660 	if (err != 0) {
661 		m_freem(mbuf);
662 		return (0);
663 	}
664 
665 	if (rq->lro_enabled &&
666 	    ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) &&
667 	    (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
668             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
669 		/*
670 		 * At this point it is known that there are no errors in the
671 		 * packet. Attempt to LRO enqueue. Send to stack if no resources
672 		 * or enqueue error.
673 		 */
674 		if ((rq->lro.lro_cnt != 0) &&
675 		    (tcp_lro_rx(&rq->lro, mbuf, 0) == 0))
676 			return (0);
677 	}
678 	/*
679 	 * Push this packet to the stack later to avoid
680 	 * unlocking completion task in the middle of work.
681 	 */
682 	err = buf_ring_enqueue(cq->rx_br, mbuf);
683 	if (err != 0) {
684 		/*
685 		 * Failed to enqueue this mbuf.
686 		 * We don't drop it, just schedule another task.
687 		 */
688 		return (err);
689 	}
690 
691 	return (0);
692 }
693 
694 static void
nicvf_snd_pkt_handler(struct nicvf * nic,struct cmp_queue * cq,struct cqe_send_t * cqe_tx,int cqe_type)695 nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq,
696     struct cqe_send_t *cqe_tx, int cqe_type)
697 {
698 	bus_dmamap_t dmap;
699 	struct mbuf *mbuf;
700 	struct snd_queue *sq;
701 	struct sq_hdr_subdesc *hdr;
702 
703 	mbuf = NULL;
704 	sq = &nic->qs->sq[cqe_tx->sq_idx];
705 
706 	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
707 	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
708 		return;
709 
710 	dprintf(nic->dev,
711 	    "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
712 	    __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
713 	    cqe_tx->sqe_ptr, hdr->subdesc_cnt);
714 
715 	dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap;
716 	bus_dmamap_unload(sq->snd_buff_dmat, dmap);
717 
718 	mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf;
719 	if (mbuf != NULL) {
720 		m_freem(mbuf);
721 		sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL;
722 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
723 	}
724 
725 	nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
726 }
727 
728 static int
nicvf_cq_intr_handler(struct nicvf * nic,uint8_t cq_idx)729 nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx)
730 {
731 	struct mbuf *mbuf;
732 	if_t ifp;
733 	int processed_cqe, tx_done = 0;
734 #ifdef DEBUG
735 	int work_done = 0;
736 #endif
737 	int cqe_count, cqe_head;
738 	struct queue_set *qs = nic->qs;
739 	struct cmp_queue *cq = &qs->cq[cq_idx];
740 	struct snd_queue *sq = &qs->sq[cq_idx];
741 	struct rcv_queue *rq;
742 	struct cqe_rx_t *cq_desc;
743 	struct lro_ctrl	*lro;
744 	int rq_idx;
745 	int cmp_err;
746 
747 	NICVF_CMP_LOCK(cq);
748 	cmp_err = 0;
749 	processed_cqe = 0;
750 	/* Get no of valid CQ entries to process */
751 	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
752 	cqe_count &= CQ_CQE_COUNT;
753 	if (cqe_count == 0)
754 		goto out;
755 
756 	/* Get head of the valid CQ entries */
757 	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
758 	cqe_head &= 0xFFFF;
759 
760 	dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n",
761 	    __func__, cq_idx, cqe_count, cqe_head);
762 	while (processed_cqe < cqe_count) {
763 		/* Get the CQ descriptor */
764 		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
765 		cqe_head++;
766 		cqe_head &= (cq->dmem.q_len - 1);
767 		/* Prefetch next CQ descriptor */
768 		__builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
769 
770 		dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx,
771 		    cq_desc->cqe_type);
772 		switch (cq_desc->cqe_type) {
773 		case CQE_TYPE_RX:
774 			cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
775 			    CQE_TYPE_RX);
776 			if (__predict_false(cmp_err != 0)) {
777 				/*
778 				 * Ups. Cannot finish now.
779 				 * Let's try again later.
780 				 */
781 				goto done;
782 			}
783 #ifdef DEBUG
784 			work_done++;
785 #endif
786 			break;
787 		case CQE_TYPE_SEND:
788 			nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc,
789 			    CQE_TYPE_SEND);
790 			tx_done++;
791 			break;
792 		case CQE_TYPE_INVALID:
793 		case CQE_TYPE_RX_SPLIT:
794 		case CQE_TYPE_RX_TCP:
795 		case CQE_TYPE_SEND_PTP:
796 			/* Ignore for now */
797 			break;
798 		}
799 		processed_cqe++;
800 	}
801 done:
802 	dprintf(nic->dev,
803 	    "%s CQ%d processed_cqe %d work_done %d\n",
804 	    __func__, cq_idx, processed_cqe, work_done);
805 
806 	/* Ring doorbell to inform H/W to reuse processed CQEs */
807 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe);
808 
809 	if ((tx_done > 0) &&
810 	    ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) {
811 		/* Reenable TXQ if its stopped earlier due to SQ full */
812 		if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
813 		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
814 	}
815 out:
816 	/*
817 	 * Flush any outstanding LRO work
818 	 */
819 	rq_idx = cq_idx;
820 	rq = &nic->qs->rq[rq_idx];
821 	lro = &rq->lro;
822 	tcp_lro_flush_all(lro);
823 
824 	NICVF_CMP_UNLOCK(cq);
825 
826 	ifp = nic->ifp;
827 	/* Push received MBUFs to the stack */
828 	while (!buf_ring_empty(cq->rx_br)) {
829 		mbuf = buf_ring_dequeue_mc(cq->rx_br);
830 		if (__predict_true(mbuf != NULL))
831 			if_input(ifp, mbuf);
832 	}
833 
834 	return (cmp_err);
835 }
836 
837 /*
838  * Qset error interrupt handler
839  *
840  * As of now only CQ errors are handled
841  */
842 static void
nicvf_qs_err_task(void * arg,int pending)843 nicvf_qs_err_task(void *arg, int pending)
844 {
845 	struct nicvf *nic;
846 	struct queue_set *qs;
847 	int qidx;
848 	uint64_t status;
849 	boolean_t enable = TRUE;
850 
851 	nic = (struct nicvf *)arg;
852 	qs = nic->qs;
853 
854 	/* Deactivate network interface */
855 	if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
856 
857 	/* Check if it is CQ err */
858 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
859 		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
860 		    qidx);
861 		if ((status & CQ_ERR_MASK) == 0)
862 			continue;
863 		/* Process already queued CQEs and reconfig CQ */
864 		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
865 		nicvf_sq_disable(nic, qidx);
866 		(void)nicvf_cq_intr_handler(nic, qidx);
867 		nicvf_cmp_queue_config(nic, qs, qidx, enable);
868 		nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx);
869 		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
870 		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
871 	}
872 
873 	if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
874 	/* Re-enable Qset error interrupt */
875 	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
876 }
877 
878 static void
nicvf_cmp_task(void * arg,int pending)879 nicvf_cmp_task(void *arg, int pending)
880 {
881 	struct cmp_queue *cq;
882 	struct nicvf *nic;
883 	int cmp_err;
884 
885 	cq = (struct cmp_queue *)arg;
886 	nic = cq->nic;
887 
888 	/* Handle CQ descriptors */
889 	cmp_err = nicvf_cq_intr_handler(nic, cq->idx);
890 	if (__predict_false(cmp_err != 0)) {
891 		/*
892 		 * Schedule another thread here since we did not
893 		 * process the entire CQ due to Tx or Rx CQ parse error.
894 		 */
895 		taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task);
896 	}
897 
898 	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
899 	/* Reenable interrupt (previously disabled in nicvf_intr_handler() */
900 	nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx);
901 
902 }
903 
904 /* Initialize completion queue */
905 static int
nicvf_init_cmp_queue(struct nicvf * nic,struct cmp_queue * cq,int q_len,int qidx)906 nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len,
907     int qidx)
908 {
909 	int err;
910 
911 	/* Initizalize lock */
912 	snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock",
913 	    device_get_nameunit(nic->dev), qidx);
914 	mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF);
915 
916 	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
917 				     NICVF_CQ_BASE_ALIGN_BYTES);
918 
919 	if (err != 0) {
920 		device_printf(nic->dev,
921 		    "Could not allocate DMA memory for CQ\n");
922 		return (err);
923 	}
924 
925 	cq->desc = cq->dmem.base;
926 	cq->thresh = pass1_silicon(nic->dev) ? 0 : CMP_QUEUE_CQE_THRESH;
927 	cq->nic = nic;
928 	cq->idx = qidx;
929 	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
930 
931 	cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK,
932 	    &cq->mtx);
933 
934 	/* Allocate taskqueue */
935 	NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq);
936 	cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK,
937 	    taskqueue_thread_enqueue, &cq->cmp_taskq);
938 	taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)",
939 	    device_get_nameunit(nic->dev), qidx);
940 
941 	return (0);
942 }
943 
944 static void
nicvf_free_cmp_queue(struct nicvf * nic,struct cmp_queue * cq)945 nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
946 {
947 
948 	if (cq == NULL)
949 		return;
950 	/*
951 	 * The completion queue itself should be disabled by now
952 	 * (ref. nicvf_snd_queue_config()).
953 	 * Ensure that it is safe to disable it or panic.
954 	 */
955 	if (cq->enable)
956 		panic("%s: Trying to free working CQ(%d)", __func__, cq->idx);
957 
958 	if (cq->cmp_taskq != NULL) {
959 		/* Remove task */
960 		while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0)
961 			taskqueue_drain(cq->cmp_taskq, &cq->cmp_task);
962 
963 		taskqueue_free(cq->cmp_taskq);
964 		cq->cmp_taskq = NULL;
965 	}
966 	/*
967 	 * Completion interrupt will possibly enable interrupts again
968 	 * so disable interrupting now after we finished processing
969 	 * completion task. It is safe to do so since the corresponding CQ
970 	 * was already disabled.
971 	 */
972 	nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx);
973 	nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx);
974 
975 	NICVF_CMP_LOCK(cq);
976 	nicvf_free_q_desc_mem(nic, &cq->dmem);
977 	drbr_free(cq->rx_br, M_DEVBUF);
978 	NICVF_CMP_UNLOCK(cq);
979 	mtx_destroy(&cq->mtx);
980 	memset(cq->mtx_name, 0, sizeof(cq->mtx_name));
981 }
982 
983 int
nicvf_xmit_locked(struct snd_queue * sq)984 nicvf_xmit_locked(struct snd_queue *sq)
985 {
986 	struct nicvf *nic;
987 	if_t ifp;
988 	struct mbuf *next;
989 	int err;
990 
991 	NICVF_TX_LOCK_ASSERT(sq);
992 
993 	nic = sq->nic;
994 	ifp = nic->ifp;
995 	err = 0;
996 
997 	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
998 		/* Send a copy of the frame to the BPF listener */
999 		ETHER_BPF_MTAP(ifp, next);
1000 
1001 		err = nicvf_tx_mbuf_locked(sq, &next);
1002 		if (err != 0) {
1003 			if (next == NULL)
1004 				drbr_advance(ifp, sq->br);
1005 			else
1006 				drbr_putback(ifp, sq->br, next);
1007 
1008 			break;
1009 		}
1010 		drbr_advance(ifp, sq->br);
1011 	}
1012 	return (err);
1013 }
1014 
1015 static void
nicvf_snd_task(void * arg,int pending)1016 nicvf_snd_task(void *arg, int pending)
1017 {
1018 	struct snd_queue *sq = (struct snd_queue *)arg;
1019 	struct nicvf *nic;
1020 	if_t ifp;
1021 	int err;
1022 
1023 	nic = sq->nic;
1024 	ifp = nic->ifp;
1025 
1026 	/*
1027 	 * Skip sending anything if the driver is not running,
1028 	 * SQ full or link is down.
1029 	 */
1030 	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1031 	    IFF_DRV_RUNNING) || !nic->link_up)
1032 		return;
1033 
1034 	NICVF_TX_LOCK(sq);
1035 	err = nicvf_xmit_locked(sq);
1036 	NICVF_TX_UNLOCK(sq);
1037 	/* Try again */
1038 	if (err != 0)
1039 		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);
1040 }
1041 
1042 /* Initialize transmit queue */
1043 static int
nicvf_init_snd_queue(struct nicvf * nic,struct snd_queue * sq,int q_len,int qidx)1044 nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len,
1045     int qidx)
1046 {
1047 	size_t i;
1048 	int err;
1049 
1050 	/* Initizalize TX lock for this queue */
1051 	snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock",
1052 	    device_get_nameunit(nic->dev), qidx);
1053 	mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF);
1054 
1055 	NICVF_TX_LOCK(sq);
1056 	/* Allocate buffer ring */
1057 	sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF,
1058 	    M_NOWAIT, &sq->mtx);
1059 	if (sq->br == NULL) {
1060 		device_printf(nic->dev,
1061 		    "ERROR: Could not set up buf ring for SQ(%d)\n", qidx);
1062 		err = ENOMEM;
1063 		goto error;
1064 	}
1065 
1066 	/* Allocate DMA memory for Tx descriptors */
1067 	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
1068 				     NICVF_SQ_BASE_ALIGN_BYTES);
1069 	if (err != 0) {
1070 		device_printf(nic->dev,
1071 		    "Could not allocate DMA memory for SQ\n");
1072 		goto error;
1073 	}
1074 
1075 	sq->desc = sq->dmem.base;
1076 	sq->head = sq->tail = 0;
1077 	atomic_store_rel_int(&sq->free_cnt, q_len - 1);
1078 	sq->thresh = SND_QUEUE_THRESH;
1079 	sq->idx = qidx;
1080 	sq->nic = nic;
1081 
1082 	/*
1083 	 * Allocate DMA maps for Tx buffers
1084 	 */
1085 
1086 	/* Create DMA tag first */
1087 	err = bus_dma_tag_create(
1088 	    bus_get_dma_tag(nic->dev),		/* parent tag */
1089 	    1,					/* alignment */
1090 	    0,					/* boundary */
1091 	    BUS_SPACE_MAXADDR,			/* lowaddr */
1092 	    BUS_SPACE_MAXADDR,			/* highaddr */
1093 	    NULL, NULL,				/* filtfunc, filtfuncarg */
1094 	    NICVF_TSO_MAXSIZE,			/* maxsize */
1095 	    NICVF_TSO_NSEGS,			/* nsegments */
1096 	    MCLBYTES,				/* maxsegsize */
1097 	    0,					/* flags */
1098 	    NULL, NULL,				/* lockfunc, lockfuncarg */
1099 	    &sq->snd_buff_dmat);		/* dmat */
1100 
1101 	if (err != 0) {
1102 		device_printf(nic->dev,
1103 		    "Failed to create busdma tag for Tx buffers\n");
1104 		goto error;
1105 	}
1106 
1107 	/* Allocate send buffers array */
1108 	sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF,
1109 	    (M_NOWAIT | M_ZERO));
1110 	if (sq->snd_buff == NULL) {
1111 		device_printf(nic->dev,
1112 		    "Could not allocate memory for Tx buffers array\n");
1113 		err = ENOMEM;
1114 		goto error;
1115 	}
1116 
1117 	/* Now populate maps */
1118 	for (i = 0; i < q_len; i++) {
1119 		err = bus_dmamap_create(sq->snd_buff_dmat, 0,
1120 		    &sq->snd_buff[i].dmap);
1121 		if (err != 0) {
1122 			device_printf(nic->dev,
1123 			    "Failed to create DMA maps for Tx buffers\n");
1124 			goto error;
1125 		}
1126 	}
1127 	NICVF_TX_UNLOCK(sq);
1128 
1129 	/* Allocate taskqueue */
1130 	TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq);
1131 	sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK,
1132 	    taskqueue_thread_enqueue, &sq->snd_taskq);
1133 	taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)",
1134 	    device_get_nameunit(nic->dev), qidx);
1135 
1136 	return (0);
1137 error:
1138 	NICVF_TX_UNLOCK(sq);
1139 	return (err);
1140 }
1141 
1142 static void
nicvf_free_snd_queue(struct nicvf * nic,struct snd_queue * sq)1143 nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
1144 {
1145 	struct queue_set *qs = nic->qs;
1146 	size_t i;
1147 	int err __diagused;
1148 
1149 	if (sq == NULL)
1150 		return;
1151 
1152 	if (sq->snd_taskq != NULL) {
1153 		/* Remove task */
1154 		while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0)
1155 			taskqueue_drain(sq->snd_taskq, &sq->snd_task);
1156 
1157 		taskqueue_free(sq->snd_taskq);
1158 		sq->snd_taskq = NULL;
1159 	}
1160 
1161 	NICVF_TX_LOCK(sq);
1162 	if (sq->snd_buff_dmat != NULL) {
1163 		if (sq->snd_buff != NULL) {
1164 			for (i = 0; i < qs->sq_len; i++) {
1165 				m_freem(sq->snd_buff[i].mbuf);
1166 				sq->snd_buff[i].mbuf = NULL;
1167 
1168 				bus_dmamap_unload(sq->snd_buff_dmat,
1169 				    sq->snd_buff[i].dmap);
1170 				err = bus_dmamap_destroy(sq->snd_buff_dmat,
1171 				    sq->snd_buff[i].dmap);
1172 				/*
1173 				 * If bus_dmamap_destroy fails it can cause
1174 				 * random panic later if the tag is also
1175 				 * destroyed in the process.
1176 				 */
1177 				KASSERT(err == 0,
1178 				    ("%s: Could not destroy DMA map for SQ",
1179 				    __func__));
1180 			}
1181 		}
1182 
1183 		free(sq->snd_buff, M_NICVF);
1184 
1185 		err = bus_dma_tag_destroy(sq->snd_buff_dmat);
1186 		KASSERT(err == 0,
1187 		    ("%s: Trying to destroy BUSY DMA tag", __func__));
1188 	}
1189 
1190 	/* Free private driver ring for this send queue */
1191 	if (sq->br != NULL)
1192 		drbr_free(sq->br, M_DEVBUF);
1193 
1194 	if (sq->dmem.base != NULL)
1195 		nicvf_free_q_desc_mem(nic, &sq->dmem);
1196 
1197 	NICVF_TX_UNLOCK(sq);
1198 	/* Destroy Tx lock */
1199 	mtx_destroy(&sq->mtx);
1200 	memset(sq->mtx_name, 0, sizeof(sq->mtx_name));
1201 }
1202 
1203 static void
nicvf_reclaim_snd_queue(struct nicvf * nic,struct queue_set * qs,int qidx)1204 nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1205 {
1206 
1207 	/* Disable send queue */
1208 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
1209 	/* Check if SQ is stopped */
1210 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
1211 		return;
1212 	/* Reset send queue */
1213 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1214 }
1215 
1216 static void
nicvf_reclaim_rcv_queue(struct nicvf * nic,struct queue_set * qs,int qidx)1217 nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1218 {
1219 	union nic_mbx mbx = {};
1220 
1221 	/* Make sure all packets in the pipeline are written back into mem */
1222 	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
1223 	nicvf_send_msg_to_pf(nic, &mbx);
1224 }
1225 
1226 static void
nicvf_reclaim_cmp_queue(struct nicvf * nic,struct queue_set * qs,int qidx)1227 nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)
1228 {
1229 
1230 	/* Disable timer threshold (doesn't get reset upon CQ reset */
1231 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
1232 	/* Disable completion queue */
1233 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
1234 	/* Reset completion queue */
1235 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1236 }
1237 
1238 static void
nicvf_reclaim_rbdr(struct nicvf * nic,struct rbdr * rbdr,int qidx)1239 nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx)
1240 {
1241 	uint64_t tmp, fifo_state;
1242 	int timeout = 10;
1243 
1244 	/* Save head and tail pointers for feeing up buffers */
1245 	rbdr->head =
1246 	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
1247 	rbdr->tail =
1248 	    nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
1249 
1250 	/*
1251 	 * If RBDR FIFO is in 'FAIL' state then do a reset first
1252 	 * before relaiming.
1253 	 */
1254 	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
1255 	if (((fifo_state >> 62) & 0x03) == 0x3) {
1256 		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
1257 		    qidx, NICVF_RBDR_RESET);
1258 	}
1259 
1260 	/* Disable RBDR */
1261 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
1262 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1263 		return;
1264 	while (1) {
1265 		tmp = nicvf_queue_reg_read(nic,
1266 		    NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx);
1267 		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
1268 			break;
1269 
1270 		DELAY(1000);
1271 		timeout--;
1272 		if (!timeout) {
1273 			device_printf(nic->dev,
1274 			    "Failed polling on prefetch status\n");
1275 			return;
1276 		}
1277 	}
1278 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1279 	    NICVF_RBDR_RESET);
1280 
1281 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
1282 		return;
1283 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
1284 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
1285 		return;
1286 }
1287 
1288 /* Configures receive queue */
1289 static void
nicvf_rcv_queue_config(struct nicvf * nic,struct queue_set * qs,int qidx,bool enable)1290 nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
1291     int qidx, bool enable)
1292 {
1293 	union nic_mbx mbx = {};
1294 	struct rcv_queue *rq;
1295 	struct rq_cfg rq_cfg;
1296 	if_t ifp;
1297 	struct lro_ctrl	*lro;
1298 
1299 	ifp = nic->ifp;
1300 
1301 	rq = &qs->rq[qidx];
1302 	rq->enable = enable;
1303 
1304 	lro = &rq->lro;
1305 
1306 	/* Disable receive queue */
1307 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
1308 
1309 	if (!rq->enable) {
1310 		nicvf_reclaim_rcv_queue(nic, qs, qidx);
1311 		/* Free LRO memory */
1312 		tcp_lro_free(lro);
1313 		rq->lro_enabled = FALSE;
1314 		return;
1315 	}
1316 
1317 	/* Configure LRO if enabled */
1318 	rq->lro_enabled = FALSE;
1319 	if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) {
1320 		if (tcp_lro_init(lro) != 0) {
1321 			device_printf(nic->dev,
1322 			    "Failed to initialize LRO for RXQ%d\n", qidx);
1323 		} else {
1324 			rq->lro_enabled = TRUE;
1325 			lro->ifp = nic->ifp;
1326 		}
1327 	}
1328 
1329 	rq->cq_qs = qs->vnic_id;
1330 	rq->cq_idx = qidx;
1331 	rq->start_rbdr_qs = qs->vnic_id;
1332 	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
1333 	rq->cont_rbdr_qs = qs->vnic_id;
1334 	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
1335 	/* all writes of RBDR data to be loaded into L2 Cache as well*/
1336 	rq->caching = 1;
1337 
1338 	/* Send a mailbox msg to PF to config RQ */
1339 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
1340 	mbx.rq.qs_num = qs->vnic_id;
1341 	mbx.rq.rq_num = qidx;
1342 	mbx.rq.cfg = ((uint64_t)rq->caching << 26) | (rq->cq_qs << 19) |
1343 	    (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
1344 	    (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) |
1345 	    (rq->start_qs_rbdr_idx);
1346 	nicvf_send_msg_to_pf(nic, &mbx);
1347 
1348 	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
1349 	mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0);
1350 	nicvf_send_msg_to_pf(nic, &mbx);
1351 
1352 	/*
1353 	 * RQ drop config
1354 	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
1355 	 */
1356 	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
1357 	mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8);
1358 	nicvf_send_msg_to_pf(nic, &mbx);
1359 
1360 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
1361 
1362 	/* Enable Receive queue */
1363 	rq_cfg.ena = 1;
1364 	rq_cfg.tcp_ena = 0;
1365 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx,
1366 	    *(uint64_t *)&rq_cfg);
1367 }
1368 
1369 /* Configures completion queue */
1370 static void
nicvf_cmp_queue_config(struct nicvf * nic,struct queue_set * qs,int qidx,boolean_t enable)1371 nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
1372     int qidx, boolean_t enable)
1373 {
1374 	struct cmp_queue *cq;
1375 	struct cq_cfg cq_cfg;
1376 
1377 	cq = &qs->cq[qidx];
1378 	cq->enable = enable;
1379 
1380 	if (!cq->enable) {
1381 		nicvf_reclaim_cmp_queue(nic, qs, qidx);
1382 		return;
1383 	}
1384 
1385 	/* Reset completion queue */
1386 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
1387 
1388 	/* Set completion queue base address */
1389 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx,
1390 	    (uint64_t)(cq->dmem.phys_base));
1391 
1392 	/* Enable Completion queue */
1393 	cq_cfg.ena = 1;
1394 	cq_cfg.reset = 0;
1395 	cq_cfg.caching = 0;
1396 	cq_cfg.qsize = CMP_QSIZE;
1397 	cq_cfg.avg_con = 0;
1398 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg);
1399 
1400 	/* Set threshold value for interrupt generation */
1401 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
1402 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx,
1403 	    nic->cq_coalesce_usecs);
1404 }
1405 
1406 /* Configures transmit queue */
1407 static void
nicvf_snd_queue_config(struct nicvf * nic,struct queue_set * qs,int qidx,boolean_t enable)1408 nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1409     boolean_t enable)
1410 {
1411 	union nic_mbx mbx = {};
1412 	struct snd_queue *sq;
1413 	struct sq_cfg sq_cfg;
1414 
1415 	sq = &qs->sq[qidx];
1416 	sq->enable = enable;
1417 
1418 	if (!sq->enable) {
1419 		nicvf_reclaim_snd_queue(nic, qs, qidx);
1420 		return;
1421 	}
1422 
1423 	/* Reset send queue */
1424 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
1425 
1426 	sq->cq_qs = qs->vnic_id;
1427 	sq->cq_idx = qidx;
1428 
1429 	/* Send a mailbox msg to PF to config SQ */
1430 	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
1431 	mbx.sq.qs_num = qs->vnic_id;
1432 	mbx.sq.sq_num = qidx;
1433 	mbx.sq.sqs_mode = nic->sqs_mode;
1434 	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
1435 	nicvf_send_msg_to_pf(nic, &mbx);
1436 
1437 	/* Set queue base address */
1438 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx,
1439 	    (uint64_t)(sq->dmem.phys_base));
1440 
1441 	/* Enable send queue  & set queue size */
1442 	sq_cfg.ena = 1;
1443 	sq_cfg.reset = 0;
1444 	sq_cfg.ldwb = 0;
1445 	sq_cfg.qsize = SND_QSIZE;
1446 	sq_cfg.tstmp_bgx_intf = 0;
1447 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg);
1448 
1449 	/* Set threshold value for interrupt generation */
1450 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
1451 }
1452 
1453 /* Configures receive buffer descriptor ring */
1454 static void
nicvf_rbdr_config(struct nicvf * nic,struct queue_set * qs,int qidx,boolean_t enable)1455 nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx,
1456     boolean_t enable)
1457 {
1458 	struct rbdr *rbdr;
1459 	struct rbdr_cfg rbdr_cfg;
1460 
1461 	rbdr = &qs->rbdr[qidx];
1462 	nicvf_reclaim_rbdr(nic, rbdr, qidx);
1463 	if (!enable)
1464 		return;
1465 
1466 	/* Set descriptor base address */
1467 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx,
1468 	    (uint64_t)(rbdr->dmem.phys_base));
1469 
1470 	/* Enable RBDR  & set queue size */
1471 	/* Buffer size should be in multiples of 128 bytes */
1472 	rbdr_cfg.ena = 1;
1473 	rbdr_cfg.reset = 0;
1474 	rbdr_cfg.ldwb = 0;
1475 	rbdr_cfg.qsize = RBDR_SIZE;
1476 	rbdr_cfg.avg_con = 0;
1477 	rbdr_cfg.lines = rbdr->dma_size / 128;
1478 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
1479 	    *(uint64_t *)&rbdr_cfg);
1480 
1481 	/* Notify HW */
1482 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx,
1483 	    qs->rbdr_len - 1);
1484 
1485 	/* Set threshold value for interrupt generation */
1486 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx,
1487 	    rbdr->thresh - 1);
1488 }
1489 
1490 /* Requests PF to assign and enable Qset */
1491 void
nicvf_qset_config(struct nicvf * nic,boolean_t enable)1492 nicvf_qset_config(struct nicvf *nic, boolean_t enable)
1493 {
1494 	union nic_mbx mbx = {};
1495 	struct queue_set *qs;
1496 	struct qs_cfg *qs_cfg;
1497 
1498 	qs = nic->qs;
1499 	if (qs == NULL) {
1500 		device_printf(nic->dev,
1501 		    "Qset is still not allocated, don't init queues\n");
1502 		return;
1503 	}
1504 
1505 	qs->enable = enable;
1506 	qs->vnic_id = nic->vf_id;
1507 
1508 	/* Send a mailbox msg to PF to config Qset */
1509 	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
1510 	mbx.qs.num = qs->vnic_id;
1511 
1512 	mbx.qs.cfg = 0;
1513 	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
1514 	if (qs->enable) {
1515 		qs_cfg->ena = 1;
1516 		qs_cfg->vnic = qs->vnic_id;
1517 	}
1518 	nicvf_send_msg_to_pf(nic, &mbx);
1519 }
1520 
1521 static void
nicvf_free_resources(struct nicvf * nic)1522 nicvf_free_resources(struct nicvf *nic)
1523 {
1524 	int qidx;
1525 	struct queue_set *qs;
1526 
1527 	qs = nic->qs;
1528 	/*
1529 	 * Remove QS error task first since it has to be dead
1530 	 * to safely free completion queue tasks.
1531 	 */
1532 	if (qs->qs_err_taskq != NULL) {
1533 		/* Shut down QS error tasks */
1534 		while (taskqueue_cancel(qs->qs_err_taskq,
1535 		    &qs->qs_err_task,  NULL) != 0) {
1536 			taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task);
1537 		}
1538 		taskqueue_free(qs->qs_err_taskq);
1539 		qs->qs_err_taskq = NULL;
1540 	}
1541 	/* Free receive buffer descriptor ring */
1542 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1543 		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
1544 
1545 	/* Free completion queue */
1546 	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1547 		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
1548 
1549 	/* Free send queue */
1550 	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1551 		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
1552 }
1553 
1554 static int
nicvf_alloc_resources(struct nicvf * nic)1555 nicvf_alloc_resources(struct nicvf *nic)
1556 {
1557 	struct queue_set *qs = nic->qs;
1558 	int qidx;
1559 
1560 	/* Alloc receive buffer descriptor ring */
1561 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1562 		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
1563 				    DMA_BUFFER_LEN, qidx))
1564 			goto alloc_fail;
1565 	}
1566 
1567 	/* Alloc send queue */
1568 	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
1569 		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
1570 			goto alloc_fail;
1571 	}
1572 
1573 	/* Alloc completion queue */
1574 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1575 		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx))
1576 			goto alloc_fail;
1577 	}
1578 
1579 	/* Allocate QS error taskqueue */
1580 	NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic);
1581 	qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK,
1582 	    taskqueue_thread_enqueue, &qs->qs_err_taskq);
1583 	taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq",
1584 	    device_get_nameunit(nic->dev));
1585 
1586 	return (0);
1587 alloc_fail:
1588 	nicvf_free_resources(nic);
1589 	return (ENOMEM);
1590 }
1591 
1592 int
nicvf_set_qset_resources(struct nicvf * nic)1593 nicvf_set_qset_resources(struct nicvf *nic)
1594 {
1595 	struct queue_set *qs;
1596 
1597 	qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK));
1598 	nic->qs = qs;
1599 
1600 	/* Set count of each queue */
1601 	qs->rbdr_cnt = RBDR_CNT;
1602 	qs->rq_cnt = RCV_QUEUE_CNT;
1603 
1604 	qs->sq_cnt = SND_QUEUE_CNT;
1605 	qs->cq_cnt = CMP_QUEUE_CNT;
1606 
1607 	/* Set queue lengths */
1608 	qs->rbdr_len = RCV_BUF_COUNT;
1609 	qs->sq_len = SND_QUEUE_LEN;
1610 	qs->cq_len = CMP_QUEUE_LEN;
1611 
1612 	nic->rx_queues = qs->rq_cnt;
1613 	nic->tx_queues = qs->sq_cnt;
1614 
1615 	return (0);
1616 }
1617 
1618 int
nicvf_config_data_transfer(struct nicvf * nic,boolean_t enable)1619 nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable)
1620 {
1621 	boolean_t disable = FALSE;
1622 	struct queue_set *qs;
1623 	int qidx;
1624 
1625 	qs = nic->qs;
1626 	if (qs == NULL)
1627 		return (0);
1628 
1629 	if (enable) {
1630 		if (nicvf_alloc_resources(nic) != 0)
1631 			return (ENOMEM);
1632 
1633 		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1634 			nicvf_snd_queue_config(nic, qs, qidx, enable);
1635 		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1636 			nicvf_cmp_queue_config(nic, qs, qidx, enable);
1637 		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1638 			nicvf_rbdr_config(nic, qs, qidx, enable);
1639 		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1640 			nicvf_rcv_queue_config(nic, qs, qidx, enable);
1641 	} else {
1642 		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1643 			nicvf_rcv_queue_config(nic, qs, qidx, disable);
1644 		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1645 			nicvf_rbdr_config(nic, qs, qidx, disable);
1646 		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1647 			nicvf_snd_queue_config(nic, qs, qidx, disable);
1648 		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1649 			nicvf_cmp_queue_config(nic, qs, qidx, disable);
1650 
1651 		nicvf_free_resources(nic);
1652 	}
1653 
1654 	return (0);
1655 }
1656 
1657 /*
1658  * Get a free desc from SQ
1659  * returns descriptor ponter & descriptor number
1660  */
1661 static __inline int
nicvf_get_sq_desc(struct snd_queue * sq,int desc_cnt)1662 nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
1663 {
1664 	int qentry;
1665 
1666 	qentry = sq->tail;
1667 	atomic_subtract_int(&sq->free_cnt, desc_cnt);
1668 	sq->tail += desc_cnt;
1669 	sq->tail &= (sq->dmem.q_len - 1);
1670 
1671 	return (qentry);
1672 }
1673 
1674 /* Free descriptor back to SQ for future use */
1675 static void
nicvf_put_sq_desc(struct snd_queue * sq,int desc_cnt)1676 nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
1677 {
1678 
1679 	atomic_add_int(&sq->free_cnt, desc_cnt);
1680 	sq->head += desc_cnt;
1681 	sq->head &= (sq->dmem.q_len - 1);
1682 }
1683 
1684 static __inline int
nicvf_get_nxt_sqentry(struct snd_queue * sq,int qentry)1685 nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
1686 {
1687 	qentry++;
1688 	qentry &= (sq->dmem.q_len - 1);
1689 	return (qentry);
1690 }
1691 
1692 static void
nicvf_sq_enable(struct nicvf * nic,struct snd_queue * sq,int qidx)1693 nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
1694 {
1695 	uint64_t sq_cfg;
1696 
1697 	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1698 	sq_cfg |= NICVF_SQ_EN;
1699 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1700 	/* Ring doorbell so that H/W restarts processing SQEs */
1701 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
1702 }
1703 
1704 static void
nicvf_sq_disable(struct nicvf * nic,int qidx)1705 nicvf_sq_disable(struct nicvf *nic, int qidx)
1706 {
1707 	uint64_t sq_cfg;
1708 
1709 	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
1710 	sq_cfg &= ~NICVF_SQ_EN;
1711 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
1712 }
1713 
1714 static void
nicvf_sq_free_used_descs(struct nicvf * nic,struct snd_queue * sq,int qidx)1715 nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx)
1716 {
1717 	uint64_t head;
1718 	struct snd_buff *snd_buff;
1719 	struct sq_hdr_subdesc *hdr;
1720 
1721 	NICVF_TX_LOCK(sq);
1722 	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
1723 	while (sq->head != head) {
1724 		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
1725 		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
1726 			nicvf_put_sq_desc(sq, 1);
1727 			continue;
1728 		}
1729 		snd_buff = &sq->snd_buff[sq->head];
1730 		if (snd_buff->mbuf != NULL) {
1731 			bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1732 			m_freem(snd_buff->mbuf);
1733 			sq->snd_buff[sq->head].mbuf = NULL;
1734 		}
1735 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
1736 	}
1737 	NICVF_TX_UNLOCK(sq);
1738 }
1739 
1740 /*
1741  * Add SQ HEADER subdescriptor.
1742  * First subdescriptor for every send descriptor.
1743  */
1744 static __inline int
nicvf_sq_add_hdr_subdesc(struct snd_queue * sq,int qentry,int subdesc_cnt,struct mbuf * mbuf,int len)1745 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
1746 			 int subdesc_cnt, struct mbuf *mbuf, int len)
1747 {
1748 	struct nicvf *nic;
1749 	struct sq_hdr_subdesc *hdr;
1750 	struct ether_vlan_header *eh;
1751 #ifdef INET
1752 	struct ip *ip;
1753 #endif
1754 #if defined(INET6) || defined(INET)
1755 	struct tcphdr *th;
1756 #endif
1757 #ifdef INET
1758 	int iphlen;
1759 #endif
1760 	int ehdrlen, poff, proto;
1761 	uint16_t etype;
1762 
1763 	nic = sq->nic;
1764 
1765 	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
1766 	sq->snd_buff[qentry].mbuf = mbuf;
1767 
1768 	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
1769 	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
1770 	/* Enable notification via CQE after processing SQE */
1771 	hdr->post_cqe = 1;
1772 	/* No of subdescriptors following this */
1773 	hdr->subdesc_cnt = subdesc_cnt;
1774 	hdr->tot_len = len;
1775 
1776 	eh = mtod(mbuf, struct ether_vlan_header *);
1777 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1778 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1779 		etype = ntohs(eh->evl_proto);
1780 	} else {
1781 		ehdrlen = ETHER_HDR_LEN;
1782 		etype = ntohs(eh->evl_encap_proto);
1783 	}
1784 
1785 	poff = proto = -1;
1786 	switch (etype) {
1787 #ifdef INET6
1788 	case ETHERTYPE_IPV6:
1789 		if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) {
1790 			mbuf = m_pullup(mbuf, ehdrlen +sizeof(struct ip6_hdr));
1791 			sq->snd_buff[qentry].mbuf = NULL;
1792 			if (mbuf == NULL)
1793 				return (ENOBUFS);
1794 		}
1795 		poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto);
1796 		if (poff < 0)
1797 			return (ENOBUFS);
1798 		poff += ehdrlen;
1799 		break;
1800 #endif
1801 #ifdef INET
1802 	case ETHERTYPE_IP:
1803 		if (mbuf->m_len < ehdrlen + sizeof(struct ip)) {
1804 			mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip));
1805 			sq->snd_buff[qentry].mbuf = mbuf;
1806 			if (mbuf == NULL)
1807 				return (ENOBUFS);
1808 		}
1809 		if (mbuf->m_pkthdr.csum_flags & CSUM_IP)
1810 			hdr->csum_l3 = 1; /* Enable IP csum calculation */
1811 
1812 		ip = (struct ip *)(mbuf->m_data + ehdrlen);
1813 		iphlen = ip->ip_hl << 2;
1814 		poff = ehdrlen + iphlen;
1815 		proto = ip->ip_p;
1816 		break;
1817 #endif
1818 	}
1819 
1820 #if defined(INET6) || defined(INET)
1821 	if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) {
1822 		switch (proto) {
1823 		case IPPROTO_TCP:
1824 			if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0)
1825 				break;
1826 
1827 			if (mbuf->m_len < (poff + sizeof(struct tcphdr))) {
1828 				mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr));
1829 				sq->snd_buff[qentry].mbuf = mbuf;
1830 				if (mbuf == NULL)
1831 					return (ENOBUFS);
1832 			}
1833 			hdr->csum_l4 = SEND_L4_CSUM_TCP;
1834 			break;
1835 		case IPPROTO_UDP:
1836 			if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0)
1837 				break;
1838 
1839 			if (mbuf->m_len < (poff + sizeof(struct udphdr))) {
1840 				mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr));
1841 				sq->snd_buff[qentry].mbuf = mbuf;
1842 				if (mbuf == NULL)
1843 					return (ENOBUFS);
1844 			}
1845 			hdr->csum_l4 = SEND_L4_CSUM_UDP;
1846 			break;
1847 		case IPPROTO_SCTP:
1848 			if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0)
1849 				break;
1850 
1851 			if (mbuf->m_len < (poff + sizeof(struct sctphdr))) {
1852 				mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr));
1853 				sq->snd_buff[qentry].mbuf = mbuf;
1854 				if (mbuf == NULL)
1855 					return (ENOBUFS);
1856 			}
1857 			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
1858 			break;
1859 		default:
1860 			break;
1861 		}
1862 		hdr->l3_offset = ehdrlen;
1863 		hdr->l4_offset = poff;
1864 	}
1865 
1866 	if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) {
1867 		th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff));
1868 
1869 		hdr->tso = 1;
1870 		hdr->tso_start = poff + (th->th_off * 4);
1871 		hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz;
1872 		hdr->inner_l3_offset = ehdrlen - 2;
1873 		nic->drv_stats.tx_tso++;
1874 	}
1875 #endif
1876 
1877 	return (0);
1878 }
1879 
1880 /*
1881  * SQ GATHER subdescriptor
1882  * Must follow HDR descriptor
1883  */
nicvf_sq_add_gather_subdesc(struct snd_queue * sq,int qentry,int size,uint64_t data)1884 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
1885 					       int size, uint64_t data)
1886 {
1887 	struct sq_gather_subdesc *gather;
1888 
1889 	qentry &= (sq->dmem.q_len - 1);
1890 	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
1891 
1892 	memset(gather, 0, SND_QUEUE_DESC_SIZE);
1893 	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
1894 	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
1895 	gather->size = size;
1896 	gather->addr = data;
1897 }
1898 
1899 /* Put an mbuf to a SQ for packet transfer. */
1900 static int
nicvf_tx_mbuf_locked(struct snd_queue * sq,struct mbuf ** mbufp)1901 nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp)
1902 {
1903 	bus_dma_segment_t segs[256];
1904 	struct snd_buff *snd_buff;
1905 	size_t seg;
1906 	int nsegs, qentry;
1907 	int subdesc_cnt;
1908 	int err;
1909 
1910 	NICVF_TX_LOCK_ASSERT(sq);
1911 
1912 	if (sq->free_cnt == 0)
1913 		return (ENOBUFS);
1914 
1915 	snd_buff = &sq->snd_buff[sq->tail];
1916 
1917 	err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap,
1918 	    *mbufp, segs, &nsegs, BUS_DMA_NOWAIT);
1919 	if (__predict_false(err != 0)) {
1920 		/* ARM64TODO: Add mbuf defragmenting if we lack maps */
1921 		m_freem(*mbufp);
1922 		*mbufp = NULL;
1923 		return (err);
1924 	}
1925 
1926 	/* Set how many subdescriptors is required */
1927 	subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1;
1928 	if (subdesc_cnt > sq->free_cnt) {
1929 		/* ARM64TODO: Add mbuf defragmentation if we lack descriptors */
1930 		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1931 		return (ENOBUFS);
1932 	}
1933 
1934 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1935 
1936 	/* Add SQ header subdesc */
1937 	err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp,
1938 	    (*mbufp)->m_pkthdr.len);
1939 	if (err != 0) {
1940 		nicvf_put_sq_desc(sq, subdesc_cnt);
1941 		bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap);
1942 		if (err == ENOBUFS) {
1943 			m_freem(*mbufp);
1944 			*mbufp = NULL;
1945 		}
1946 		return (err);
1947 	}
1948 
1949 	/* Add SQ gather subdescs */
1950 	for (seg = 0; seg < nsegs; seg++) {
1951 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1952 		nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len,
1953 		    segs[seg].ds_addr);
1954 	}
1955 
1956 	/* make sure all memory stores are done before ringing doorbell */
1957 	bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE);
1958 
1959 	dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n",
1960 	    __func__, sq->idx, subdesc_cnt);
1961 	/* Inform HW to xmit new packet */
1962 	nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR,
1963 	    sq->idx, subdesc_cnt);
1964 	return (0);
1965 }
1966 
1967 static __inline u_int
frag_num(u_int i)1968 frag_num(u_int i)
1969 {
1970 #if BYTE_ORDER == BIG_ENDIAN
1971 	return ((i & ~3) + 3 - (i & 3));
1972 #else
1973 	return (i);
1974 #endif
1975 }
1976 
1977 /* Returns MBUF for a received packet */
1978 struct mbuf *
nicvf_get_rcv_mbuf(struct nicvf * nic,struct cqe_rx_t * cqe_rx)1979 nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1980 {
1981 	int frag;
1982 	int payload_len = 0;
1983 	struct mbuf *mbuf;
1984 	struct mbuf *mbuf_frag;
1985 	uint16_t *rb_lens = NULL;
1986 	uint64_t *rb_ptrs = NULL;
1987 
1988 	mbuf = NULL;
1989 	rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t)));
1990 	rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t)));
1991 
1992 	dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n",
1993 	    __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1994 
1995 	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1996 		payload_len = rb_lens[frag_num(frag)];
1997 		if (frag == 0) {
1998 			/* First fragment */
1999 			mbuf = nicvf_rb_ptr_to_mbuf(nic,
2000 			    (*rb_ptrs - cqe_rx->align_pad));
2001 			mbuf->m_len = payload_len;
2002 			mbuf->m_data += cqe_rx->align_pad;
2003 			if_setrcvif(mbuf, nic->ifp);
2004 		} else {
2005 			/* Add fragments */
2006 			mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs);
2007 			m_append(mbuf, payload_len, mbuf_frag->m_data);
2008 			m_freem(mbuf_frag);
2009 		}
2010 		/* Next buffer pointer */
2011 		rb_ptrs++;
2012 	}
2013 
2014 	if (__predict_true(mbuf != NULL)) {
2015 		m_fixhdr(mbuf);
2016 		mbuf->m_pkthdr.flowid = cqe_rx->rq_idx;
2017 		M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
2018 		if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) {
2019 			/*
2020 			 * HW by default verifies IP & TCP/UDP/SCTP checksums
2021 			 */
2022 			if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) {
2023 				mbuf->m_pkthdr.csum_flags =
2024 				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
2025 			}
2026 
2027 			switch (cqe_rx->l4_type) {
2028 			case L4TYPE_UDP:
2029 			case L4TYPE_TCP: /* fall through */
2030 				mbuf->m_pkthdr.csum_flags |=
2031 				    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2032 				mbuf->m_pkthdr.csum_data = 0xffff;
2033 				break;
2034 			case L4TYPE_SCTP:
2035 				mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
2036 				break;
2037 			default:
2038 				break;
2039 			}
2040 		}
2041 	}
2042 
2043 	return (mbuf);
2044 }
2045 
2046 /* Enable interrupt */
2047 void
nicvf_enable_intr(struct nicvf * nic,int int_type,int q_idx)2048 nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
2049 {
2050 	uint64_t reg_val;
2051 
2052 	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2053 
2054 	switch (int_type) {
2055 	case NICVF_INTR_CQ:
2056 		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2057 		break;
2058 	case NICVF_INTR_SQ:
2059 		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2060 		break;
2061 	case NICVF_INTR_RBDR:
2062 		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2063 		break;
2064 	case NICVF_INTR_PKT_DROP:
2065 		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2066 		break;
2067 	case NICVF_INTR_TCP_TIMER:
2068 		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2069 		break;
2070 	case NICVF_INTR_MBOX:
2071 		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2072 		break;
2073 	case NICVF_INTR_QS_ERR:
2074 		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2075 		break;
2076 	default:
2077 		device_printf(nic->dev,
2078 			   "Failed to enable interrupt: unknown type\n");
2079 		break;
2080 	}
2081 
2082 	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
2083 }
2084 
2085 /* Disable interrupt */
2086 void
nicvf_disable_intr(struct nicvf * nic,int int_type,int q_idx)2087 nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
2088 {
2089 	uint64_t reg_val = 0;
2090 
2091 	switch (int_type) {
2092 	case NICVF_INTR_CQ:
2093 		reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2094 		break;
2095 	case NICVF_INTR_SQ:
2096 		reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2097 		break;
2098 	case NICVF_INTR_RBDR:
2099 		reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2100 		break;
2101 	case NICVF_INTR_PKT_DROP:
2102 		reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2103 		break;
2104 	case NICVF_INTR_TCP_TIMER:
2105 		reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2106 		break;
2107 	case NICVF_INTR_MBOX:
2108 		reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT);
2109 		break;
2110 	case NICVF_INTR_QS_ERR:
2111 		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2112 		break;
2113 	default:
2114 		device_printf(nic->dev,
2115 			   "Failed to disable interrupt: unknown type\n");
2116 		break;
2117 	}
2118 
2119 	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
2120 }
2121 
2122 /* Clear interrupt */
2123 void
nicvf_clear_intr(struct nicvf * nic,int int_type,int q_idx)2124 nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
2125 {
2126 	uint64_t reg_val = 0;
2127 
2128 	switch (int_type) {
2129 	case NICVF_INTR_CQ:
2130 		reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2131 		break;
2132 	case NICVF_INTR_SQ:
2133 		reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2134 		break;
2135 	case NICVF_INTR_RBDR:
2136 		reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2137 		break;
2138 	case NICVF_INTR_PKT_DROP:
2139 		reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT);
2140 		break;
2141 	case NICVF_INTR_TCP_TIMER:
2142 		reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT);
2143 		break;
2144 	case NICVF_INTR_MBOX:
2145 		reg_val = (1UL << NICVF_INTR_MBOX_SHIFT);
2146 		break;
2147 	case NICVF_INTR_QS_ERR:
2148 		reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT);
2149 		break;
2150 	default:
2151 		device_printf(nic->dev,
2152 			   "Failed to clear interrupt: unknown type\n");
2153 		break;
2154 	}
2155 
2156 	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
2157 }
2158 
2159 /* Check if interrupt is enabled */
2160 int
nicvf_is_intr_enabled(struct nicvf * nic,int int_type,int q_idx)2161 nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
2162 {
2163 	uint64_t reg_val;
2164 	uint64_t mask = 0xff;
2165 
2166 	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
2167 
2168 	switch (int_type) {
2169 	case NICVF_INTR_CQ:
2170 		mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT);
2171 		break;
2172 	case NICVF_INTR_SQ:
2173 		mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT);
2174 		break;
2175 	case NICVF_INTR_RBDR:
2176 		mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT);
2177 		break;
2178 	case NICVF_INTR_PKT_DROP:
2179 		mask = NICVF_INTR_PKT_DROP_MASK;
2180 		break;
2181 	case NICVF_INTR_TCP_TIMER:
2182 		mask = NICVF_INTR_TCP_TIMER_MASK;
2183 		break;
2184 	case NICVF_INTR_MBOX:
2185 		mask = NICVF_INTR_MBOX_MASK;
2186 		break;
2187 	case NICVF_INTR_QS_ERR:
2188 		mask = NICVF_INTR_QS_ERR_MASK;
2189 		break;
2190 	default:
2191 		device_printf(nic->dev,
2192 			   "Failed to check interrupt enable: unknown type\n");
2193 		break;
2194 	}
2195 
2196 	return (reg_val & mask);
2197 }
2198 
2199 void
nicvf_update_rq_stats(struct nicvf * nic,int rq_idx)2200 nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
2201 {
2202 	struct rcv_queue *rq;
2203 
2204 #define GET_RQ_STATS(reg) \
2205 	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
2206 			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2207 
2208 	rq = &nic->qs->rq[rq_idx];
2209 	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
2210 	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
2211 }
2212 
2213 void
nicvf_update_sq_stats(struct nicvf * nic,int sq_idx)2214 nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
2215 {
2216 	struct snd_queue *sq;
2217 
2218 #define GET_SQ_STATS(reg) \
2219 	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
2220 			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
2221 
2222 	sq = &nic->qs->sq[sq_idx];
2223 	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
2224 	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
2225 }
2226 
2227 /* Check for errors in the receive cmp.queue entry */
2228 int
nicvf_check_cqe_rx_errs(struct nicvf * nic,struct cmp_queue * cq,struct cqe_rx_t * cqe_rx)2229 nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq,
2230     struct cqe_rx_t *cqe_rx)
2231 {
2232 	struct nicvf_hw_stats *stats = &nic->hw_stats;
2233 	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
2234 
2235 	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
2236 		drv_stats->rx_frames_ok++;
2237 		return (0);
2238 	}
2239 
2240 	switch (cqe_rx->err_opcode) {
2241 	case CQ_RX_ERROP_RE_PARTIAL:
2242 		stats->rx_bgx_truncated_pkts++;
2243 		break;
2244 	case CQ_RX_ERROP_RE_JABBER:
2245 		stats->rx_jabber_errs++;
2246 		break;
2247 	case CQ_RX_ERROP_RE_FCS:
2248 		stats->rx_fcs_errs++;
2249 		break;
2250 	case CQ_RX_ERROP_RE_RX_CTL:
2251 		stats->rx_bgx_errs++;
2252 		break;
2253 	case CQ_RX_ERROP_PREL2_ERR:
2254 		stats->rx_prel2_errs++;
2255 		break;
2256 	case CQ_RX_ERROP_L2_MAL:
2257 		stats->rx_l2_hdr_malformed++;
2258 		break;
2259 	case CQ_RX_ERROP_L2_OVERSIZE:
2260 		stats->rx_oversize++;
2261 		break;
2262 	case CQ_RX_ERROP_L2_UNDERSIZE:
2263 		stats->rx_undersize++;
2264 		break;
2265 	case CQ_RX_ERROP_L2_LENMISM:
2266 		stats->rx_l2_len_mismatch++;
2267 		break;
2268 	case CQ_RX_ERROP_L2_PCLP:
2269 		stats->rx_l2_pclp++;
2270 		break;
2271 	case CQ_RX_ERROP_IP_NOT:
2272 		stats->rx_ip_ver_errs++;
2273 		break;
2274 	case CQ_RX_ERROP_IP_CSUM_ERR:
2275 		stats->rx_ip_csum_errs++;
2276 		break;
2277 	case CQ_RX_ERROP_IP_MAL:
2278 		stats->rx_ip_hdr_malformed++;
2279 		break;
2280 	case CQ_RX_ERROP_IP_MALD:
2281 		stats->rx_ip_payload_malformed++;
2282 		break;
2283 	case CQ_RX_ERROP_IP_HOP:
2284 		stats->rx_ip_ttl_errs++;
2285 		break;
2286 	case CQ_RX_ERROP_L3_PCLP:
2287 		stats->rx_l3_pclp++;
2288 		break;
2289 	case CQ_RX_ERROP_L4_MAL:
2290 		stats->rx_l4_malformed++;
2291 		break;
2292 	case CQ_RX_ERROP_L4_CHK:
2293 		stats->rx_l4_csum_errs++;
2294 		break;
2295 	case CQ_RX_ERROP_UDP_LEN:
2296 		stats->rx_udp_len_errs++;
2297 		break;
2298 	case CQ_RX_ERROP_L4_PORT:
2299 		stats->rx_l4_port_errs++;
2300 		break;
2301 	case CQ_RX_ERROP_TCP_FLAG:
2302 		stats->rx_tcp_flag_errs++;
2303 		break;
2304 	case CQ_RX_ERROP_TCP_OFFSET:
2305 		stats->rx_tcp_offset_errs++;
2306 		break;
2307 	case CQ_RX_ERROP_L4_PCLP:
2308 		stats->rx_l4_pclp++;
2309 		break;
2310 	case CQ_RX_ERROP_RBDR_TRUNC:
2311 		stats->rx_truncated_pkts++;
2312 		break;
2313 	}
2314 
2315 	return (1);
2316 }
2317 
2318 /* Check for errors in the send cmp.queue entry */
2319 int
nicvf_check_cqe_tx_errs(struct nicvf * nic,struct cmp_queue * cq,struct cqe_send_t * cqe_tx)2320 nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq,
2321     struct cqe_send_t *cqe_tx)
2322 {
2323 	struct cmp_queue_stats *stats = &cq->stats;
2324 
2325 	switch (cqe_tx->send_status) {
2326 	case CQ_TX_ERROP_GOOD:
2327 		stats->tx.good++;
2328 		return (0);
2329 	case CQ_TX_ERROP_DESC_FAULT:
2330 		stats->tx.desc_fault++;
2331 		break;
2332 	case CQ_TX_ERROP_HDR_CONS_ERR:
2333 		stats->tx.hdr_cons_err++;
2334 		break;
2335 	case CQ_TX_ERROP_SUBDC_ERR:
2336 		stats->tx.subdesc_err++;
2337 		break;
2338 	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
2339 		stats->tx.imm_size_oflow++;
2340 		break;
2341 	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
2342 		stats->tx.data_seq_err++;
2343 		break;
2344 	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
2345 		stats->tx.mem_seq_err++;
2346 		break;
2347 	case CQ_TX_ERROP_LOCK_VIOL:
2348 		stats->tx.lock_viol++;
2349 		break;
2350 	case CQ_TX_ERROP_DATA_FAULT:
2351 		stats->tx.data_fault++;
2352 		break;
2353 	case CQ_TX_ERROP_TSTMP_CONFLICT:
2354 		stats->tx.tstmp_conflict++;
2355 		break;
2356 	case CQ_TX_ERROP_TSTMP_TIMEOUT:
2357 		stats->tx.tstmp_timeout++;
2358 		break;
2359 	case CQ_TX_ERROP_MEM_FAULT:
2360 		stats->tx.mem_fault++;
2361 		break;
2362 	case CQ_TX_ERROP_CK_OVERLAP:
2363 		stats->tx.csum_overlap++;
2364 		break;
2365 	case CQ_TX_ERROP_CK_OFLOW:
2366 		stats->tx.csum_overflow++;
2367 		break;
2368 	}
2369 
2370 	return (1);
2371 }
2372