xref: /linux/drivers/net/ethernet/cavium/thunder/nicvf_queues.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  * Copyright (C) 2015 Cavium, Inc.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of version 2 of the GNU General Public License
6  * as published by the Free Software Foundation.
7  */
8 
9 #include <linux/pci.h>
10 #include <linux/netdevice.h>
11 #include <linux/ip.h>
12 #include <linux/etherdevice.h>
13 #include <net/ip.h>
14 #include <net/tso.h>
15 
16 #include "nic_reg.h"
17 #include "nic.h"
18 #include "q_struct.h"
19 #include "nicvf_queues.h"
20 
21 struct rbuf_info {
22 	struct page *page;
23 	void	*data;
24 	u64	offset;
25 };
26 
27 #define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
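/* Layout of a receive buffer as set up by nicvf_alloc_rcv_buffer()
 * (a sketch; NICVF_RCV_BUF_ALIGN_BYTES is the cache-line alignment,
 * 128 bytes per the comments below):
 *
 *   rinfo->data             rinfo                    buffer handed to HW
 *   |<-- rinfo->offset ---->|<-- ALIGN_BYTES ------->|<-- packet data ...
 *   [ alignment padding     ][ struct rbuf_info ...  ][ usable space     ]
 *
 * GET_RBUF_INFO() steps back NICVF_RCV_BUF_ALIGN_BYTES from the address
 * the HW reports in order to recover the rbuf_info saved at allocation time.
 */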
28 
29 /* Poll a register for a specific value */
30 static int nicvf_poll_reg(struct nicvf *nic, int qidx,
31 			  u64 reg, int bit_pos, int bits, int val)
32 {
33 	u64 bit_mask;
34 	u64 reg_val;
35 	int timeout = 10;
36 
37 	bit_mask = (1ULL << bits) - 1;
38 	bit_mask = (bit_mask << bit_pos);
39 
40 	while (timeout) {
41 		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
42 		if (((reg_val & bit_mask) >> bit_pos) == val)
43 			return 0;
44 		usleep_range(1000, 2000);
45 		timeout--;
46 	}
47 	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
48 	return 1;
49 }
50 
51 /* Allocate memory for a queue's descriptors */
52 static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
53 				  int q_len, int desc_size, int align_bytes)
54 {
55 	dmem->q_len = q_len;
56 	dmem->size = (desc_size * q_len) + align_bytes;
57 	/* Save address, need it while freeing */
58 	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
59 						&dmem->dma, GFP_KERNEL);
60 	if (!dmem->unalign_base)
61 		return -ENOMEM;
62 
63 	/* Align memory address for 'align_bytes' */
64 	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
65 	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
66 	return 0;
67 }
68 
69 /* Free queue's descriptor memory */
70 static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
71 {
72 	if (!dmem)
73 		return;
74 
75 	dma_free_coherent(&nic->pdev->dev, dmem->size,
76 			  dmem->unalign_base, dmem->dma);
77 	dmem->unalign_base = NULL;
78 	dmem->base = NULL;
79 }
80 
81 /* Allocate buffer for packet reception
82  * HW returns the memory address where the packet is DMA'ed, not a pointer
83  * into the RBDR ring, so save the buffer address at the start of the
84  * fragment and align the start address to a cache-aligned boundary.
85  */
86 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
87 					 u32 buf_len, u64 **rbuf)
88 {
89 	u64 data;
90 	struct rbuf_info *rinfo;
91 	int order = get_order(buf_len);
92 
93 	/* Check if request can be accommodated in the previously allocated page */
94 	if (nic->rb_page) {
95 		if ((nic->rb_page_offset + buf_len + buf_len) >
96 		    (PAGE_SIZE << order)) {
97 			nic->rb_page = NULL;
98 		} else {
99 			nic->rb_page_offset += buf_len;
100 			get_page(nic->rb_page);
101 		}
102 	}
103 
104 	/* Allocate a new page */
105 	if (!nic->rb_page) {
106 		nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
107 		if (!nic->rb_page) {
108 			netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
109 			return -ENOMEM;
110 		}
111 		nic->rb_page_offset = 0;
112 	}
113 
114 	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
115 
116 	/* Align buffer address to the cache line, i.e. 128 bytes */
117 	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
118 	/* Save page address for refcount updates */
119 	rinfo->page = nic->rb_page;
120 	/* Store start address for later retrieval */
121 	rinfo->data = (void *)data;
122 	/* Store alignment offset */
123 	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
124 
125 	data += rinfo->offset;
126 
127 	/* Give next aligned address to hw for DMA */
128 	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
129 	return 0;
130 }
131 
132 /* Retrieve actual buffer start address and build skb for received packet */
133 static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
134 					   u64 rb_ptr, int len)
135 {
136 	struct sk_buff *skb;
137 	struct rbuf_info *rinfo;
138 
139 	rb_ptr = (u64)phys_to_virt(rb_ptr);
140 	/* Get buffer start address and alignment offset */
141 	rinfo = GET_RBUF_INFO(rb_ptr);
142 
143 	/* Now build an skb to give to stack */
144 	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
145 	if (!skb) {
146 		put_page(rinfo->page);
147 		return NULL;
148 	}
149 
150 	/* Set correct skb->data */
151 	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
152 
153 	prefetch((void *)rb_ptr);
154 	return skb;
155 }
156 
157 /* Allocate RBDR ring and populate receive buffers */
158 static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
159 			    int ring_len, int buf_size)
160 {
161 	int idx;
162 	u64 *rbuf;
163 	struct rbdr_entry_t *desc;
164 	int err;
165 
166 	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
167 				     sizeof(struct rbdr_entry_t),
168 				     NICVF_RCV_BUF_ALIGN_BYTES);
169 	if (err)
170 		return err;
171 
172 	rbdr->desc = rbdr->dmem.base;
173 	/* Buffer size has to be a multiple of 128 bytes */
174 	rbdr->dma_size = buf_size;
175 	rbdr->enable = true;
176 	rbdr->thresh = RBDR_THRESH;
177 
178 	nic->rb_page = NULL;
179 	for (idx = 0; idx < ring_len; idx++) {
180 		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
181 					     &rbuf);
182 		if (err)
183 			return err;
184 
185 		desc = GET_RBDR_DESC(rbdr, idx);
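		/* Store the physical address right-shifted by NICVF_RCV_BUF_ALIGN;
		 * the low bits are zero anyway since the buffer is cache aligned,
		 * and nicvf_free_rbdr() shifts left to recover the full address.
		 */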
186 		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
187 	}
188 	return 0;
189 }
190 
191 /* Free RBDR ring and its receive buffers */
192 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
193 {
194 	int head, tail;
195 	u64 buf_addr;
196 	struct rbdr_entry_t *desc;
197 	struct rbuf_info *rinfo;
198 
199 	if (!rbdr)
200 		return;
201 
202 	rbdr->enable = false;
203 	if (!rbdr->dmem.base)
204 		return;
205 
206 	head = rbdr->head;
207 	tail = rbdr->tail;
208 
209 	/* Release receive buffer pages */
210 	while (head != tail) {
211 		desc = GET_RBDR_DESC(rbdr, head);
212 		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
213 		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
214 		put_page(rinfo->page);
215 		head++;
216 		head &= (rbdr->dmem.q_len - 1);
217 	}
218 	/* Release buffer of tail desc */
219 	desc = GET_RBDR_DESC(rbdr, tail);
220 	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
221 	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
222 	put_page(rinfo->page);
223 
224 	/* Free RBDR ring */
225 	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
226 }
227 
228 /* Refill receive buffer descriptors with new buffers.
229  */
230 static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
231 {
232 	struct queue_set *qs = nic->qs;
233 	int rbdr_idx = qs->rbdr_cnt;
234 	int tail, qcount;
235 	int refill_rb_cnt;
236 	struct rbdr *rbdr;
237 	struct rbdr_entry_t *desc;
238 	u64 *rbuf;
239 	int new_rb = 0;
240 
241 refill:
242 	if (!rbdr_idx)
243 		return;
244 	rbdr_idx--;
245 	rbdr = &qs->rbdr[rbdr_idx];
246 	/* Check if it's enabled */
247 	if (!rbdr->enable)
248 		goto next_rbdr;
249 
250 	/* Get the number of descriptors to be refilled */
251 	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
252 	qcount &= 0x7FFFF;
253 	/* Doorbell can be rung with a max of ring size minus 1 */
254 	if (qcount >= (qs->rbdr_len - 1))
255 		goto next_rbdr;
256 	else
257 		refill_rb_cnt = qs->rbdr_len - qcount - 1;
258 
259 	/* Start filling descs from tail */
260 	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
261 	while (refill_rb_cnt) {
262 		tail++;
263 		tail &= (rbdr->dmem.q_len - 1);
264 
265 		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
266 			break;
267 
268 		desc = GET_RBDR_DESC(rbdr, tail);
269 		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
270 		refill_rb_cnt--;
271 		new_rb++;
272 	}
273 
274 	/* make sure all memory stores are done before ringing doorbell */
275 	smp_wmb();
276 
277 	/* Check if buffer allocation failed */
278 	if (refill_rb_cnt)
279 		nic->rb_alloc_fail = true;
280 	else
281 		nic->rb_alloc_fail = false;
282 
283 	/* Notify HW */
284 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
285 			      rbdr_idx, new_rb);
286 next_rbdr:
287 	/* Re-enable RBDR interrupts only if buffer allocation was successful */
288 	if (!nic->rb_alloc_fail && rbdr->enable)
289 		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
290 
291 	if (rbdr_idx)
292 		goto refill;
293 }
294 
295 /* Alloc rcv buffers in non-atomic mode for a better chance of success */
296 void nicvf_rbdr_work(struct work_struct *work)
297 {
298 	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
299 
300 	nicvf_refill_rbdr(nic, GFP_KERNEL);
301 	if (nic->rb_alloc_fail)
302 		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
303 	else
304 		nic->rb_work_scheduled = false;
305 }
306 
307 /* In softirq context, alloc rcv buffers in atomic mode */
308 void nicvf_rbdr_task(unsigned long data)
309 {
310 	struct nicvf *nic = (struct nicvf *)data;
311 
312 	nicvf_refill_rbdr(nic, GFP_ATOMIC);
313 	if (nic->rb_alloc_fail) {
314 		nic->rb_work_scheduled = true;
315 		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
316 	}
317 }
318 
319 /* Initialize completion queue */
320 static int nicvf_init_cmp_queue(struct nicvf *nic,
321 				struct cmp_queue *cq, int q_len)
322 {
323 	int err;
324 
325 	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
326 				     NICVF_CQ_BASE_ALIGN_BYTES);
327 	if (err)
328 		return err;
329 
330 	cq->desc = cq->dmem.base;
331 	cq->thresh = CMP_QUEUE_CQE_THRESH;
332 	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
333 
334 	return 0;
335 }
336 
337 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
338 {
339 	if (!cq)
340 		return;
341 	if (!cq->dmem.base)
342 		return;
343 
344 	nicvf_free_q_desc_mem(nic, &cq->dmem);
345 }
346 
347 /* Initialize transmit queue */
348 static int nicvf_init_snd_queue(struct nicvf *nic,
349 				struct snd_queue *sq, int q_len)
350 {
351 	int err;
352 
353 	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
354 				     NICVF_SQ_BASE_ALIGN_BYTES);
355 	if (err)
356 		return err;
357 
358 	sq->desc = sq->dmem.base;
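	/* One u64 slot per SQ entry to remember the owning skb; the TX
	 * completion and cleanup paths use it to free the skb once the HW
	 * is done with the descriptor.
	 */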
359 	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
360 	if (!sq->skbuff)
361 		return -ENOMEM;
362 	sq->head = 0;
363 	sq->tail = 0;
364 	atomic_set(&sq->free_cnt, q_len - 1);
365 	sq->thresh = SND_QUEUE_THRESH;
366 
367 	/* Preallocate memory for TSO segment headers */
368 	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
369 					  q_len * TSO_HEADER_SIZE,
370 					  &sq->tso_hdrs_phys, GFP_KERNEL);
371 	if (!sq->tso_hdrs)
372 		return -ENOMEM;
373 
374 	return 0;
375 }
376 
377 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
378 {
379 	if (!sq)
380 		return;
381 	if (!sq->dmem.base)
382 		return;
383 
384 	if (sq->tso_hdrs)
385 		dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len * TSO_HEADER_SIZE,
386 				  sq->tso_hdrs, sq->tso_hdrs_phys);
387 
388 	kfree(sq->skbuff);
389 	nicvf_free_q_desc_mem(nic, &sq->dmem);
390 }
391 
392 static void nicvf_reclaim_snd_queue(struct nicvf *nic,
393 				    struct queue_set *qs, int qidx)
394 {
395 	/* Disable send queue */
396 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
397 	/* Check if SQ is stopped */
398 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
399 		return;
400 	/* Reset send queue */
401 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
402 }
403 
404 static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
405 				    struct queue_set *qs, int qidx)
406 {
407 	union nic_mbx mbx = {};
408 
409 	/* Make sure all packets in the pipeline are written back into mem */
410 	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
411 	nicvf_send_msg_to_pf(nic, &mbx);
412 }
413 
414 static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
415 				    struct queue_set *qs, int qidx)
416 {
417 	/* Disable timer threshold (doesn't get reset upon CQ reset) */
418 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
419 	/* Disable completion queue */
420 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
421 	/* Reset completion queue */
422 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
423 }
424 
425 static void nicvf_reclaim_rbdr(struct nicvf *nic,
426 			       struct rbdr *rbdr, int qidx)
427 {
428 	u64 tmp, fifo_state;
429 	int timeout = 10;
430 
431 	/* Save head and tail pointers for freeing up buffers */
432 	rbdr->head = nicvf_queue_reg_read(nic,
433 					  NIC_QSET_RBDR_0_1_HEAD,
434 					  qidx) >> 3;
435 	rbdr->tail = nicvf_queue_reg_read(nic,
436 					  NIC_QSET_RBDR_0_1_TAIL,
437 					  qidx) >> 3;
438 
439 	/* If RBDR FIFO is in 'FAIL' state then do a reset first
440 	 * before reclaiming.
441 	 */
442 	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
443 	if (((fifo_state >> 62) & 0x03) == 0x3)
444 		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
445 				      qidx, NICVF_RBDR_RESET);
446 
447 	/* Disable RBDR */
448 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
449 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
450 		return;
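	/* Wait until the two 32-bit halves of PREFETCH_STATUS match
	 * (presumably prefetched vs. completed buffer counts) so that no
	 * prefetched descriptors are outstanding before the reset below.
	 */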
451 	while (1) {
452 		tmp = nicvf_queue_reg_read(nic,
453 					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
454 					   qidx);
455 		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
456 			break;
457 		usleep_range(1000, 2000);
458 		timeout--;
459 		if (!timeout) {
460 			netdev_err(nic->netdev,
461 				   "Failed polling on prefetch status\n");
462 			return;
463 		}
464 	}
465 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
466 			      qidx, NICVF_RBDR_RESET);
467 
468 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
469 		return;
470 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
471 	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
472 		return;
473 }
474 
475 /* Configures receive queue */
476 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
477 				   int qidx, bool enable)
478 {
479 	union nic_mbx mbx = {};
480 	struct rcv_queue *rq;
481 	struct rq_cfg rq_cfg;
482 
483 	rq = &qs->rq[qidx];
484 	rq->enable = enable;
485 
486 	/* Disable receive queue */
487 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
488 
489 	if (!rq->enable) {
490 		nicvf_reclaim_rcv_queue(nic, qs, qidx);
491 		return;
492 	}
493 
494 	rq->cq_qs = qs->vnic_id;
495 	rq->cq_idx = qidx;
496 	rq->start_rbdr_qs = qs->vnic_id;
497 	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
498 	rq->cont_rbdr_qs = qs->vnic_id;
499 	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
500 	/* All writes of RBDR data to be loaded into L2 cache as well */
501 	rq->caching = 1;
502 
503 	/* Send a mailbox msg to PF to config RQ */
504 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
505 	mbx.rq.qs_num = qs->vnic_id;
506 	mbx.rq.rq_num = qidx;
507 	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
508 			  (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
509 			  (rq->cont_qs_rbdr_idx << 8) |
510 			  (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
511 	nicvf_send_msg_to_pf(nic, &mbx);
512 
513 	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
514 	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
515 	nicvf_send_msg_to_pf(nic, &mbx);
516 
517 	/* RQ drop config
518 	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
519 	 */
520 	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
521 	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
522 	nicvf_send_msg_to_pf(nic, &mbx);
523 
524 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
525 
526 	/* Enable Receive queue */
527 	rq_cfg.ena = 1;
528 	rq_cfg.tcp_ena = 0;
529 	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
530 }
531 
532 /* Configures completion queue */
533 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
534 			    int qidx, bool enable)
535 {
536 	struct cmp_queue *cq;
537 	struct cq_cfg cq_cfg;
538 
539 	cq = &qs->cq[qidx];
540 	cq->enable = enable;
541 
542 	if (!cq->enable) {
543 		nicvf_reclaim_cmp_queue(nic, qs, qidx);
544 		return;
545 	}
546 
547 	/* Reset completion queue */
548 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
549 
550 	if (!cq->enable)
551 		return;
552 
553 	spin_lock_init(&cq->lock);
554 	/* Set completion queue base address */
555 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
556 			      qidx, (u64)(cq->dmem.phys_base));
557 
558 	/* Enable Completion queue */
559 	cq_cfg.ena = 1;
560 	cq_cfg.reset = 0;
561 	cq_cfg.caching = 0;
562 	cq_cfg.qsize = CMP_QSIZE;
563 	cq_cfg.avg_con = 0;
564 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
565 
566 	/* Set threshold value for interrupt generation */
567 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
568 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
569 			      qidx, nic->cq_coalesce_usecs);
570 }
571 
572 /* Configures transmit queue */
573 static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
574 				   int qidx, bool enable)
575 {
576 	union nic_mbx mbx = {};
577 	struct snd_queue *sq;
578 	struct sq_cfg sq_cfg;
579 
580 	sq = &qs->sq[qidx];
581 	sq->enable = enable;
582 
583 	if (!sq->enable) {
584 		nicvf_reclaim_snd_queue(nic, qs, qidx);
585 		return;
586 	}
587 
588 	/* Reset send queue */
589 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
590 
591 	sq->cq_qs = qs->vnic_id;
592 	sq->cq_idx = qidx;
593 
594 	/* Send a mailbox msg to PF to config SQ */
595 	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
596 	mbx.sq.qs_num = qs->vnic_id;
597 	mbx.sq.sq_num = qidx;
598 	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
599 	nicvf_send_msg_to_pf(nic, &mbx);
600 
601 	/* Set queue base address */
602 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
603 			      qidx, (u64)(sq->dmem.phys_base));
604 
605 	/* Enable send queue & set queue size */
606 	sq_cfg.ena = 1;
607 	sq_cfg.reset = 0;
608 	sq_cfg.ldwb = 0;
609 	sq_cfg.qsize = SND_QSIZE;
610 	sq_cfg.tstmp_bgx_intf = 0;
611 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
612 
613 	/* Set threshold value for interrupt generation */
614 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
615 
616 	/* Set queue:cpu affinity for better load distribution */
617 	if (cpu_online(qidx)) {
618 		cpumask_set_cpu(qidx, &sq->affinity_mask);
619 		netif_set_xps_queue(nic->netdev,
620 				    &sq->affinity_mask, qidx);
621 	}
622 }
623 
624 /* Configures receive buffer descriptor ring */
625 static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
626 			      int qidx, bool enable)
627 {
628 	struct rbdr *rbdr;
629 	struct rbdr_cfg rbdr_cfg;
630 
631 	rbdr = &qs->rbdr[qidx];
632 	nicvf_reclaim_rbdr(nic, rbdr, qidx);
633 	if (!enable)
634 		return;
635 
636 	/* Set descriptor base address */
637 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
638 			      qidx, (u64)(rbdr->dmem.phys_base));
639 
640 	/* Enable RBDR & set queue size */
641 	/* Buffer size should be in multiples of 128 bytes */
642 	rbdr_cfg.ena = 1;
643 	rbdr_cfg.reset = 0;
644 	rbdr_cfg.ldwb = 0;
645 	rbdr_cfg.qsize = RBDR_SIZE;
646 	rbdr_cfg.avg_con = 0;
647 	rbdr_cfg.lines = rbdr->dma_size / 128;
648 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
649 			      qidx, *(u64 *)&rbdr_cfg);
650 
651 	/* Notify HW */
652 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
653 			      qidx, qs->rbdr_len - 1);
654 
655 	/* Set threshold value for interrupt generation */
656 	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
657 			      qidx, rbdr->thresh - 1);
658 }
659 
660 /* Requests PF to assign and enable Qset */
661 void nicvf_qset_config(struct nicvf *nic, bool enable)
662 {
663 	union nic_mbx mbx = {};
664 	struct queue_set *qs = nic->qs;
665 	struct qs_cfg *qs_cfg;
666 
667 	if (!qs) {
668 		netdev_warn(nic->netdev,
669 			    "Qset is still not allocated, don't init queues\n");
670 		return;
671 	}
672 
673 	qs->enable = enable;
674 	qs->vnic_id = nic->vf_id;
675 
676 	/* Send a mailbox msg to PF to config Qset */
677 	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
678 	mbx.qs.num = qs->vnic_id;
679 
680 	mbx.qs.cfg = 0;
681 	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
682 	if (qs->enable) {
683 		qs_cfg->ena = 1;
684 #ifdef __BIG_ENDIAN
685 		qs_cfg->be = 1;
686 #endif
687 		qs_cfg->vnic = qs->vnic_id;
688 	}
689 	nicvf_send_msg_to_pf(nic, &mbx);
690 }
691 
692 static void nicvf_free_resources(struct nicvf *nic)
693 {
694 	int qidx;
695 	struct queue_set *qs = nic->qs;
696 
697 	/* Free receive buffer descriptor ring */
698 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
699 		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
700 
701 	/* Free completion queue */
702 	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
703 		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
704 
705 	/* Free send queue */
706 	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
707 		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
708 }
709 
710 static int nicvf_alloc_resources(struct nicvf *nic)
711 {
712 	int qidx;
713 	struct queue_set *qs = nic->qs;
714 
715 	/* Alloc receive buffer descriptor ring */
716 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
717 		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
718 				    DMA_BUFFER_LEN))
719 			goto alloc_fail;
720 	}
721 
722 	/* Alloc send queue */
723 	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
724 		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
725 			goto alloc_fail;
726 	}
727 
728 	/* Alloc completion queue */
729 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
730 		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
731 			goto alloc_fail;
732 	}
733 
734 	return 0;
735 alloc_fail:
736 	nicvf_free_resources(nic);
737 	return -ENOMEM;
738 }
739 
740 int nicvf_set_qset_resources(struct nicvf *nic)
741 {
742 	struct queue_set *qs;
743 
744 	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
745 	if (!qs)
746 		return -ENOMEM;
747 	nic->qs = qs;
748 
749 	/* Set count of each queue */
750 	qs->rbdr_cnt = RBDR_CNT;
751 	qs->rq_cnt = RCV_QUEUE_CNT;
752 	qs->sq_cnt = SND_QUEUE_CNT;
753 	qs->cq_cnt = CMP_QUEUE_CNT;
754 
755 	/* Set queue lengths */
756 	qs->rbdr_len = RCV_BUF_COUNT;
757 	qs->sq_len = SND_QUEUE_LEN;
758 	qs->cq_len = CMP_QUEUE_LEN;
759 	return 0;
760 }
761 
762 int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
763 {
764 	bool disable = false;
765 	struct queue_set *qs = nic->qs;
766 	int qidx;
767 
768 	if (!qs)
769 		return 0;
770 
771 	if (enable) {
772 		if (nicvf_alloc_resources(nic))
773 			return -ENOMEM;
774 
775 		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
776 			nicvf_snd_queue_config(nic, qs, qidx, enable);
777 		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
778 			nicvf_cmp_queue_config(nic, qs, qidx, enable);
779 		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
780 			nicvf_rbdr_config(nic, qs, qidx, enable);
781 		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
782 			nicvf_rcv_queue_config(nic, qs, qidx, enable);
783 	} else {
784 		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
785 			nicvf_rcv_queue_config(nic, qs, qidx, disable);
786 		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
787 			nicvf_rbdr_config(nic, qs, qidx, disable);
788 		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
789 			nicvf_snd_queue_config(nic, qs, qidx, disable);
790 		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
791 			nicvf_cmp_queue_config(nic, qs, qidx, disable);
792 
793 		nicvf_free_resources(nic);
794 	}
795 
796 	return 0;
797 }
798 
799 /* Reserve 'desc_cnt' free descriptors from the SQ
800  * and return the index of the first reserved descriptor.
801  */
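/* Queue lengths in this driver are powers of two, so the '& (q_len - 1)'
 * masking below is a cheap modulo that wraps ring indices around.
 */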
802 static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
803 {
804 	int qentry;
805 
806 	qentry = sq->tail;
807 	atomic_sub(desc_cnt, &sq->free_cnt);
808 	sq->tail += desc_cnt;
809 	sq->tail &= (sq->dmem.q_len - 1);
810 
811 	return qentry;
812 }
813 
814 /* Free descriptor back to SQ for future use */
815 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
816 {
817 	atomic_add(desc_cnt, &sq->free_cnt);
818 	sq->head += desc_cnt;
819 	sq->head &= (sq->dmem.q_len - 1);
820 }
821 
822 static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
823 {
824 	qentry++;
825 	qentry &= (sq->dmem.q_len - 1);
826 	return qentry;
827 }
828 
829 void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
830 {
831 	u64 sq_cfg;
832 
833 	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
834 	sq_cfg |= NICVF_SQ_EN;
835 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
836 	/* Ring doorbell so that H/W restarts processing SQEs */
837 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
838 }
839 
840 void nicvf_sq_disable(struct nicvf *nic, int qidx)
841 {
842 	u64 sq_cfg;
843 
844 	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
845 	sq_cfg &= ~NICVF_SQ_EN;
846 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
847 }
848 
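/* Reclaim descriptors the HW has already processed (from sq->head up to
 * the HW head pointer), freeing any skbs attached to HEADER subdescs and
 * accounting their packets/bytes; typically used when tearing a queue down.
 */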
849 void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
850 			      int qidx)
851 {
852 	u64 head, tail;
853 	struct sk_buff *skb;
854 	struct nicvf *nic = netdev_priv(netdev);
855 	struct sq_hdr_subdesc *hdr;
856 
857 	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
858 	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
859 	while (sq->head != head) {
860 		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
861 		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
862 			nicvf_put_sq_desc(sq, 1);
863 			continue;
864 		}
865 		skb = (struct sk_buff *)sq->skbuff[sq->head];
866 		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 		atomic64_add(hdr->tot_len,
868 			     (atomic64_t *)&netdev->stats.tx_bytes);
869 		dev_kfree_skb_any(skb);
870 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 	}
872 }
873 
874 /* Calculate the number of SQ subdescriptors needed to transmit all
875  * segments of this TSO packet.
876  * Taken from 'Tilera network driver' with a minor modification.
877  */
878 static int nicvf_tso_count_subdescs(struct sk_buff *skb)
879 {
880 	struct skb_shared_info *sh = skb_shinfo(skb);
881 	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
882 	unsigned int data_len = skb->len - sh_len;
883 	unsigned int p_len = sh->gso_size;
884 	long f_id = -1;    /* id of the current fragment */
885 	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
886 	long f_used = 0;  /* bytes used from the current fragment */
887 	long n;            /* size of the current piece of payload */
888 	int num_edescs = 0;
889 	int segment;
890 
891 	for (segment = 0; segment < sh->gso_segs; segment++) {
892 		unsigned int p_used = 0;
893 
894 		/* One edesc for header and for each piece of the payload. */
895 		for (num_edescs++; p_used < p_len; num_edescs++) {
896 			/* Advance as needed. */
897 			while (f_used >= f_size) {
898 				f_id++;
899 				f_size = skb_frag_size(&sh->frags[f_id]);
900 				f_used = 0;
901 			}
902 
903 			/* Use bytes from the current fragment. */
904 			n = p_len - p_used;
905 			if (n > f_size - f_used)
906 				n = f_size - f_used;
907 			f_used += n;
908 			p_used += n;
909 		}
910 
911 		/* The last segment may be less than gso_size. */
912 		data_len -= p_len;
913 		if (data_len < p_len)
914 			p_len = data_len;
915 	}
916 
917 	/* '+ gso_segs' for the SQ_HDR_SUBDESC of each segment */
918 	return num_edescs + sh->gso_segs;
919 }
920 
921 /* Get the number of SQ descriptors needed to xmit this skb */
922 static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
923 {
924 	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
925 
926 	if (skb_shinfo(skb)->gso_size) {
927 		subdesc_cnt = nicvf_tso_count_subdescs(skb);
928 		return subdesc_cnt;
929 	}
930 
931 	if (skb_shinfo(skb)->nr_frags)
932 		subdesc_cnt += skb_shinfo(skb)->nr_frags;
933 
934 	return subdesc_cnt;
935 }
936 
937 /* Add SQ HEADER subdescriptor.
938  * First subdescriptor for every send descriptor.
939  */
940 static inline void
941 nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
942 			 int subdesc_cnt, struct sk_buff *skb, int len)
943 {
944 	int proto;
945 	struct sq_hdr_subdesc *hdr;
946 
947 	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
948 	sq->skbuff[qentry] = (u64)skb;
949 
950 	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
951 	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
952 	/* Enable notification via CQE after processing SQE */
953 	hdr->post_cqe = 1;
954 	/* Number of subdescriptors following this */
955 	hdr->subdesc_cnt = subdesc_cnt;
956 	hdr->tot_len = len;
957 
958 	/* Offload checksum calculation to HW */
959 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
960 		if (skb->protocol != htons(ETH_P_IP))
961 			return;
962 
963 		hdr->csum_l3 = 1; /* Enable IP csum calculation */
964 		hdr->l3_offset = skb_network_offset(skb);
965 		hdr->l4_offset = skb_transport_offset(skb);
966 
967 		proto = ip_hdr(skb)->protocol;
968 		switch (proto) {
969 		case IPPROTO_TCP:
970 			hdr->csum_l4 = SEND_L4_CSUM_TCP;
971 			break;
972 		case IPPROTO_UDP:
973 			hdr->csum_l4 = SEND_L4_CSUM_UDP;
974 			break;
975 		case IPPROTO_SCTP:
976 			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
977 			break;
978 		}
979 	}
980 }
981 
982 /* SQ GATHER subdescriptor
983  * Must follow HDR descriptor
984  */
985 static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
986 					       int size, u64 data)
987 {
988 	struct sq_gather_subdesc *gather;
989 
990 	qentry &= (sq->dmem.q_len - 1);
991 	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
992 
993 	memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 	gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
996 	gather->size = size;
997 	gather->addr = data;
998 }
999 
1000 /* Segment a TSO packet into 'gso_size' segments and append
1001  * them to SQ for transfer
1002  */
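/* Each segment becomes one HDR subdescriptor, one GATHER subdescriptor for
 * the rebuilt TSO header and one GATHER per payload chunk.  Only the last
 * segment's HDR entry keeps the skb pointer, so the completion path frees
 * the skb exactly once after the final segment is sent.
 */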
1003 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1004 			       int qentry, struct sk_buff *skb)
1005 {
1006 	struct tso_t tso;
1007 	int seg_subdescs = 0, desc_cnt = 0;
1008 	int seg_len, total_len, data_left;
1009 	int hdr_qentry = qentry;
1010 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1011 
1012 	tso_start(skb, &tso);
1013 	total_len = skb->len - hdr_len;
1014 	while (total_len > 0) {
1015 		char *hdr;
1016 
1017 		/* Save Qentry for adding HDR_SUBDESC at the end */
1018 		hdr_qentry = qentry;
1019 
1020 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1021 		total_len -= data_left;
1022 
1023 		/* Add segment's header */
1024 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1025 		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
1026 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1027 		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
1028 					    sq->tso_hdrs_phys +
1029 					    qentry * TSO_HEADER_SIZE);
1030 		/* HDR_SUBDESC + GATHER */
1031 		seg_subdescs = 2;
1032 		seg_len = hdr_len;
1033 
1034 		/* Add segment's payload fragments */
1035 		while (data_left > 0) {
1036 			int size;
1037 
1038 			size = min_t(int, tso.size, data_left);
1039 
1040 			qentry = nicvf_get_nxt_sqentry(sq, qentry);
1041 			nicvf_sq_add_gather_subdesc(sq, qentry, size,
1042 						    virt_to_phys(tso.data));
1043 			seg_subdescs++;
1044 			seg_len += size;
1045 
1046 			data_left -= size;
1047 			tso_build_data(skb, &tso, size);
1048 		}
1049 		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 					 seg_subdescs - 1, skb, seg_len);
1051 		sq->skbuff[hdr_qentry] = 0;
1052 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 
1054 		desc_cnt += seg_subdescs;
1055 	}
1056 	/* Save SKB in the last segment for freeing */
1057 	sq->skbuff[hdr_qentry] = (u64)skb;
1058 
1059 	/* make sure all memory stores are done before ringing doorbell */
1060 	smp_wmb();
1061 
1062 	/* Inform HW to xmit all TSO segments */
1063 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 			      skb_get_queue_mapping(skb), desc_cnt);
1065 	return 1;
1066 }
1067 
1068 /* Append an skb to a SQ for packet transfer. */
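/* A non-TSO packet uses one HDR subdescriptor, one GATHER for the linear
 * data and one GATHER per page fragment, matching the count computed by
 * nicvf_sq_subdesc_required().
 */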
1069 int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
1070 {
1071 	int i, size;
1072 	int subdesc_cnt;
1073 	int sq_num, qentry;
1074 	struct queue_set *qs = nic->qs;
1075 	struct snd_queue *sq;
1076 
1077 	sq_num = skb_get_queue_mapping(skb);
1078 	sq = &qs->sq[sq_num];
1079 
1080 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
1081 	if (subdesc_cnt > atomic_read(&sq->free_cnt))
1082 		goto append_fail;
1083 
1084 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
1085 
1086 	/* Check if it's a TSO packet */
1087 	if (skb_shinfo(skb)->gso_size)
1088 		return nicvf_sq_append_tso(nic, sq, qentry, skb);
1089 
1090 	/* Add SQ header subdesc */
1091 	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
1092 
1093 	/* Add SQ gather subdescs */
1094 	qentry = nicvf_get_nxt_sqentry(sq, qentry);
1095 	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
1096 	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
1097 
1098 	/* Check for scattered buffer */
1099 	if (!skb_is_nonlinear(skb))
1100 		goto doorbell;
1101 
1102 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1103 		const struct skb_frag_struct *frag;
1104 
1105 		frag = &skb_shinfo(skb)->frags[i];
1106 
1107 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
1108 		size = skb_frag_size(frag);
1109 		nicvf_sq_add_gather_subdesc(sq, qentry, size,
1110 					    virt_to_phys(
1111 					    skb_frag_address(frag)));
1112 	}
1113 
1114 doorbell:
1115 	/* make sure all memory stores are done before ringing doorbell */
1116 	smp_wmb();
1117 
1118 	/* Inform HW to xmit new packet */
1119 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1120 			      sq_num, subdesc_cnt);
1121 	return 1;
1122 
1123 append_fail:
1124 	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
1125 	return 0;
1126 }
1127 
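/* rb_lens entries are 16-bit sizes packed four per 64-bit word in the CQE,
 * so on big-endian hosts the order within each group of four is reversed;
 * frag_num() maps a logical fragment index to its in-memory position.
 */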
1128 static inline unsigned frag_num(unsigned i)
1129 {
1130 #ifdef __BIG_ENDIAN
1131 	return (i & ~3) + 3 - (i & 3);
1132 #else
1133 	return i;
1134 #endif
1135 }
1136 
1137 /* Returns SKB for a received packet */
1138 struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1139 {
1140 	int frag;
1141 	int payload_len = 0;
1142 	struct sk_buff *skb = NULL;
1143 	struct sk_buff *skb_frag = NULL;
1144 	struct sk_buff *prev_frag = NULL;
1145 	u16 *rb_lens = NULL;
1146 	u64 *rb_ptrs = NULL;
1147 
1148 	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
1149 	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
1150 
1151 	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
1152 		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
1153 
1154 	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
1155 		payload_len = rb_lens[frag_num(frag)];
1156 		if (!frag) {
1157 			/* First fragment */
1158 			skb = nicvf_rb_ptr_to_skb(nic,
1159 						  *rb_ptrs - cqe_rx->align_pad,
1160 						  payload_len);
1161 			if (!skb)
1162 				return NULL;
1163 			skb_reserve(skb, cqe_rx->align_pad);
1164 			skb_put(skb, payload_len);
1165 		} else {
1166 			/* Add fragments */
1167 			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
1168 						       payload_len);
1169 			if (!skb_frag) {
1170 				dev_kfree_skb(skb);
1171 				return NULL;
1172 			}
1173 
1174 			if (!skb_shinfo(skb)->frag_list)
1175 				skb_shinfo(skb)->frag_list = skb_frag;
1176 			else
1177 				prev_frag->next = skb_frag;
1178 
1179 			prev_frag = skb_frag;
1180 			skb->len += payload_len;
1181 			skb->data_len += payload_len;
1182 			skb_frag->len = payload_len;
1183 		}
1184 		/* Next buffer pointer */
1185 		rb_ptrs++;
1186 	}
1187 	return skb;
1188 }
1189 
1190 /* Enable interrupt */
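/* NIC_VF_ENA_W1S / NIC_VF_ENA_W1C form a write-1-to-set / write-1-to-clear
 * pair: only bits set in the written value take effect, which is why
 * nicvf_disable_intr() below writes just the mask for the given interrupt.
 */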
1191 void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
1192 {
1193 	u64 reg_val;
1194 
1195 	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1196 
1197 	switch (int_type) {
1198 	case NICVF_INTR_CQ:
1199 		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1200 		break;
1201 	case NICVF_INTR_SQ:
1202 		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1203 		break;
1204 	case NICVF_INTR_RBDR:
1205 		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1206 		break;
1207 	case NICVF_INTR_PKT_DROP:
1208 		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1209 		break;
1210 	case NICVF_INTR_TCP_TIMER:
1211 		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1212 		break;
1213 	case NICVF_INTR_MBOX:
1214 		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1215 		break;
1216 	case NICVF_INTR_QS_ERR:
1217 		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1218 		break;
1219 	default:
1220 		netdev_err(nic->netdev,
1221 			   "Failed to enable interrupt: unknown type\n");
1222 		break;
1223 	}
1224 
1225 	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
1226 }
1227 
1228 /* Disable interrupt */
1229 void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
1230 {
1231 	u64 reg_val = 0;
1232 
1233 	switch (int_type) {
1234 	case NICVF_INTR_CQ:
1235 		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1236 		break;
1237 	case NICVF_INTR_SQ:
1238 		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1239 		break;
1240 	case NICVF_INTR_RBDR:
1241 		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1242 		break;
1243 	case NICVF_INTR_PKT_DROP:
1244 		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1245 		break;
1246 	case NICVF_INTR_TCP_TIMER:
1247 		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1248 		break;
1249 	case NICVF_INTR_MBOX:
1250 		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
1251 		break;
1252 	case NICVF_INTR_QS_ERR:
1253 		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1254 		break;
1255 	default:
1256 		netdev_err(nic->netdev,
1257 			   "Failed to disable interrupt: unknown type\n");
1258 		break;
1259 	}
1260 
1261 	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
1262 }
1263 
1264 /* Clear interrupt */
1265 void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
1266 {
1267 	u64 reg_val = 0;
1268 
1269 	switch (int_type) {
1270 	case NICVF_INTR_CQ:
1271 		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1272 		break;
1273 	case NICVF_INTR_SQ:
1274 		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1275 		break;
1276 	case NICVF_INTR_RBDR:
1277 		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1278 		break;
1279 	case NICVF_INTR_PKT_DROP:
1280 		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
1281 		break;
1282 	case NICVF_INTR_TCP_TIMER:
1283 		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
1284 		break;
1285 	case NICVF_INTR_MBOX:
1286 		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
1287 		break;
1288 	case NICVF_INTR_QS_ERR:
1289 		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
1290 		break;
1291 	default:
1292 		netdev_err(nic->netdev,
1293 			   "Failed to clear interrupt: unknown type\n");
1294 		break;
1295 	}
1296 
1297 	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
1298 }
1299 
1300 /* Check if interrupt is enabled */
1301 int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
1302 {
1303 	u64 reg_val;
1304 	u64 mask = 0xff;
1305 
1306 	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
1307 
1308 	switch (int_type) {
1309 	case NICVF_INTR_CQ:
1310 		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
1311 		break;
1312 	case NICVF_INTR_SQ:
1313 		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
1314 		break;
1315 	case NICVF_INTR_RBDR:
1316 		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
1317 		break;
1318 	case NICVF_INTR_PKT_DROP:
1319 		mask = NICVF_INTR_PKT_DROP_MASK;
1320 		break;
1321 	case NICVF_INTR_TCP_TIMER:
1322 		mask = NICVF_INTR_TCP_TIMER_MASK;
1323 		break;
1324 	case NICVF_INTR_MBOX:
1325 		mask = NICVF_INTR_MBOX_MASK;
1326 		break;
1327 	case NICVF_INTR_QS_ERR:
1328 		mask = NICVF_INTR_QS_ERR_MASK;
1329 		break;
1330 	default:
1331 		netdev_err(nic->netdev,
1332 			   "Failed to check interrupt enable: unknown type\n");
1333 		break;
1334 	}
1335 
1336 	return (reg_val & mask);
1337 }
1338 
1339 void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
1340 {
1341 	struct rcv_queue *rq;
1342 
1343 #define GET_RQ_STATS(reg) \
1344 	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
1345 			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1346 
1347 	rq = &nic->qs->rq[rq_idx];
1348 	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
1349 	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
1350 }
1351 
1352 void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1353 {
1354 	struct snd_queue *sq;
1355 
1356 #define GET_SQ_STATS(reg) \
1357 	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
1358 			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
1359 
1360 	sq = &nic->qs->sq[sq_idx];
1361 	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
1362 	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
1363 }
1364 
1365 /* Check for errors in the receive completion queue entry */
1366 int nicvf_check_cqe_rx_errs(struct nicvf *nic,
1367 			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1368 {
1369 	struct cmp_queue_stats *stats = &cq->stats;
1370 
1371 	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
1372 		stats->rx.errop.good++;
1373 		return 0;
1374 	}
1375 
1376 	if (netif_msg_rx_err(nic))
1377 		netdev_err(nic->netdev,
1378 			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
1379 			   nic->netdev->name,
1380 			   cqe_rx->err_level, cqe_rx->err_opcode);
1381 
1382 	switch (cqe_rx->err_level) {
1383 	case CQ_ERRLVL_MAC:
1384 		stats->rx.errlvl.mac_errs++;
1385 		break;
1386 	case CQ_ERRLVL_L2:
1387 		stats->rx.errlvl.l2_errs++;
1388 		break;
1389 	case CQ_ERRLVL_L3:
1390 		stats->rx.errlvl.l3_errs++;
1391 		break;
1392 	case CQ_ERRLVL_L4:
1393 		stats->rx.errlvl.l4_errs++;
1394 		break;
1395 	}
1396 
1397 	switch (cqe_rx->err_opcode) {
1398 	case CQ_RX_ERROP_RE_PARTIAL:
1399 		stats->rx.errop.partial_pkts++;
1400 		break;
1401 	case CQ_RX_ERROP_RE_JABBER:
1402 		stats->rx.errop.jabber_errs++;
1403 		break;
1404 	case CQ_RX_ERROP_RE_FCS:
1405 		stats->rx.errop.fcs_errs++;
1406 		break;
1407 	case CQ_RX_ERROP_RE_TERMINATE:
1408 		stats->rx.errop.terminate_errs++;
1409 		break;
1410 	case CQ_RX_ERROP_RE_RX_CTL:
1411 		stats->rx.errop.bgx_rx_errs++;
1412 		break;
1413 	case CQ_RX_ERROP_PREL2_ERR:
1414 		stats->rx.errop.prel2_errs++;
1415 		break;
1416 	case CQ_RX_ERROP_L2_FRAGMENT:
1417 		stats->rx.errop.l2_frags++;
1418 		break;
1419 	case CQ_RX_ERROP_L2_OVERRUN:
1420 		stats->rx.errop.l2_overruns++;
1421 		break;
1422 	case CQ_RX_ERROP_L2_PFCS:
1423 		stats->rx.errop.l2_pfcs++;
1424 		break;
1425 	case CQ_RX_ERROP_L2_PUNY:
1426 		stats->rx.errop.l2_puny++;
1427 		break;
1428 	case CQ_RX_ERROP_L2_MAL:
1429 		stats->rx.errop.l2_hdr_malformed++;
1430 		break;
1431 	case CQ_RX_ERROP_L2_OVERSIZE:
1432 		stats->rx.errop.l2_oversize++;
1433 		break;
1434 	case CQ_RX_ERROP_L2_UNDERSIZE:
1435 		stats->rx.errop.l2_undersize++;
1436 		break;
1437 	case CQ_RX_ERROP_L2_LENMISM:
1438 		stats->rx.errop.l2_len_mismatch++;
1439 		break;
1440 	case CQ_RX_ERROP_L2_PCLP:
1441 		stats->rx.errop.l2_pclp++;
1442 		break;
1443 	case CQ_RX_ERROP_IP_NOT:
1444 		stats->rx.errop.non_ip++;
1445 		break;
1446 	case CQ_RX_ERROP_IP_CSUM_ERR:
1447 		stats->rx.errop.ip_csum_err++;
1448 		break;
1449 	case CQ_RX_ERROP_IP_MAL:
1450 		stats->rx.errop.ip_hdr_malformed++;
1451 		break;
1452 	case CQ_RX_ERROP_IP_MALD:
1453 		stats->rx.errop.ip_payload_malformed++;
1454 		break;
1455 	case CQ_RX_ERROP_IP_HOP:
1456 		stats->rx.errop.ip_hop_errs++;
1457 		break;
1458 	case CQ_RX_ERROP_L3_ICRC:
1459 		stats->rx.errop.l3_icrc_errs++;
1460 		break;
1461 	case CQ_RX_ERROP_L3_PCLP:
1462 		stats->rx.errop.l3_pclp++;
1463 		break;
1464 	case CQ_RX_ERROP_L4_MAL:
1465 		stats->rx.errop.l4_malformed++;
1466 		break;
1467 	case CQ_RX_ERROP_L4_CHK:
1468 		stats->rx.errop.l4_csum_errs++;
1469 		break;
1470 	case CQ_RX_ERROP_UDP_LEN:
1471 		stats->rx.errop.udp_len_err++;
1472 		break;
1473 	case CQ_RX_ERROP_L4_PORT:
1474 		stats->rx.errop.bad_l4_port++;
1475 		break;
1476 	case CQ_RX_ERROP_TCP_FLAG:
1477 		stats->rx.errop.bad_tcp_flag++;
1478 		break;
1479 	case CQ_RX_ERROP_TCP_OFFSET:
1480 		stats->rx.errop.tcp_offset_errs++;
1481 		break;
1482 	case CQ_RX_ERROP_L4_PCLP:
1483 		stats->rx.errop.l4_pclp++;
1484 		break;
1485 	case CQ_RX_ERROP_RBDR_TRUNC:
1486 		stats->rx.errop.pkt_truncated++;
1487 		break;
1488 	}
1489 
1490 	return 1;
1491 }
1492 
1493 /* Check for errors in the send completion queue entry */
1494 int nicvf_check_cqe_tx_errs(struct nicvf *nic,
1495 			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
1496 {
1497 	struct cmp_queue_stats *stats = &cq->stats;
1498 
1499 	switch (cqe_tx->send_status) {
1500 	case CQ_TX_ERROP_GOOD:
1501 		stats->tx.good++;
1502 		return 0;
1503 	case CQ_TX_ERROP_DESC_FAULT:
1504 		stats->tx.desc_fault++;
1505 		break;
1506 	case CQ_TX_ERROP_HDR_CONS_ERR:
1507 		stats->tx.hdr_cons_err++;
1508 		break;
1509 	case CQ_TX_ERROP_SUBDC_ERR:
1510 		stats->tx.subdesc_err++;
1511 		break;
1512 	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1513 		stats->tx.imm_size_oflow++;
1514 		break;
1515 	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1516 		stats->tx.data_seq_err++;
1517 		break;
1518 	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1519 		stats->tx.mem_seq_err++;
1520 		break;
1521 	case CQ_TX_ERROP_LOCK_VIOL:
1522 		stats->tx.lock_viol++;
1523 		break;
1524 	case CQ_TX_ERROP_DATA_FAULT:
1525 		stats->tx.data_fault++;
1526 		break;
1527 	case CQ_TX_ERROP_TSTMP_CONFLICT:
1528 		stats->tx.tstmp_conflict++;
1529 		break;
1530 	case CQ_TX_ERROP_TSTMP_TIMEOUT:
1531 		stats->tx.tstmp_timeout++;
1532 		break;
1533 	case CQ_TX_ERROP_MEM_FAULT:
1534 		stats->tx.mem_fault++;
1535 		break;
1536 	case CQ_TX_ERROP_CK_OVERLAP:
1537 		stats->tx.csum_overlap++;
1538 		break;
1539 	case CQ_TX_ERROP_CK_OFLOW:
1540 		stats->tx.csum_overflow++;
1541 		break;
1542 	}
1543 
1544 	return 1;
1545 }
1546