xref: /linux/drivers/net/ethernet/intel/idpf/idpf_txrx.c (revision 8faabc041a001140564f718dabe37753e88b37fa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include <net/libeth/rx.h>
5 #include <net/libeth/tx.h>
6 
7 #include "idpf.h"
8 #include "idpf_virtchnl.h"
9 
10 struct idpf_tx_stash {
11 	struct hlist_node hlist;
12 	struct libeth_sqe buf;
13 };
14 
15 #define idpf_tx_buf_compl_tag(buf)	(*(u32 *)&(buf)->priv)
16 LIBETH_SQE_CHECK_PRIV(u32);
17 
18 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
19 			       unsigned int count);
20 
21 /**
22  * idpf_buf_lifo_push - push a buffer pointer onto stack
23  * @stack: pointer to stack struct
24  * @buf: pointer to buf to push
25  *
26  * Returns 0 on success, negative on failure
27  **/
28 static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
29 			      struct idpf_tx_stash *buf)
30 {
31 	if (unlikely(stack->top == stack->size))
32 		return -ENOSPC;
33 
34 	stack->bufs[stack->top++] = buf;
35 
36 	return 0;
37 }
38 
39 /**
40  * idpf_buf_lifo_pop - pop a buffer pointer from stack
41  * @stack: pointer to stack struct
42  **/
43 static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
44 {
45 	if (unlikely(!stack->top))
46 		return NULL;
47 
48 	return stack->bufs[--stack->top];
49 }
50 
51 /**
52  * idpf_tx_timeout - Respond to a Tx Hang
53  * @netdev: network interface device structure
54  * @txqueue: TX queue
55  */
56 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
57 {
58 	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
59 
60 	adapter->tx_timeout_count++;
61 
62 	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
63 		   adapter->tx_timeout_count, txqueue);
64 	if (!idpf_is_reset_in_prog(adapter)) {
65 		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
66 		queue_delayed_work(adapter->vc_event_wq,
67 				   &adapter->vc_event_task,
68 				   msecs_to_jiffies(10));
69 	}
70 }
71 
72 /**
73  * idpf_tx_buf_rel_all - Free any empty Tx buffers
74  * @txq: queue to be cleaned
75  */
76 static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
77 {
78 	struct libeth_sq_napi_stats ss = { };
79 	struct idpf_buf_lifo *buf_stack;
80 	struct idpf_tx_stash *stash;
81 	struct libeth_cq_pp cp = {
82 		.dev	= txq->dev,
83 		.ss	= &ss,
84 	};
85 	struct hlist_node *tmp;
86 	u32 i, tag;
87 
88 	/* Buffers already cleared, nothing to do */
89 	if (!txq->tx_buf)
90 		return;
91 
92 	/* Free all the Tx buffer sk_buffs */
93 	for (i = 0; i < txq->desc_count; i++)
94 		libeth_tx_complete(&txq->tx_buf[i], &cp);
95 
96 	kfree(txq->tx_buf);
97 	txq->tx_buf = NULL;
98 
99 	if (!idpf_queue_has(FLOW_SCH_EN, txq))
100 		return;
101 
102 	buf_stack = &txq->stash->buf_stack;
103 	if (!buf_stack->bufs)
104 		return;
105 
106 	/*
107 	 * If a Tx timeout occurred, there are potentially still bufs in the
108 	 * hash table, free them here.
109 	 */
110 	hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
111 			   hlist) {
112 		if (!stash)
113 			continue;
114 
115 		libeth_tx_complete(&stash->buf, &cp);
116 		hash_del(&stash->hlist);
117 		idpf_buf_lifo_push(buf_stack, stash);
118 	}
119 
120 	for (i = 0; i < buf_stack->size; i++)
121 		kfree(buf_stack->bufs[i]);
122 
123 	kfree(buf_stack->bufs);
124 	buf_stack->bufs = NULL;
125 }
126 
127 /**
128  * idpf_tx_desc_rel - Free Tx resources per queue
129  * @txq: Tx descriptor ring for a specific queue
130  *
131  * Free all transmit software resources
132  */
133 static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
134 {
135 	idpf_tx_buf_rel_all(txq);
136 	netdev_tx_reset_subqueue(txq->netdev, txq->idx);
137 
138 	if (!txq->desc_ring)
139 		return;
140 
141 	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
142 	txq->desc_ring = NULL;
143 	txq->next_to_use = 0;
144 	txq->next_to_clean = 0;
145 }
146 
147 /**
148  * idpf_compl_desc_rel - Free completion resources per queue
149  * @complq: completion queue
150  *
151  * Free all completion software resources.
152  */
153 static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
154 {
155 	if (!complq->comp)
156 		return;
157 
158 	dma_free_coherent(complq->netdev->dev.parent, complq->size,
159 			  complq->comp, complq->dma);
160 	complq->comp = NULL;
161 	complq->next_to_use = 0;
162 	complq->next_to_clean = 0;
163 }
164 
165 /**
166  * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
167  * @vport: virtual port structure
168  *
169  * Free all transmit software resources
170  */
171 static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
172 {
173 	int i, j;
174 
175 	if (!vport->txq_grps)
176 		return;
177 
178 	for (i = 0; i < vport->num_txq_grp; i++) {
179 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
180 
181 		for (j = 0; j < txq_grp->num_txq; j++)
182 			idpf_tx_desc_rel(txq_grp->txqs[j]);
183 
184 		if (idpf_is_queue_model_split(vport->txq_model))
185 			idpf_compl_desc_rel(txq_grp->complq);
186 	}
187 }
188 
189 /**
190  * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
191  * @tx_q: queue for which the buffers are allocated
192  *
193  * Returns 0 on success, negative on failure
194  */
195 static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
196 {
197 	struct idpf_buf_lifo *buf_stack;
198 	int buf_size;
199 	int i;
200 
201 	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
202 	 * are allocated by the kernel network stack and arrive as part of the skb
203 	 */
204 	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
205 	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
206 	if (!tx_q->tx_buf)
207 		return -ENOMEM;
208 
209 	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
210 		return 0;
211 
212 	buf_stack = &tx_q->stash->buf_stack;
213 
214 	/* Initialize tx buf stack for out-of-order completions if
215 	 * flow scheduling offload is enabled
216 	 */
217 	buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
218 				  GFP_KERNEL);
219 	if (!buf_stack->bufs)
220 		return -ENOMEM;
221 
222 	buf_stack->size = tx_q->desc_count;
223 	buf_stack->top = tx_q->desc_count;
224 
225 	for (i = 0; i < tx_q->desc_count; i++) {
226 		buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
227 					     GFP_KERNEL);
228 		if (!buf_stack->bufs[i])
229 			return -ENOMEM;
230 	}
231 
232 	return 0;
233 }
234 
235 /**
236  * idpf_tx_desc_alloc - Allocate the Tx descriptors
237  * @vport: vport to allocate resources for
238  * @tx_q: the tx ring to set up
239  *
240  * Returns 0 on success, negative on failure
241  */
242 static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
243 			      struct idpf_tx_queue *tx_q)
244 {
245 	struct device *dev = tx_q->dev;
246 	int err;
247 
248 	err = idpf_tx_buf_alloc_all(tx_q);
249 	if (err)
250 		goto err_alloc;
251 
252 	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
253 
254 	/* Allocate descriptors and also round up to nearest 4K */
255 	tx_q->size = ALIGN(tx_q->size, 4096);
256 	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
257 					      GFP_KERNEL);
258 	if (!tx_q->desc_ring) {
259 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
260 			tx_q->size);
261 		err = -ENOMEM;
262 		goto err_alloc;
263 	}
264 
265 	tx_q->next_to_use = 0;
266 	tx_q->next_to_clean = 0;
267 	idpf_queue_set(GEN_CHK, tx_q);
268 
269 	return 0;
270 
271 err_alloc:
272 	idpf_tx_desc_rel(tx_q);
273 
274 	return err;
275 }
276 
277 /**
278  * idpf_compl_desc_alloc - allocate completion descriptors
279  * @vport: vport to allocate resources for
280  * @complq: completion queue to set up
281  *
282  * Return: 0 on success, -errno on failure.
283  */
284 static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
285 				 struct idpf_compl_queue *complq)
286 {
287 	complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
288 
289 	complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
290 					  complq->size, &complq->dma,
291 					  GFP_KERNEL);
292 	if (!complq->comp)
293 		return -ENOMEM;
294 
295 	complq->next_to_use = 0;
296 	complq->next_to_clean = 0;
297 	idpf_queue_set(GEN_CHK, complq);
298 
299 	return 0;
300 }
301 
302 /**
303  * idpf_tx_desc_alloc_all - allocate all queues Tx resources
304  * @vport: virtual port private structure
305  *
306  * Returns 0 on success, negative on failure
307  */
308 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
309 {
310 	int err = 0;
311 	int i, j;
312 
313 	/* Set up the Tx queues. In the single queue model, the Tx queue and
314 	 * completion queue are the same
315 	 */
316 	for (i = 0; i < vport->num_txq_grp; i++) {
317 		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
318 			struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
319 			u8 gen_bits = 0;
320 			u16 bufidx_mask;
321 
322 			err = idpf_tx_desc_alloc(vport, txq);
323 			if (err) {
324 				pci_err(vport->adapter->pdev,
325 					"Allocation for Tx Queue %u failed\n",
326 					i);
327 				goto err_out;
328 			}
329 
330 			if (!idpf_is_queue_model_split(vport->txq_model))
331 				continue;
332 
333 			txq->compl_tag_cur_gen = 0;
334 
335 			/* Determine the number of bits in the bufid
336 			 * mask and add one to get the start of the
337 			 * generation bits
338 			 */
339 			bufidx_mask = txq->desc_count - 1;
340 			while (bufidx_mask >> 1) {
341 				txq->compl_tag_gen_s++;
342 				bufidx_mask = bufidx_mask >> 1;
343 			}
344 			txq->compl_tag_gen_s++;
345 
346 			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
347 							txq->compl_tag_gen_s;
348 			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
349 
350 			/* Set bufid mask based on location of first
351 			 * gen bit; it cannot simply be the descriptor
352 			 * ring size-1 since we can have size values
353 			 * where not all of those bits are set.
354 			 */
355 			txq->compl_tag_bufid_m =
356 				GETMAXVAL(txq->compl_tag_gen_s);
357 		}
358 
359 		if (!idpf_is_queue_model_split(vport->txq_model))
360 			continue;
361 
362 		/* Setup completion queues */
363 		err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
364 		if (err) {
365 			pci_err(vport->adapter->pdev,
366 				"Allocation for Tx Completion Queue %u failed\n",
367 				i);
368 			goto err_out;
369 		}
370 	}
371 
372 err_out:
373 	if (err)
374 		idpf_tx_desc_rel_all(vport);
375 
376 	return err;
377 }
378 
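/*
 * Illustrative sketch, not driver code: the loop above derives the completion
 * tag layout from the descriptor count. The low bits of the tag index a Tx
 * buffer and the remaining bits carry a rolling generation count. The 16-bit
 * tag width below is assumed only for the worked example; the driver uses
 * IDPF_TX_SPLITQ_COMPL_TAG_WIDTH and GETMAXVAL().
 */
struct example_tag_layout {
	unsigned int gen_s;	/* bit position where the generation field starts */
	unsigned int gen_max;	/* maximum generation value */
	unsigned int bufid_m;	/* mask extracting the buffer id from a tag */
};

static struct example_tag_layout example_tag_layout(unsigned int desc_count,
						    unsigned int tag_width)
{
	struct example_tag_layout l = { };
	unsigned int mask = desc_count - 1;

	/* Count the bits needed to hold any buffer id. desc_count need not be
	 * a power of two, which is why the mask is built from the bit count
	 * rather than from desc_count - 1 directly.
	 */
	while (mask >> 1) {
		l.gen_s++;
		mask >>= 1;
	}
	l.gen_s++;

	l.bufid_m = (1U << l.gen_s) - 1;
	l.gen_max = (1U << (tag_width - l.gen_s)) - 1;

	/* e.g. desc_count = 768, tag_width = 16:
	 * gen_s = 10, bufid_m = 0x3ff, gen_max = 0x3f
	 */
	return l;
}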
379 /**
380  * idpf_rx_page_rel - Release an rx buffer page
381  * @rx_buf: the buffer to free
382  */
383 static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
384 {
385 	if (unlikely(!rx_buf->page))
386 		return;
387 
388 	page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
389 
390 	rx_buf->page = NULL;
391 	rx_buf->offset = 0;
392 }
393 
394 /**
395  * idpf_rx_hdr_buf_rel_all - Release header buffer memory
396  * @bufq: queue to use
397  */
398 static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
399 {
400 	struct libeth_fq fq = {
401 		.fqes	= bufq->hdr_buf,
402 		.pp	= bufq->hdr_pp,
403 	};
404 
405 	for (u32 i = 0; i < bufq->desc_count; i++)
406 		idpf_rx_page_rel(&bufq->hdr_buf[i]);
407 
408 	libeth_rx_fq_destroy(&fq);
409 	bufq->hdr_buf = NULL;
410 	bufq->hdr_pp = NULL;
411 }
412 
413 /**
414  * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
415  * @bufq: queue to be cleaned
416  */
417 static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
418 {
419 	struct libeth_fq fq = {
420 		.fqes	= bufq->buf,
421 		.pp	= bufq->pp,
422 	};
423 
424 	/* queue already cleared, nothing to do */
425 	if (!bufq->buf)
426 		return;
427 
428 	/* Free all the bufs allocated and given to hw on Rx queue */
429 	for (u32 i = 0; i < bufq->desc_count; i++)
430 		idpf_rx_page_rel(&bufq->buf[i]);
431 
432 	if (idpf_queue_has(HSPLIT_EN, bufq))
433 		idpf_rx_hdr_buf_rel_all(bufq);
434 
435 	libeth_rx_fq_destroy(&fq);
436 	bufq->buf = NULL;
437 	bufq->pp = NULL;
438 }
439 
440 /**
441  * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
442  * @rxq: queue to be cleaned
443  */
444 static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
445 {
446 	struct libeth_fq fq = {
447 		.fqes	= rxq->rx_buf,
448 		.pp	= rxq->pp,
449 	};
450 
451 	if (!rxq->rx_buf)
452 		return;
453 
454 	for (u32 i = 0; i < rxq->desc_count; i++)
455 		idpf_rx_page_rel(&rxq->rx_buf[i]);
456 
457 	libeth_rx_fq_destroy(&fq);
458 	rxq->rx_buf = NULL;
459 	rxq->pp = NULL;
460 }
461 
462 /**
463  * idpf_rx_desc_rel - Free a specific Rx q resources
464  * @rxq: queue to clean the resources from
465  * @dev: device to free DMA memory
466  * @model: single or split queue model
467  *
468  * Free a specific rx queue resources
469  */
470 static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
471 			     u32 model)
472 {
473 	if (!rxq)
474 		return;
475 
476 	if (rxq->skb) {
477 		dev_kfree_skb_any(rxq->skb);
478 		rxq->skb = NULL;
479 	}
480 
481 	if (!idpf_is_queue_model_split(model))
482 		idpf_rx_buf_rel_all(rxq);
483 
484 	rxq->next_to_alloc = 0;
485 	rxq->next_to_clean = 0;
486 	rxq->next_to_use = 0;
487 	if (!rxq->desc_ring)
488 		return;
489 
490 	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
491 	rxq->desc_ring = NULL;
492 }
493 
494 /**
495  * idpf_rx_desc_rel_bufq - free buffer queue resources
496  * @bufq: buffer queue to clean the resources from
497  * @dev: device to free DMA memory
498  */
499 static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
500 				  struct device *dev)
501 {
502 	if (!bufq)
503 		return;
504 
505 	idpf_rx_buf_rel_bufq(bufq);
506 
507 	bufq->next_to_alloc = 0;
508 	bufq->next_to_clean = 0;
509 	bufq->next_to_use = 0;
510 
511 	if (!bufq->split_buf)
512 		return;
513 
514 	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
515 	bufq->split_buf = NULL;
516 }
517 
518 /**
519  * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
520  * @vport: virtual port structure
521  *
522  * Free all rx queues resources
523  */
524 static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
525 {
526 	struct device *dev = &vport->adapter->pdev->dev;
527 	struct idpf_rxq_group *rx_qgrp;
528 	u16 num_rxq;
529 	int i, j;
530 
531 	if (!vport->rxq_grps)
532 		return;
533 
534 	for (i = 0; i < vport->num_rxq_grp; i++) {
535 		rx_qgrp = &vport->rxq_grps[i];
536 
537 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
538 			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
539 				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
540 						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
541 			continue;
542 		}
543 
544 		num_rxq = rx_qgrp->splitq.num_rxq_sets;
545 		for (j = 0; j < num_rxq; j++)
546 			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
547 					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
548 
549 		if (!rx_qgrp->splitq.bufq_sets)
550 			continue;
551 
552 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
553 			struct idpf_bufq_set *bufq_set =
554 				&rx_qgrp->splitq.bufq_sets[j];
555 
556 			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
557 		}
558 	}
559 }
560 
561 /**
562  * idpf_rx_buf_hw_update - Store the new tail value and bump the HW tail
563  * @bufq: queue to bump
564  * @val: new tail index
565  */
566 static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
567 {
568 	bufq->next_to_use = val;
569 
570 	if (unlikely(!bufq->tail))
571 		return;
572 
573 	/* writel has an implicit memory barrier */
574 	writel(val, bufq->tail);
575 }
576 
577 /**
578  * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
579  * @bufq: ring to use
580  *
581  * Returns 0 on success, negative on failure.
582  */
583 static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
584 {
585 	struct libeth_fq fq = {
586 		.count	= bufq->desc_count,
587 		.type	= LIBETH_FQE_HDR,
588 		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
589 	};
590 	int ret;
591 
592 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
593 	if (ret)
594 		return ret;
595 
596 	bufq->hdr_pp = fq.pp;
597 	bufq->hdr_buf = fq.fqes;
598 	bufq->hdr_truesize = fq.truesize;
599 	bufq->rx_hbuf_size = fq.buf_len;
600 
601 	return 0;
602 }
603 
604 /**
605  * idpf_rx_post_buf_refill - Post buffer id to refill queue
606  * @refillq: refill queue to post to
607  * @buf_id: buffer id to post
608  */
609 static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
610 {
611 	u32 nta = refillq->next_to_use;
612 
613 	/* store the buffer ID and the SW maintained GEN bit to the refillq */
614 	refillq->ring[nta] =
615 		FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
616 		FIELD_PREP(IDPF_RX_BI_GEN_M,
617 			   idpf_queue_has(GEN_CHK, refillq));
618 
619 	if (unlikely(++nta == refillq->desc_count)) {
620 		nta = 0;
621 		idpf_queue_change(GEN_CHK, refillq);
622 	}
623 
624 	refillq->next_to_use = nta;
625 }
626 
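/*
 * Minimal sketch, not driver code: each refill queue entry packs a buffer id
 * together with a software-maintained GEN bit. The producer flips the bit on
 * every ring wrap, so the consumer can tell freshly posted entries from stale
 * ones without sharing a head/tail index. The field layout below is
 * hypothetical; the driver uses IDPF_RX_BI_BUFID_M and IDPF_RX_BI_GEN_M with
 * FIELD_PREP().
 */
#define EXAMPLE_BI_BUFID_M	0x7fffu	/* hypothetical buffer id field */
#define EXAMPLE_BI_GEN_M	0x8000u	/* hypothetical generation bit */

static unsigned int example_refill_entry(unsigned int buf_id, bool gen)
{
	return (buf_id & EXAMPLE_BI_BUFID_M) | (gen ? EXAMPLE_BI_GEN_M : 0);
}

static bool example_refill_entry_fresh(unsigned int entry, bool expected_gen)
{
	/* an entry is consumable only while its GEN bit matches the reader's
	 * current generation; a mismatch means the producer has not written
	 * this slot since the last wrap
	 */
	return !!(entry & EXAMPLE_BI_GEN_M) == expected_gen;
}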
627 /**
628  * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
629  * @bufq: buffer queue to post to
630  * @buf_id: buffer id to post
631  *
632  * Returns false if buffer could not be allocated, true otherwise.
633  */
634 static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
635 {
636 	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
637 	struct libeth_fq_fp fq = {
638 		.count	= bufq->desc_count,
639 	};
640 	u16 nta = bufq->next_to_alloc;
641 	dma_addr_t addr;
642 
643 	splitq_rx_desc = &bufq->split_buf[nta];
644 
645 	if (idpf_queue_has(HSPLIT_EN, bufq)) {
646 		fq.pp = bufq->hdr_pp;
647 		fq.fqes = bufq->hdr_buf;
648 		fq.truesize = bufq->hdr_truesize;
649 
650 		addr = libeth_rx_alloc(&fq, buf_id);
651 		if (addr == DMA_MAPPING_ERROR)
652 			return false;
653 
654 		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
655 	}
656 
657 	fq.pp = bufq->pp;
658 	fq.fqes = bufq->buf;
659 	fq.truesize = bufq->truesize;
660 
661 	addr = libeth_rx_alloc(&fq, buf_id);
662 	if (addr == DMA_MAPPING_ERROR)
663 		return false;
664 
665 	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
666 	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
667 
668 	nta++;
669 	if (unlikely(nta == bufq->desc_count))
670 		nta = 0;
671 	bufq->next_to_alloc = nta;
672 
673 	return true;
674 }
675 
676 /**
677  * idpf_rx_post_init_bufs - Post initial buffers to bufq
678  * @bufq: buffer queue to post working set to
679  * @working_set: number of buffers to put in working set
680  *
681  * Returns true if @working_set bufs were posted successfully, false otherwise.
682  */
683 static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
684 				   u16 working_set)
685 {
686 	int i;
687 
688 	for (i = 0; i < working_set; i++) {
689 		if (!idpf_rx_post_buf_desc(bufq, i))
690 			return false;
691 	}
692 
693 	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
694 					       IDPF_RX_BUF_STRIDE));
695 
696 	return true;
697 }
698 
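/*
 * Minimal sketch, not driver code: the tail value handed to hardware above is
 * rounded down to a buffer-stride boundary, so a partially filled stride is
 * held back until it completes. The stride of 32 in the comment is only an
 * example value; the driver uses IDPF_RX_BUF_STRIDE with ALIGN_DOWN().
 */
static unsigned int example_rx_tail(unsigned int next_to_alloc,
				    unsigned int stride)
{
	/* e.g. next_to_alloc = 515, stride = 32 -> tail = 512 */
	return next_to_alloc - (next_to_alloc % stride);
}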
699 /**
700  * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
701  * @rxq: queue for which the buffers are allocated
702  *
703  * Return: 0 on success, -ENOMEM on failure.
704  */
705 static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
706 {
707 	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
708 		goto err;
709 
710 	return 0;
711 
712 err:
713 	idpf_rx_buf_rel_all(rxq);
714 
715 	return -ENOMEM;
716 }
717 
718 /**
719  * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
720  * @rxq: buffer queue to create page pool for
721  *
722  * Return: 0 on success, -errno on failure.
723  */
724 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
725 {
726 	struct libeth_fq fq = {
727 		.count	= rxq->desc_count,
728 		.type	= LIBETH_FQE_MTU,
729 		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
730 	};
731 	int ret;
732 
733 	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
734 	if (ret)
735 		return ret;
736 
737 	rxq->pp = fq.pp;
738 	rxq->rx_buf = fq.fqes;
739 	rxq->truesize = fq.truesize;
740 	rxq->rx_buf_size = fq.buf_len;
741 
742 	return idpf_rx_buf_alloc_singleq(rxq);
743 }
744 
745 /**
746  * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
747  * @rxbufq: queue for which the buffers are allocated
748  *
749  * Returns 0 on success, negative on failure
750  */
751 static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
752 {
753 	int err = 0;
754 
755 	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
756 		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
757 		if (err)
758 			goto rx_buf_alloc_all_out;
759 	}
760 
761 	/* Allocate buffers to be given to HW. */
762 	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
763 		err = -ENOMEM;
764 
765 rx_buf_alloc_all_out:
766 	if (err)
767 		idpf_rx_buf_rel_bufq(rxbufq);
768 
769 	return err;
770 }
771 
772 /**
773  * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
774  * @bufq: buffer queue to create page pool for
775  * @type: type of Rx buffers to allocate
776  *
777  * Returns 0 on success, negative on failure
778  */
779 static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
780 			     enum libeth_fqe_type type)
781 {
782 	struct libeth_fq fq = {
783 		.truesize	= bufq->truesize,
784 		.count		= bufq->desc_count,
785 		.type		= type,
786 		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
787 		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
788 	};
789 	int ret;
790 
791 	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
792 	if (ret)
793 		return ret;
794 
795 	bufq->pp = fq.pp;
796 	bufq->buf = fq.fqes;
797 	bufq->truesize = fq.truesize;
798 	bufq->rx_buf_size = fq.buf_len;
799 
800 	return idpf_rx_buf_alloc_all(bufq);
801 }
802 
803 /**
804  * idpf_rx_bufs_init_all - Initialize all RX bufs
805  * @vport: virtual port struct
806  *
807  * Returns 0 on success, negative on failure
808  */
809 int idpf_rx_bufs_init_all(struct idpf_vport *vport)
810 {
811 	bool split = idpf_is_queue_model_split(vport->rxq_model);
812 	int i, j, err;
813 
814 	for (i = 0; i < vport->num_rxq_grp; i++) {
815 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
816 		u32 truesize = 0;
817 
818 		/* Allocate bufs for the rxq itself in singleq */
819 		if (!split) {
820 			int num_rxq = rx_qgrp->singleq.num_rxq;
821 
822 			for (j = 0; j < num_rxq; j++) {
823 				struct idpf_rx_queue *q;
824 
825 				q = rx_qgrp->singleq.rxqs[j];
826 				err = idpf_rx_bufs_init_singleq(q);
827 				if (err)
828 					return err;
829 			}
830 
831 			continue;
832 		}
833 
834 		/* Otherwise, allocate bufs for the buffer queues */
835 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
836 			enum libeth_fqe_type type;
837 			struct idpf_buf_queue *q;
838 
839 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
840 			q->truesize = truesize;
841 
842 			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
843 
844 			err = idpf_rx_bufs_init(q, type);
845 			if (err)
846 				return err;
847 
848 			truesize = q->truesize >> 1;
849 		}
850 	}
851 
852 	return 0;
853 }
854 
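/*
 * Simplified sketch, not driver code, of the buffer queue sizing loop above:
 * the first buffer queue in a group gets full MTU-sized buffers, and each
 * subsequent queue gets "short" buffers sized from half of the previous
 * queue's truesize. In the driver the final truesize is recomputed by
 * libeth_rx_fq_create(); this sketch only shows the halving pattern.
 */
static void example_size_bufqs(unsigned int *truesizes, unsigned int num_bufqs,
			       unsigned int mtu_truesize)
{
	unsigned int truesize = 0;
	unsigned int i;

	for (i = 0; i < num_bufqs; i++) {
		/* truesize == 0 means "size this queue for a full MTU frame" */
		truesizes[i] = truesize ? truesize : mtu_truesize;
		truesize = truesizes[i] >> 1;	/* next queue gets half */
	}
}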
855 /**
856  * idpf_rx_desc_alloc - Allocate queue Rx resources
857  * @vport: vport to allocate resources for
858  * @rxq: Rx queue for which the resources are setup
859  *
860  * Returns 0 on success, negative on failure
861  */
862 static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
863 			      struct idpf_rx_queue *rxq)
864 {
865 	struct device *dev = &vport->adapter->pdev->dev;
866 
867 	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
868 
869 	/* Allocate descriptors and also round up to nearest 4K */
870 	rxq->size = ALIGN(rxq->size, 4096);
871 	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
872 					     &rxq->dma, GFP_KERNEL);
873 	if (!rxq->desc_ring) {
874 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
875 			rxq->size);
876 		return -ENOMEM;
877 	}
878 
879 	rxq->next_to_alloc = 0;
880 	rxq->next_to_clean = 0;
881 	rxq->next_to_use = 0;
882 	idpf_queue_set(GEN_CHK, rxq);
883 
884 	return 0;
885 }
886 
887 /**
888  * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
889  * @vport: vport to allocate resources for
890  * @bufq: buffer queue for which the resources are set up
891  *
892  * Return: 0 on success, -ENOMEM on failure.
893  */
894 static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
895 				struct idpf_buf_queue *bufq)
896 {
897 	struct device *dev = &vport->adapter->pdev->dev;
898 
899 	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
900 
901 	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
902 					     GFP_KERNEL);
903 	if (!bufq->split_buf)
904 		return -ENOMEM;
905 
906 	bufq->next_to_alloc = 0;
907 	bufq->next_to_clean = 0;
908 	bufq->next_to_use = 0;
909 
910 	idpf_queue_set(GEN_CHK, bufq);
911 
912 	return 0;
913 }
914 
915 /**
916  * idpf_rx_desc_alloc_all - allocate all RX queues resources
917  * @vport: virtual port structure
918  *
919  * Returns 0 on success, negative on failure
920  */
921 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
922 {
923 	struct idpf_rxq_group *rx_qgrp;
924 	int i, j, err;
925 	u16 num_rxq;
926 
927 	for (i = 0; i < vport->num_rxq_grp; i++) {
928 		rx_qgrp = &vport->rxq_grps[i];
929 		if (idpf_is_queue_model_split(vport->rxq_model))
930 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
931 		else
932 			num_rxq = rx_qgrp->singleq.num_rxq;
933 
934 		for (j = 0; j < num_rxq; j++) {
935 			struct idpf_rx_queue *q;
936 
937 			if (idpf_is_queue_model_split(vport->rxq_model))
938 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
939 			else
940 				q = rx_qgrp->singleq.rxqs[j];
941 
942 			err = idpf_rx_desc_alloc(vport, q);
943 			if (err) {
944 				pci_err(vport->adapter->pdev,
945 					"Memory allocation for Rx Queue %u failed\n",
946 					i);
947 				goto err_out;
948 			}
949 		}
950 
951 		if (!idpf_is_queue_model_split(vport->rxq_model))
952 			continue;
953 
954 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
955 			struct idpf_buf_queue *q;
956 
957 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
958 
959 			err = idpf_bufq_desc_alloc(vport, q);
960 			if (err) {
961 				pci_err(vport->adapter->pdev,
962 					"Memory allocation for Rx Buffer Queue %u failed\n",
963 					i);
964 				goto err_out;
965 			}
966 		}
967 	}
968 
969 	return 0;
970 
971 err_out:
972 	idpf_rx_desc_rel_all(vport);
973 
974 	return err;
975 }
976 
977 /**
978  * idpf_txq_group_rel - Release all resources for txq groups
979  * @vport: vport to release txq groups on
980  */
981 static void idpf_txq_group_rel(struct idpf_vport *vport)
982 {
983 	bool split, flow_sch_en;
984 	int i, j;
985 
986 	if (!vport->txq_grps)
987 		return;
988 
989 	split = idpf_is_queue_model_split(vport->txq_model);
990 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
991 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
992 
993 	for (i = 0; i < vport->num_txq_grp; i++) {
994 		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
995 
996 		for (j = 0; j < txq_grp->num_txq; j++) {
997 			kfree(txq_grp->txqs[j]);
998 			txq_grp->txqs[j] = NULL;
999 		}
1000 
1001 		if (!split)
1002 			continue;
1003 
1004 		kfree(txq_grp->complq);
1005 		txq_grp->complq = NULL;
1006 
1007 		if (flow_sch_en)
1008 			kfree(txq_grp->stashes);
1009 	}
1010 	kfree(vport->txq_grps);
1011 	vport->txq_grps = NULL;
1012 }
1013 
1014 /**
1015  * idpf_rxq_sw_queue_rel - Release software queue resources
1016  * @rx_qgrp: rx queue group with software queues
1017  */
1018 static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
1019 {
1020 	int i, j;
1021 
1022 	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
1023 		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
1024 
1025 		for (j = 0; j < bufq_set->num_refillqs; j++) {
1026 			kfree(bufq_set->refillqs[j].ring);
1027 			bufq_set->refillqs[j].ring = NULL;
1028 		}
1029 		kfree(bufq_set->refillqs);
1030 		bufq_set->refillqs = NULL;
1031 	}
1032 }
1033 
1034 /**
1035  * idpf_rxq_group_rel - Release all resources for rxq groups
1036  * @vport: vport to release rxq groups on
1037  */
1038 static void idpf_rxq_group_rel(struct idpf_vport *vport)
1039 {
1040 	int i;
1041 
1042 	if (!vport->rxq_grps)
1043 		return;
1044 
1045 	for (i = 0; i < vport->num_rxq_grp; i++) {
1046 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1047 		u16 num_rxq;
1048 		int j;
1049 
1050 		if (idpf_is_queue_model_split(vport->rxq_model)) {
1051 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1052 			for (j = 0; j < num_rxq; j++) {
1053 				kfree(rx_qgrp->splitq.rxq_sets[j]);
1054 				rx_qgrp->splitq.rxq_sets[j] = NULL;
1055 			}
1056 
1057 			idpf_rxq_sw_queue_rel(rx_qgrp);
1058 			kfree(rx_qgrp->splitq.bufq_sets);
1059 			rx_qgrp->splitq.bufq_sets = NULL;
1060 		} else {
1061 			num_rxq = rx_qgrp->singleq.num_rxq;
1062 			for (j = 0; j < num_rxq; j++) {
1063 				kfree(rx_qgrp->singleq.rxqs[j]);
1064 				rx_qgrp->singleq.rxqs[j] = NULL;
1065 			}
1066 		}
1067 	}
1068 	kfree(vport->rxq_grps);
1069 	vport->rxq_grps = NULL;
1070 }
1071 
1072 /**
1073  * idpf_vport_queue_grp_rel_all - Release all queue groups
1074  * @vport: vport to release queue groups for
1075  */
1076 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
1077 {
1078 	idpf_txq_group_rel(vport);
1079 	idpf_rxq_group_rel(vport);
1080 }
1081 
1082 /**
1083  * idpf_vport_queues_rel - Free memory for all queues
1084  * @vport: virtual port
1085  *
1086  * Free the memory allocated for queues associated to a vport
1087  */
1088 void idpf_vport_queues_rel(struct idpf_vport *vport)
1089 {
1090 	idpf_tx_desc_rel_all(vport);
1091 	idpf_rx_desc_rel_all(vport);
1092 	idpf_vport_queue_grp_rel_all(vport);
1093 
1094 	kfree(vport->txqs);
1095 	vport->txqs = NULL;
1096 }
1097 
1098 /**
1099  * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1100  * @vport: vport to init txqs on
1101  *
1102  * We get a queue index from skb->queue_mapping and we need a fast way to
1103  * dereference the queue from queue groups.  This allows us to quickly pull a
1104  * txq based on a queue index.
1105  *
1106  * Returns 0 on success, negative on failure
1107  */
1108 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
1109 {
1110 	int i, j, k = 0;
1111 
1112 	vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
1113 			      GFP_KERNEL);
1114 
1115 	if (!vport->txqs)
1116 		return -ENOMEM;
1117 
1118 	for (i = 0; i < vport->num_txq_grp; i++) {
1119 		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
1120 
1121 		for (j = 0; j < tx_grp->num_txq; j++, k++) {
1122 			vport->txqs[k] = tx_grp->txqs[j];
1123 			vport->txqs[k]->idx = k;
1124 		}
1125 	}
1126 
1127 	return 0;
1128 }
1129 
1130 /**
1131  * idpf_vport_init_num_qs - Initialize number of queues
1132  * @vport: vport to initialize queues
1133  * @vport_msg: data to be filled into vport
1134  */
1135 void idpf_vport_init_num_qs(struct idpf_vport *vport,
1136 			    struct virtchnl2_create_vport *vport_msg)
1137 {
1138 	struct idpf_vport_user_config_data *config_data;
1139 	u16 idx = vport->idx;
1140 
1141 	config_data = &vport->adapter->vport_config[idx]->user_config;
1142 	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1143 	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
1144 	/* The number of txqs and rxqs in config data will be zero only in the
1145 	 * driver load path, and we don't update them thereafter.
1146 	 */
1147 	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1148 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1149 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1150 	}
1151 
1152 	if (idpf_is_queue_model_split(vport->txq_model))
1153 		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1154 	if (idpf_is_queue_model_split(vport->rxq_model))
1155 		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1156 
1157 	/* Adjust number of buffer queues per Rx queue group. */
1158 	if (!idpf_is_queue_model_split(vport->rxq_model)) {
1159 		vport->num_bufqs_per_qgrp = 0;
1160 
1161 		return;
1162 	}
1163 
1164 	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1165 }
1166 
1167 /**
1168  * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
1169  * @vport: vport to calculate queue descriptor counts for
1170  */
1171 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
1172 {
1173 	struct idpf_vport_user_config_data *config_data;
1174 	int num_bufqs = vport->num_bufqs_per_qgrp;
1175 	u32 num_req_txq_desc, num_req_rxq_desc;
1176 	u16 idx = vport->idx;
1177 	int i;
1178 
1179 	config_data =  &vport->adapter->vport_config[idx]->user_config;
1180 	num_req_txq_desc = config_data->num_req_txq_desc;
1181 	num_req_rxq_desc = config_data->num_req_rxq_desc;
1182 
1183 	vport->complq_desc_count = 0;
1184 	if (num_req_txq_desc) {
1185 		vport->txq_desc_count = num_req_txq_desc;
1186 		if (idpf_is_queue_model_split(vport->txq_model)) {
1187 			vport->complq_desc_count = num_req_txq_desc;
1188 			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1189 				vport->complq_desc_count =
1190 					IDPF_MIN_TXQ_COMPLQ_DESC;
1191 		}
1192 	} else {
1193 		vport->txq_desc_count =	IDPF_DFLT_TX_Q_DESC_COUNT;
1194 		if (idpf_is_queue_model_split(vport->txq_model))
1195 			vport->complq_desc_count =
1196 				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1197 	}
1198 
1199 	if (num_req_rxq_desc)
1200 		vport->rxq_desc_count = num_req_rxq_desc;
1201 	else
1202 		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1203 
1204 	for (i = 0; i < num_bufqs; i++) {
1205 		if (!vport->bufq_desc_count[i])
1206 			vport->bufq_desc_count[i] =
1207 				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1208 							num_bufqs);
1209 	}
1210 }
1211 
1212 /**
1213  * idpf_vport_calc_total_qs - Calculate total number of queues
1214  * @adapter: private data struct
1215  * @vport_idx: vport idx to retrieve vport pointer
1216  * @vport_msg: message to fill with data
1217  * @max_q: vport max queue info
1218  *
1219  * Return 0 on success, error value on failure.
1220  */
1221 int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1222 			     struct virtchnl2_create_vport *vport_msg,
1223 			     struct idpf_vport_max_q *max_q)
1224 {
1225 	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1226 	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1227 	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1228 	struct idpf_vport_config *vport_config;
1229 	u16 num_txq_grps, num_rxq_grps;
1230 	u32 num_qs;
1231 
1232 	vport_config = adapter->vport_config[vport_idx];
1233 	if (vport_config) {
1234 		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1235 		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1236 	} else {
1237 		int num_cpus;
1238 
1239 		/* Restrict the number of queues to the number of online CPUs
1240 		 * as a default configuration for best performance. The user
1241 		 * can always override this up to the max number of queues via ethtool.
1242 		 */
1243 		num_cpus = num_online_cpus();
1244 
1245 		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1246 		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1247 		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1248 		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1249 	}
1250 
1251 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1252 		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1253 		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1254 						       IDPF_COMPLQ_PER_GROUP);
1255 		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1256 						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1257 	} else {
1258 		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1259 		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1260 					 dflt_singleq_txqs);
1261 		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1262 		vport_msg->num_tx_complq = 0;
1263 	}
1264 	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1265 		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1266 		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1267 						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1268 		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1269 						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1270 	} else {
1271 		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1272 		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1273 					 dflt_singleq_rxqs);
1274 		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1275 		vport_msg->num_rx_bufq = 0;
1276 	}
1277 
1278 	return 0;
1279 }
1280 
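/*
 * Illustrative sketch, not driver code: with no user-requested queue counts,
 * the split-queue defaults above reduce to "one Tx queue group per online
 * CPU, capped by the device maximum, with a fixed number of Tx queues and
 * completion queues per group". The per-group counts are passed in here as
 * parameters; the driver uses IDPF_DFLT_SPLITQ_TXQ_PER_GROUP and
 * IDPF_COMPLQ_PER_GROUP.
 */
static void example_splitq_tx_defaults(unsigned int max_txq,
				       unsigned int online_cpus,
				       unsigned int txq_per_grp,
				       unsigned int complq_per_grp,
				       unsigned int *num_txq,
				       unsigned int *num_complq)
{
	unsigned int grps = online_cpus < max_txq ? online_cpus : max_txq;

	*num_txq = grps * txq_per_grp;
	*num_complq = grps * complq_per_grp;
}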
1281 /**
1282  * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1283  * @vport: vport to calculate q groups for
1284  */
1285 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1286 {
1287 	if (idpf_is_queue_model_split(vport->txq_model))
1288 		vport->num_txq_grp = vport->num_txq;
1289 	else
1290 		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1291 
1292 	if (idpf_is_queue_model_split(vport->rxq_model))
1293 		vport->num_rxq_grp = vport->num_rxq;
1294 	else
1295 		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1296 }
1297 
1298 /**
1299  * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1300  * @vport: vport to calculate queues for
1301  * @num_txq: return parameter for number of TX queues
1302  * @num_rxq: return parameter for number of RX queues
1303  */
1304 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1305 					 u16 *num_txq, u16 *num_rxq)
1306 {
1307 	if (idpf_is_queue_model_split(vport->txq_model))
1308 		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1309 	else
1310 		*num_txq = vport->num_txq;
1311 
1312 	if (idpf_is_queue_model_split(vport->rxq_model))
1313 		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1314 	else
1315 		*num_rxq = vport->num_rxq;
1316 }
1317 
1318 /**
1319  * idpf_rxq_set_descids - set the descids supported by this queue
1320  * @vport: virtual port data structure
1321  * @q: rx queue for which descids are set
1322  *
1323  */
1324 static void idpf_rxq_set_descids(const struct idpf_vport *vport,
1325 				 struct idpf_rx_queue *q)
1326 {
1327 	if (idpf_is_queue_model_split(vport->rxq_model)) {
1328 		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1329 	} else {
1330 		if (vport->base_rxd)
1331 			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1332 		else
1333 			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1334 	}
1335 }
1336 
1337 /**
1338  * idpf_txq_group_alloc - Allocate all txq group resources
1339  * @vport: vport to allocate txq groups for
1340  * @num_txq: number of txqs to allocate for each group
1341  *
1342  * Returns 0 on success, negative on failure
1343  */
1344 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1345 {
1346 	bool split, flow_sch_en;
1347 	int i;
1348 
1349 	vport->txq_grps = kcalloc(vport->num_txq_grp,
1350 				  sizeof(*vport->txq_grps), GFP_KERNEL);
1351 	if (!vport->txq_grps)
1352 		return -ENOMEM;
1353 
1354 	split = idpf_is_queue_model_split(vport->txq_model);
1355 	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1356 				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1357 
1358 	for (i = 0; i < vport->num_txq_grp; i++) {
1359 		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1360 		struct idpf_adapter *adapter = vport->adapter;
1361 		struct idpf_txq_stash *stashes;
1362 		int j;
1363 
1364 		tx_qgrp->vport = vport;
1365 		tx_qgrp->num_txq = num_txq;
1366 
1367 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1368 			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1369 						   GFP_KERNEL);
1370 			if (!tx_qgrp->txqs[j])
1371 				goto err_alloc;
1372 		}
1373 
1374 		if (split && flow_sch_en) {
1375 			stashes = kcalloc(num_txq, sizeof(*stashes),
1376 					  GFP_KERNEL);
1377 			if (!stashes)
1378 				goto err_alloc;
1379 
1380 			tx_qgrp->stashes = stashes;
1381 		}
1382 
1383 		for (j = 0; j < tx_qgrp->num_txq; j++) {
1384 			struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1385 
1386 			q->dev = &adapter->pdev->dev;
1387 			q->desc_count = vport->txq_desc_count;
1388 			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1389 			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1390 			q->netdev = vport->netdev;
1391 			q->txq_grp = tx_qgrp;
1392 
1393 			if (!split) {
1394 				q->clean_budget = vport->compln_clean_budget;
1395 				idpf_queue_assign(CRC_EN, q,
1396 						  vport->crc_enable);
1397 			}
1398 
1399 			if (!flow_sch_en)
1400 				continue;
1401 
1402 			if (split) {
1403 				q->stash = &stashes[j];
1404 				hash_init(q->stash->sched_buf_hash);
1405 			}
1406 
1407 			idpf_queue_set(FLOW_SCH_EN, q);
1408 		}
1409 
1410 		if (!split)
1411 			continue;
1412 
1413 		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1414 					  sizeof(*tx_qgrp->complq),
1415 					  GFP_KERNEL);
1416 		if (!tx_qgrp->complq)
1417 			goto err_alloc;
1418 
1419 		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1420 		tx_qgrp->complq->txq_grp = tx_qgrp;
1421 		tx_qgrp->complq->netdev = vport->netdev;
1422 		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1423 
1424 		if (flow_sch_en)
1425 			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1426 	}
1427 
1428 	return 0;
1429 
1430 err_alloc:
1431 	idpf_txq_group_rel(vport);
1432 
1433 	return -ENOMEM;
1434 }
1435 
1436 /**
1437  * idpf_rxq_group_alloc - Allocate all rxq group resources
1438  * @vport: vport to allocate rxq groups for
1439  * @num_rxq: number of rxqs to allocate for each group
1440  *
1441  * Returns 0 on success, negative on failure
1442  */
1443 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1444 {
1445 	int i, k, err = 0;
1446 	bool hs;
1447 
1448 	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1449 				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1450 	if (!vport->rxq_grps)
1451 		return -ENOMEM;
1452 
1453 	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1454 
1455 	for (i = 0; i < vport->num_rxq_grp; i++) {
1456 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1457 		int j;
1458 
1459 		rx_qgrp->vport = vport;
1460 		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1461 			rx_qgrp->singleq.num_rxq = num_rxq;
1462 			for (j = 0; j < num_rxq; j++) {
1463 				rx_qgrp->singleq.rxqs[j] =
1464 						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1465 							GFP_KERNEL);
1466 				if (!rx_qgrp->singleq.rxqs[j]) {
1467 					err = -ENOMEM;
1468 					goto err_alloc;
1469 				}
1470 			}
1471 			goto skip_splitq_rx_init;
1472 		}
1473 		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1474 
1475 		for (j = 0; j < num_rxq; j++) {
1476 			rx_qgrp->splitq.rxq_sets[j] =
1477 				kzalloc(sizeof(struct idpf_rxq_set),
1478 					GFP_KERNEL);
1479 			if (!rx_qgrp->splitq.rxq_sets[j]) {
1480 				err = -ENOMEM;
1481 				goto err_alloc;
1482 			}
1483 		}
1484 
1485 		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1486 						    sizeof(struct idpf_bufq_set),
1487 						    GFP_KERNEL);
1488 		if (!rx_qgrp->splitq.bufq_sets) {
1489 			err = -ENOMEM;
1490 			goto err_alloc;
1491 		}
1492 
1493 		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1494 			struct idpf_bufq_set *bufq_set =
1495 				&rx_qgrp->splitq.bufq_sets[j];
1496 			int swq_size = sizeof(struct idpf_sw_queue);
1497 			struct idpf_buf_queue *q;
1498 
1499 			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1500 			q->desc_count = vport->bufq_desc_count[j];
1501 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1502 
1503 			idpf_queue_assign(HSPLIT_EN, q, hs);
1504 
1505 			bufq_set->num_refillqs = num_rxq;
1506 			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1507 						     GFP_KERNEL);
1508 			if (!bufq_set->refillqs) {
1509 				err = -ENOMEM;
1510 				goto err_alloc;
1511 			}
1512 			for (k = 0; k < bufq_set->num_refillqs; k++) {
1513 				struct idpf_sw_queue *refillq =
1514 					&bufq_set->refillqs[k];
1515 
1516 				refillq->desc_count =
1517 					vport->bufq_desc_count[j];
1518 				idpf_queue_set(GEN_CHK, refillq);
1519 				idpf_queue_set(RFL_GEN_CHK, refillq);
1520 				refillq->ring = kcalloc(refillq->desc_count,
1521 							sizeof(*refillq->ring),
1522 							GFP_KERNEL);
1523 				if (!refillq->ring) {
1524 					err = -ENOMEM;
1525 					goto err_alloc;
1526 				}
1527 			}
1528 		}
1529 
1530 skip_splitq_rx_init:
1531 		for (j = 0; j < num_rxq; j++) {
1532 			struct idpf_rx_queue *q;
1533 
1534 			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1535 				q = rx_qgrp->singleq.rxqs[j];
1536 				goto setup_rxq;
1537 			}
1538 			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1539 			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1540 			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1541 			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1542 				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1543 				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1544 
1545 			idpf_queue_assign(HSPLIT_EN, q, hs);
1546 
1547 setup_rxq:
1548 			q->desc_count = vport->rxq_desc_count;
1549 			q->rx_ptype_lkup = vport->rx_ptype_lkup;
1550 			q->netdev = vport->netdev;
1551 			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1552 			q->idx = (i * num_rxq) + j;
1553 			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1554 			q->rx_max_pkt_size = vport->netdev->mtu +
1555 							LIBETH_RX_LL_LEN;
1556 			idpf_rxq_set_descids(vport, q);
1557 		}
1558 	}
1559 
1560 err_alloc:
1561 	if (err)
1562 		idpf_rxq_group_rel(vport);
1563 
1564 	return err;
1565 }
1566 
1567 /**
1568  * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1569  * @vport: vport with qgrps to allocate
1570  *
1571  * Returns 0 on success, negative on failure
1572  */
1573 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1574 {
1575 	u16 num_txq, num_rxq;
1576 	int err;
1577 
1578 	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1579 
1580 	err = idpf_txq_group_alloc(vport, num_txq);
1581 	if (err)
1582 		goto err_out;
1583 
1584 	err = idpf_rxq_group_alloc(vport, num_rxq);
1585 	if (err)
1586 		goto err_out;
1587 
1588 	return 0;
1589 
1590 err_out:
1591 	idpf_vport_queue_grp_rel_all(vport);
1592 
1593 	return err;
1594 }
1595 
1596 /**
1597  * idpf_vport_queues_alloc - Allocate memory for all queues
1598  * @vport: virtual port
1599  *
1600  * Allocate memory for queues associated with a vport.  Returns 0 on success,
1601  * negative on failure.
1602  */
1603 int idpf_vport_queues_alloc(struct idpf_vport *vport)
1604 {
1605 	int err;
1606 
1607 	err = idpf_vport_queue_grp_alloc_all(vport);
1608 	if (err)
1609 		goto err_out;
1610 
1611 	err = idpf_tx_desc_alloc_all(vport);
1612 	if (err)
1613 		goto err_out;
1614 
1615 	err = idpf_rx_desc_alloc_all(vport);
1616 	if (err)
1617 		goto err_out;
1618 
1619 	err = idpf_vport_init_fast_path_txqs(vport);
1620 	if (err)
1621 		goto err_out;
1622 
1623 	return 0;
1624 
1625 err_out:
1626 	idpf_vport_queues_rel(vport);
1627 
1628 	return err;
1629 }
1630 
1631 /**
1632  * idpf_tx_handle_sw_marker - Handle queue marker packet
1633  * @tx_q: tx queue to handle software marker
1634  */
1635 static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1636 {
1637 	struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1638 	struct idpf_vport *vport = priv->vport;
1639 	int i;
1640 
1641 	idpf_queue_clear(SW_MARKER, tx_q);
1642 	/* Hardware must write marker packets to all queues associated with
1643 	 * completion queues. So check if all queues received marker packets
1644 	 */
1645 	for (i = 0; i < vport->num_txq; i++)
1646 		/* If we're still waiting on any other TXQ marker completions,
1647 		 * just return now since we cannot wake up the marker_wq yet.
1648 		 */
1649 		if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
1650 			return;
1651 
1652 	/* Drain complete */
1653 	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1654 	wake_up(&vport->sw_marker_wq);
1655 }
1656 
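/*
 * Minimal sketch, not driver code, of the marker-drain handshake above: every
 * Tx queue starts out flagged as "waiting for a marker", each marker
 * completion clears its own queue's flag, and only the last completion wakes
 * the waiter. All names here are hypothetical; the driver uses the SW_MARKER
 * queue flag, IDPF_VPORT_SW_MARKER and sw_marker_wq.
 */
#include <linux/wait.h>
#include <linux/bitops.h>

static DECLARE_WAIT_QUEUE_HEAD(example_marker_wq);
static unsigned long example_pending_markers;	/* one bit per Tx queue */

static void example_marker_seen(unsigned int txq_idx)
{
	clear_bit(txq_idx, &example_pending_markers);

	/* only the completion that clears the last pending bit wakes the
	 * drain waiter
	 */
	if (!example_pending_markers)
		wake_up(&example_marker_wq);
}

static void example_wait_for_drain(void)
{
	wait_event(example_marker_wq, !example_pending_markers);
}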
1657 /**
1658  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1659  * out of order completions
1660  * @txq: queue to clean
1661  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1662  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1663  * @budget: Used to determine if we are in netpoll
1664  */
1665 static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
1666 				       u16 compl_tag,
1667 				       struct libeth_sq_napi_stats *cleaned,
1668 				       int budget)
1669 {
1670 	struct idpf_tx_stash *stash;
1671 	struct hlist_node *tmp_buf;
1672 	struct libeth_cq_pp cp = {
1673 		.dev	= txq->dev,
1674 		.ss	= cleaned,
1675 		.napi	= budget,
1676 	};
1677 
1678 	/* Buffer completion */
1679 	hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
1680 				    hlist, compl_tag) {
1681 		if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
1682 			continue;
1683 
1684 		hash_del(&stash->hlist);
1685 		libeth_tx_complete(&stash->buf, &cp);
1686 
1687 		/* Push shadow buf back onto stack */
1688 		idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
1689 	}
1690 }
1691 
1692 /**
1693  * idpf_stash_flow_sch_buffers - store buffer parameters to be freed at a
1694  * later time (only relevant for flow scheduling mode)
1695  * @txq: Tx queue to clean
1696  * @tx_buf: buffer to store
1697  */
1698 static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
1699 				       struct idpf_tx_buf *tx_buf)
1700 {
1701 	struct idpf_tx_stash *stash;
1702 
1703 	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
1704 		return 0;
1705 
1706 	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
1707 	if (unlikely(!stash)) {
1708 		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1709 				    netdev_name(txq->netdev));
1710 
1711 		return -ENOMEM;
1712 	}
1713 
1714 	/* Store buffer params in shadow buffer */
1715 	stash->buf.skb = tx_buf->skb;
1716 	stash->buf.bytes = tx_buf->bytes;
1717 	stash->buf.packets = tx_buf->packets;
1718 	stash->buf.type = tx_buf->type;
1719 	stash->buf.nr_frags = tx_buf->nr_frags;
1720 	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1721 	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1722 	idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
1723 
1724 	/* Add buffer to buf_hash table to be freed later */
1725 	hash_add(txq->stash->sched_buf_hash, &stash->hlist,
1726 		 idpf_tx_buf_compl_tag(&stash->buf));
1727 
1728 	tx_buf->type = LIBETH_SQE_EMPTY;
1729 
1730 	return 0;
1731 }
1732 
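/*
 * Minimal sketch, not driver code, of the stash/complete pattern implemented
 * by idpf_stash_flow_sch_buffers() and idpf_tx_clean_stashed_bufs(): entries
 * are filed in a hash table keyed by completion tag and reclaimed when the
 * matching completion arrives. All names are hypothetical, and entries are
 * freed here for simplicity where the driver recycles them onto its LIFO.
 */
#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_stash {
	struct hlist_node hlist;
	u32 tag;
};

static DEFINE_HASHTABLE(example_stash_hash, 8);

static void example_stash_packet(struct example_stash *stash, u32 tag)
{
	stash->tag = tag;
	/* the bucket is chosen by hashing the completion tag */
	hash_add(example_stash_hash, &stash->hlist, tag);
}

static void example_complete_tag(u32 tag)
{
	struct example_stash *stash;
	struct hlist_node *tmp;

	/* several tags can hash to one bucket, so match the tag explicitly */
	hash_for_each_possible_safe(example_stash_hash, stash, tmp, hlist, tag) {
		if (stash->tag != tag)
			continue;

		hash_del(&stash->hlist);
		kfree(stash);
	}
}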
1733 #define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1734 do {								\
1735 	if (unlikely(++(ntc) == (txq)->desc_count)) {		\
1736 		ntc = 0;					\
1737 		buf = (txq)->tx_buf;				\
1738 		desc = &(txq)->flex_tx[0];			\
1739 	} else {						\
1740 		(buf)++;					\
1741 		(desc)++;					\
1742 	}							\
1743 } while (0)
1744 
1745 /**
1746  * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1747  * @tx_q: Tx queue to clean
1748  * @end: queue index until which it should be cleaned
1749  * @napi_budget: Used to determine if we are in netpoll
1750  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1751  * @descs_only: true if queue is using flow-based scheduling and should
1752  * not clean buffers at this time
1753  *
1754  * Cleans the queue descriptor ring. If the queue is using queue-based
1755  * scheduling, the buffers will be cleaned as well. If the queue is using
1756  * flow-based scheduling, only the descriptors are cleaned at this time.
1757  * Separate packet completion events will be reported on the completion queue,
1758  * and the buffers will be cleaned separately. The stats are not updated from
1759  * this function when using flow-based scheduling.
1760  *
1761  * Furthermore, in flow scheduling mode, check to make sure there are enough
1762  * reserve buffers to stash the packet. If there are not, return early, which
1763  * will leave next_to_clean pointing to the packet that failed to be stashed.
1764  *
1765  * Return: false in the scenario above, true otherwise.
1766  */
1767 static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
1768 				 int napi_budget,
1769 				 struct libeth_sq_napi_stats *cleaned,
1770 				 bool descs_only)
1771 {
1772 	union idpf_tx_flex_desc *next_pending_desc = NULL;
1773 	union idpf_tx_flex_desc *tx_desc;
1774 	u32 ntc = tx_q->next_to_clean;
1775 	struct libeth_cq_pp cp = {
1776 		.dev	= tx_q->dev,
1777 		.ss	= cleaned,
1778 		.napi	= napi_budget,
1779 	};
1780 	struct idpf_tx_buf *tx_buf;
1781 	bool clean_complete = true;
1782 
1783 	tx_desc = &tx_q->flex_tx[ntc];
1784 	next_pending_desc = &tx_q->flex_tx[end];
1785 	tx_buf = &tx_q->tx_buf[ntc];
1786 
1787 	while (tx_desc != next_pending_desc) {
1788 		u32 eop_idx;
1789 
1790 		/* If this entry in the ring was used as a context descriptor,
1791 		 * it's corresponding entry in the buffer ring is reserved. We
1792 		 * can skip this descriptor since there is no buffer to clean.
1793 		 */
1794 		if (tx_buf->type <= LIBETH_SQE_CTX)
1795 			goto fetch_next_txq_desc;
1796 
1797 		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
1798 			break;
1799 
1800 		eop_idx = tx_buf->rs_idx;
1801 
1802 		if (descs_only) {
1803 			if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
1804 				clean_complete = false;
1805 				goto tx_splitq_clean_out;
1806 			}
1807 
1808 			idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1809 
1810 			while (ntc != eop_idx) {
1811 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1812 							      tx_desc, tx_buf);
1813 				idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1814 			}
1815 		} else {
1816 			libeth_tx_complete(tx_buf, &cp);
1817 
1818 			/* unmap remaining buffers */
1819 			while (ntc != eop_idx) {
1820 				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1821 							      tx_desc, tx_buf);
1822 
1823 				/* unmap any remaining paged data */
1824 				libeth_tx_complete(tx_buf, &cp);
1825 			}
1826 		}
1827 
1828 fetch_next_txq_desc:
1829 		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1830 	}
1831 
1832 tx_splitq_clean_out:
1833 	tx_q->next_to_clean = ntc;
1834 
1835 	return clean_complete;
1836 }
1837 
1838 #define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1839 do {							\
1840 	(buf)++;					\
1841 	(ntc)++;					\
1842 	if (unlikely((ntc) == (txq)->desc_count)) {	\
1843 		buf = (txq)->tx_buf;			\
1844 		ntc = 0;				\
1845 	}						\
1846 } while (0)
1847 
1848 /**
1849  * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1850  * @txq: queue to clean
1851  * @compl_tag: completion tag of packet to clean (from completion descriptor)
1852  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1853  * @budget: Used to determine if we are in netpoll
1854  *
1855  * Cleans all buffers on the TX buffer ring that are associated with the input
1856  * completion tag; byte/segment counts are added to @cleaned. Returns false if
1857  * no matching buffers are found on the ring (they were previously stashed in
1858  * the hash table), true otherwise. A sketch of the tag layout follows this function.
1859  */
1860 static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
1861 				   struct libeth_sq_napi_stats *cleaned,
1862 				   int budget)
1863 {
1864 	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1865 	struct idpf_tx_buf *tx_buf = NULL;
1866 	struct libeth_cq_pp cp = {
1867 		.dev	= txq->dev,
1868 		.ss	= cleaned,
1869 		.napi	= budget,
1870 	};
1871 	u16 ntc, orig_idx = idx;
1872 
1873 	tx_buf = &txq->tx_buf[idx];
1874 
1875 	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
1876 		     idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
1877 		return false;
1878 
1879 	if (tx_buf->type == LIBETH_SQE_SKB)
1880 		libeth_tx_complete(tx_buf, &cp);
1881 
1882 	idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1883 
1884 	while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
1885 		libeth_tx_complete(tx_buf, &cp);
1886 		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1887 	}
1888 
1889 	/*
1890 	 * It's possible the packet we just cleaned was an out of order
1891 	 * completion, which means we can stash the buffers starting from
1892 	 * the original next_to_clean and reuse the descriptors. We need
1893 	 * to compare the descriptor ring next_to_clean packet's "first" buffer
1894 	 * to the "first" buffer of the packet we just cleaned to determine if
1895 	 * this is the case. However, next_to_clean can point to either a
1896 	 * reserved buffer that corresponds to a context descriptor used for the
1897 	 * next_to_clean packet (TSO packet) or the "first" buffer (single
1898 	 * packet). The orig_idx from the packet we just cleaned will always
1899 	 * point to the "first" buffer. If next_to_clean points to a reserved
1900 	 * buffer, let's bump ntc once and start the comparison from there.
1901 	 */
1902 	ntc = txq->next_to_clean;
1903 	tx_buf = &txq->tx_buf[ntc];
1904 
1905 	if (tx_buf->type == LIBETH_SQE_CTX)
1906 		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1907 
1908 	/*
1909 	 * If ntc still points to a different "first" buffer, clean the
1910 	 * descriptor ring and stash all of the buffers for later cleaning. If
1911 	 * we cannot stash all of the buffers, next_to_clean will point to the
1912 	 * "first" buffer of the packet that could not be stashed and cleaning
1913 	 * will start there next time.
1914 	 */
1915 	if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
1916 		     !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
1917 					   true)))
1918 		return true;
1919 
1920 	/*
1921 	 * Otherwise, update next_to_clean to reflect the cleaning that was
1922 	 * done above.
1923 	 */
1924 	txq->next_to_clean = idx;
1925 
1926 	return true;
1927 }
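
/*
 * Illustrative sketch, not part of the driver: how a splitq completion tag
 * is composed and decomposed.  The shift/mask parameters mirror the
 * compl_tag_gen_s and compl_tag_bufid_m fields used above; the actual widths
 * are configured at queue setup, so the helpers below only show the layout
 * (generation in the high bits, ring index of the packet's first buffer in
 * the low bits).
 */
static inline u16 example_compl_tag_pack(u16 gen, u16 buf_id, u16 gen_shift)
{
	return (gen << gen_shift) | buf_id;
}

static inline u16 example_compl_tag_unpack_bufid(u16 tag, u16 bufid_mask)
{
	/* mirrors "idx = compl_tag & txq->compl_tag_bufid_m" above */
	return tag & bufid_mask;
}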
1928 
1929 /**
1930  * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1931  * whether on the buffer ring or in the hash table
1932  * @txq: Tx ring to clean
1933  * @desc: pointer to completion queue descriptor to extract completion
1934  * information from
1935  * @cleaned: pointer to stats struct to track cleaned packets/bytes
1936  * @budget: Used to determine if we are in netpoll
1937  *
1938  * The cleaned byte/packet counts are added to @cleaned.
1939  */
1940 static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
1941 					 struct idpf_splitq_tx_compl_desc *desc,
1942 					 struct libeth_sq_napi_stats *cleaned,
1943 					 int budget)
1944 {
1945 	u16 compl_tag;
1946 
1947 	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
1948 		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
1949 
1950 		idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
1951 		return;
1952 	}
1953 
1954 	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
1955 
1956 	/* If we didn't clean anything on the ring, this packet must be
1957 	 * in the hash table. Go clean it there.
1958 	 */
1959 	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
1960 		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
1961 }
1962 
1963 /**
1964  * idpf_tx_clean_complq - Reclaim resources on completion queue
1965  * @complq: Tx ring to clean
1966  * @budget: Used to determine if we are in netpoll
1967  * @cleaned: returns number of packets cleaned
1968  *
1969  * Returns true if there's any budget left (i.e. the clean is finished)
1970  */
1971 static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
1972 				 int *cleaned)
1973 {
1974 	struct idpf_splitq_tx_compl_desc *tx_desc;
1975 	s16 ntc = complq->next_to_clean;
1976 	struct idpf_netdev_priv *np;
1977 	unsigned int complq_budget;
1978 	bool complq_ok = true;
1979 	int i;
1980 
1981 	complq_budget = complq->clean_budget;
1982 	tx_desc = &complq->comp[ntc];
1983 	ntc -= complq->desc_count;
1984 
1985 	do {
1986 		struct libeth_sq_napi_stats cleaned_stats = { };
1987 		struct idpf_tx_queue *tx_q;
1988 		int rel_tx_qid;
1989 		u16 hw_head;
1990 		u8 ctype;	/* completion type */
1991 		u16 gen;
1992 
1993 		/* if the descriptor isn't done, no work yet to do */
1994 		gen = le16_get_bits(tx_desc->qid_comptype_gen,
1995 				    IDPF_TXD_COMPLQ_GEN_M);
1996 		if (idpf_queue_has(GEN_CHK, complq) != gen)
1997 			break;
1998 
1999 		/* Find necessary info of TX queue to clean buffers */
2000 		rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
2001 					   IDPF_TXD_COMPLQ_QID_M);
2002 		if (rel_tx_qid >= complq->txq_grp->num_txq ||
2003 		    !complq->txq_grp->txqs[rel_tx_qid]) {
2004 			netdev_err(complq->netdev, "TxQ not found\n");
2005 			goto fetch_next_desc;
2006 		}
2007 		tx_q = complq->txq_grp->txqs[rel_tx_qid];
2008 
2009 		/* Determine completion type */
2010 		ctype = le16_get_bits(tx_desc->qid_comptype_gen,
2011 				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
2012 		switch (ctype) {
2013 		case IDPF_TXD_COMPLT_RE:
2014 			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
2015 
2016 			idpf_tx_splitq_clean(tx_q, hw_head, budget,
2017 					     &cleaned_stats, true);
2018 			break;
2019 		case IDPF_TXD_COMPLT_RS:
2020 			idpf_tx_handle_rs_completion(tx_q, tx_desc,
2021 						     &cleaned_stats, budget);
2022 			break;
2023 		case IDPF_TXD_COMPLT_SW_MARKER:
2024 			idpf_tx_handle_sw_marker(tx_q);
2025 			break;
2026 		default:
2027 			netdev_err(tx_q->netdev,
2028 				   "Unknown TX completion type: %d\n", ctype);
2029 			goto fetch_next_desc;
2030 		}
2031 
2032 		u64_stats_update_begin(&tx_q->stats_sync);
2033 		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2034 		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2035 		tx_q->cleaned_pkts += cleaned_stats.packets;
2036 		tx_q->cleaned_bytes += cleaned_stats.bytes;
2037 		complq->num_completions++;
2038 		u64_stats_update_end(&tx_q->stats_sync);
2039 
2040 fetch_next_desc:
2041 		tx_desc++;
2042 		ntc++;
2043 		if (unlikely(!ntc)) {
2044 			ntc -= complq->desc_count;
2045 			tx_desc = &complq->comp[0];
2046 			idpf_queue_change(GEN_CHK, complq);
2047 		}
2048 
2049 		prefetch(tx_desc);
2050 
2051 		/* update budget accounting */
2052 		complq_budget--;
2053 	} while (likely(complq_budget));
2054 
2055 	/* Store the state of the complq to be used later in deciding if a
2056 	 * TXQ can be started again
2057 	 */
2058 	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
2059 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
2060 		complq_ok = false;
2061 
2062 	np = netdev_priv(complq->netdev);
2063 	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
2064 		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2065 		struct netdev_queue *nq;
2066 		bool dont_wake;
2067 
2068 		/* We didn't clean anything on this queue, move along */
2069 		if (!tx_q->cleaned_bytes)
2070 			continue;
2071 
2072 		*cleaned += tx_q->cleaned_pkts;
2073 
2074 		/* Update BQL */
2075 		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2076 
2077 		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
2078 			    np->state != __IDPF_VPORT_UP ||
2079 			    !netif_carrier_ok(tx_q->netdev);
2080 		/* Check if the TXQ needs to and can be restarted */
2081 		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2082 					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2083 					   dont_wake);
2084 
2085 		/* Reset cleaned stats for the next time this queue is
2086 		 * cleaned
2087 		 */
2088 		tx_q->cleaned_bytes = 0;
2089 		tx_q->cleaned_pkts = 0;
2090 	}
2091 
2092 	ntc += complq->desc_count;
2093 	complq->next_to_clean = ntc;
2094 
2095 	return !!complq_budget;
2096 }
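
/*
 * Illustrative sketch, not part of the driver: the completion queue walk
 * above keeps next_to_clean biased by -desc_count so the wrap test is a
 * simple "reached zero" check, and the expected generation bit flips on
 * every wrap.  A minimal stand-alone version of that bookkeeping, with a
 * local flag instead of idpf_queue_change(GEN_CHK, ...), could look like:
 */
static inline void example_complq_index_walk(u16 desc_count, u16 start,
					     u32 steps)
{
	s16 ntc = start - desc_count;	/* biased index, always negative */
	bool gen = true;		/* generation expected in valid descs */

	while (steps--) {
		ntc++;
		if (!ntc) {		/* walked past the last descriptor */
			ntc -= desc_count;
			gen = !gen;	/* HW flips the gen bit each pass */
		}
	}

	ntc += desc_count;		/* un-bias to get the real ring index */
	(void)ntc;
	(void)gen;
}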
2097 
2098 /**
2099  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2100  * based scheduling descriptors
2101  * @desc: descriptor to populate
2102  * @params: pointer to tx params struct
2103  * @td_cmd: command to be filled in desc
2104  * @size: size of buffer
2105  */
2106 void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
2107 			      struct idpf_tx_splitq_params *params,
2108 			      u16 td_cmd, u16 size)
2109 {
2110 	desc->q.qw1.cmd_dtype =
2111 		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
2112 	desc->q.qw1.cmd_dtype |=
2113 		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
2114 	desc->q.qw1.buf_size = cpu_to_le16(size);
2115 	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
2116 }
2117 
2118 /**
2119  * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2120  * scheduling descriptors
2121  * @desc: descriptor to populate
2122  * @params: pointer to tx params struct
2123  * @td_cmd: command to be filled in desc
2124  * @size: size of buffer
2125  */
2126 void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
2127 				    struct idpf_tx_splitq_params *params,
2128 				    u16 td_cmd, u16 size)
2129 {
2130 	desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
2131 	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
2132 	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
2133 }
2134 
2135 /**
2136  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2137  * @tx_q: the queue to be checked
2138  * @descs_needed: number of descriptors required for this packet
2139  *
2140  * Returns 0 if stop is not needed
2141  */
2142 static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2143 				     unsigned int descs_needed)
2144 {
2145 	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2146 		goto out;
2147 
2148 	/* If there are too many outstanding completions expected on the
2149 	 * completion queue, stop the TX queue to give the device some time to
2150 	 * catch up
2151 	 */
2152 	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2153 		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2154 		goto splitq_stop;
2155 
2156 	/* Also check for available bookkeeping buffers; if we are low, stop
2157 	 * the queue to wait for more completions
2158 	 */
2159 	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2160 		goto splitq_stop;
2161 
2162 	return 0;
2163 
2164 splitq_stop:
2165 	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
2166 
2167 out:
2168 	u64_stats_update_begin(&tx_q->stats_sync);
2169 	u64_stats_inc(&tx_q->q_stats.q_busy);
2170 	u64_stats_update_end(&tx_q->stats_sync);
2171 
2172 	return -EBUSY;
2173 }
2174 
2175 /**
2176  * idpf_tx_buf_hw_update - Store the new tail value
2177  * @tx_q: queue to bump
2178  * @val: new tail index
2179  * @xmit_more: more skbs pending
2180  *
2181  * The naming here is special in that 'hw' signals that this function is about
2182  * to do a register write to update our queue status. We know this can only
2183  * mean tail here as HW should be owning head for TX.
2184  */
2185 void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2186 			   bool xmit_more)
2187 {
2188 	struct netdev_queue *nq;
2189 
2190 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2191 	tx_q->next_to_use = val;
2192 
2193 	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
2194 		u64_stats_update_begin(&tx_q->stats_sync);
2195 		u64_stats_inc(&tx_q->q_stats.q_busy);
2196 		u64_stats_update_end(&tx_q->stats_sync);
2197 	}
2198 
2199 	/* Force memory writes to complete before letting h/w
2200 	 * know there are new descriptors to fetch.  (Only
2201 	 * applicable for weak-ordered memory model archs,
2202 	 * such as IA-64).
2203 	 */
2204 	wmb();
2205 
2206 	/* notify HW of packet */
2207 	if (netif_xmit_stopped(nq) || !xmit_more)
2208 		writel(val, tx_q->tail);
2209 }
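
/*
 * Illustrative sketch, not part of the driver: how the xmit_more hint above
 * batches doorbell writes.  ring_doorbell() is a hypothetical stand-in for
 * the writel(val, tx_q->tail) call; the stack keeps xmit_more set while more
 * frames are queued for this ring, so the tail register is only written once
 * per batch, or immediately if the queue has been stopped.
 */
static inline void example_tail_bump(u32 ntu, bool queue_stopped,
				     bool xmit_more,
				     void (*ring_doorbell)(u32 val))
{
	/* order descriptor writes before the doorbell, as above */
	wmb();

	if (queue_stopped || !xmit_more)
		ring_doorbell(ntu);
}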
2210 
2211 /**
2212  * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2213  * @txq: queue to send buffer on
2214  * @skb: send buffer
2215  *
2216  * Returns number of data descriptors needed for this skb.
2217  */
2218 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
2219 					 struct sk_buff *skb)
2220 {
2221 	const struct skb_shared_info *shinfo;
2222 	unsigned int count = 0, i;
2223 
2224 	count += !!skb_headlen(skb);
2225 
2226 	if (!skb_is_nonlinear(skb))
2227 		return count;
2228 
2229 	shinfo = skb_shinfo(skb);
2230 	for (i = 0; i < shinfo->nr_frags; i++) {
2231 		unsigned int size;
2232 
2233 		size = skb_frag_size(&shinfo->frags[i]);
2234 
2235 		/* We only need to use the idpf_size_to_txd_count check if the
2236 		 * fragment is going to span multiple descriptors,
2237 		 * i.e. size >= 16K.
2238 		 */
2239 		if (size >= SZ_16K)
2240 			count += idpf_size_to_txd_count(size);
2241 		else
2242 			count++;
2243 	}
2244 
2245 	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2246 		if (__skb_linearize(skb))
2247 			return 0;
2248 
2249 		count = idpf_size_to_txd_count(skb->len);
2250 		u64_stats_update_begin(&txq->stats_sync);
2251 		u64_stats_inc(&txq->q_stats.linearize);
2252 		u64_stats_update_end(&txq->stats_sync);
2253 	}
2254 
2255 	return count;
2256 }
2257 
2258 /**
2259  * idpf_tx_dma_map_error - handle TX DMA map errors
2260  * @txq: queue to send buffer on
2261  * @skb: send buffer
2262  * @first: original first buffer info buffer for packet
2263  * @idx: starting point on ring to unwind
2264  */
2265 void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
2266 			   struct idpf_tx_buf *first, u16 idx)
2267 {
2268 	struct libeth_sq_napi_stats ss = { };
2269 	struct libeth_cq_pp cp = {
2270 		.dev	= txq->dev,
2271 		.ss	= &ss,
2272 	};
2273 
2274 	u64_stats_update_begin(&txq->stats_sync);
2275 	u64_stats_inc(&txq->q_stats.dma_map_errs);
2276 	u64_stats_update_end(&txq->stats_sync);
2277 
2278 	/* clear dma mappings for failed tx_buf map */
2279 	for (;;) {
2280 		struct idpf_tx_buf *tx_buf;
2281 
2282 		tx_buf = &txq->tx_buf[idx];
2283 		libeth_tx_complete(tx_buf, &cp);
2284 		if (tx_buf == first)
2285 			break;
2286 		if (idx == 0)
2287 			idx = txq->desc_count;
2288 		idx--;
2289 	}
2290 
2291 	if (skb_is_gso(skb)) {
2292 		union idpf_tx_flex_desc *tx_desc;
2293 
2294 		/* If we failed a DMA mapping for a TSO packet, we will have
2295 		 * used one additional descriptor for a context
2296 		 * descriptor. Reset that here.
2297 		 */
2298 		tx_desc = &txq->flex_tx[idx];
2299 		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2300 		if (idx == 0)
2301 			idx = txq->desc_count;
2302 		idx--;
2303 	}
2304 
2305 	/* Update tail in case netdev_xmit_more was previously true */
2306 	idpf_tx_buf_hw_update(txq, idx, false);
2307 }
2308 
2309 /**
2310  * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2311  * @txq: the tx ring to wrap
2312  * @ntu: ring index to bump
2313  */
2314 static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2315 {
2316 	ntu++;
2317 
2318 	if (ntu == txq->desc_count) {
2319 		ntu = 0;
2320 		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2321 	}
2322 
2323 	return ntu;
2324 }
2325 
2326 /**
2327  * idpf_tx_splitq_map - Build the Tx flex descriptor
2328  * @tx_q: queue to send buffer on
2329  * @params: pointer to splitq params struct
2330  * @first: first buffer info buffer to use
2331  *
2332  * This function loops over the skb data pointed to by *first
2333  * and gets a physical address for each memory location and programs
2334  * it and the length into the transmit flex descriptor.
2335  */
2336 static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2337 			       struct idpf_tx_splitq_params *params,
2338 			       struct idpf_tx_buf *first)
2339 {
2340 	union idpf_tx_flex_desc *tx_desc;
2341 	unsigned int data_len, size;
2342 	struct idpf_tx_buf *tx_buf;
2343 	u16 i = tx_q->next_to_use;
2344 	struct netdev_queue *nq;
2345 	struct sk_buff *skb;
2346 	skb_frag_t *frag;
2347 	u16 td_cmd = 0;
2348 	dma_addr_t dma;
2349 
2350 	skb = first->skb;
2351 
2352 	td_cmd = params->offload.td_cmd;
2353 
2354 	data_len = skb->data_len;
2355 	size = skb_headlen(skb);
2356 
2357 	tx_desc = &tx_q->flex_tx[i];
2358 
2359 	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2360 
2361 	tx_buf = first;
2362 	first->nr_frags = 0;
2363 
2364 	params->compl_tag =
2365 		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2366 
2367 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2368 		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2369 
2370 		if (dma_mapping_error(tx_q->dev, dma))
2371 			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2372 
2373 		first->nr_frags++;
2374 		idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2375 		tx_buf->type = LIBETH_SQE_FRAG;
2376 
2377 		/* record length, and DMA address */
2378 		dma_unmap_len_set(tx_buf, len, size);
2379 		dma_unmap_addr_set(tx_buf, dma, dma);
2380 
2381 		/* buf_addr is in same location for both desc types */
2382 		tx_desc->q.buf_addr = cpu_to_le64(dma);
2383 
2384 		/* The stack can send us fragments that are too large for a
2385 		 * single descriptor i.e. frag size > 16K-1. We will need to
2386 		 * split the fragment across multiple descriptors in this case.
2387 		 * To adhere to HW alignment restrictions, the fragment needs
2388 		 * to be split such that the first chunk ends on a 4K boundary
2389 		 * and all subsequent chunks start on a 4K boundary. We still
2390 		 * want to send as much data as possible though, so our
2391 		 * intermediate descriptor chunk size will be 12K.
2392 		 *
2393 		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2394 		 * ------------------------------------------------------------
2395 		 * |                    frag_size = 32K                       |
2396 		 * ------------------------------------------------------------
2397 		 * |2600		  |16384	    |28672
2398 		 *
2399 		 * 3 descriptors will be used for this fragment. The HW expects
2400 		 * the descriptors to contain the following:
2401 		 * ------------------------------------------------------------
2402 		 * | size = 13784         | size = 12K      | size = 6696     |
2403 		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2404 		 * ------------------------------------------------------------
2405 		 *
2406 		 * We need to first adjust the max_data for the first chunk so
2407 		 * that it ends on a 4K boundary. By negating the value of the
2408 		 * DMA address and taking only the low order bits, we're
2409 		 * effectively calculating
2410 		 *	4K - (DMA addr lower order bits) =
2411 		 *				bytes to next boundary.
2412 		 *
2413 		 * Add that to our base aligned max_data (12K) and we have
2414 		 * our first chunk size. In the example above,
2415 		 *	13784 = 12K + (4096-2600)
2416 		 *
2417 		 * After guaranteeing the first chunk ends on a 4K boundary, we
2418 		 * will give the intermediate descriptors 12K chunks and
2419 		 * whatever is left to the final descriptor. This ensures that
2420 		 * all descriptors used for the remaining chunks of the
2421 		 * fragment start on a 4K boundary and we use as few
2422 		 * descriptors as possible.
2423 		 * descriptors as possible.  (A stand-alone arithmetic sketch of this chunking follows this function.)
2424 		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2425 		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2426 			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2427 						  max_data);
2428 
2429 			if (unlikely(++i == tx_q->desc_count)) {
2430 				tx_buf = tx_q->tx_buf;
2431 				tx_desc = &tx_q->flex_tx[0];
2432 				i = 0;
2433 				tx_q->compl_tag_cur_gen =
2434 					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2435 			} else {
2436 				tx_buf++;
2437 				tx_desc++;
2438 			}
2439 
2440 			/* Since this packet has a buffer that is going to span
2441 			 * multiple descriptors, it's going to leave holes in
2442 			 * the TX buffer ring. To ensure these holes do not
2443 			 * cause issues in the cleaning routines, we will clear
2444 			 * them of any stale data and assign them the same
2445 			 * completion tag as the current packet. Then when the
2446 			 * packet is being cleaned, the cleaning routines will
2447 			 * simply pass over these holes and finish cleaning the
2448 			 * rest of the packet.
2449 			 */
2450 			tx_buf->type = LIBETH_SQE_EMPTY;
2451 			idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2452 
2453 			/* Adjust the DMA offset and the remaining size of the
2454 			 * fragment.  On the first iteration of this loop,
2455 			 * max_data will be >= 12K and <= 16K-1.  On any
2456 			 * subsequent iteration of this loop, max_data will
2457 			 * always be 12K.
2458 			 */
2459 			dma += max_data;
2460 			size -= max_data;
2461 
2462 			/* Reset max_data since remaining chunks will be 12K
2463 			 * at most
2464 			 */
2465 			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2466 
2467 			/* buf_addr is in same location for both desc types */
2468 			tx_desc->q.buf_addr = cpu_to_le64(dma);
2469 		}
2470 
2471 		if (!data_len)
2472 			break;
2473 
2474 		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2475 
2476 		if (unlikely(++i == tx_q->desc_count)) {
2477 			tx_buf = tx_q->tx_buf;
2478 			tx_desc = &tx_q->flex_tx[0];
2479 			i = 0;
2480 			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2481 		} else {
2482 			tx_buf++;
2483 			tx_desc++;
2484 		}
2485 
2486 		size = skb_frag_size(frag);
2487 		data_len -= size;
2488 
2489 		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2490 				       DMA_TO_DEVICE);
2491 	}
2492 
2493 	/* record SW timestamp if HW timestamp is not available */
2494 	skb_tx_timestamp(skb);
2495 
2496 	first->type = LIBETH_SQE_SKB;
2497 
2498 	/* write last descriptor with RS and EOP bits */
2499 	first->rs_idx = i;
2500 	td_cmd |= params->eop_cmd;
2501 	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2502 	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2503 
2504 	tx_q->txq_grp->num_completions_pending++;
2505 
2506 	/* record bytecount for BQL */
2507 	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2508 	netdev_tx_sent_queue(nq, first->bytes);
2509 
2510 	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2511 }
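
/*
 * Illustrative sketch, not part of the driver: the chunking arithmetic from
 * the block comment in idpf_tx_splitq_map() above, written as a stand-alone
 * loop.  For the 32K fragment at DMA address 2600 from that example it
 * yields chunks of 13784, 12288 and 6696 bytes: the first chunk is 12K plus
 * the distance to the next 4K boundary, intermediate chunks are 12K, and the
 * remainder goes into the final descriptor.
 */
static inline void example_split_oversized_frag(dma_addr_t dma, u32 size)
{
	u32 max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

	/* pad the first chunk so it ends on a 4K boundary */
	max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);

	while (size > IDPF_TX_MAX_DESC_DATA) {
		/* one descriptor describes "max_data" bytes at "dma" */
		dma += max_data;
		size -= max_data;
		max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
	}

	/* the final descriptor describes the remaining "size" bytes */
	(void)dma;
	(void)size;
}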
2512 
2513 /**
2514  * idpf_tso - computes mss and TSO length to prepare for TSO
2515  * @skb: pointer to skb
2516  * @off: pointer to struct that holds offload parameters
2517  *
2518  * Returns error (negative) if TSO was requested but cannot be applied to the
2519  * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2520  */
2521 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2522 {
2523 	const struct skb_shared_info *shinfo;
2524 	union {
2525 		struct iphdr *v4;
2526 		struct ipv6hdr *v6;
2527 		unsigned char *hdr;
2528 	} ip;
2529 	union {
2530 		struct tcphdr *tcp;
2531 		struct udphdr *udp;
2532 		unsigned char *hdr;
2533 	} l4;
2534 	u32 paylen, l4_start;
2535 	int err;
2536 
2537 	if (!skb_is_gso(skb))
2538 		return 0;
2539 
2540 	err = skb_cow_head(skb, 0);
2541 	if (err < 0)
2542 		return err;
2543 
2544 	shinfo = skb_shinfo(skb);
2545 
2546 	ip.hdr = skb_network_header(skb);
2547 	l4.hdr = skb_transport_header(skb);
2548 
2549 	/* initialize outer IP header fields */
2550 	if (ip.v4->version == 4) {
2551 		ip.v4->tot_len = 0;
2552 		ip.v4->check = 0;
2553 	} else if (ip.v6->version == 6) {
2554 		ip.v6->payload_len = 0;
2555 	}
2556 
2557 	l4_start = skb_transport_offset(skb);
2558 
2559 	/* remove payload length from checksum */
2560 	paylen = skb->len - l4_start;
2561 
2562 	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2563 	case SKB_GSO_TCPV4:
2564 	case SKB_GSO_TCPV6:
2565 		csum_replace_by_diff(&l4.tcp->check,
2566 				     (__force __wsum)htonl(paylen));
2567 		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2568 		break;
2569 	case SKB_GSO_UDP_L4:
2570 		csum_replace_by_diff(&l4.udp->check,
2571 				     (__force __wsum)htonl(paylen));
2572 		/* compute length of segmentation header */
2573 		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2574 		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2575 		break;
2576 	default:
2577 		return -EINVAL;
2578 	}
2579 
2580 	off->tso_len = skb->len - off->tso_hdr_len;
2581 	off->mss = shinfo->gso_size;
2582 	off->tso_segs = shinfo->gso_segs;
2583 
2584 	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2585 
2586 	return 1;
2587 }
2588 
2589 /**
2590  * __idpf_chk_linearize - Check skb is not using too many buffers
2591  * @skb: send buffer
2592  * @max_bufs: maximum number of buffers
2593  *
2594  * For TSO we need to count the TSO header and segment payload separately.  As
2595  * such we need to check cases where we have max_bufs-1 fragments or more as we
2596  * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
2597  * for the segment payload in the first descriptor, and another max_bufs-1 for
2598  * the fragments. A simplified sketch of this check follows the function.
2599  */
2600 static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2601 {
2602 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2603 	const skb_frag_t *frag, *stale;
2604 	int nr_frags, sum;
2605 
2606 	/* no need to check if number of frags is less than max_bufs - 1 */
2607 	nr_frags = shinfo->nr_frags;
2608 	if (nr_frags < (max_bufs - 1))
2609 		return false;
2610 
2611 	/* We need to walk through the list and validate that each group
2612 	 * of max_bufs-2 fragments totals at least gso_size.
2613 	 */
2614 	nr_frags -= max_bufs - 2;
2615 	frag = &shinfo->frags[0];
2616 
2617 	/* Initialize sum to 1 - gso_size, i.e. the negative of (gso_size - 1).  We use
2618 	 * this as the worst case scenario in which the frag ahead of us only
2619 	 * provides one byte which is why we are limited to max_bufs-2
2620 	 * descriptors for a single transmit as the header and previous
2621 	 * fragment are already consuming 2 descriptors.
2622 	 */
2623 	sum = 1 - shinfo->gso_size;
2624 
2625 	/* Add size of frags 0 through 4 to create our initial sum */
2626 	sum += skb_frag_size(frag++);
2627 	sum += skb_frag_size(frag++);
2628 	sum += skb_frag_size(frag++);
2629 	sum += skb_frag_size(frag++);
2630 	sum += skb_frag_size(frag++);
2631 
2632 	/* Walk through fragments adding latest fragment, testing it, and
2633 	 * then removing stale fragments from the sum.
2634 	 */
2635 	for (stale = &shinfo->frags[0];; stale++) {
2636 		int stale_size = skb_frag_size(stale);
2637 
2638 		sum += skb_frag_size(frag++);
2639 
2640 		/* The stale fragment may present us with a smaller
2641 		 * descriptor than the actual fragment size. To account
2642 		 * for that we need to remove all the data on the front and
2643 		 * figure out what the remainder would be in the last
2644 		 * descriptor associated with the fragment.
2645 		 */
2646 		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2647 			int align_pad = -(skb_frag_off(stale)) &
2648 					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2649 
2650 			sum -= align_pad;
2651 			stale_size -= align_pad;
2652 
2653 			do {
2654 				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2655 				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2656 			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2657 		}
2658 
2659 		/* if sum is negative we failed to make sufficient progress */
2660 		if (sum < 0)
2661 			return true;
2662 
2663 		if (!nr_frags--)
2664 			break;
2665 
2666 		sum -= stale_size;
2667 	}
2668 
2669 	return false;
2670 }
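
/*
 * Illustrative sketch, not part of the driver: the core of the check above
 * is a sliding-window sum over the fragment sizes.  This simplified
 * stand-alone version ignores the oversized-fragment (>16K-1) adjustment and
 * the exact off-by-one budgeting at the boundaries; it only shows the idea
 * that every window of (max_bufs - 2) consecutive fragments must carry
 * roughly one gso_size worth of payload, otherwise the skb is linearized.
 */
static inline bool example_needs_linearize(const u32 *frag_sizes, u32 nr_frags,
					   u32 max_bufs, u32 gso_size)
{
	u32 win = max_bufs - 2;	/* header + previous frag consume 2 descs */
	u32 i, j, sum;

	if (nr_frags < max_bufs - 1)
		return false;

	for (i = 0; i + win <= nr_frags; i++) {
		sum = 0;
		for (j = 0; j < win; j++)
			sum += frag_sizes[i + j];

		if (sum < gso_size)	/* window can't cover one segment */
			return true;
	}

	return false;
}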
2671 
2672 /**
2673  * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2674  * @skb: send buffer
2675  * @max_bufs: maximum scatter gather buffers for single packet
2676  * @count: number of buffers this packet needs
2677  *
2678  * Make sure we don't exceed maximum scatter gather buffers for a single
2679  * packet. We have to do some special checking around the boundary (max_bufs-1)
2680  * if TSO is on since we need count the TSO header and payload separately.
2681  * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2682  * header, 1 for segment payload, and then 7 for the fragments.
2683  */
2684 static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2685 			       unsigned int count)
2686 {
2687 	if (likely(count < max_bufs))
2688 		return false;
2689 	if (skb_is_gso(skb))
2690 		return __idpf_chk_linearize(skb, max_bufs);
2691 
2692 	return count > max_bufs;
2693 }
2694 
2695 /**
2696  * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2697  * @txq: queue to put context descriptor on
2698  *
2699  * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
2700  * ring entry to reflect that this index is a context descriptor
2701  */
2702 static struct idpf_flex_tx_ctx_desc *
2703 idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2704 {
2705 	struct idpf_flex_tx_ctx_desc *desc;
2706 	int i = txq->next_to_use;
2707 
2708 	txq->tx_buf[i].type = LIBETH_SQE_CTX;
2709 
2710 	/* grab the next descriptor */
2711 	desc = &txq->flex_ctx[i];
2712 	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2713 
2714 	return desc;
2715 }
2716 
2717 /**
2718  * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2719  * @tx_q: queue to send buffer on
2720  * @skb: pointer to skb
2721  */
2722 netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2723 {
2724 	u64_stats_update_begin(&tx_q->stats_sync);
2725 	u64_stats_inc(&tx_q->q_stats.skb_drops);
2726 	u64_stats_update_end(&tx_q->stats_sync);
2727 
2728 	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2729 
2730 	dev_kfree_skb(skb);
2731 
2732 	return NETDEV_TX_OK;
2733 }
2734 
2735 /**
2736  * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2737  * @skb: send buffer
2738  * @tx_q: queue to send buffer on
2739  *
2740  * Returns NETDEV_TX_OK if sent, else an error code
2741  */
2742 static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2743 					struct idpf_tx_queue *tx_q)
2744 {
2745 	struct idpf_tx_splitq_params tx_params = { };
2746 	struct idpf_tx_buf *first;
2747 	unsigned int count;
2748 	int tso;
2749 
2750 	count = idpf_tx_desc_count_required(tx_q, skb);
2751 	if (unlikely(!count))
2752 		return idpf_tx_drop_skb(tx_q, skb);
2753 
2754 	tso = idpf_tso(skb, &tx_params.offload);
2755 	if (unlikely(tso < 0))
2756 		return idpf_tx_drop_skb(tx_q, skb);
2757 
2758 	/* Check for splitq specific TX resources */
2759 	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2760 	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2761 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2762 
2763 		return NETDEV_TX_BUSY;
2764 	}
2765 
2766 	if (tso) {
2767 		/* If tso is needed, set up context desc */
2768 		struct idpf_flex_tx_ctx_desc *ctx_desc =
2769 			idpf_tx_splitq_get_ctx_desc(tx_q);
2770 
2771 		ctx_desc->tso.qw1.cmd_dtype =
2772 				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2773 					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2774 		ctx_desc->tso.qw0.flex_tlen =
2775 				cpu_to_le32(tx_params.offload.tso_len &
2776 					    IDPF_TXD_FLEX_CTX_TLEN_M);
2777 		ctx_desc->tso.qw0.mss_rt =
2778 				cpu_to_le16(tx_params.offload.mss &
2779 					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2780 		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2781 
2782 		u64_stats_update_begin(&tx_q->stats_sync);
2783 		u64_stats_inc(&tx_q->q_stats.lso_pkts);
2784 		u64_stats_update_end(&tx_q->stats_sync);
2785 	}
2786 
2787 	/* record the location of the first descriptor for this packet */
2788 	first = &tx_q->tx_buf[tx_q->next_to_use];
2789 	first->skb = skb;
2790 
2791 	if (tso) {
2792 		first->packets = tx_params.offload.tso_segs;
2793 		first->bytes = skb->len +
2794 			((first->packets - 1) * tx_params.offload.tso_hdr_len);
2795 	} else {
2796 		first->packets = 1;
2797 		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
2798 	}
2799 
2800 	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2801 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2802 		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2803 		/* Set the RE bit to catch any packets that may have not been
2804 		 * stashed during RS completion cleaning. MIN_GAP is set to
2805 		 * MIN_RING size to ensure it will be set at least once each
2806 		 * time around the ring.
2807 		 */
2808 		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2809 			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2810 			tx_q->txq_grp->num_completions_pending++;
2811 		}
2812 
2813 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2814 			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2815 
2816 	} else {
2817 		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2818 		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2819 
2820 		if (skb->ip_summed == CHECKSUM_PARTIAL)
2821 			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2822 	}
2823 
2824 	idpf_tx_splitq_map(tx_q, &tx_params, first);
2825 
2826 	return NETDEV_TX_OK;
2827 }
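
/*
 * Illustrative sketch, not part of the driver: the byte count credited to
 * BQL for a TSO skb above is the on-wire total after segmentation, since
 * every segment beyond the first repeats the protocol headers:
 *
 *	wire_bytes = skb->len + (gso_segs - 1) * tso_hdr_len
 *
 * e.g. a 64000-byte skb with 54-byte headers split into 45 segments is
 * accounted as 64000 + 44 * 54 = 66376 bytes.
 */
static inline u32 example_tso_wire_bytes(u32 skb_len, u32 gso_segs,
					 u32 hdr_len)
{
	return skb_len + (gso_segs - 1) * hdr_len;
}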
2828 
2829 /**
2830  * idpf_tx_start - Selects the right Tx queue to send buffer
2831  * @skb: send buffer
2832  * @netdev: network interface device structure
2833  *
2834  * Returns NETDEV_TX_OK if sent, else an error code
2835  */
2836 netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
2837 {
2838 	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2839 	struct idpf_tx_queue *tx_q;
2840 
2841 	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2842 		dev_kfree_skb_any(skb);
2843 
2844 		return NETDEV_TX_OK;
2845 	}
2846 
2847 	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2848 
2849 	/* hardware can't handle really short frames, hardware padding works
2850 	 * beyond this point
2851 	 */
2852 	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2853 		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2854 
2855 		return NETDEV_TX_OK;
2856 	}
2857 
2858 	if (idpf_is_queue_model_split(vport->txq_model))
2859 		return idpf_tx_splitq_frame(skb, tx_q);
2860 	else
2861 		return idpf_tx_singleq_frame(skb, tx_q);
2862 }
2863 
2864 /**
2865  * idpf_rx_hash - set the hash value in the skb
2866  * @rxq: Rx descriptor ring packet is being transacted on
2867  * @skb: pointer to current skb being populated
2868  * @rx_desc: Receive descriptor
2869  * @decoded: Decoded Rx packet type related fields
2870  */
2871 static void
2872 idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
2873 	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2874 	     struct libeth_rx_pt decoded)
2875 {
2876 	u32 hash;
2877 
2878 	if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
2879 		return;
2880 
2881 	hash = le16_to_cpu(rx_desc->hash1) |
2882 	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2883 	       (rx_desc->hash3 << 24);
2884 
2885 	libeth_rx_pt_set_hash(skb, hash, decoded);
2886 }
2887 
2888 /**
2889  * idpf_rx_csum - Indicate in skb if checksum is good
2890  * @rxq: Rx descriptor ring packet is being transacted on
2891  * @skb: pointer to current skb being populated
2892  * @csum_bits: checksum fields extracted from the descriptor
2893  * @decoded: Decoded Rx packet type related fields
2894  *
2895  * skb->protocol must be set before this function is called
2896  */
2897 static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2898 			 struct idpf_rx_csum_decoded csum_bits,
2899 			 struct libeth_rx_pt decoded)
2900 {
2901 	bool ipv4, ipv6;
2902 
2903 	/* check if Rx checksum is enabled */
2904 	if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
2905 		return;
2906 
2907 	/* check if HW has decoded the packet and checksum */
2908 	if (unlikely(!csum_bits.l3l4p))
2909 		return;
2910 
2911 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2912 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2913 
2914 	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
2915 		goto checksum_fail;
2916 
2917 	if (unlikely(ipv6 && csum_bits.ipv6exadd))
2918 		return;
2919 
2920 	/* check for L4 errors and handle packets that were not able to be
2921 	 * checksummed
2922 	 */
2923 	if (unlikely(csum_bits.l4e))
2924 		goto checksum_fail;
2925 
2926 	if (csum_bits.raw_csum_inv ||
2927 	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
2928 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2929 		return;
2930 	}
2931 
2932 	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
2933 	skb->ip_summed = CHECKSUM_COMPLETE;
2934 
2935 	return;
2936 
2937 checksum_fail:
2938 	u64_stats_update_begin(&rxq->stats_sync);
2939 	u64_stats_inc(&rxq->q_stats.hw_csum_err);
2940 	u64_stats_update_end(&rxq->stats_sync);
2941 }
2942 
2943 /**
2944  * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2945  * @rx_desc: receive descriptor
2946  *
2947  * Return: parsed checksum status.
2948  **/
2949 static struct idpf_rx_csum_decoded
2950 idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2951 {
2952 	struct idpf_rx_csum_decoded csum = { };
2953 	u8 qword0, qword1;
2954 
2955 	qword0 = rx_desc->status_err0_qw0;
2956 	qword1 = rx_desc->status_err0_qw1;
2957 
2958 	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2959 			     qword1);
2960 	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2961 			      qword1);
2962 	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2963 			     qword1);
2964 	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2965 			       qword1);
2966 	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2967 				   qword0);
2968 	csum.raw_csum_inv =
2969 		le16_get_bits(rx_desc->ptype_err_fflags0,
2970 			      VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
2971 	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
2972 
2973 	return csum;
2974 }
2975 
2976 /**
2977  * idpf_rx_rsc - Set the RSC fields in the skb
2978  * @rxq : Rx descriptor ring packet is being transacted on
2979  * @skb : pointer to current skb being populated
2980  * @rx_desc: Receive descriptor
2981  * @decoded: Decoded Rx packet type related fields
2982  *
2983  * Return 0 on success and error code on failure
2984  *
2985  * Populate the skb fields with the total number of RSC segments, RSC payload
2986  * length and packet type.
2987  */
2988 static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2989 		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2990 		       struct libeth_rx_pt decoded)
2991 {
2992 	u16 rsc_segments, rsc_seg_len;
2993 	bool ipv4, ipv6;
2994 	int len;
2995 
2996 	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
2997 		     LIBETH_RX_PT_OUTER_L2))
2998 		return -EINVAL;
2999 
3000 	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
3001 	if (unlikely(!rsc_seg_len))
3002 		return -EINVAL;
3003 
3004 	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3005 	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3006 
3007 	if (unlikely(!(ipv4 ^ ipv6)))
3008 		return -EINVAL;
3009 
3010 	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
3011 	if (unlikely(rsc_segments == 1))
3012 		return 0;
3013 
3014 	NAPI_GRO_CB(skb)->count = rsc_segments;
3015 	skb_shinfo(skb)->gso_size = rsc_seg_len;
3016 
3017 	skb_reset_network_header(skb);
3018 	len = skb->len - skb_transport_offset(skb);
3019 
3020 	if (ipv4) {
3021 		struct iphdr *ipv4h = ip_hdr(skb);
3022 
3023 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3024 
3025 		/* Reset and set transport header offset in skb */
3026 		skb_set_transport_header(skb, sizeof(struct iphdr));
3027 
3028 		/* Compute the TCP pseudo-header checksum */
3029 		tcp_hdr(skb)->check =
3030 			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
3031 	} else {
3032 		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
3033 
3034 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3035 		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
3036 		tcp_hdr(skb)->check =
3037 			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
3038 	}
3039 
3040 	tcp_gro_complete(skb);
3041 
3042 	u64_stats_update_begin(&rxq->stats_sync);
3043 	u64_stats_inc(&rxq->q_stats.rsc_pkts);
3044 	u64_stats_update_end(&rxq->stats_sync);
3045 
3046 	return 0;
3047 }
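
/*
 * Illustrative sketch, not part of the driver: the RSC segment count set in
 * NAPI_GRO_CB() above is simply the coalesced payload length divided by the
 * reported segment length, rounded up.  For example, a 7300-byte payload
 * coalesced from 1448-byte segments reports 6 segments.
 */
static inline u32 example_rsc_segments(u32 payload_len, u32 seg_len)
{
	return (payload_len + seg_len - 1) / seg_len;	/* DIV_ROUND_UP */
}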
3048 
3049 /**
3050  * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3051  * @rxq: Rx descriptor ring packet is being transacted on
3052  * @skb: pointer to current skb being populated
3053  * @rx_desc: Receive descriptor
3054  *
3055  * This function checks the ring, descriptor, and packet information in
3056  * order to populate the hash, checksum, protocol, and
3057  * other fields within the skb.
3058  */
3059 static int
3060 idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3061 			   const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3062 {
3063 	struct idpf_rx_csum_decoded csum_bits;
3064 	struct libeth_rx_pt decoded;
3065 	u16 rx_ptype;
3066 
3067 	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3068 				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3069 	decoded = rxq->rx_ptype_lkup[rx_ptype];
3070 
3071 	/* process RSS/hash */
3072 	idpf_rx_hash(rxq, skb, rx_desc, decoded);
3073 
3074 	skb->protocol = eth_type_trans(skb, rxq->netdev);
3075 
3076 	if (le16_get_bits(rx_desc->hdrlen_flags,
3077 			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3078 		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3079 
3080 	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3081 	idpf_rx_csum(rxq, skb, csum_bits, decoded);
3082 
3083 	skb_record_rx_queue(skb, rxq->idx);
3084 
3085 	return 0;
3086 }
3087 
3088 /**
3089  * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3090  * @rx_buf: buffer containing page to add
3091  * @skb: sk_buff to place the data into
3092  * @size: packet length from rx_desc
3093  *
3094  * This function will add the data contained in rx_buf->page to the skb.
3095  * It will just attach the page as a frag to the skb.
3096  * The function will then update the page offset.
3097  */
3098 void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
3099 		      unsigned int size)
3100 {
3101 	u32 hr = rx_buf->page->pp->p.offset;
3102 
3103 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
3104 			rx_buf->offset + hr, size, rx_buf->truesize);
3105 }
3106 
3107 /**
3108  * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3109  * @hdr: Rx buffer for the headers
3110  * @buf: Rx buffer for the payload
3111  * @data_len: number of bytes received to the payload buffer
3112  *
3113  * When a header buffer overflow occurs or the HW was unable to parse the
3114  * packet type to perform header split, the whole frame gets placed to the
3115  * payload buffer. We can't build a valid skb around a payload buffer when
3116  * the header split is active since it doesn't reserve any head- or tailroom.
3117  * In that case, copy either the whole frame when it's short or just the
3118  * Ethernet header to the header buffer to be able to build an skb and adjust
3119  * the data offset in the payload buffer, IOW emulate the header split.
3120  *
3121  * Return: number of bytes copied to the header buffer.
3122  */
3123 static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3124 			     struct libeth_fqe *buf, u32 data_len)
3125 {
3126 	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
3127 	const void *src;
3128 	void *dst;
3129 
3130 	if (!libeth_rx_sync_for_cpu(buf, copy))
3131 		return 0;
3132 
3133 	dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
3134 	src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
3135 	memcpy(dst, src, LARGEST_ALIGN(copy));
3136 
3137 	buf->offset += copy;
3138 
3139 	return copy;
3140 }
3141 
3142 /**
3143  * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3144  * @buf: Rx buffer to pull data from
3145  * @size: the length of the packet
3146  *
3147  * This function allocates an skb. It then populates it with the page data from
3148  * the current receive descriptor, taking care to set up the skb correctly.
3149  */
3150 struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
3151 {
3152 	u32 hr = buf->page->pp->p.offset;
3153 	struct sk_buff *skb;
3154 	void *va;
3155 
3156 	va = page_address(buf->page) + buf->offset;
3157 	prefetch(va + hr);
3158 
3159 	skb = napi_build_skb(va, buf->truesize);
3160 	if (unlikely(!skb))
3161 		return NULL;
3162 
3163 	skb_mark_for_recycle(skb);
3164 
3165 	skb_reserve(skb, hr);
3166 	__skb_put(skb, size);
3167 
3168 	return skb;
3169 }
3170 
3171 /**
3172  * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3173  * status and error fields
3174  * @stat_err_field: field from descriptor to test bits in
3175  * @stat_err_bits: value to mask
3176  *
3177  */
3178 static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3179 					const u8 stat_err_bits)
3180 {
3181 	return !!(stat_err_field & stat_err_bits);
3182 }
3183 
3184 /**
3185  * idpf_rx_splitq_is_eop - process handling of EOP buffers
3186  * @rx_desc: Rx descriptor for current buffer
3187  *
3188  * If the buffer is an EOP buffer, this function exits returning true,
3189  * otherwise it returns false, indicating that this is in fact a non-EOP buffer.
3190  */
3191 static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3192 {
3193 	/* if we are the last buffer then there is nothing else to do */
3194 	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3195 						  IDPF_RXD_EOF_SPLITQ));
3196 }
3197 
3198 /**
3199  * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3200  * @rxq: Rx descriptor queue to retrieve receive buffer queue
3201  * @budget: Total limit on number of packets to process
3202  *
3203  * This function provides a "bounce buffer" approach to Rx interrupt
3204  * processing. The advantage to this is that on systems that have
3205  * expensive overhead for IOMMU access this provides a means of avoiding
3206  * it by maintaining the mapping of the page to the system.
3207  *
3208  * Returns amount of work completed
3209  */
3210 static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3211 {
3212 	int total_rx_bytes = 0, total_rx_pkts = 0;
3213 	struct idpf_buf_queue *rx_bufq = NULL;
3214 	struct sk_buff *skb = rxq->skb;
3215 	u16 ntc = rxq->next_to_clean;
3216 
3217 	/* Process Rx packets bounded by budget */
3218 	while (likely(total_rx_pkts < budget)) {
3219 		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3220 		struct libeth_fqe *hdr, *rx_buf = NULL;
3221 		struct idpf_sw_queue *refillq = NULL;
3222 		struct idpf_rxq_set *rxq_set = NULL;
3223 		unsigned int pkt_len = 0;
3224 		unsigned int hdr_len = 0;
3225 		u16 gen_id, buf_id = 0;
3226 		int bufq_id;
3227 		u8 rxdid;
3228 
3229 		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3230 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3231 
3232 		/* This memory barrier is needed to keep us from reading
3233 		 * any other fields out of the rx_desc
3234 		 */
3235 		dma_rmb();
3236 
3237 		/* if the descriptor isn't done, no work yet to do */
3238 		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3239 				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3240 
3241 		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3242 			break;
3243 
3244 		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3245 				  rx_desc->rxdid_ucast);
3246 		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3247 			IDPF_RX_BUMP_NTC(rxq, ntc);
3248 			u64_stats_update_begin(&rxq->stats_sync);
3249 			u64_stats_inc(&rxq->q_stats.bad_descs);
3250 			u64_stats_update_end(&rxq->stats_sync);
3251 			continue;
3252 		}
3253 
3254 		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3255 					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3256 
3257 		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3258 					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3259 
3260 		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3261 		refillq = rxq_set->refillq[bufq_id];
3262 
3263 		/* retrieve buffer from the rxq */
3264 		rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3265 
3266 		buf_id = le16_to_cpu(rx_desc->buf_id);
3267 
3268 		rx_buf = &rx_bufq->buf[buf_id];
3269 
3270 		if (!rx_bufq->hdr_pp)
3271 			goto payload;
3272 
3273 #define __HBO_BIT	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3274 #define __HDR_LEN_MASK	VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3275 		if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3276 			/* If a header buffer overflow occurs, i.e. the header is
3277 			 * too large to fit in the header split buffer, HW will
3278 			 * put the entire packet, including headers, in the
3279 			 * data/payload buffer.
3280 			 */
3281 			hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3282 						__HDR_LEN_MASK);
3283 #undef __HDR_LEN_MASK
3284 #undef __HBO_BIT
3285 
3286 		hdr = &rx_bufq->hdr_buf[buf_id];
3287 
3288 		if (unlikely(!hdr_len && !skb)) {
3289 			hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3290 			pkt_len -= hdr_len;
3291 
3292 			u64_stats_update_begin(&rxq->stats_sync);
3293 			u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3294 			u64_stats_update_end(&rxq->stats_sync);
3295 		}
3296 
3297 		if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
3298 			skb = idpf_rx_build_skb(hdr, hdr_len);
3299 			if (!skb)
3300 				break;
3301 
3302 			u64_stats_update_begin(&rxq->stats_sync);
3303 			u64_stats_inc(&rxq->q_stats.hsplit_pkts);
3304 			u64_stats_update_end(&rxq->stats_sync);
3305 		}
3306 
3307 		hdr->page = NULL;
3308 
3309 payload:
3310 		if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
3311 			goto skip_data;
3312 
3313 		if (skb)
3314 			idpf_rx_add_frag(rx_buf, skb, pkt_len);
3315 		else
3316 			skb = idpf_rx_build_skb(rx_buf, pkt_len);
3317 
3318 		/* exit if we failed to retrieve a buffer */
3319 		if (!skb)
3320 			break;
3321 
3322 skip_data:
3323 		rx_buf->page = NULL;
3324 
3325 		idpf_rx_post_buf_refill(refillq, buf_id);
3326 		IDPF_RX_BUMP_NTC(rxq, ntc);
3327 
3328 		/* skip if it is non EOP desc */
3329 		if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
3330 			continue;
3331 
3332 		/* pad skb if needed (to make valid ethernet frame) */
3333 		if (eth_skb_pad(skb)) {
3334 			skb = NULL;
3335 			continue;
3336 		}
3337 
3338 		/* probably a little skewed due to removing CRC */
3339 		total_rx_bytes += skb->len;
3340 
3341 		/* protocol */
3342 		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3343 			dev_kfree_skb_any(skb);
3344 			skb = NULL;
3345 			continue;
3346 		}
3347 
3348 		/* send completed skb up the stack */
3349 		napi_gro_receive(rxq->napi, skb);
3350 		skb = NULL;
3351 
3352 		/* update budget accounting */
3353 		total_rx_pkts++;
3354 	}
3355 
3356 	rxq->next_to_clean = ntc;
3357 
3358 	rxq->skb = skb;
3359 	u64_stats_update_begin(&rxq->stats_sync);
3360 	u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
3361 	u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
3362 	u64_stats_update_end(&rxq->stats_sync);
3363 
3364 	/* guarantee a trip back through this routine if there was a failure */
3365 	return total_rx_pkts;
3366 }
3367 
3368 /**
3369  * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3370  * @bufq: Pointer to the buffer queue
3371  * @buf_id: buffer ID
3372  * @buf_desc: Buffer queue descriptor
3373  *
3374  * Return 0 on success and negative on failure.
3375  */
3376 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3377 				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3378 {
3379 	struct libeth_fq_fp fq = {
3380 		.pp		= bufq->pp,
3381 		.fqes		= bufq->buf,
3382 		.truesize	= bufq->truesize,
3383 		.count		= bufq->desc_count,
3384 	};
3385 	dma_addr_t addr;
3386 
3387 	addr = libeth_rx_alloc(&fq, buf_id);
3388 	if (addr == DMA_MAPPING_ERROR)
3389 		return -ENOMEM;
3390 
3391 	buf_desc->pkt_addr = cpu_to_le64(addr);
3392 	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3393 
3394 	if (!idpf_queue_has(HSPLIT_EN, bufq))
3395 		return 0;
3396 
3397 	fq.pp = bufq->hdr_pp;
3398 	fq.fqes = bufq->hdr_buf;
3399 	fq.truesize = bufq->hdr_truesize;
3400 
3401 	addr = libeth_rx_alloc(&fq, buf_id);
3402 	if (addr == DMA_MAPPING_ERROR)
3403 		return -ENOMEM;
3404 
3405 	buf_desc->hdr_addr = cpu_to_le64(addr);
3406 
3407 	return 0;
3408 }
3409 
3410 /**
3411  * idpf_rx_clean_refillq - Clean refill queue buffers
3412  * @bufq: buffer queue to post buffers back to
3413  * @refillq: refill queue to clean
3414  *
3415  * This function takes care of the buffer refill management
3416  */
3417 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3418 				  struct idpf_sw_queue *refillq)
3419 {
3420 	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3421 	u16 bufq_nta = bufq->next_to_alloc;
3422 	u16 ntc = refillq->next_to_clean;
3423 	int cleaned = 0;
3424 
3425 	buf_desc = &bufq->split_buf[bufq_nta];
3426 
3427 	/* make sure we stop at ring wrap in the unlikely case ring is full */
3428 	while (likely(cleaned < refillq->desc_count)) {
3429 		u32 buf_id, refill_desc = refillq->ring[ntc];
3430 		bool failure;
3431 
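		/* stop once the entry's GEN bit no longer matches the SW-tracked
		 * generation: this slot has not been rewritten by the Rx clean
		 * path since the last pass, so there is nothing more to post
		 */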
3432 		if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3433 		    !!(refill_desc & IDPF_RX_BI_GEN_M))
3434 			break;
3435 
3436 		buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3437 		failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3438 		if (failure)
3439 			break;
3440 
3441 		if (unlikely(++ntc == refillq->desc_count)) {
3442 			idpf_queue_change(RFL_GEN_CHK, refillq);
3443 			ntc = 0;
3444 		}
3445 
3446 		if (unlikely(++bufq_nta == bufq->desc_count)) {
3447 			buf_desc = &bufq->split_buf[0];
3448 			bufq_nta = 0;
3449 		} else {
3450 			buf_desc++;
3451 		}
3452 
3453 		cleaned++;
3454 	}
3455 
3456 	if (!cleaned)
3457 		return;
3458 
3459 	/* We want to limit how many transactions on the bus we trigger with
3460 	 * tail writes so we only do it in strides. It's also important we
3461 	 * align the write to a multiple of 8 as required by HW.
3462 	 */
3463 	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3464 	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3465 		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3466 						       IDPF_RX_BUF_POST_STRIDE));
3467 
3468 	/* update queue indices to reflect the buffers we have posted */
3469 	refillq->next_to_clean = ntc;
3470 	bufq->next_to_alloc = bufq_nta;
3471 }
3472 
3473 /**
3474  * idpf_rx_clean_refillq_all - Clean all refill queues
3475  * @bufq: buffer queue with refill queues
3476  * @nid: ID of the closest NUMA node with memory
3477  *
3478  * Iterates through all refill queues assigned to the buffer queue for this
3479  * vector and posts the buffers they describe back to the buffer queue's
3480  * descriptor ring.
3481  */
idpf_rx_clean_refillq_all(struct idpf_buf_queue * bufq,int nid)3482 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3483 {
3484 	struct idpf_bufq_set *bufq_set;
3485 	int i;
3486 
3487 	page_pool_nid_changed(bufq->pp, nid);
3488 	if (bufq->hdr_pp)
3489 		page_pool_nid_changed(bufq->hdr_pp, nid);
3490 
3491 	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3492 	for (i = 0; i < bufq_set->num_refillqs; i++)
3493 		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3494 }
3495 
3496 /**
3497  * idpf_vport_intr_clean_queues - MSI-X mode interrupt handler
3498  * @irq: interrupt number
3499  * @data: pointer to a q_vector
3500  *
3501  */
idpf_vport_intr_clean_queues(int __always_unused irq,void * data)3502 static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3503 						void *data)
3504 {
3505 	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3506 
3507 	q_vector->total_events++;
3508 	napi_schedule(&q_vector->napi);
3509 
3510 	return IRQ_HANDLED;
3511 }
3512 
3513 /**
3514  * idpf_vport_intr_napi_del_all - Unregister NAPI for all q_vectors in the vport
3515  * @vport: virtual port structure
3516  *
3517  */
idpf_vport_intr_napi_del_all(struct idpf_vport * vport)3518 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3519 {
3520 	u16 v_idx;
3521 
3522 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3523 		netif_napi_del(&vport->q_vectors[v_idx].napi);
3524 }
3525 
3526 /**
3527  * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3528  * @vport: main vport structure
3529  */
idpf_vport_intr_napi_dis_all(struct idpf_vport * vport)3530 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3531 {
3532 	int v_idx;
3533 
3534 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3535 		napi_disable(&vport->q_vectors[v_idx].napi);
3536 }
3537 
3538 /**
3539  * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3540  * @vport: virtual port
3541  *
3542  * Free the memory allocated for interrupt vectors associated with a vport
3543  */
idpf_vport_intr_rel(struct idpf_vport * vport)3544 void idpf_vport_intr_rel(struct idpf_vport *vport)
3545 {
3546 	for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3547 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3548 
3549 		kfree(q_vector->complq);
3550 		q_vector->complq = NULL;
3551 		kfree(q_vector->bufq);
3552 		q_vector->bufq = NULL;
3553 		kfree(q_vector->tx);
3554 		q_vector->tx = NULL;
3555 		kfree(q_vector->rx);
3556 		q_vector->rx = NULL;
3557 
3558 		free_cpumask_var(q_vector->affinity_mask);
3559 	}
3560 
3561 	kfree(vport->q_vectors);
3562 	vport->q_vectors = NULL;
3563 }
3564 
3565 /**
3566  * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3567  * @vport: main vport structure
3568  */
idpf_vport_intr_rel_irq(struct idpf_vport * vport)3569 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3570 {
3571 	struct idpf_adapter *adapter = vport->adapter;
3572 	int vector;
3573 
3574 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3575 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3576 		int irq_num, vidx;
3577 
3578 		/* free only the irqs that were actually requested */
3579 		if (!q_vector)
3580 			continue;
3581 
3582 		vidx = vport->q_vector_idxs[vector];
3583 		irq_num = adapter->msix_entries[vidx].vector;
3584 
3585 		/* clear the affinity_mask in the IRQ descriptor */
3586 		irq_set_affinity_hint(irq_num, NULL);
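		/* free_irq() returns the devname string that was passed to
		 * request_irq(); it was allocated with kasprintf(), so free it
		 */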
3587 		kfree(free_irq(irq_num, q_vector));
3588 	}
3589 }
3590 
3591 /**
3592  * idpf_vport_intr_dis_irq_all - Disable all interrupts
3593  * @vport: main vport structure
3594  */
idpf_vport_intr_dis_irq_all(struct idpf_vport * vport)3595 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3596 {
3597 	struct idpf_q_vector *q_vector = vport->q_vectors;
3598 	int q_idx;
3599 
3600 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3601 		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3602 }
3603 
3604 /**
3605  * idpf_vport_intr_buildreg_itr - Build the value to write to the ITR dynamic control register
3606  * @q_vector: pointer to q_vector
3607  */
idpf_vport_intr_buildreg_itr(struct idpf_q_vector * q_vector)3608 static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
3609 {
3610 	u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
3611 	int type = IDPF_NO_ITR_UPDATE_IDX;
3612 	u16 itr = 0;
3613 
3614 	if (q_vector->wb_on_itr) {
3615 		/*
3616 		 * Trigger a software interrupt when exiting wb_on_itr, to make
3617 		 * sure we catch any pending write backs that might have been
3618 		 * missed due to interrupt state transition.
3619 		 */
3620 		itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
3621 			   q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
3622 		type = IDPF_SW_ITR_UPDATE_IDX;
3623 		itr = IDPF_ITR_20K;
3624 	}
3625 
3626 	itr &= IDPF_ITR_MASK;
3627 	/* Don't clear PBA because that can cause lost interrupts that
3628 	 * came in while we were cleaning/polling
3629 	 */
3630 	itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3631 		   (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3632 
3633 	return itr_val;
3634 }
3635 
3636 /**
3637  * idpf_update_dim_sample - Update dim sample with packets and bytes
3638  * @q_vector: the vector associated with the interrupt
3639  * @dim_sample: dim sample to update
3640  * @dim: dim instance structure
3641  * @packets: total packets
3642  * @bytes: total bytes
3643  *
3644  * Update the dim sample with the packets and bytes which are passed to this
3645  * function. Set the dim state appropriately if the dim settings gets stale.
3646  */
idpf_update_dim_sample(struct idpf_q_vector * q_vector,struct dim_sample * dim_sample,struct dim * dim,u64 packets,u64 bytes)3647 static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3648 				   struct dim_sample *dim_sample,
3649 				   struct dim *dim, u64 packets, u64 bytes)
3650 {
3651 	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3652 	dim_sample->comp_ctr = 0;
3653 
3654 	/* if dim settings get stale, like when not updated for 1 second or
3655 	 * longer, force it to start again. This addresses the frequent case
3656 	 * of an idle queue being switched to by the scheduler.
3657 	 */
3658 	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
3659 		dim->state = DIM_START_MEASURE;
3660 }
3661 
3662 /**
3663  * idpf_net_dim - Update net DIM algorithm
3664  * @q_vector: the vector associated with the interrupt
3665  *
3666  * Create a DIM sample and notify net_dim() so that it can possibly decide
3667  * a new ITR value based on incoming packets, bytes, and interrupts.
3668  *
3669  * This function is a no-op if the queue is not configured to dynamic ITR.
3670  */
idpf_net_dim(struct idpf_q_vector * q_vector)3671 static void idpf_net_dim(struct idpf_q_vector *q_vector)
3672 {
3673 	struct dim_sample dim_sample = { };
3674 	u64 packets, bytes;
3675 	u32 i;
3676 
3677 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3678 		goto check_rx_itr;
3679 
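	/* sum packet/byte counters across the vector's Tx queues, retrying if
	 * a writer updates the stats mid-read
	 */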
3680 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3681 		struct idpf_tx_queue *txq = q_vector->tx[i];
3682 		unsigned int start;
3683 
3684 		do {
3685 			start = u64_stats_fetch_begin(&txq->stats_sync);
3686 			packets += u64_stats_read(&txq->q_stats.packets);
3687 			bytes += u64_stats_read(&txq->q_stats.bytes);
3688 		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3689 	}
3690 
3691 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3692 			       packets, bytes);
3693 	net_dim(&q_vector->tx_dim, &dim_sample);
3694 
3695 check_rx_itr:
3696 	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3697 		return;
3698 
3699 	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3700 		struct idpf_rx_queue *rxq = q_vector->rx[i];
3701 		unsigned int start;
3702 
3703 		do {
3704 			start = u64_stats_fetch_begin(&rxq->stats_sync);
3705 			packets += u64_stats_read(&rxq->q_stats.packets);
3706 			bytes += u64_stats_read(&rxq->q_stats.bytes);
3707 		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3708 	}
3709 
3710 	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3711 			       packets, bytes);
3712 	net_dim(&q_vector->rx_dim, &dim_sample);
3713 }
3714 
3715 /**
3716  * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3717  * @q_vector: q_vector for which itr is being updated and interrupt enabled
3718  *
3719  * Update the net_dim() algorithm and re-enable the interrupt associated with
3720  * this vector.
3721  */
idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector * q_vector)3722 void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3723 {
3724 	u32 intval;
3725 
3726 	/* net_dim() updates ITR out-of-band using a work item */
3727 	idpf_net_dim(q_vector);
3728 
3729 	intval = idpf_vport_intr_buildreg_itr(q_vector);
3730 	q_vector->wb_on_itr = false;
3731 
3732 	writel(intval, q_vector->intr_reg.dyn_ctl);
3733 }
3734 
3735 /**
3736  * idpf_vport_intr_req_irq - Request IRQs from the OS for the vport's MSI-X vectors
3737  * @vport: main vport structure
3738  */
idpf_vport_intr_req_irq(struct idpf_vport * vport)3739 static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
3740 {
3741 	struct idpf_adapter *adapter = vport->adapter;
3742 	const char *drv_name, *if_name, *vec_name;
3743 	int vector, err, irq_num, vidx;
3744 
3745 	drv_name = dev_driver_string(&adapter->pdev->dev);
3746 	if_name = netdev_name(vport->netdev);
3747 
3748 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3749 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3750 		char *name;
3751 
3752 		vidx = vport->q_vector_idxs[vector];
3753 		irq_num = adapter->msix_entries[vidx].vector;
3754 
3755 		if (q_vector->num_rxq && q_vector->num_txq)
3756 			vec_name = "TxRx";
3757 		else if (q_vector->num_rxq)
3758 			vec_name = "Rx";
3759 		else if (q_vector->num_txq)
3760 			vec_name = "Tx";
3761 		else
3762 			continue;
3763 
3764 		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
3765 				 vec_name, vidx);
3766 
3767 		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
3768 				  name, q_vector);
3769 		if (err) {
3770 			netdev_err(vport->netdev,
3771 				   "Request_irq failed, error: %d\n", err);
3772 			goto free_q_irqs;
3773 		}
3774 		/* assign the mask for this irq */
3775 		irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
3776 	}
3777 
3778 	return 0;
3779 
3780 free_q_irqs:
3781 	while (--vector >= 0) {
3782 		vidx = vport->q_vector_idxs[vector];
3783 		irq_num = adapter->msix_entries[vidx].vector;
3784 		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
3785 	}
3786 
3787 	return err;
3788 }
3789 
3790 /**
3791  * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3792  * @q_vector: q_vector structure
3793  * @itr: Interrupt throttling rate
3794  * @tx: Tx or Rx ITR
3795  */
idpf_vport_intr_write_itr(struct idpf_q_vector * q_vector,u16 itr,bool tx)3796 void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3797 {
3798 	struct idpf_intr_reg *intr_reg;
3799 
3800 	if (tx && !q_vector->tx)
3801 		return;
3802 	else if (!tx && !q_vector->rx)
3803 		return;
3804 
3805 	intr_reg = &q_vector->intr_reg;
3806 	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3807 	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3808 }
3809 
3810 /**
3811  * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3812  * @vport: main vport structure
3813  */
idpf_vport_intr_ena_irq_all(struct idpf_vport * vport)3814 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3815 {
3816 	bool dynamic;
3817 	int q_idx;
3818 	u16 itr;
3819 
3820 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3821 		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3822 
3823 		/* Set the initial ITR values */
3824 		if (qv->num_txq) {
3825 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3826 			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3827 			idpf_vport_intr_write_itr(qv, dynamic ?
3828 						  itr : qv->tx_itr_value,
3829 						  true);
3830 		}
3831 
3832 		if (qv->num_rxq) {
3833 			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3834 			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3835 			idpf_vport_intr_write_itr(qv, dynamic ?
3836 						  itr : qv->rx_itr_value,
3837 						  false);
3838 		}
3839 
3840 		if (qv->num_txq || qv->num_rxq)
3841 			idpf_vport_intr_update_itr_ena_irq(qv);
3842 	}
3843 }
3844 
3845 /**
3846  * idpf_vport_intr_deinit - Release all vector associations for the vport
3847  * @vport: main vport structure
3848  */
idpf_vport_intr_deinit(struct idpf_vport * vport)3849 void idpf_vport_intr_deinit(struct idpf_vport *vport)
3850 {
3851 	idpf_vport_intr_dis_irq_all(vport);
3852 	idpf_vport_intr_napi_dis_all(vport);
3853 	idpf_vport_intr_napi_del_all(vport);
3854 	idpf_vport_intr_rel_irq(vport);
3855 }
3856 
3857 /**
3858  * idpf_tx_dim_work - Deferred work to apply the Tx ITR value chosen by DIM
3859  * @work: work queue structure
3860  */
idpf_tx_dim_work(struct work_struct * work)3861 static void idpf_tx_dim_work(struct work_struct *work)
3862 {
3863 	struct idpf_q_vector *q_vector;
3864 	struct idpf_vport *vport;
3865 	struct dim *dim;
3866 	u16 itr;
3867 
3868 	dim = container_of(work, struct dim, work);
3869 	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3870 	vport = q_vector->vport;
3871 
3872 	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3873 		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3874 
3875 	/* look up the values in our local table */
3876 	itr = vport->tx_itr_profile[dim->profile_ix];
3877 
3878 	idpf_vport_intr_write_itr(q_vector, itr, true);
3879 
3880 	dim->state = DIM_START_MEASURE;
3881 }
3882 
3883 /**
3884  * idpf_rx_dim_work - Deferred work to apply the Rx ITR value chosen by DIM
3885  * @work: work queue structure
3886  */
idpf_rx_dim_work(struct work_struct * work)3887 static void idpf_rx_dim_work(struct work_struct *work)
3888 {
3889 	struct idpf_q_vector *q_vector;
3890 	struct idpf_vport *vport;
3891 	struct dim *dim;
3892 	u16 itr;
3893 
3894 	dim = container_of(work, struct dim, work);
3895 	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3896 	vport = q_vector->vport;
3897 
3898 	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3899 		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3900 
3901 	/* look up the values in our local table */
3902 	itr = vport->rx_itr_profile[dim->profile_ix];
3903 
3904 	idpf_vport_intr_write_itr(q_vector, itr, false);
3905 
3906 	dim->state = DIM_START_MEASURE;
3907 }
3908 
3909 /**
3910  * idpf_init_dim - Set up dynamic interrupt moderation
3911  * @qv: q_vector structure
3912  */
idpf_init_dim(struct idpf_q_vector * qv)3913 static void idpf_init_dim(struct idpf_q_vector *qv)
3914 {
3915 	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3916 	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3917 	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3918 
3919 	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3920 	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3921 	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3922 }
3923 
3924 /**
3925  * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3926  * @vport: main vport structure
3927  */
idpf_vport_intr_napi_ena_all(struct idpf_vport * vport)3928 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3929 {
3930 	int q_idx;
3931 
3932 	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3933 		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3934 
3935 		idpf_init_dim(q_vector);
3936 		napi_enable(&q_vector->napi);
3937 	}
3938 }
3939 
3940 /**
3941  * idpf_tx_splitq_clean_all - Clean completion queues
3942  * @q_vec: queue vector
3943  * @budget: Used to determine if we are in netpoll
3944  * @cleaned: returns number of packets cleaned
3945  *
3946  * Returns false if clean is not complete else returns true
3947  */
idpf_tx_splitq_clean_all(struct idpf_q_vector * q_vec,int budget,int * cleaned)3948 static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
3949 				     int budget, int *cleaned)
3950 {
3951 	u16 num_complq = q_vec->num_complq;
3952 	bool clean_complete = true;
3953 	int i, budget_per_q;
3954 
3955 	if (unlikely(!num_complq))
3956 		return true;
3957 
3958 	budget_per_q = DIV_ROUND_UP(budget, num_complq);
3959 
3960 	for (i = 0; i < num_complq; i++)
3961 		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
3962 						       budget_per_q, cleaned);
3963 
3964 	return clean_complete;
3965 }
3966 
3967 /**
3968  * idpf_rx_splitq_clean_all - Clean all Rx queues for the vector
3969  * @q_vec: queue vector
3970  * @budget: Used to determine if we are in netpoll
3971  * @cleaned: returns number of packets cleaned
3972  *
3973  * Returns false if clean is not complete else returns true
3974  */
idpf_rx_splitq_clean_all(struct idpf_q_vector * q_vec,int budget,int * cleaned)3975 static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
3976 				     int *cleaned)
3977 {
3978 	u16 num_rxq = q_vec->num_rxq;
3979 	bool clean_complete = true;
3980 	int pkts_cleaned = 0;
3981 	int i, budget_per_q;
3982 	int nid;
3983 
3984 	/* We attempt to distribute budget to each Rx queue fairly, but don't
3985 	 * allow the budget to go below 1 because that would exit polling early.
3986 	 */
3987 	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
3988 	for (i = 0; i < num_rxq; i++) {
3989 		struct idpf_rx_queue *rxq = q_vec->rx[i];
3990 		int pkts_cleaned_per_q;
3991 
3992 		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
3993 		/* if we clean as many as budgeted, we must not be done */
3994 		if (pkts_cleaned_per_q >= budget_per_q)
3995 			clean_complete = false;
3996 		pkts_cleaned += pkts_cleaned_per_q;
3997 	}
3998 	*cleaned = pkts_cleaned;
3999 
4000 	nid = numa_mem_id();
4001 
4002 	for (i = 0; i < q_vec->num_bufq; i++)
4003 		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
4004 
4005 	return clean_complete;
4006 }
4007 
4008 /**
4009  * idpf_vport_splitq_napi_poll - NAPI handler
4010  * @napi: NAPI instance embedded in the q_vector
4011  * @budget: budget provided by stack
4012  */
idpf_vport_splitq_napi_poll(struct napi_struct * napi,int budget)4013 static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4014 {
4015 	struct idpf_q_vector *q_vector =
4016 				container_of(napi, struct idpf_q_vector, napi);
4017 	bool clean_complete;
4018 	int work_done = 0;
4019 
4020 	/* Handle case where we are called by netpoll with a budget of 0 */
4021 	if (unlikely(!budget)) {
4022 		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4023 
4024 		return 0;
4025 	}
4026 
4027 	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
4028 	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4029 
4030 	/* If work not completed, return budget and polling will return */
4031 	if (!clean_complete) {
4032 		idpf_vport_intr_set_wb_on_itr(q_vector);
4033 		return budget;
4034 	}
4035 
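	/* the poll is complete; report strictly less than the full budget so
	 * napi_complete_done() is consistent with not having exhausted it
	 */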
4036 	work_done = min_t(int, work_done, budget - 1);
4037 
4038 	/* Exit the polling mode, but don't re-enable interrupts if stack might
4039 	 * poll us due to busy-polling
4040 	 */
4041 	if (likely(napi_complete_done(napi, work_done)))
4042 		idpf_vport_intr_update_itr_ena_irq(q_vector);
4043 	else
4044 		idpf_vport_intr_set_wb_on_itr(q_vector);
4045 
4046 	/* Switch to poll mode in the tear-down path after sending disable
4047 	 * queues virtchnl message, as the interrupts will be disabled after
4048 	 * that
4049 	 */
4050 	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
4051 							 q_vector->tx[0])))
4052 		return budget;
4053 	else
4054 		return work_done;
4055 }
4056 
4057 /**
4058  * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4059  * @vport: virtual port
4060  *
4061  * Mapping for vectors to queues
4062  */
idpf_vport_intr_map_vector_to_qs(struct idpf_vport * vport)4063 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
4064 {
4065 	bool split = idpf_is_queue_model_split(vport->rxq_model);
4066 	u16 num_txq_grp = vport->num_txq_grp;
4067 	struct idpf_rxq_group *rx_qgrp;
4068 	struct idpf_txq_group *tx_qgrp;
4069 	u32 i, qv_idx, q_index;
4070 
4071 	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
4072 		u16 num_rxq;
4073 
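		/* with more queue groups than vectors, wrap around and share
		 * vectors round-robin
		 */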
4074 		if (qv_idx >= vport->num_q_vectors)
4075 			qv_idx = 0;
4076 
4077 		rx_qgrp = &vport->rxq_grps[i];
4078 		if (split)
4079 			num_rxq = rx_qgrp->splitq.num_rxq_sets;
4080 		else
4081 			num_rxq = rx_qgrp->singleq.num_rxq;
4082 
4083 		for (u32 j = 0; j < num_rxq; j++) {
4084 			struct idpf_rx_queue *q;
4085 
4086 			if (split)
4087 				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4088 			else
4089 				q = rx_qgrp->singleq.rxqs[j];
4090 			q->q_vector = &vport->q_vectors[qv_idx];
4091 			q_index = q->q_vector->num_rxq;
4092 			q->q_vector->rx[q_index] = q;
4093 			q->q_vector->num_rxq++;
4094 
4095 			if (split)
4096 				q->napi = &q->q_vector->napi;
4097 		}
4098 
4099 		if (split) {
4100 			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
4101 				struct idpf_buf_queue *bufq;
4102 
4103 				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4104 				bufq->q_vector = &vport->q_vectors[qv_idx];
4105 				q_index = bufq->q_vector->num_bufq;
4106 				bufq->q_vector->bufq[q_index] = bufq;
4107 				bufq->q_vector->num_bufq++;
4108 			}
4109 		}
4110 
4111 		qv_idx++;
4112 	}
4113 
4114 	split = idpf_is_queue_model_split(vport->txq_model);
4115 
4116 	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4117 		u16 num_txq;
4118 
4119 		if (qv_idx >= vport->num_q_vectors)
4120 			qv_idx = 0;
4121 
4122 		tx_qgrp = &vport->txq_grps[i];
4123 		num_txq = tx_qgrp->num_txq;
4124 
4125 		for (u32 j = 0; j < num_txq; j++) {
4126 			struct idpf_tx_queue *q;
4127 
4128 			q = tx_qgrp->txqs[j];
4129 			q->q_vector = &vport->q_vectors[qv_idx];
4130 			q->q_vector->tx[q->q_vector->num_txq++] = q;
4131 		}
4132 
4133 		if (split) {
4134 			struct idpf_compl_queue *q = tx_qgrp->complq;
4135 
4136 			q->q_vector = &vport->q_vectors[qv_idx];
4137 			q->q_vector->complq[q->q_vector->num_complq++] = q;
4138 		}
4139 
4140 		qv_idx++;
4141 	}
4142 }
4143 
4144 /**
4145  * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4146  * @vport: virtual port
4147  *
4148  * Initialize vector indexes with values returened over mailbox
4149  */
idpf_vport_intr_init_vec_idx(struct idpf_vport * vport)4150 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4151 {
4152 	struct idpf_adapter *adapter = vport->adapter;
4153 	struct virtchnl2_alloc_vectors *ac;
4154 	u16 *vecids, total_vecs;
4155 	int i;
4156 
4157 	ac = adapter->req_vec_chunks;
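	/* no vector chunks were returned over the mailbox: fall back to using
	 * the q_vector indexes as the vector IDs directly
	 */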
4158 	if (!ac) {
4159 		for (i = 0; i < vport->num_q_vectors; i++)
4160 			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4161 
4162 		return 0;
4163 	}
4164 
4165 	total_vecs = idpf_get_reserved_vecs(adapter);
4166 	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4167 	if (!vecids)
4168 		return -ENOMEM;
4169 
4170 	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4171 
4172 	for (i = 0; i < vport->num_q_vectors; i++)
4173 		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4174 
4175 	kfree(vecids);
4176 
4177 	return 0;
4178 }
4179 
4180 /**
4181  * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4182  * @vport: virtual port structure
4183  */
idpf_vport_intr_napi_add_all(struct idpf_vport * vport)4184 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4185 {
4186 	int (*napi_poll)(struct napi_struct *napi, int budget);
4187 	u16 v_idx;
4188 
4189 	if (idpf_is_queue_model_split(vport->txq_model))
4190 		napi_poll = idpf_vport_splitq_napi_poll;
4191 	else
4192 		napi_poll = idpf_vport_singleq_napi_poll;
4193 
4194 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4195 		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4196 
4197 		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
4198 
4199 		/* only set affinity_mask if the CPU is online */
4200 		if (cpu_online(v_idx))
4201 			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
4202 	}
4203 }
4204 
4205 /**
4206  * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4207  * @vport: virtual port
4208  *
4209  * We allocate one q_vector per queue interrupt. If allocation fails we
4210  * return -ENOMEM.
4211  */
idpf_vport_intr_alloc(struct idpf_vport * vport)4212 int idpf_vport_intr_alloc(struct idpf_vport *vport)
4213 {
4214 	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4215 	struct idpf_q_vector *q_vector;
4216 	u32 complqs_per_vector, v_idx;
4217 
4218 	vport->q_vectors = kcalloc(vport->num_q_vectors,
4219 				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4220 	if (!vport->q_vectors)
4221 		return -ENOMEM;
4222 
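	/* queue groups are spread across vectors round-robin, so size each
	 * vector's queue arrays for the rounded-up worst case
	 */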
4223 	txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4224 				       vport->num_q_vectors);
4225 	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
4226 				       vport->num_q_vectors);
4227 	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4228 			   DIV_ROUND_UP(vport->num_rxq_grp,
4229 					vport->num_q_vectors);
4230 	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4231 					  vport->num_q_vectors);
4232 
4233 	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4234 		q_vector = &vport->q_vectors[v_idx];
4235 		q_vector->vport = vport;
4236 
4237 		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
4238 		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
4239 		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4240 
4241 		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
4242 		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
4243 		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4244 
4245 		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
4246 			goto error;
4247 
4248 		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
4249 				       GFP_KERNEL);
4250 		if (!q_vector->tx)
4251 			goto error;
4252 
4253 		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
4254 				       GFP_KERNEL);
4255 		if (!q_vector->rx)
4256 			goto error;
4257 
4258 		if (!idpf_is_queue_model_split(vport->rxq_model))
4259 			continue;
4260 
4261 		q_vector->bufq = kcalloc(bufqs_per_vector,
4262 					 sizeof(*q_vector->bufq),
4263 					 GFP_KERNEL);
4264 		if (!q_vector->bufq)
4265 			goto error;
4266 
4267 		q_vector->complq = kcalloc(complqs_per_vector,
4268 					   sizeof(*q_vector->complq),
4269 					   GFP_KERNEL);
4270 		if (!q_vector->complq)
4271 			goto error;
4272 	}
4273 
4274 	return 0;
4275 
4276 error:
4277 	idpf_vport_intr_rel(vport);
4278 
4279 	return -ENOMEM;
4280 }
4281 
4282 /**
4283  * idpf_vport_intr_init - Setup all vectors for the given vport
4284  * @vport: virtual port
4285  *
4286  * Returns 0 on success or negative on failure
4287  */
idpf_vport_intr_init(struct idpf_vport * vport)4288 int idpf_vport_intr_init(struct idpf_vport *vport)
4289 {
4290 	int err;
4291 
4292 	err = idpf_vport_intr_init_vec_idx(vport);
4293 	if (err)
4294 		return err;
4295 
4296 	idpf_vport_intr_map_vector_to_qs(vport);
4297 	idpf_vport_intr_napi_add_all(vport);
4298 
4299 	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4300 	if (err)
4301 		goto unroll_vectors_alloc;
4302 
4303 	err = idpf_vport_intr_req_irq(vport);
4304 	if (err)
4305 		goto unroll_vectors_alloc;
4306 
4307 	return 0;
4308 
4309 unroll_vectors_alloc:
4310 	idpf_vport_intr_napi_del_all(vport);
4311 
4312 	return err;
4313 }
4314 
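/**
 * idpf_vport_intr_ena - Enable NAPI and interrupts for all q_vectors in the vport
 * @vport: virtual port structure
 */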
idpf_vport_intr_ena(struct idpf_vport * vport)4315 void idpf_vport_intr_ena(struct idpf_vport *vport)
4316 {
4317 	idpf_vport_intr_napi_ena_all(vport);
4318 	idpf_vport_intr_ena_irq_all(vport);
4319 }
4320 
4321 /**
4322  * idpf_config_rss - Send virtchnl messages to configure RSS
4323  * @vport: virtual port
4324  *
4325  * Return 0 on success, negative on failure
4326  */
idpf_config_rss(struct idpf_vport * vport)4327 int idpf_config_rss(struct idpf_vport *vport)
4328 {
4329 	int err;
4330 
4331 	err = idpf_send_get_set_rss_key_msg(vport, false);
4332 	if (err)
4333 		return err;
4334 
4335 	return idpf_send_get_set_rss_lut_msg(vport, false);
4336 }
4337 
4338 /**
4339  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4340  * @vport: virtual port structure
4341  */
idpf_fill_dflt_rss_lut(struct idpf_vport * vport)4342 static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4343 {
4344 	struct idpf_adapter *adapter = vport->adapter;
4345 	u16 num_active_rxq = vport->num_rxq;
4346 	struct idpf_rss_data *rss_data;
4347 	int i;
4348 
4349 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4350 
4351 	for (i = 0; i < rss_data->rss_lut_size; i++) {
4352 		rss_data->rss_lut[i] = i % num_active_rxq;
4353 		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4354 	}
4355 }
4356 
4357 /**
4358  * idpf_init_rss - Allocate and initialize RSS resources
4359  * @vport: virtual port
4360  *
4361  * Return 0 on success, negative on failure
4362  */
idpf_init_rss(struct idpf_vport * vport)4363 int idpf_init_rss(struct idpf_vport *vport)
4364 {
4365 	struct idpf_adapter *adapter = vport->adapter;
4366 	struct idpf_rss_data *rss_data;
4367 	u32 lut_size;
4368 
4369 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4370 
4371 	lut_size = rss_data->rss_lut_size * sizeof(u32);
4372 	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4373 	if (!rss_data->rss_lut)
4374 		return -ENOMEM;
4375 
4376 	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4377 	if (!rss_data->cached_lut) {
4378 		kfree(rss_data->rss_lut);
4379 		rss_data->rss_lut = NULL;
4380 
4381 		return -ENOMEM;
4382 	}
4383 
4384 	/* Fill the default RSS lut values */
4385 	idpf_fill_dflt_rss_lut(vport);
4386 
4387 	return idpf_config_rss(vport);
4388 }
4389 
4390 /**
4391  * idpf_deinit_rss - Release RSS resources
4392  * @vport: virtual port
4393  */
idpf_deinit_rss(struct idpf_vport * vport)4394 void idpf_deinit_rss(struct idpf_vport *vport)
4395 {
4396 	struct idpf_adapter *adapter = vport->adapter;
4397 	struct idpf_rss_data *rss_data;
4398 
4399 	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4400 	kfree(rss_data->cached_lut);
4401 	rss_data->cached_lut = NULL;
4402 	kfree(rss_data->rss_lut);
4403 	rss_data->rss_lut = NULL;
4404 }
4405