xref: /linux/drivers/net/ethernet/intel/ice/ice_txrx.c (revision 9e4e86a604dfd06402933467578c4b79f5412b2c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 /* The driver transmit and receive code */
5 
6 #include <linux/mm.h>
7 #include <linux/netdevice.h>
8 #include <linux/prefetch.h>
9 #include <linux/bpf_trace.h>
10 #include <linux/net/intel/libie/rx.h>
11 #include <net/libeth/xdp.h>
12 #include <net/dsfield.h>
13 #include <net/mpls.h>
14 #include <net/xdp.h>
15 #include "ice_txrx_lib.h"
16 #include "ice_lib.h"
17 #include "ice.h"
18 #include "ice_trace.h"
19 #include "ice_dcb_lib.h"
20 #include "ice_xsk.h"
21 #include "ice_eswitch.h"
22 
23 #define ICE_RX_HDR_SIZE		256
24 
25 #define ICE_FDIR_CLEAN_DELAY 10
26 
27 /**
28  * ice_prgm_fdir_fltr - Program a Flow Director filter
29  * @vsi: VSI to send dummy packet
30  * @fdir_desc: flow director descriptor
31  * @raw_packet: allocated buffer for flow director
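 *
 * Return: 0 on success, negative error code on failure.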
32  */
33 int
34 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
35 		   u8 *raw_packet)
36 {
37 	struct ice_tx_buf *tx_buf, *first;
38 	struct ice_fltr_desc *f_desc;
39 	struct ice_tx_desc *tx_desc;
40 	struct ice_tx_ring *tx_ring;
41 	struct device *dev;
42 	dma_addr_t dma;
43 	u32 td_cmd;
44 	u16 i;
45 
46 	/* VSI and Tx ring */
47 	if (!vsi)
48 		return -ENOENT;
49 	tx_ring = vsi->tx_rings[0];
50 	if (!tx_ring || !tx_ring->desc)
51 		return -ENOENT;
52 	dev = tx_ring->dev;
53 
54 	/* we are using two descriptors to add/del a filter and we can wait */
55 	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
56 		if (!i)
57 			return -EAGAIN;
58 		msleep_interruptible(1);
59 	}
60 
61 	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
62 			     DMA_TO_DEVICE);
63 
64 	if (dma_mapping_error(dev, dma))
65 		return -EINVAL;
66 
67 	/* grab the next descriptor */
68 	i = tx_ring->next_to_use;
69 	first = &tx_ring->tx_buf[i];
70 	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
71 	memcpy(f_desc, fdir_desc, sizeof(*f_desc));
72 
73 	i++;
74 	i = (i < tx_ring->count) ? i : 0;
75 	tx_desc = ICE_TX_DESC(tx_ring, i);
76 	tx_buf = &tx_ring->tx_buf[i];
77 
78 	i++;
79 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
80 
81 	memset(tx_buf, 0, sizeof(*tx_buf));
82 	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
83 	dma_unmap_addr_set(tx_buf, dma, dma);
84 
85 	tx_desc->buf_addr = cpu_to_le64(dma);
86 	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
87 		 ICE_TX_DESC_CMD_RE;
88 
89 	tx_buf->type = ICE_TX_BUF_DUMMY;
90 	tx_buf->raw_buf = raw_packet;
91 
92 	tx_desc->cmd_type_offset_bsz =
93 		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
94 
95 	/* Force memory write to complete before letting h/w know
96 	 * there are new descriptors to fetch.
97 	 */
98 	wmb();
99 
100 	/* mark the data descriptor to be watched */
101 	first->next_to_watch = tx_desc;
102 
103 	writel(tx_ring->next_to_use, tx_ring->tail);
104 
105 	return 0;
106 }
107 
108 /**
109  * ice_unmap_and_free_tx_buf - Release a Tx buffer
110  * @ring: the ring that owns the buffer
111  * @tx_buf: the buffer to free
112  */
113 static void
114 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
115 {
116 	if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len))
117 		dma_unmap_page(ring->dev,
118 			       dma_unmap_addr(tx_buf, dma),
119 			       dma_unmap_len(tx_buf, len),
120 			       DMA_TO_DEVICE);
121 
122 	switch (tx_buf->type) {
123 	case ICE_TX_BUF_DUMMY:
124 		devm_kfree(ring->dev, tx_buf->raw_buf);
125 		break;
126 	case ICE_TX_BUF_SKB:
127 		dev_kfree_skb_any(tx_buf->skb);
128 		break;
129 	case ICE_TX_BUF_XDP_TX:
130 		libeth_xdp_return_va(tx_buf->raw_buf, false);
131 		break;
132 	case ICE_TX_BUF_XDP_XMIT:
133 		xdp_return_frame(tx_buf->xdpf);
134 		break;
135 	}
136 
137 	tx_buf->next_to_watch = NULL;
138 	tx_buf->type = ICE_TX_BUF_EMPTY;
139 	dma_unmap_len_set(tx_buf, len, 0);
140 	/* tx_buf must be completely set up in the transmit path */
141 }
142 
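/**
 * txring_txq - Find the netdev Tx queue paired with a given Tx ring
 * @ring: Tx ring to find the paired netdev queue for
 */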
143 static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
144 {
145 	return netdev_get_tx_queue(ring->netdev, ring->q_index);
146 }
147 
148 /**
149  * ice_clean_tstamp_ring - clean time stamp ring
150  * @tx_ring: Tx ring to clean the Time Stamp ring for
151  */
152 static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
153 {
154 	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
155 	u32 size;
156 
157 	if (!tstamp_ring->desc)
158 		return;
159 
160 	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
161 		     PAGE_SIZE);
162 	memset(tstamp_ring->desc, 0, size);
163 	tstamp_ring->next_to_use = 0;
164 }
165 
166 /**
167  * ice_free_tstamp_ring - free time stamp resources per queue
168  * @tx_ring: Tx ring to free the Time Stamp ring for
169  */
170 void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
171 {
172 	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
173 	u32 size;
174 
175 	if (!tstamp_ring->desc)
176 		return;
177 
178 	ice_clean_tstamp_ring(tx_ring);
179 	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
180 		     PAGE_SIZE);
181 	dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
182 			   tstamp_ring->dma);
183 	tstamp_ring->desc = NULL;
184 }
185 
186 /**
187  * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
188  * @tx_ring: Tx ring to free the Time Stamp ring for
189  */
190 void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
191 {
192 	ice_free_tstamp_ring(tx_ring);
193 	clear_bit(ICE_TX_RING_FLAGS_TXTIME, tx_ring->flags);
194 	smp_wmb();	/* order flag clear before pointer NULL */
195 	kfree_rcu(tx_ring->tstamp_ring, rcu);
196 	WRITE_ONCE(tx_ring->tstamp_ring, NULL);
197 }
198 
199 /**
200  * ice_clean_tx_ring - Free all Tx buffers in a ring
201  * @tx_ring: ring to be cleaned
202  */
203 void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
204 {
205 	u32 size;
206 	u16 i;
207 
208 	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
209 		ice_xsk_clean_xdp_ring(tx_ring);
210 		goto tx_skip_free;
211 	}
212 
213 	/* ring already cleared, nothing to do */
214 	if (!tx_ring->tx_buf)
215 		return;
216 
217 	/* Free all the Tx ring buffers */
218 	for (i = 0; i < tx_ring->count; i++)
219 		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
220 
221 tx_skip_free:
222 	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
223 
224 	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
225 		     PAGE_SIZE);
226 	/* Zero out the descriptor ring */
227 	memset(tx_ring->desc, 0, size);
228 
229 	tx_ring->next_to_use = 0;
230 	tx_ring->next_to_clean = 0;
231 
232 	if (!tx_ring->netdev)
233 		return;
234 
235 	/* cleanup Tx queue statistics */
236 	netdev_tx_reset_queue(txring_txq(tx_ring));
237 
238 	if (ice_is_txtime_cfg(tx_ring))
239 		ice_free_tx_tstamp_ring(tx_ring);
240 }
241 
242 /**
243  * ice_free_tx_ring - Free Tx resources per queue
244  * @tx_ring: Tx descriptor ring for a specific queue
245  *
246  * Free all transmit software resources
247  */
248 void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
249 {
250 	u32 size;
251 
252 	ice_clean_tx_ring(tx_ring);
253 	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
254 	tx_ring->tx_buf = NULL;
255 
256 	if (tx_ring->desc) {
257 		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
258 			     PAGE_SIZE);
259 		dmam_free_coherent(tx_ring->dev, size,
260 				   tx_ring->desc, tx_ring->dma);
261 		tx_ring->desc = NULL;
262 	}
263 }
264 
265 /**
266  * ice_clean_tx_irq - Reclaim resources after transmit completes
267  * @tx_ring: Tx ring to clean
268  * @napi_budget: Used to determine if we are in netpoll
269  *
270  * Returns true if there's any budget left (i.e. the clean is finished)
271  */
272 static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
273 {
274 	unsigned int total_bytes = 0, total_pkts = 0;
275 	unsigned int budget = ICE_DFLT_IRQ_WORK;
276 	struct ice_vsi *vsi = tx_ring->vsi;
277 	s16 i = tx_ring->next_to_clean;
278 	struct ice_tx_desc *tx_desc;
279 	struct ice_tx_buf *tx_buf;
280 
281 	/* get the bql data ready */
282 	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
283 
284 	tx_buf = &tx_ring->tx_buf[i];
285 	tx_desc = ICE_TX_DESC(tx_ring, i);
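	/* track the index as a negative offset from the ring size so the
	 * ring-wrap checks below reduce to a simple test against zero
	 */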
286 	i -= tx_ring->count;
287 
288 	prefetch(&vsi->state);
289 
290 	do {
291 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
292 
293 		/* if next_to_watch is not set then there is no work pending */
294 		if (!eop_desc)
295 			break;
296 
297 		/* prefetch skb->users for write, napi_consume_skb() touches it below */
298 		prefetchw(&tx_buf->skb->users);
299 
300 		smp_rmb();	/* prevent any other reads prior to eop_desc */
301 
302 		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
303 		/* if the descriptor isn't done, no work yet to do */
304 		if (!(eop_desc->cmd_type_offset_bsz &
305 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
306 			break;
307 
308 		/* clear next_to_watch to prevent false hangs */
309 		tx_buf->next_to_watch = NULL;
310 
311 		/* update the statistics for this packet */
312 		total_bytes += tx_buf->bytecount;
313 		total_pkts += tx_buf->gso_segs;
314 
315 		/* free the skb */
316 		napi_consume_skb(tx_buf->skb, napi_budget);
317 
318 		/* unmap skb header data */
319 		dma_unmap_single(tx_ring->dev,
320 				 dma_unmap_addr(tx_buf, dma),
321 				 dma_unmap_len(tx_buf, len),
322 				 DMA_TO_DEVICE);
323 
324 		/* clear tx_buf data */
325 		tx_buf->type = ICE_TX_BUF_EMPTY;
326 		dma_unmap_len_set(tx_buf, len, 0);
327 
328 		/* unmap remaining buffers */
329 		while (tx_desc != eop_desc) {
330 			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
331 			tx_buf++;
332 			tx_desc++;
333 			i++;
334 			if (unlikely(!i)) {
335 				i -= tx_ring->count;
336 				tx_buf = tx_ring->tx_buf;
337 				tx_desc = ICE_TX_DESC(tx_ring, 0);
338 			}
339 
340 			/* unmap any remaining paged data */
341 			if (dma_unmap_len(tx_buf, len)) {
342 				dma_unmap_page(tx_ring->dev,
343 					       dma_unmap_addr(tx_buf, dma),
344 					       dma_unmap_len(tx_buf, len),
345 					       DMA_TO_DEVICE);
346 				dma_unmap_len_set(tx_buf, len, 0);
347 			}
348 		}
349 		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
350 
351 		/* move us one more past the eop_desc for start of next pkt */
352 		tx_buf++;
353 		tx_desc++;
354 		i++;
355 		if (unlikely(!i)) {
356 			i -= tx_ring->count;
357 			tx_buf = tx_ring->tx_buf;
358 			tx_desc = ICE_TX_DESC(tx_ring, 0);
359 		}
360 
361 		prefetch(tx_desc);
362 
363 		/* update budget accounting */
364 		budget--;
365 	} while (likely(budget));
366 
367 	i += tx_ring->count;
368 	tx_ring->next_to_clean = i;
369 
370 	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
371 	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
372 
373 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
374 	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
375 		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
376 		/* Make sure that anybody stopping the queue after this
377 		 * sees the new next_to_clean.
378 		 */
379 		smp_mb();
380 		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
381 		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
382 			netif_tx_wake_queue(txring_txq(tx_ring));
383 			ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
384 		}
385 	}
386 
387 	return !!budget;
388 }
389 
390 /**
391  * ice_alloc_tstamp_ring - allocate the Time Stamp ring
392  * @tx_ring: Tx ring to allocate the Time Stamp ring for
393  *
394  * Return: 0 on success, negative on error
395  */
396 static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
397 {
398 	struct ice_tstamp_ring *tstamp_ring;
399 
400 	/* allocate with kzalloc(), free with kfree_rcu() */
401 	tstamp_ring = kzalloc_obj(*tstamp_ring);
402 	if (!tstamp_ring)
403 		return -ENOMEM;
404 
405 	tstamp_ring->tx_ring = tx_ring;
406 	tx_ring->tstamp_ring = tstamp_ring;
407 	tstamp_ring->desc = NULL;
408 	tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
409 	set_bit(ICE_TX_RING_FLAGS_TXTIME, tx_ring->flags);
410 	return 0;
411 }
412 
413 /**
414  * ice_setup_tstamp_ring - allocate the Time Stamp ring
415  * @tx_ring: Tx ring to set up the Time Stamp ring for
416  *
417  * Return: 0 on success, negative on error
418  */
419 static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
420 {
421 	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
422 	struct device *dev = tx_ring->dev;
423 	u32 size;
424 
425 	/* round up to nearest page */
426 	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
427 		     PAGE_SIZE);
428 	tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
429 						GFP_KERNEL);
430 	if (!tstamp_ring->desc) {
431 		dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
432 			size);
433 		return -ENOMEM;
434 	}
435 
436 	tstamp_ring->next_to_use = 0;
437 	return 0;
438 }
439 
440 /**
441  * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
442  * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
443  *
444  * Return: 0 on success, negative on error
445  */
446 int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
447 {
448 	struct device *dev = tx_ring->dev;
449 	int err;
450 
451 	err = ice_alloc_tstamp_ring(tx_ring);
452 	if (err) {
453 		dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
454 			tx_ring->q_index);
455 		return err;
456 	}
457 
458 	err = ice_setup_tstamp_ring(tx_ring);
459 	if (err) {
460 		dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
461 			tx_ring->q_index);
462 		ice_free_tx_tstamp_ring(tx_ring);
463 		return err;
464 	}
465 	return 0;
466 }
467 
468 /**
469  * ice_setup_tx_ring - Allocate the Tx descriptors
470  * @tx_ring: the Tx ring to set up
471  *
472  * Return 0 on success, negative on error
473  */
474 int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
475 {
476 	struct device *dev = tx_ring->dev;
477 	u32 size;
478 
479 	if (!dev)
480 		return -ENOMEM;
481 
482 	/* warn if we are about to overwrite the pointer */
483 	WARN_ON(tx_ring->tx_buf);
484 	tx_ring->tx_buf =
485 		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
486 			     GFP_KERNEL);
487 	if (!tx_ring->tx_buf)
488 		return -ENOMEM;
489 
490 	/* round up to nearest page */
491 	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
492 		     PAGE_SIZE);
493 	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
494 					    GFP_KERNEL);
495 	if (!tx_ring->desc) {
496 		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
497 			size);
498 		goto err;
499 	}
500 
501 	tx_ring->next_to_use = 0;
502 	tx_ring->next_to_clean = 0;
503 	tx_ring->ring_stats->tx.prev_pkt = -1;
504 	return 0;
505 
506 err:
507 	devm_kfree(dev, tx_ring->tx_buf);
508 	tx_ring->tx_buf = NULL;
509 	return -ENOMEM;
510 }
511 
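/**
 * ice_rxq_pp_destroy - Destroy an Rx ring's libeth fill queues and page pools
 * @rq: Rx ring to release the payload (and optional header) buffer pools for
 */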
512 void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
513 {
514 	struct libeth_fq fq = {
515 		.fqes	= rq->rx_fqes,
516 		.pp	= rq->pp,
517 	};
518 
519 	libeth_rx_fq_destroy(&fq);
520 	rq->rx_fqes = NULL;
521 	rq->pp = NULL;
522 
523 	if (!rq->hdr_pp)
524 		return;
525 
526 	fq.fqes = rq->hdr_fqes;
527 	fq.pp = rq->hdr_pp;
528 
529 	libeth_rx_fq_destroy(&fq);
530 	rq->hdr_fqes = NULL;
531 	rq->hdr_pp = NULL;
532 }
533 
534 /**
535  * ice_clean_rx_ring - Free Rx buffers
536  * @rx_ring: ring to be cleaned
537  */
538 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
539 {
540 	u32 size;
541 
542 	if (rx_ring->xsk_pool) {
543 		ice_xsk_clean_rx_ring(rx_ring);
544 		goto rx_skip_free;
545 	}
546 
547 	/* ring already cleared, nothing to do */
548 	if (!rx_ring->rx_fqes)
549 		return;
550 
551 	libeth_xdp_return_stash(&rx_ring->xdp);
552 
553 	/* Return all the Rx ring buffers to their page pools */
554 	for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
555 		libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem);
556 
557 		if (rx_ring->hdr_pp)
558 			libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem);
559 
560 		if (unlikely(++i == rx_ring->count))
561 			i = 0;
562 	}
563 
564 	if ((rx_ring->vsi->type == ICE_VSI_PF ||
565 	     rx_ring->vsi->type == ICE_VSI_SF ||
566 	     rx_ring->vsi->type == ICE_VSI_LB) &&
567 	    xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
568 		xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
569 		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
570 	}
571 
572 	ice_rxq_pp_destroy(rx_ring);
573 
574 rx_skip_free:
575 	/* Zero out the descriptor ring */
576 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
577 		     PAGE_SIZE);
578 	memset(rx_ring->desc, 0, size);
579 
580 	rx_ring->next_to_clean = 0;
581 	rx_ring->next_to_use = 0;
582 }
583 
584 /**
585  * ice_free_rx_ring - Free Rx resources
586  * @rx_ring: ring to clean the resources from
587  *
588  * Free all receive software resources
589  */
590 void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
591 {
592 	struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
593 	u32 size;
594 
595 	ice_clean_rx_ring(rx_ring);
596 	WRITE_ONCE(rx_ring->xdp_prog, NULL);
597 	if (rx_ring->xsk_pool) {
598 		kfree(rx_ring->xdp_buf);
599 		rx_ring->xdp_buf = NULL;
600 	}
601 
602 	if (rx_ring->desc) {
603 		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
604 			     PAGE_SIZE);
605 		dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma);
606 		rx_ring->desc = NULL;
607 	}
608 }
609 
610 /**
611  * ice_setup_rx_ring - Allocate the Rx descriptors
612  * @rx_ring: the Rx ring to set up
613  *
614  * Return 0 on success, negative on error
615  */
616 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
617 {
618 	struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
619 	u32 size;
620 
621 	/* round up to nearest page */
622 	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
623 		     PAGE_SIZE);
624 	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
625 					    GFP_KERNEL);
626 	if (!rx_ring->desc) {
627 		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
628 			size);
629 		return -ENOMEM;
630 	}
631 
632 	rx_ring->next_to_use = 0;
633 	rx_ring->next_to_clean = 0;
634 
635 	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
636 		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
637 
638 	return 0;
639 }
640 
641 /**
642  * ice_run_xdp - Executes an XDP program on initialized xdp_buff
643  * @rx_ring: Rx ring
644  * @xdp: xdp_buff used as input to the XDP program
645  * @xdp_prog: XDP program to run
646  * @xdp_ring: ring to be used for XDP_TX action
647  * @eop_desc: Last descriptor in packet to read metadata from
648  *
649  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
650  */
651 static u32
652 ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp,
653 	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
654 	    union ice_32b_rx_flex_desc *eop_desc)
655 {
656 	unsigned int ret = ICE_XDP_PASS;
657 	u32 act;
658 
659 	if (!xdp_prog)
660 		goto exit;
661 
662 	xdp->desc = eop_desc;
663 
664 	act = bpf_prog_run_xdp(xdp_prog, &xdp->base);
665 	switch (act) {
666 	case XDP_PASS:
667 		break;
668 	case XDP_TX:
669 		if (static_branch_unlikely(&ice_xdp_locking_key))
670 			spin_lock(&xdp_ring->tx_lock);
671 		ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false);
672 		if (static_branch_unlikely(&ice_xdp_locking_key))
673 			spin_unlock(&xdp_ring->tx_lock);
674 		if (ret == ICE_XDP_CONSUMED)
675 			goto out_failure;
676 		break;
677 	case XDP_REDIRECT:
678 		if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog))
679 			goto out_failure;
680 		ret = ICE_XDP_REDIR;
681 		break;
682 	default:
683 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
684 		fallthrough;
685 	case XDP_ABORTED:
686 out_failure:
687 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
688 		fallthrough;
689 	case XDP_DROP:
690 		libeth_xdp_return_buff(xdp);
691 		ret = ICE_XDP_CONSUMED;
692 	}
693 
694 exit:
695 	return ret;
696 }
697 
698 /**
699  * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
700  * @xdpf: XDP frame that will be converted to XDP buff
701  * @xdp_ring: XDP ring for transmission
702  */
703 static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
704 			     struct ice_tx_ring *xdp_ring)
705 {
706 	struct xdp_buff xdp;
707 
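	/* rebuild an on-stack xdp_buff from the frame so the common
	 * __ice_xmit_xdp_ring() path can be reused; the last argument below
	 * marks the buffer as originating from an xdp_frame
	 */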
708 	xdp.data_hard_start = (void *)xdpf;
709 	xdp.data = xdpf->data;
710 	xdp.data_end = xdp.data + xdpf->len;
711 	xdp.frame_sz = xdpf->frame_sz;
712 	xdp.flags = xdpf->flags;
713 
714 	return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
715 }
716 
717 /**
718  * ice_xdp_xmit - submit packets to XDP ring for transmission
719  * @dev: netdev
720  * @n: number of XDP frames to be transmitted
721  * @frames: XDP frames to be transmitted
722  * @flags: transmit flags
723  *
724  * Returns the number of frames successfully sent. Failed frames
725  * will be freed by the XDP core.
726  * For error cases, a negative errno code is returned and no frames
727  * are transmitted (the caller must handle freeing frames).
728  */
729 int
730 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
731 	     u32 flags)
732 {
733 	struct ice_netdev_priv *np = netdev_priv(dev);
734 	unsigned int queue_index = smp_processor_id();
735 	struct ice_vsi *vsi = np->vsi;
736 	struct ice_tx_ring *xdp_ring;
737 	struct ice_tx_buf *tx_buf;
738 	int nxmit = 0, i;
739 
740 	if (test_bit(ICE_VSI_DOWN, vsi->state))
741 		return -ENETDOWN;
742 
743 	if (!ice_is_xdp_ena_vsi(vsi))
744 		return -ENXIO;
745 
746 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
747 		return -EINVAL;
748 
749 	if (static_branch_unlikely(&ice_xdp_locking_key)) {
750 		queue_index %= vsi->num_xdp_txq;
751 		xdp_ring = vsi->xdp_rings[queue_index];
752 		spin_lock(&xdp_ring->tx_lock);
753 	} else {
754 		/* Generally, should not happen */
755 		if (unlikely(queue_index >= vsi->num_xdp_txq))
756 			return -ENXIO;
757 		xdp_ring = vsi->xdp_rings[queue_index];
758 	}
759 
760 	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
761 	for (i = 0; i < n; i++) {
762 		const struct xdp_frame *xdpf = frames[i];
763 		int err;
764 
765 		err = ice_xmit_xdp_ring(xdpf, xdp_ring);
766 		if (err != ICE_XDP_TX)
767 			break;
768 		nxmit++;
769 	}
770 
771 	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
772 	if (unlikely(flags & XDP_XMIT_FLUSH))
773 		ice_xdp_ring_update_tail(xdp_ring);
774 
775 	if (static_branch_unlikely(&ice_xdp_locking_key))
776 		spin_unlock(&xdp_ring->tx_lock);
777 
778 	return nxmit;
779 }
780 
781 /**
782  * ice_init_ctrl_rx_descs - Initialize Rx descriptors for the control VSI
783  * @rx_ring: ring to init descriptors on
784  * @count: number of descriptors to initialize
785  */
786 void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
787 {
788 	union ice_32b_rx_flex_desc *rx_desc;
789 	u32 ntu = rx_ring->next_to_use;
790 
791 	if (!count)
792 		return;
793 
794 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
795 
796 	do {
797 		rx_desc++;
798 		ntu++;
799 		if (unlikely(ntu == rx_ring->count)) {
800 			rx_desc = ICE_RX_DESC(rx_ring, 0);
801 			ntu = 0;
802 		}
803 
804 		rx_desc->wb.status_error0 = 0;
805 		count--;
806 	} while (count);
807 
808 	if (rx_ring->next_to_use != ntu)
809 		ice_release_rx_desc(rx_ring, ntu);
810 }
811 
812 /**
813  * ice_alloc_rx_bufs - Replace used receive buffers
814  * @rx_ring: ring to place buffers on
815  * @cleaned_count: number of buffers to replace
816  *
817  * Returns false if all allocations were successful, true if any fail. Returning
818  * true signals to the caller that we didn't replace cleaned_count buffers and
819  * there is more work to do.
820  *
821  * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
822  * buffers. Then bump tail at most one time. Grouping like this lets us avoid
823  * multiple tail writes per call.
824  */
825 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
826 {
827 	const struct libeth_fq_fp hdr_fq = {
828 		.pp		= rx_ring->hdr_pp,
829 		.fqes		= rx_ring->hdr_fqes,
830 		.truesize	= rx_ring->hdr_truesize,
831 		.count		= rx_ring->count,
832 	};
833 	const struct libeth_fq_fp fq = {
834 		.pp		= rx_ring->pp,
835 		.fqes		= rx_ring->rx_fqes,
836 		.truesize	= rx_ring->truesize,
837 		.count		= rx_ring->count,
838 	};
839 	union ice_32b_rx_flex_desc *rx_desc;
840 	u16 ntu = rx_ring->next_to_use;
841 
842 	/* do nothing if no valid netdev defined */
843 	if (!rx_ring->netdev || !cleaned_count)
844 		return false;
845 
846 	/* get the Rx descriptor and buffer based on next_to_use */
847 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
848 
849 	do {
850 		dma_addr_t addr;
851 
852 		addr = libeth_rx_alloc(&fq, ntu);
853 		if (addr == DMA_MAPPING_ERROR) {
854 			ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
855 			break;
856 		}
857 
858 		/* Refresh the desc even if buffer_addrs didn't change
859 		 * because each write-back erases this info.
860 		 */
861 		rx_desc->read.pkt_addr = cpu_to_le64(addr);
862 
863 		if (!hdr_fq.pp)
864 			goto next;
865 
866 		addr = libeth_rx_alloc(&hdr_fq, ntu);
867 		if (addr == DMA_MAPPING_ERROR) {
868 			ice_stats_inc(rx_ring->ring_stats, rx_page_failed);
869 
870 			libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
871 			break;
872 		}
873 
874 		rx_desc->read.hdr_addr = cpu_to_le64(addr);
875 
876 next:
877 		rx_desc++;
878 		ntu++;
879 		if (unlikely(ntu == rx_ring->count)) {
880 			rx_desc = ICE_RX_DESC(rx_ring, 0);
881 			ntu = 0;
882 		}
883 
884 		/* clear the status bits for the next_to_use descriptor */
885 		rx_desc->wb.status_error0 = 0;
886 
887 		cleaned_count--;
888 	} while (cleaned_count);
889 
890 	if (rx_ring->next_to_use != ntu)
891 		ice_release_rx_desc(rx_ring, ntu);
892 
893 	return !!cleaned_count;
894 }
895 
896 /**
897  * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
898  * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
899  *
900  * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
901  * to set flow director rules on VFs.
902  */
903 void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
904 {
905 	u32 ntc = rx_ring->next_to_clean;
906 	unsigned int total_rx_pkts = 0;
907 	u32 cnt = rx_ring->count;
908 
909 	while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
910 		struct ice_vsi *ctrl_vsi = rx_ring->vsi;
911 		union ice_32b_rx_flex_desc *rx_desc;
912 		u16 stat_err_bits;
913 
914 		rx_desc = ICE_RX_DESC(rx_ring, ntc);
915 
916 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
917 		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
918 			break;
919 
920 		dma_rmb();
921 
922 		if (ctrl_vsi->vf)
923 			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
924 
925 		if (++ntc == cnt)
926 			ntc = 0;
927 		total_rx_pkts++;
928 	}
929 
930 	rx_ring->next_to_clean = ntc;
931 	ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring));
932 }
933 
934 /**
935  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
936  * @rx_ring: Rx descriptor ring to transact packets on
937  * @budget: Total limit on number of packets to process
938  *
939  * This function provides a "bounce buffer" approach to Rx interrupt
940  * processing. The advantage to this is that on systems that have
941  * expensive overhead for IOMMU access this provides a means of avoiding
942  * it by maintaining the mapping of the page to the system.
943  *
944  * Returns amount of work completed
945  */
946 static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
947 {
948 	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
949 	struct ice_tx_ring *xdp_ring = NULL;
950 	struct bpf_prog *xdp_prog = NULL;
951 	u32 ntc = rx_ring->next_to_clean;
952 	LIBETH_XDP_ONSTACK_BUFF(xdp);
953 	u32 cached_ntu, xdp_verdict;
954 	u32 cnt = rx_ring->count;
955 	u32 xdp_xmit = 0;
956 	bool failure;
957 
958 	libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq);
959 
960 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
961 	if (xdp_prog) {
962 		xdp_ring = rx_ring->xdp_ring;
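		/* cache the XDP Tx ring position at the start of this poll so
		 * ice_finalize_xdp_rx() can finalize the frames queued here
		 */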
963 		cached_ntu = xdp_ring->next_to_use;
964 	}
965 
966 	/* start the loop to process Rx packets bounded by 'budget' */
967 	while (likely(total_rx_pkts < (unsigned int)budget)) {
968 		union ice_32b_rx_flex_desc *rx_desc;
969 		struct libeth_fqe *rx_buf;
970 		struct sk_buff *skb;
971 		unsigned int size;
972 		u16 stat_err_bits;
973 		u16 vlan_tci;
974 		bool rxe;
975 
976 		/* get the Rx desc from Rx ring based on 'next_to_clean' */
977 		rx_desc = ICE_RX_DESC(rx_ring, ntc);
978 
979 		/*
980 		 * The DD bit will always be zero for unused descriptors
981 		 * because it's cleared in cleanup or when setting the DMA
982 		 * address of the header buffer, which never uses the DD bit.
983 		 * If the hardware wrote the descriptor, it will be non-zero.
984 		 */
985 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
986 		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
987 			break;
988 
989 		/* This memory barrier is needed to keep us from reading
990 		 * any other fields out of the rx_desc until we know the
991 		 * DD bit is set.
992 		 */
993 		dma_rmb();
994 
995 		ice_trace(clean_rx_irq, rx_ring, rx_desc);
996 
997 		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) |
998 				BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
999 		rxe = ice_test_staterr(rx_desc->wb.status_error0,
1000 				       stat_err_bits);
1001 
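		/* with header split enabled, the packet header lands in a
		 * separate buffer and is added to the xdp_buff before the
		 * payload fragment below
		 */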
1002 		if (!rx_ring->hdr_pp)
1003 			goto payload;
1004 
1005 		size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1,
1006 				     ICE_RX_FLEX_DESC_HDR_LEN_M);
1007 		if (unlikely(rxe))
1008 			size = 0;
1009 
1010 		rx_buf = &rx_ring->hdr_fqes[ntc];
1011 		libeth_xdp_process_buff(xdp, rx_buf, size);
1012 		rx_buf->netmem = 0;
1013 
1014 payload:
1015 		size = le16_to_cpu(rx_desc->wb.pkt_len) &
1016 			ICE_RX_FLX_DESC_PKT_LEN_M;
1017 		if (unlikely(rxe))
1018 			size = 0;
1019 
1020 		/* retrieve a buffer from the ring */
1021 		rx_buf = &rx_ring->rx_fqes[ntc];
1022 		libeth_xdp_process_buff(xdp, rx_buf, size);
1023 
1024 		if (++ntc == cnt)
1025 			ntc = 0;
1026 
1027 		/* skip if it is NOP desc */
1028 		if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data))
1029 			continue;
1030 
1031 		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
1032 		if (xdp_verdict == ICE_XDP_PASS)
1033 			goto construct_skb;
1034 
1035 		if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR))
1036 			xdp_xmit |= xdp_verdict;
1037 		total_rx_bytes += xdp_get_buff_len(&xdp->base);
1038 		total_rx_pkts++;
1039 
1040 		xdp->data = NULL;
1041 		continue;
1042 
1043 construct_skb:
1044 		skb = xdp_build_skb_from_buff(&xdp->base);
1045 		xdp->data = NULL;
1046 
1047 		/* exit if we failed to retrieve a buffer */
1048 		if (!skb) {
1049 			libeth_xdp_return_buff_slow(xdp);
1050 			ice_stats_inc(rx_ring->ring_stats, rx_buf_failed);
1051 			continue;
1052 		}
1053 
1054 		vlan_tci = ice_get_vlan_tci(rx_desc);
1055 
1056 		/* probably a little skewed due to removing CRC */
1057 		total_rx_bytes += skb->len;
1058 
1059 		/* populate checksum, VLAN, and protocol */
1060 		ice_process_skb_fields(rx_ring, rx_desc, skb);
1061 
1062 		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1063 		/* send completed skb up the stack */
1064 		ice_receive_skb(rx_ring, skb, vlan_tci);
1065 
1066 		/* update budget accounting */
1067 		total_rx_pkts++;
1068 	}
1069 
1070 	rx_ring->next_to_clean = ntc;
1071 	/* return up to cleaned_count buffers to hardware */
1072 	failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
1073 
1074 	if (xdp_xmit)
1075 		ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
1076 
1077 	libeth_xdp_save_buff(&rx_ring->xdp, xdp);
1078 
1079 	if (rx_ring->ring_stats)
1080 		ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
1081 					 total_rx_bytes);
1082 
1083 	/* guarantee a trip back through this routine if there was a failure */
1084 	return failure ? budget : (int)total_rx_pkts;
1085 }
1086 
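/**
 * __ice_update_sample - build a DIM sample from a ring container's stats
 * @q_vector: interrupt vector the rings belong to
 * @rc: ring container (Tx or Rx) to aggregate packet/byte counts from
 * @sample: DIM sample to fill in
 * @is_tx: true when sampling Tx rings, false when sampling Rx rings
 */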
1087 static void __ice_update_sample(struct ice_q_vector *q_vector,
1088 				struct ice_ring_container *rc,
1089 				struct dim_sample *sample,
1090 				bool is_tx)
1091 {
1092 	u64 total_packets = 0, total_bytes = 0, pkts, bytes;
1093 
1094 	if (is_tx) {
1095 		struct ice_tx_ring *tx_ring;
1096 
1097 		ice_for_each_tx_ring(tx_ring, *rc) {
1098 			if (!tx_ring->ring_stats)
1099 				continue;
1100 
1101 			ice_fetch_tx_ring_stats(tx_ring, &pkts, &bytes);
1102 
1103 			total_packets += pkts;
1104 			total_bytes += bytes;
1105 		}
1106 	} else {
1107 		struct ice_rx_ring *rx_ring;
1108 
1109 		ice_for_each_rx_ring(rx_ring, *rc) {
1110 			if (!rx_ring->ring_stats)
1111 				continue;
1112 
1113 			ice_fetch_rx_ring_stats(rx_ring, &pkts, &bytes);
1114 
1115 			total_packets += pkts;
1116 			total_bytes += bytes;
1117 		}
1118 	}
1119 
1120 	dim_update_sample(q_vector->total_events,
1121 			  total_packets, total_bytes, sample);
1122 	sample->comp_ctr = 0;
1123 
1124 	/* if dim settings get stale, like when not updated for 1
1125 	 * second or longer, force it to start again. This addresses the
1126 	 * frequent case of an idle queue being switched to by the
1127 	 * scheduler. The 1,000 here means 1,000 milliseconds.
1128 	 */
1129 	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1130 		rc->dim.state = DIM_START_MEASURE;
1131 }
1132 
1133 /**
1134  * ice_net_dim - Update net DIM algorithm
1135  * @q_vector: the vector associated with the interrupt
1136  *
1137  * Create a DIM sample and notify net_dim() so that it can possibly decide
1138  * a new ITR value based on incoming packets, bytes, and interrupts.
1139  *
1140  * This function is a no-op if the ring is not configured to dynamic ITR.
1141  */
1142 static void ice_net_dim(struct ice_q_vector *q_vector)
1143 {
1144 	struct ice_ring_container *tx = &q_vector->tx;
1145 	struct ice_ring_container *rx = &q_vector->rx;
1146 
1147 	if (ITR_IS_DYNAMIC(tx)) {
1148 		struct dim_sample dim_sample;
1149 
1150 		__ice_update_sample(q_vector, tx, &dim_sample, true);
1151 		net_dim(&tx->dim, &dim_sample);
1152 	}
1153 
1154 	if (ITR_IS_DYNAMIC(rx)) {
1155 		struct dim_sample dim_sample;
1156 
1157 		__ice_update_sample(q_vector, rx, &dim_sample, false);
1158 		net_dim(&rx->dim, &dim_sample);
1159 	}
1160 }
1161 
1162 /**
1163  * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1164  * @itr_idx: interrupt throttling index
1165  * @itr: interrupt throttling value in usecs
1166  */
1167 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1168 {
1169 	/* The ITR value is reported in microseconds, and the register value is
1170 	 * recorded in 2 microsecond units. For this reason we only need to
1171 	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1172 	 * granularity as a shift instead of division. The mask makes sure the
1173 	 * ITR value is never odd so we don't accidentally write into the field
1174 	 * prior to the ITR field.
1175 	 */
1176 	itr &= ICE_ITR_MASK;
1177 
1178 	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1179 		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1180 		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1181 }
1182 
1183 /**
1184  * ice_enable_interrupt - re-enable MSI-X interrupt
1185  * @q_vector: the vector associated with the interrupt to enable
1186  *
1187  * If the VSI is down, the interrupt will not be re-enabled. Also,
1188  * when enabling the interrupt always reset the wb_on_itr to false
1189  * and trigger a software interrupt to clean out internal state.
1190  */
1191 static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1192 {
1193 	struct ice_vsi *vsi = q_vector->vsi;
1194 	bool wb_en = q_vector->wb_on_itr;
1195 	u32 itr_val;
1196 
1197 	if (test_bit(ICE_DOWN, vsi->state))
1198 		return;
1199 
1200 	/* trigger an ITR delayed software interrupt when exiting busy poll, to
1201 	 * make sure to catch any pending cleanups that might have been missed
1202 	 * due to interrupt state transition. If busy poll or poll isn't
1203 	 * enabled, then don't update ITR, and just enable the interrupt.
1204 	 */
1205 	if (!wb_en) {
1206 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1207 	} else {
1208 		q_vector->wb_on_itr = false;
1209 
1210 		/* do two things here with a single write. Set up the third ITR
1211 		 * index to be used for software interrupt moderation, and then
1212 		 * trigger a software interrupt with a rate limit of 20K on
1213 		 * software interrupts, this will help avoid high interrupt
1214 		 * loads due to frequently polling and exiting polling.
1215 		 */
1216 		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1217 		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1218 			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1219 			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1220 	}
1221 	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1222 }
1223 
1224 /**
1225  * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1226  * @q_vector: q_vector to set WB_ON_ITR on
1227  *
1228  * We need to tell hardware to write-back completed descriptors even when
1229  * interrupts are disabled. Descriptors will be written back on cache line
1230  * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1231  * descriptors may not be written back if they don't fill a cache line until
1232  * the next interrupt.
1233  *
1234  * This sets the write-back frequency to whatever was set previously for the
1235  * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1236  * aren't meddling with the INTENA_M bit.
1237  */
1238 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1239 {
1240 	struct ice_vsi *vsi = q_vector->vsi;
1241 
1242 	/* already in wb_on_itr mode, no need to change it */
1243 	if (q_vector->wb_on_itr)
1244 		return;
1245 
1246 	/* use previously set ITR values for all of the ITR indices by
1247 	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1248 	 * be static in non-adaptive mode (user configured)
1249 	 */
1250 	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1251 	     FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
1252 	     FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
1253 	     FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));
1254 
1255 	q_vector->wb_on_itr = true;
1256 }
1257 
1258 /**
1259  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1260  * @napi: napi struct with our devices info in it
1261  * @budget: amount of work driver is allowed to do this pass, in packets
1262  *
1263  * This function will clean all queues associated with a q_vector.
1264  *
1265  * Returns the amount of work done
1266  */
1267 int ice_napi_poll(struct napi_struct *napi, int budget)
1268 {
1269 	struct ice_q_vector *q_vector =
1270 				container_of(napi, struct ice_q_vector, napi);
1271 	struct ice_tx_ring *tx_ring;
1272 	struct ice_rx_ring *rx_ring;
1273 	bool clean_complete = true;
1274 	int budget_per_ring;
1275 	int work_done = 0;
1276 
1277 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1278 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1279 	 */
1280 	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1281 		struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
1282 		bool wd;
1283 
1284 		if (xsk_pool)
1285 			wd = ice_xmit_zc(tx_ring, xsk_pool);
1286 		else if (ice_ring_is_xdp(tx_ring))
1287 			wd = true;
1288 		else
1289 			wd = ice_clean_tx_irq(tx_ring, budget);
1290 
1291 		if (!wd)
1292 			clean_complete = false;
1293 	}
1294 
1295 	/* Handle case where we are called by netpoll with a budget of 0 */
1296 	if (unlikely(budget <= 0))
1297 		return budget;
1298 
1299 	/* normally we have 1 Rx ring per q_vector */
1300 	if (unlikely(q_vector->num_ring_rx > 1))
1301 		/* We attempt to distribute budget to each Rx queue fairly, but
1302 		 * don't allow the budget to go below 1 because that would exit
1303 		 * polling early.
1304 		 */
1305 		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1306 	else
1307 		/* Max of 1 Rx ring in this q_vector so give it the budget */
1308 		budget_per_ring = budget;
1309 
1310 	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1311 		struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
1312 		int cleaned;
1313 
1314 		/* A dedicated path for zero-copy allows making a single
1315 		 * comparison in the irq context instead of many inside the
1316 		 * ice_clean_rx_irq function and makes the codebase cleaner.
1317 		 */
1318 		cleaned = rx_ring->xsk_pool ?
1319 			  ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
1320 			  ice_clean_rx_irq(rx_ring, budget_per_ring);
1321 		work_done += cleaned;
1322 		/* if we clean as many as budgeted, we must not be done */
1323 		if (cleaned >= budget_per_ring)
1324 			clean_complete = false;
1325 	}
1326 
1327 	/* If work not completed, return budget and polling will return */
1328 	if (!clean_complete) {
1329 		/* Set the writeback on ITR so partial completions of
1330 		 * cache-lines will still continue even if we're polling.
1331 		 */
1332 		ice_set_wb_on_itr(q_vector);
1333 		return budget;
1334 	}
1335 
1336 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1337 	 * poll us due to busy-polling
1338 	 */
1339 	if (napi_complete_done(napi, work_done)) {
1340 		ice_net_dim(q_vector);
1341 		ice_enable_interrupt(q_vector);
1342 	} else {
1343 		ice_set_wb_on_itr(q_vector);
1344 	}
1345 
1346 	return min_t(int, work_done, budget - 1);
1347 }
1348 
1349 /**
1350  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1351  * @tx_ring: the ring to be checked
1352  * @size: the size buffer we want to assure is available
1353  *
1354  * Returns -EBUSY if a stop is needed, else 0
1355  */
1356 static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1357 {
1358 	netif_tx_stop_queue(txring_txq(tx_ring));
1359 	/* Memory barrier before checking head and tail */
1360 	smp_mb();
1361 
1362 	/* Check again in case another CPU has just made room available. */
1363 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1364 		return -EBUSY;
1365 
1366 	/* A reprieve! - use start_queue because it doesn't call schedule */
1367 	netif_tx_start_queue(txring_txq(tx_ring));
1368 	ice_stats_inc(tx_ring->ring_stats, tx_restart_q);
1369 	return 0;
1370 }
1371 
1372 /**
1373  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1374  * @tx_ring: the ring to be checked
1375  * @size:    the size buffer we want to assure is available
1376  *
1377  * Returns 0 if stop is not needed
1378  */
1379 static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1380 {
1381 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1382 		return 0;
1383 
1384 	return __ice_maybe_stop_tx(tx_ring, size);
1385 }
1386 
1387 /**
1388  * ice_tx_map - Build the Tx descriptor
1389  * @tx_ring: ring to send buffer on
1390  * @first: first buffer info buffer to use
1391  * @off: pointer to struct that holds offload parameters
1392  *
1393  * This function loops over the skb data pointed to by *first
1394  * and gets a physical address for each memory location and programs
1395  * it and the length into the transmit descriptor.
1396  */
1397 static void
1398 ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1399 	   struct ice_tx_offload_params *off)
1400 {
1401 	u64 td_offset, td_tag, td_cmd;
1402 	u16 i = tx_ring->next_to_use;
1403 	unsigned int data_len, size;
1404 	struct ice_tx_desc *tx_desc;
1405 	struct ice_tx_buf *tx_buf;
1406 	struct sk_buff *skb;
1407 	skb_frag_t *frag;
1408 	dma_addr_t dma;
1409 	bool kick;
1410 
1411 	td_tag = off->td_l2tag1;
1412 	td_cmd = off->td_cmd;
1413 	td_offset = off->td_offset;
1414 	skb = first->skb;
1415 
1416 	data_len = skb->data_len;
1417 	size = skb_headlen(skb);
1418 
1419 	tx_desc = ICE_TX_DESC(tx_ring, i);
1420 
1421 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1422 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1423 		td_tag = first->vid;
1424 	}
1425 
1426 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1427 
1428 	tx_buf = first;
1429 
1430 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1431 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1432 
1433 		if (dma_mapping_error(tx_ring->dev, dma))
1434 			goto dma_error;
1435 
1436 		/* record length, and DMA address */
1437 		dma_unmap_len_set(tx_buf, len, size);
1438 		dma_unmap_addr_set(tx_buf, dma, dma);
1439 
1440 		/* align size to end of page */
1441 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1442 		tx_desc->buf_addr = cpu_to_le64(dma);
1443 
1444 		/* account for data chunks larger than the hardware
1445 		 * can handle
1446 		 */
1447 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1448 			tx_desc->cmd_type_offset_bsz =
1449 				ice_build_ctob(td_cmd, td_offset, max_data,
1450 					       td_tag);
1451 
1452 			tx_desc++;
1453 			i++;
1454 
1455 			if (i == tx_ring->count) {
1456 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1457 				i = 0;
1458 			}
1459 
1460 			dma += max_data;
1461 			size -= max_data;
1462 
1463 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1464 			tx_desc->buf_addr = cpu_to_le64(dma);
1465 		}
1466 
1467 		if (likely(!data_len))
1468 			break;
1469 
1470 		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1471 							      size, td_tag);
1472 
1473 		tx_desc++;
1474 		i++;
1475 
1476 		if (i == tx_ring->count) {
1477 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1478 			i = 0;
1479 		}
1480 
1481 		size = skb_frag_size(frag);
1482 		data_len -= size;
1483 
1484 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1485 				       DMA_TO_DEVICE);
1486 
1487 		tx_buf = &tx_ring->tx_buf[i];
1488 		tx_buf->type = ICE_TX_BUF_FRAG;
1489 	}
1490 
1491 	/* record SW timestamp if HW timestamp is not available */
1492 	skb_tx_timestamp(first->skb);
1493 
1494 	i++;
1495 	if (i == tx_ring->count)
1496 		i = 0;
1497 
1498 	/* write last descriptor with RS and EOP bits */
1499 	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1500 	tx_desc->cmd_type_offset_bsz =
1501 			ice_build_ctob(td_cmd, td_offset, size, td_tag);
1502 
1503 	/* Force memory writes to complete before letting h/w know there
1504 	 * are new descriptors to fetch.
1505 	 *
1506 	 * We also use this memory barrier to make certain all of the
1507 	 * status bits have been updated before next_to_watch is written.
1508 	 */
1509 	wmb();
1510 
1511 	/* set next_to_watch value indicating a packet is present */
1512 	first->next_to_watch = tx_desc;
1513 
1514 	tx_ring->next_to_use = i;
1515 
1516 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1517 
1518 	/* notify HW of packet */
1519 	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1520 				      netdev_xmit_more());
1521 	if (!kick)
1522 		return;
1523 
1524 	if (ice_is_txtime_cfg(tx_ring)) {
1525 		struct ice_tstamp_ring *tstamp_ring;
1526 		u32 tstamp_count, j;
1527 		struct ice_ts_desc *ts_desc;
1528 		struct timespec64 ts;
1529 		u32 tstamp;
1530 
1531 		smp_rmb();	/* order flag read before pointer read */
1532 		tstamp_ring = READ_ONCE(tx_ring->tstamp_ring);
1533 		if (unlikely(!tstamp_ring))
1534 			goto ring_kick;
1535 
1536 		tstamp_count = tstamp_ring->count;
1537 		j = tstamp_ring->next_to_use;
1538 
1539 		ts = ktime_to_timespec64(first->skb->tstamp);
1540 		tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
1541 
1542 		ts_desc = ICE_TS_DESC(tstamp_ring, j);
1543 		ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
1544 
1545 		j++;
1546 		if (j == tstamp_count) {
1547 			u32 fetch = tstamp_count - tx_ring->count;
1548 
1549 			j = 0;
1550 
1551 			/* To prevent an MDD when wrapping the tstamp ring,
1552 			 * create additional TS descriptors equal to the fetch
1553 			 * value computed above. HW will merge the
1554 			 * TS descriptors with the same timestamp value into a
1555 			 * single descriptor.
1556 			 */
1557 			for (; j < fetch; j++) {
1558 				ts_desc = ICE_TS_DESC(tstamp_ring, j);
1559 				ts_desc->tx_desc_idx_tstamp =
1560 				       ice_build_tstamp_desc(i, tstamp);
1561 			}
1562 		}
1563 		tstamp_ring->next_to_use = j;
1564 		writel_relaxed(j, tstamp_ring->tail);
1565 	} else {
1566 ring_kick:
1567 		writel_relaxed(i, tx_ring->tail);
1568 	}
1569 	return;
1570 
1571 dma_error:
1572 	/* clear DMA mappings for failed tx_buf map */
1573 	for (;;) {
1574 		tx_buf = &tx_ring->tx_buf[i];
1575 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1576 		if (tx_buf == first)
1577 			break;
1578 		if (i == 0)
1579 			i = tx_ring->count;
1580 		i--;
1581 	}
1582 
1583 	tx_ring->next_to_use = i;
1584 }
1585 
1586 /**
1587  * ice_tx_csum - Enable Tx checksum offloads
1588  * @first: pointer to the first descriptor
1589  * @off: pointer to struct that holds offload parameters
1590  *
1591  * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1592  */
1593 static
1594 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1595 {
1596 	const struct ice_tx_ring *tx_ring = off->tx_ring;
1597 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1598 	struct sk_buff *skb = first->skb;
1599 	union {
1600 		struct iphdr *v4;
1601 		struct ipv6hdr *v6;
1602 		unsigned char *hdr;
1603 	} ip;
1604 	union {
1605 		struct tcphdr *tcp;
1606 		unsigned char *hdr;
1607 	} l4;
1608 	__be16 frag_off, protocol;
1609 	unsigned char *exthdr;
1610 	u32 offset, cmd = 0;
1611 	u8 l4_proto = 0;
1612 
1613 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1614 		return 0;
1615 
1616 	protocol = vlan_get_protocol(skb);
1617 
1618 	if (eth_p_mpls(protocol)) {
1619 		ip.hdr = skb_inner_network_header(skb);
1620 		l4.hdr = skb_checksum_start(skb);
1621 	} else {
1622 		ip.hdr = skb_network_header(skb);
1623 		l4.hdr = skb_transport_header(skb);
1624 	}
1625 
1626 	/* compute outer L2 header size */
1627 	l2_len = ip.hdr - skb->data;
1628 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1629 
1630 	/* set the tx_flags to indicate the IP protocol type. this is
1631 	 * required so that checksum header computation below is accurate.
1632 	 */
1633 	if (ip.v4->version == 4)
1634 		first->tx_flags |= ICE_TX_FLAGS_IPV4;
1635 	else if (ip.v6->version == 6)
1636 		first->tx_flags |= ICE_TX_FLAGS_IPV6;
1637 
1638 	if (skb->encapsulation) {
1639 		bool gso_ena = false;
1640 		u32 tunnel = 0;
1641 
1642 		/* define outer network header type */
1643 		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1644 			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1645 				  ICE_TX_CTX_EIPT_IPV4 :
1646 				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1647 			l4_proto = ip.v4->protocol;
1648 		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1649 			int ret;
1650 
1651 			tunnel |= ICE_TX_CTX_EIPT_IPV6;
1652 			exthdr = ip.hdr + sizeof(*ip.v6);
1653 			l4_proto = ip.v6->nexthdr;
1654 			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1655 					       &l4_proto, &frag_off);
1656 			if (ret < 0)
1657 				return -1;
1658 		}
1659 
1660 		/* define outer transport */
1661 		switch (l4_proto) {
1662 		case IPPROTO_UDP:
1663 			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1664 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1665 			break;
1666 		case IPPROTO_GRE:
1667 			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1668 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1669 			break;
1670 		case IPPROTO_IPIP:
1671 		case IPPROTO_IPV6:
1672 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1673 			l4.hdr = skb_inner_network_header(skb);
1674 			break;
1675 		default:
1676 			if (first->tx_flags & ICE_TX_FLAGS_TSO)
1677 				return -1;
1678 
1679 			skb_checksum_help(skb);
1680 			return 0;
1681 		}
1682 
1683 		/* compute outer L3 header size */
1684 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1685 			  ICE_TXD_CTX_QW0_EIPLEN_S;
1686 
1687 		/* switch IP header pointer from outer to inner header */
1688 		ip.hdr = skb_inner_network_header(skb);
1689 
1690 		/* compute tunnel header size */
1691 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1692 			   ICE_TXD_CTX_QW0_NATLEN_S;
1693 
1694 		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1695 		/* indicate if we need to offload outer UDP header */
1696 		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1697 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1698 			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1699 
1700 		/* record tunnel offload values */
1701 		off->cd_tunnel_params |= tunnel;
1702 
1703 		/* set DTYP=1 to indicate that it's a Tx context descriptor
1704 		 * in IPsec tunnel mode with Tx offloads in Quad word 1
1705 		 */
1706 		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1707 
1708 		/* switch L4 header pointer from outer to inner */
1709 		l4.hdr = skb_inner_transport_header(skb);
1710 		l4_proto = 0;
1711 
1712 		/* reset type as we transition from outer to inner headers */
1713 		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1714 		if (ip.v4->version == 4)
1715 			first->tx_flags |= ICE_TX_FLAGS_IPV4;
1716 		if (ip.v6->version == 6)
1717 			first->tx_flags |= ICE_TX_FLAGS_IPV6;
1718 	}
1719 
1720 	/* Enable IP checksum offloads */
1721 	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1722 		l4_proto = ip.v4->protocol;
1723 		/* the stack computes the IP header already, the only time we
1724 		 * need the hardware to recompute it is in the case of TSO.
1725 		 */
1726 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1727 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1728 		else
1729 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1730 
1731 	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1732 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1733 		exthdr = ip.hdr + sizeof(*ip.v6);
1734 		l4_proto = ip.v6->nexthdr;
1735 		if (l4.hdr != exthdr)
1736 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1737 					 &frag_off);
1738 	} else {
1739 		return -1;
1740 	}
1741 
1742 	/* compute inner L3 header size */
1743 	l3_len = l4.hdr - ip.hdr;
1744 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1745 
1746 	if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
1747 	    !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
1748 	    !skb_csum_is_sctp(skb)) {
1749 		/* Set GCS */
1750 		u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
1751 		u16 csum_offset = skb->csum_offset / 2;
1752 		u16 gcs_params;
1753 
1754 		gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
1755 			     FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
1756 			     FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
1757 					ICE_TX_GCS_DESC_CSUM_PSH);
1758 
1759 		/* Unlike legacy HW checksums, GCS requires a context
1760 		 * descriptor.
1761 		 */
1762 		off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
1763 		off->cd_gcs_params = gcs_params;
1764 		/* Fill out CSO info in data descriptors */
1765 		off->td_offset |= offset;
1766 		off->td_cmd |= cmd;
1767 		return 1;
1768 	}
1769 
1770 	/* Enable L4 checksum offloads */
1771 	switch (l4_proto) {
1772 	case IPPROTO_TCP:
1773 		/* enable checksum offloads */
1774 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1775 		l4_len = l4.tcp->doff;
1776 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1777 		break;
1778 	case IPPROTO_UDP:
1779 		/* enable UDP checksum offload */
1780 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1781 		l4_len = (sizeof(struct udphdr) >> 2);
1782 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1783 		break;
1784 	case IPPROTO_SCTP:
1785 		/* enable SCTP checksum offload */
1786 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1787 		l4_len = sizeof(struct sctphdr) >> 2;
1788 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1789 		break;
1790 
1791 	default:
1792 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1793 			return -1;
1794 		skb_checksum_help(skb);
1795 		return 0;
1796 	}
1797 
1798 	off->td_cmd |= cmd;
1799 	off->td_offset |= offset;
1800 	return 1;
1801 }
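
/* Illustrative sketch only, not called anywhere in the driver (the helper
 * name and the frame layout are assumptions for the example): it shows how
 * the generic checksum (GCS) fields computed in ice_tx_csum() would look for
 * a plain IPv4/TCP frame.  With a 14 byte Ethernet header and a 20 byte IP
 * header, skb->csum_start - skb->mac_header is 34 bytes and skb->csum_offset
 * is offsetof(struct tcphdr, check) = 16; both are handed to hardware in
 * 2 byte words.
 */
static u16 __maybe_unused ice_tx_gcs_params_example(void)
{
	return FIELD_PREP(ICE_TX_GCS_DESC_START_M, 34 / 2) |
	       FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, 16 / 2) |
	       FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, ICE_TX_GCS_DESC_CSUM_PSH);
}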
1802 
1803 /**
1804  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1805  * @tx_ring: ring to send buffer on
1806  * @first: pointer to struct ice_tx_buf
1807  *
1808  * Checks the skb and set up correspondingly several generic transmit flags
1809  * Checks the skb and sets up the corresponding generic transmit flags
1810  * related to VLAN tagging for the HW (VLAN, DCB, etc.).
1811 static void
1812 ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1813 {
1814 	struct sk_buff *skb = first->skb;
1815 
1816 	/* nothing left to do, software offloaded VLAN */
1817 	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1818 		return;
1819 
1820 	/* the VLAN ethertype/tpid is determined by the VSI configuration and
1821 	 * netdev feature flags; the driver allows either 802.1Q or 802.1ad
1822 	 * VLAN offload, but never both at once, so only the VLAN ID matters here
1823 	 */
1824 	if (skb_vlan_tag_present(skb)) {
1825 		first->vid = skb_vlan_tag_get(skb);
1826 		if (test_bit(ICE_TX_RING_FLAGS_VLAN_L2TAG2, tx_ring->flags))
1827 			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1828 		else
1829 			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1830 	}
1831 
1832 	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1833 }
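
/* Illustrative example (the tag value is hypothetical): for an skb carrying
 * VLAN tag 100 on a ring with ICE_TX_RING_FLAGS_VLAN_L2TAG2 set, the code
 * above records first->vid = 100 and ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN, which
 * ice_xmit_frame_ring() later turns into an IL2TAG2 context descriptor with
 * cd_l2tag2 = 100.  Without the ring flag, ICE_TX_FLAGS_HW_VLAN is set and the
 * tag is expected to be inserted through the data descriptor path instead.
 */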
1834 
1835 /**
1836  * ice_tso - computes mss and TSO length to prepare for TSO
1837  * @first: pointer to struct ice_tx_buf
1838  * @off: pointer to struct that holds offload parameters
1839  *
1840  * Returns 1 if TSO is set up, 0 if TSO is not needed, or a negative error code.
1841  */
1842 static
1843 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1844 {
1845 	struct sk_buff *skb = first->skb;
1846 	union {
1847 		struct iphdr *v4;
1848 		struct ipv6hdr *v6;
1849 		unsigned char *hdr;
1850 	} ip;
1851 	union {
1852 		struct tcphdr *tcp;
1853 		struct udphdr *udp;
1854 		unsigned char *hdr;
1855 	} l4;
1856 	u64 cd_mss, cd_tso_len;
1857 	__be16 protocol;
1858 	u32 paylen;
1859 	u8 l4_start;
1860 	int err;
1861 
1862 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1863 		return 0;
1864 
1865 	if (!skb_is_gso(skb))
1866 		return 0;
1867 
1868 	err = skb_cow_head(skb, 0);
1869 	if (err < 0)
1870 		return err;
1871 
1872 	protocol = vlan_get_protocol(skb);
1873 
1874 	if (eth_p_mpls(protocol))
1875 		ip.hdr = skb_inner_network_header(skb);
1876 	else
1877 		ip.hdr = skb_network_header(skb);
1878 	l4.hdr = skb_checksum_start(skb);
1879 
1880 	/* initialize outer IP header fields */
1881 	if (ip.v4->version == 4) {
1882 		ip.v4->tot_len = 0;
1883 		ip.v4->check = 0;
1884 	} else {
1885 		ip.v6->payload_len = 0;
1886 	}
1887 
1888 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1889 					 SKB_GSO_GRE_CSUM |
1890 					 SKB_GSO_IPXIP4 |
1891 					 SKB_GSO_IPXIP6 |
1892 					 SKB_GSO_UDP_TUNNEL |
1893 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
1894 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1895 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1896 			l4.udp->len = 0;
1897 
1898 			/* determine offset of outer transport header */
1899 			l4_start = (u8)(l4.hdr - skb->data);
1900 
1901 			/* remove payload length from outer checksum */
1902 			paylen = skb->len - l4_start;
1903 			csum_replace_by_diff(&l4.udp->check,
1904 					     (__force __wsum)htonl(paylen));
1905 		}
1906 
1907 		/* reset pointers to inner headers */
1908 		ip.hdr = skb_inner_network_header(skb);
1909 		l4.hdr = skb_inner_transport_header(skb);
1910 
1911 		/* initialize inner IP header fields */
1912 		if (ip.v4->version == 4) {
1913 			ip.v4->tot_len = 0;
1914 			ip.v4->check = 0;
1915 		} else {
1916 			ip.v6->payload_len = 0;
1917 		}
1918 	}
1919 
1920 	/* determine offset of transport header */
1921 	l4_start = (u8)(l4.hdr - skb->data);
1922 
1923 	/* remove payload length from checksum */
1924 	paylen = skb->len - l4_start;
1925 
1926 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1927 		csum_replace_by_diff(&l4.udp->check,
1928 				     (__force __wsum)htonl(paylen));
1929 		/* compute length of UDP segmentation header */
1930 		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
1931 	} else {
1932 		csum_replace_by_diff(&l4.tcp->check,
1933 				     (__force __wsum)htonl(paylen));
1934 		/* compute length of TCP segmentation header */
1935 		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
1936 	}
1937 
1938 	/* update gso_segs and bytecount */
1939 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1940 	first->bytecount += (first->gso_segs - 1) * off->header_len;
1941 
1942 	cd_tso_len = skb->len - off->header_len;
1943 	cd_mss = skb_shinfo(skb)->gso_size;
1944 
1945 	/* record cdesc_qw1 with TSO parameters */
1946 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1947 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1948 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1949 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1950 	first->tx_flags |= ICE_TX_FLAGS_TSO;
1951 	return 1;
1952 }
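
/* Worked example for the context descriptor built above (all numbers are
 * hypothetical): an skb of 7066 bytes with a 66 byte Ethernet/IPv4/TCP header
 * (doff = 8) and gso_size = 1400 yields l4_start = 34, header_len = 32 + 34
 * = 66, cd_tso_len = 7066 - 66 = 7000 and gso_segs = 5, so
 *
 *	cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
 *		  (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
 *		  (7000ULL << ICE_TXD_CTX_QW1_TSO_LEN_S) |
 *		  (1400ULL << ICE_TXD_CTX_QW1_MSS_S);
 *
 * while first->bytecount grows by (5 - 1) * 66 to account for the replicated
 * headers of the additional segments.
 */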
1953 
1954 /**
1955  * ice_txd_use_count  - estimate the number of descriptors needed for Tx
1956  * @size: transmit request size in bytes
1957  *
1958  * Due to hardware alignment restrictions (4K alignment), we need to
1959  * assume that we can have no more than 12K of data per descriptor, even
1960  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1961  * Thus, we need to divide by 12K. But division is slow! Instead,
1962  * we decompose the operation into shifts and one relatively cheap
1963  * multiply operation.
1964  *
1965  * To divide by 12K, we first divide by 4K, then divide by 3:
1966  *     To divide by 4K, shift right by 12 bits
1967  *     To divide by 3, multiply by 85, then divide by 256
1968  *     (Divide by 256 is done by shifting right by 8 bits)
1969  * Finally, we add one to round up. Because 256 isn't an exact multiple of
1970  * 3, we'll underestimate near each multiple of 12K. This is actually more
1971  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1972  * segment. For our purposes this is accurate out to 1M which is orders of
1973  * magnitude greater than our largest possible GSO size.
1974  *
1975  * This would then be implemented as:
1976  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1977  *
1978  * Since multiplication and division are commutative, we can reorder
1979  * operations into:
1980  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1981  */
1982 static unsigned int ice_txd_use_count(unsigned int size)
1983 {
1984 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1985 }
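
/* Illustrative self-check, not wired into the driver (the helper name is made
 * up for this sketch): it compares the multiply/shift approximation above
 * against a plain division by 12K.  For example, size = 60000 gives
 * (60000 * 85) >> 20 = 4, so 5 descriptors with the +1, matching
 * 60000 / 12288 = 4 plus 1.  At or just above each multiple of 12K the
 * approximation may come out one descriptor short, which the 4K - 1 of
 * per-descriptor slack discussed above absorbs; for sizes up to about 1M the
 * two forms agree to within one descriptor.
 */
static bool __maybe_unused ice_txd_use_count_check(unsigned int size)
{
	unsigned int approx = ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
	unsigned int exact = size / (12 * 1024) + ICE_DESCS_FOR_SKB_DATA_PTR;

	return approx == exact || approx + 1 == exact;
}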
1986 
1987 /**
1988  * ice_xmit_desc_count - calculate number of Tx descriptors needed
1989  * @skb: send buffer
1990  *
1991  * Returns number of data descriptors needed for this skb.
1992  */
1993 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1994 {
1995 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1996 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1997 	unsigned int count = 0, size = skb_headlen(skb);
1998 
1999 	for (;;) {
2000 		count += ice_txd_use_count(size);
2001 
2002 		if (!nr_frags--)
2003 			break;
2004 
2005 		size = skb_frag_size(frag++);
2006 	}
2007 
2008 	return count;
2009 }
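
/* Illustrative example (sizes are hypothetical): an skb with a 1514 byte
 * linear head and two 4096 byte page fragments walks the loop above as
 * ice_txd_use_count(1514) + 2 * ice_txd_use_count(4096) = 1 + 1 + 1 = 3
 * data descriptors, since each piece fits well under 12K.
 */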
2010 
2011 /**
2012  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2013  * @skb: send buffer
2014  *
2015  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2016  * and so we need to figure out the cases where we need to linearize the skb.
2017  *
2018  * For TSO we need to count the TSO header and segment payload separately.
2019  * As such we need to check cases where we have 7 fragments or more as we
2020  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2021  * the segment payload in the first descriptor, and another 7 for the
2022  * fragments.
2023  */
2024 static bool __ice_chk_linearize(struct sk_buff *skb)
2025 {
2026 	const skb_frag_t *frag, *stale;
2027 	int nr_frags, sum;
2028 
2029 	/* no need to check if number of frags is less than 7 */
2030 	nr_frags = skb_shinfo(skb)->nr_frags;
2031 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2032 		return false;
2033 
2034 	/* We need to walk through the list and validate that each group
2035 	 * of 6 fragments totals at least gso_size.
2036 	 */
2037 	nr_frags -= ICE_MAX_BUF_TXD - 2;
2038 	frag = &skb_shinfo(skb)->frags[0];
2039 
2040 	/* Initialize sum to 1 - gso_size, i.e. the negative of gso_size
2041 	 * minus 1. This is the worst case scenario, in which the frag ahead
2042 	 * of us provides only one byte; it is why we are limited to 6
2043 	 * descriptors for a single transmit, as the header and previous
2044 	 * fragment are already consuming 2 descriptors.
2045 	 */
2046 	sum = 1 - skb_shinfo(skb)->gso_size;
2047 
2048 	/* Add size of frags 0 through 4 to create our initial sum */
2049 	sum += skb_frag_size(frag++);
2050 	sum += skb_frag_size(frag++);
2051 	sum += skb_frag_size(frag++);
2052 	sum += skb_frag_size(frag++);
2053 	sum += skb_frag_size(frag++);
2054 
2055 	/* Walk through fragments adding latest fragment, testing it, and
2056 	 * then removing stale fragments from the sum.
2057 	 */
2058 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2059 		int stale_size = skb_frag_size(stale);
2060 
2061 		sum += skb_frag_size(frag++);
2062 
2063 		/* The stale fragment may present us with a smaller
2064 		 * descriptor than the actual fragment size. To account
2065 		 * for that we need to remove all the data on the front and
2066 		 * figure out what the remainder would be in the last
2067 		 * descriptor associated with the fragment.
2068 		 */
2069 		if (stale_size > ICE_MAX_DATA_PER_TXD) {
2070 			int align_pad = -(skb_frag_off(stale)) &
2071 					(ICE_MAX_READ_REQ_SIZE - 1);
2072 
2073 			sum -= align_pad;
2074 			stale_size -= align_pad;
2075 
2076 			do {
2077 				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2078 				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2079 			} while (stale_size > ICE_MAX_DATA_PER_TXD);
2080 		}
2081 
2082 		/* if sum is negative we failed to make sufficient progress */
2083 		if (sum < 0)
2084 			return true;
2085 
2086 		if (!nr_frags--)
2087 			break;
2088 
2089 		sum -= stale_size;
2090 	}
2091 
2092 	return false;
2093 }
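
/* Worked example of the sliding window above (numbers are hypothetical): for
 * a TSO skb with gso_size = 1000 and 7 fragments of 100 bytes each, sum
 * starts at 1 - 1000 = -999, the first five fragments raise it to -499 and
 * adding the sixth only reaches -399, so no group of 6 fragments covers one
 * full segment and the function returns true to force linearization.
 */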
2094 
2095 /**
2096  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2097  * @skb:      send buffer
2098  * @count:    number of buffers used
2099  *
2100  * Note: Our HW can't scatter-gather more than 8 fragments to build
2101  * a packet on the wire and so we need to figure out the cases where we
2102  * need to linearize the skb.
2103  */
2104 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2105 {
2106 	/* Both TSO and single send will work if count is less than 8 */
2107 	if (likely(count < ICE_MAX_BUF_TXD))
2108 		return false;
2109 
2110 	if (skb_is_gso(skb))
2111 		return __ice_chk_linearize(skb);
2112 
2113 	/* we can support up to 8 data buffers for a single send */
2114 	return count != ICE_MAX_BUF_TXD;
2115 }
2116 
2117 /**
2118  * ice_tstamp - set up context descriptor for hardware timestamp
2119  * @tx_ring: pointer to the Tx ring to send buffer on
2120  * @skb: pointer to the SKB we're sending
2121  * @first: Tx buffer
2122  * @off: Tx offload parameters
2123  */
2124 static void
2125 ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2126 	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2127 {
2128 	s8 idx;
2129 
2130 	/* only timestamp the outbound packet if the user has requested it */
2131 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2132 		return;
2133 
2134 	/* Tx timestamps cannot be sampled when doing TSO */
2135 	if (first->tx_flags & ICE_TX_FLAGS_TSO)
2136 		return;
2137 
2138 	/* Grab an open timestamp slot */
2139 	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2140 	if (idx < 0) {
2141 		tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
2142 		return;
2143 	}
2144 
2145 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2146 			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2147 			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2148 	first->tx_flags |= ICE_TX_FLAGS_TSYN;
2149 }
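
/* Illustrative note: the granted timestamp slot index is carried in the same
 * Quad Word 1 bits that ice_tso() uses for the TSO length, e.g. idx = 3 adds
 * (3ULL << ICE_TXD_CTX_QW1_TSO_LEN_S) to cd_qw1, which is consistent with the
 * early return above that keeps TSO and Tx timestamping mutually exclusive.
 */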
2150 
2151 /**
2152  * ice_xmit_frame_ring - Sends buffer on Tx ring
2153  * @skb: send buffer
2154  * @tx_ring: ring to send buffer on
2155  *
2156  * Returns NETDEV_TX_OK if sent, else an error code
2157  */
2158 static netdev_tx_t
2159 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2160 {
2161 	struct ice_tx_offload_params offload = { 0 };
2162 	struct ice_vsi *vsi = tx_ring->vsi;
2163 	struct ice_tx_buf *first;
2164 	struct ethhdr *eth;
2165 	unsigned int count;
2166 	int tso, csum;
2167 
2168 	ice_trace(xmit_frame_ring, tx_ring, skb);
2169 
2170 	/* record the location of the first descriptor for this packet */
2171 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
2172 
2173 	count = ice_xmit_desc_count(skb);
2174 	if (ice_chk_linearize(skb, count)) {
2175 		if (__skb_linearize(skb))
2176 			goto out_drop;
2177 		count = ice_txd_use_count(skb->len);
2178 		ice_stats_inc(tx_ring->ring_stats, tx_linearize);
2179 	}
2180 
2181 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2182 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2183 	 *       + 4 desc gap to avoid the cache line where head is,
2184 	 *       + 1 desc for context descriptor,
2185 	 * otherwise try next time
2186 	 */
2187 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2188 			      ICE_DESCS_FOR_CTX_DESC)) {
2189 		ice_stats_inc(tx_ring->ring_stats, tx_busy);
2190 		return NETDEV_TX_BUSY;
2191 	}
2192 
2193 	/* prefetch for bql data which is infrequently used */
2194 	netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2195 
2196 	offload.tx_ring = tx_ring;
2197 
2198 	first->skb = skb;
2199 	first->type = ICE_TX_BUF_SKB;
2200 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2201 	first->gso_segs = 1;
2202 	first->tx_flags = 0;
2203 
2204 	/* prepare the VLAN tagging flags for Tx */
2205 	ice_tx_prepare_vlan_flags(tx_ring, first);
2206 	if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2207 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2208 					(ICE_TX_CTX_DESC_IL2TAG2 <<
2209 					ICE_TXD_CTX_QW1_CMD_S));
2210 		offload.cd_l2tag2 = first->vid;
2211 	}
2212 
2213 	/* set up TSO offload */
2214 	tso = ice_tso(first, &offload);
2215 	if (tso < 0)
2216 		goto out_drop;
2217 
2218 	/* always set up Tx checksum offload */
2219 	csum = ice_tx_csum(first, &offload);
2220 	if (csum < 0)
2221 		goto out_drop;
2222 
2223 	/* allow CONTROL frames to egress from the main VSI if FW LLDP is disabled */
2224 	eth = (struct ethhdr *)skb_mac_header(skb);
2225 
2226 	if ((ice_is_switchdev_running(vsi->back) ||
2227 	     ice_lag_is_switchdev_running(vsi->back)) &&
2228 	    vsi->type != ICE_VSI_SF)
2229 		ice_eswitch_set_target_vsi(skb, &offload);
2230 	else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2231 			   eth->h_proto == htons(ETH_P_LLDP)) &&
2232 			   vsi->type == ICE_VSI_PF &&
2233 			   vsi->port_info->qos_cfg.is_sw_lldp))
2234 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2235 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2236 					ICE_TXD_CTX_QW1_CMD_S);
2237 
2238 	ice_tstamp(tx_ring, skb, first, &offload);
2239 
2240 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2241 		struct ice_tx_ctx_desc *cdesc;
2242 		u16 i = tx_ring->next_to_use;
2243 
2244 		/* grab the next descriptor */
2245 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2246 		i++;
2247 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2248 
2249 		/* setup context descriptor */
2250 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2251 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2252 		cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
2253 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2254 	}
2255 
2256 	ice_tx_map(tx_ring, first, &offload);
2257 	return NETDEV_TX_OK;
2258 
2259 out_drop:
2260 	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2261 	dev_kfree_skb_any(skb);
2262 	first->type = ICE_TX_BUF_EMPTY;
2263 	return NETDEV_TX_OK;
2264 }
2265 
2266 /**
2267  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2268  * @skb: send buffer
2269  * @netdev: network interface device structure
2270  *
2271  * Returns NETDEV_TX_OK if sent, else an error code
2272  */
2273 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2274 {
2275 	struct ice_netdev_priv *np = netdev_priv(netdev);
2276 	struct ice_vsi *vsi = np->vsi;
2277 	struct ice_tx_ring *tx_ring;
2278 
2279 	tx_ring = vsi->tx_rings[skb->queue_mapping];
2280 
2281 	/* hardware can't handle really short frames; pad to ICE_MIN_TX_LEN here,
2282 	 * hardware padding works beyond this point
2283 	 */
2284 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2285 		return NETDEV_TX_OK;
2286 
2287 	return ice_xmit_frame_ring(skb, tx_ring);
2288 }
2289 
2290 /**
2291  * ice_get_dscp_up - return the UP/TC value for a SKB
2292  * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
2293  * @skb: SKB to query for info to determine UP/TC
2294  *
2295  * This function is to only be called when the PF is in L3 DSCP PFC mode
2296  * This function should only be called when the PF is in L3 DSCP PFC mode
2297 static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2298 {
2299 	u8 dscp = 0;
2300 
2301 	if (skb->protocol == htons(ETH_P_IP))
2302 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2303 	else if (skb->protocol == htons(ETH_P_IPV6))
2304 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2305 
2306 	return dcbcfg->dscp_map[dscp];
2307 }
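
/* Illustrative example: an IPv4 packet marked EF has a DS field of 0xb8, so
 * dscp = 0xb8 >> 2 = 46 and the function returns dcbcfg->dscp_map[46], which
 * ice_select_queue() below then writes into skb->priority before handing the
 * skb to netdev_pick_tx().
 */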
2308 
2309 u16
2310 ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
2311 		 struct net_device *sb_dev)
2312 {
2313 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
2314 	struct ice_dcbx_cfg *dcbcfg;
2315 
2316 	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2317 	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2318 		skb->priority = ice_get_dscp_up(dcbcfg, skb);
2319 
2320 	return netdev_pick_tx(netdev, skb, sb_dev);
2321 }
2322 
2323 /**
2324  * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2325  * @tx_ring: tx_ring to clean
2326  */
2327 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2328 {
2329 	struct ice_vsi *vsi = tx_ring->vsi;
2330 	s16 i = tx_ring->next_to_clean;
2331 	int budget = ICE_DFLT_IRQ_WORK;
2332 	struct ice_tx_desc *tx_desc;
2333 	struct ice_tx_buf *tx_buf;
2334 
2335 	tx_buf = &tx_ring->tx_buf[i];
2336 	tx_desc = ICE_TX_DESC(tx_ring, i);
2337 	i -= tx_ring->count;
2338 
2339 	do {
2340 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2341 
2342 		/* if next_to_watch is not set then there is no pending work */
2343 		if (!eop_desc)
2344 			break;
2345 
2346 		/* prevent any other reads prior to eop_desc */
2347 		smp_rmb();
2348 
2349 		/* if the descriptor isn't done, no work to do */
2350 		if (!(eop_desc->cmd_type_offset_bsz &
2351 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2352 			break;
2353 
2354 		/* clear next_to_watch to prevent false hangs */
2355 		tx_buf->next_to_watch = NULL;
2356 		tx_desc->buf_addr = 0;
2357 		tx_desc->cmd_type_offset_bsz = 0;
2358 
2359 		/* move past filter desc */
2360 		tx_buf++;
2361 		tx_desc++;
2362 		i++;
2363 		if (unlikely(!i)) {
2364 			i -= tx_ring->count;
2365 			tx_buf = tx_ring->tx_buf;
2366 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2367 		}
2368 
2369 		/* unmap the data header */
2370 		if (dma_unmap_len(tx_buf, len))
2371 			dma_unmap_single(tx_ring->dev,
2372 					 dma_unmap_addr(tx_buf, dma),
2373 					 dma_unmap_len(tx_buf, len),
2374 					 DMA_TO_DEVICE);
2375 		if (tx_buf->type == ICE_TX_BUF_DUMMY)
2376 			devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2377 
2378 		/* reset buf and desc, clear next_to_watch to prevent false hangs */
2379 		tx_buf->type = ICE_TX_BUF_EMPTY;
2380 		tx_buf->tx_flags = 0;
2381 		tx_buf->next_to_watch = NULL;
2382 		dma_unmap_len_set(tx_buf, len, 0);
2383 		tx_desc->buf_addr = 0;
2384 		tx_desc->cmd_type_offset_bsz = 0;
2385 
2386 		/* move past eop_desc for start of next FD desc */
2387 		tx_buf++;
2388 		tx_desc++;
2389 		i++;
2390 		if (unlikely(!i)) {
2391 			i -= tx_ring->count;
2392 			tx_buf = tx_ring->tx_buf;
2393 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2394 		}
2395 
2396 		budget--;
2397 	} while (likely(budget));
2398 
2399 	i += tx_ring->count;
2400 	tx_ring->next_to_clean = i;
2401 
2402 	/* re-enable interrupt if needed */
2403 	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2404 }
2405