1 // SPDX-License-Identifier: GPL-2.0
2 /*******************************************************************************
3  *
4  * Intel Ethernet Controller XL710 Family Linux Driver
5  * Copyright(c) 2013 - 2016 Intel Corporation.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along
17  * with this program.  If not, see <http://www.gnu.org/licenses/>.
18  *
19  * The full GNU General Public License is included in this distribution in
20  * the file called "COPYING".
21  *
22  * Contact Information:
23  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25  *
26  ******************************************************************************/
27 
28 #include <linux/prefetch.h>
29 #include <net/busy_poll.h>
30 #include <linux/bpf_trace.h>
31 #include <net/xdp.h>
32 #include "i40e.h"
33 #include "i40e_trace.h"
34 #include "i40e_prototype.h"
35 
36 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
37 				u32 td_tag)
38 {
39 	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
40 			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
41 			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
42 			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
43 			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
44 }
45 
46 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
47 /**
48  * i40e_fdir - Generate a Flow Director descriptor based on fdata
49  * @tx_ring: Tx ring to send buffer on
50  * @fdata: Flow director filter data
51  * @add: Indicate if we are adding a rule or deleting one
52  *
53  **/
54 static void i40e_fdir(struct i40e_ring *tx_ring,
55 		      struct i40e_fdir_filter *fdata, bool add)
56 {
57 	struct i40e_filter_program_desc *fdir_desc;
58 	struct i40e_pf *pf = tx_ring->vsi->back;
59 	u32 flex_ptype, dtype_cmd;
60 	u16 i;
61 
62 	/* grab the next descriptor */
63 	i = tx_ring->next_to_use;
64 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
65 
66 	i++;
67 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
68 
69 	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
70 		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
71 
72 	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
73 		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
74 
75 	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
76 		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
77 
81 	/* Use LAN VSI Id if not programmed by user */
82 	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
83 		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
84 		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
85 
86 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
87 
88 	dtype_cmd |= add ?
89 		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
90 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
91 		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
92 		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;
93 
94 	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
95 		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
96 
97 	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
98 		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
99 
100 	if (fdata->cnt_index) {
101 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
102 		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
103 			     ((u32)fdata->cnt_index <<
104 			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
105 	}
106 
107 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
108 	fdir_desc->rsvd = cpu_to_le32(0);
109 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
110 	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
111 }
112 
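/* number of 1 ms waits allowed for two Tx descriptors to free up */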
113 #define I40E_FD_CLEAN_DELAY 10
114 /**
115  * i40e_program_fdir_filter - Program a Flow Director filter
116  * @fdir_data: Packet data that will be filter parameters
117  * @raw_packet: the pre-allocated packet buffer for FDir
118  * @pf: The PF pointer
119  * @add: True for add/update, False for remove
120  **/
121 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
122 				    u8 *raw_packet, struct i40e_pf *pf,
123 				    bool add)
124 {
125 	struct i40e_tx_buffer *tx_buf, *first;
126 	struct i40e_tx_desc *tx_desc;
127 	struct i40e_ring *tx_ring;
128 	struct i40e_vsi *vsi;
129 	struct device *dev;
130 	dma_addr_t dma;
131 	u32 td_cmd = 0;
132 	u16 i;
133 
134 	/* find existing FDIR VSI */
135 	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
136 	if (!vsi)
137 		return -ENOENT;
138 
139 	tx_ring = vsi->tx_rings[0];
140 	dev = tx_ring->dev;
141 
142 	/* we need two descriptors to add/del a filter and we can wait */
143 	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
144 		if (!i)
145 			return -EAGAIN;
146 		msleep_interruptible(1);
147 	}
148 
149 	dma = dma_map_single(dev, raw_packet,
150 			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
151 	if (dma_mapping_error(dev, dma))
152 		goto dma_fail;
153 
154 	/* grab the next descriptor */
155 	i = tx_ring->next_to_use;
156 	first = &tx_ring->tx_bi[i];
157 	i40e_fdir(tx_ring, fdir_data, add);
158 
159 	/* Now program a dummy descriptor */
160 	i = tx_ring->next_to_use;
161 	tx_desc = I40E_TX_DESC(tx_ring, i);
162 	tx_buf = &tx_ring->tx_bi[i];
163 
164 	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
165 
166 	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
167 
168 	/* record length, and DMA address */
169 	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
170 	dma_unmap_addr_set(tx_buf, dma, dma);
171 
172 	tx_desc->buffer_addr = cpu_to_le64(dma);
173 	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
174 
175 	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
176 	tx_buf->raw_buf = (void *)raw_packet;
177 
178 	tx_desc->cmd_type_offset_bsz =
179 		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
180 
181 	/* Force memory writes to complete before letting h/w
182 	 * know there are new descriptors to fetch.
183 	 */
184 	wmb();
185 
186 	/* Mark the data descriptor to be watched */
187 	first->next_to_watch = tx_desc;
188 
189 	writel(tx_ring->next_to_use, tx_ring->tail);
190 	return 0;
191 
192 dma_fail:
193 	return -1;
194 }
195 
196 #define IP_HEADER_OFFSET 14
197 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
198 /**
199  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
200  * @vsi: pointer to the targeted VSI
201  * @fd_data: the flow director data required for the FDir descriptor
202  * @add: true adds a filter, false removes it
203  *
204  * Returns 0 if the filters were successfully added or removed
205  **/
206 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
207 				   struct i40e_fdir_filter *fd_data,
208 				   bool add)
209 {
210 	struct i40e_pf *pf = vsi->back;
211 	struct udphdr *udp;
212 	struct iphdr *ip;
213 	u8 *raw_packet;
214 	int ret;
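	/* Dummy packet: Ethernet + IPv4 (protocol UDP) + UDP header with
	 * zeroed addresses and ports; the fields below are stamped in
	 * before the filter is programmed.
	 */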
215 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
216 		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
217 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
218 
219 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
220 	if (!raw_packet)
221 		return -ENOMEM;
222 	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
223 
224 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
225 	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
226 	      + sizeof(struct iphdr));
227 
228 	ip->daddr = fd_data->dst_ip;
229 	udp->dest = fd_data->dst_port;
230 	ip->saddr = fd_data->src_ip;
231 	udp->source = fd_data->src_port;
232 
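	/* stamp the user-supplied flex word into the dummy packet payload */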
233 	if (fd_data->flex_filter) {
234 		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
235 		__be16 pattern = fd_data->flex_word;
236 		u16 off = fd_data->flex_offset;
237 
238 		*((__force __be16 *)(payload + off)) = pattern;
239 	}
240 
241 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
242 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
243 	if (ret) {
244 		dev_info(&pf->pdev->dev,
245 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
246 			 fd_data->pctype, fd_data->fd_id, ret);
247 		/* Free the packet buffer since it wasn't added to the ring */
248 		kfree(raw_packet);
249 		return -EOPNOTSUPP;
250 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
251 		if (add)
252 			dev_info(&pf->pdev->dev,
253 				 "Filter OK for PCTYPE %d loc = %d\n",
254 				 fd_data->pctype, fd_data->fd_id);
255 		else
256 			dev_info(&pf->pdev->dev,
257 				 "Filter deleted for PCTYPE %d loc = %d\n",
258 				 fd_data->pctype, fd_data->fd_id);
259 	}
260 
261 	if (add)
262 		pf->fd_udp4_filter_cnt++;
263 	else
264 		pf->fd_udp4_filter_cnt--;
265 
266 	return 0;
267 }
268 
269 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
270 /**
271  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
272  * @vsi: pointer to the targeted VSI
273  * @fd_data: the flow director data required for the FDir descriptor
274  * @add: true adds a filter, false removes it
275  *
276  * Returns 0 if the filters were successfully added or removed
277  **/
278 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
279 				   struct i40e_fdir_filter *fd_data,
280 				   bool add)
281 {
282 	struct i40e_pf *pf = vsi->back;
283 	struct tcphdr *tcp;
284 	struct iphdr *ip;
285 	u8 *raw_packet;
286 	int ret;
287 	/* Dummy packet */
288 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
289 		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
290 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
291 		0x0, 0x72, 0, 0, 0, 0};
292 
293 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
294 	if (!raw_packet)
295 		return -ENOMEM;
296 	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
297 
298 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
299 	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
300 	      + sizeof(struct iphdr));
301 
302 	ip->daddr = fd_data->dst_ip;
303 	tcp->dest = fd_data->dst_port;
304 	ip->saddr = fd_data->src_ip;
305 	tcp->source = fd_data->src_port;
306 
307 	if (fd_data->flex_filter) {
308 		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
309 		__be16 pattern = fd_data->flex_word;
310 		u16 off = fd_data->flex_offset;
311 
312 		*((__force __be16 *)(payload + off)) = pattern;
313 	}
314 
315 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
316 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
317 	if (ret) {
318 		dev_info(&pf->pdev->dev,
319 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
320 			 fd_data->pctype, fd_data->fd_id, ret);
321 		/* Free the packet buffer since it wasn't added to the ring */
322 		kfree(raw_packet);
323 		return -EOPNOTSUPP;
324 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
325 		if (add)
326 			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
327 				 fd_data->pctype, fd_data->fd_id);
328 		else
329 			dev_info(&pf->pdev->dev,
330 				 "Filter deleted for PCTYPE %d loc = %d\n",
331 				 fd_data->pctype, fd_data->fd_id);
332 	}
333 
334 	if (add) {
335 		pf->fd_tcp4_filter_cnt++;
336 		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
337 		    I40E_DEBUG_FD & pf->hw.debug_mask)
338 			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
339 		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
340 	} else {
341 		pf->fd_tcp4_filter_cnt--;
342 	}
343 
344 	return 0;
345 }
346 
347 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
348 /**
349  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
350  * a specific flow spec
351  * @vsi: pointer to the targeted VSI
352  * @fd_data: the flow director data required for the FDir descriptor
353  * @add: true adds a filter, false removes it
354  *
355  * Returns 0 if the filters were successfully added or removed
356  **/
357 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
358 				    struct i40e_fdir_filter *fd_data,
359 				    bool add)
360 {
361 	struct i40e_pf *pf = vsi->back;
362 	struct sctphdr *sctp;
363 	struct iphdr *ip;
364 	u8 *raw_packet;
365 	int ret;
366 	/* Dummy packet */
367 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
368 		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
369 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
370 
371 	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
372 	if (!raw_packet)
373 		return -ENOMEM;
374 	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
375 
376 	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
377 	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
378 	      + sizeof(struct iphdr));
379 
380 	ip->daddr = fd_data->dst_ip;
381 	sctp->dest = fd_data->dst_port;
382 	ip->saddr = fd_data->src_ip;
383 	sctp->source = fd_data->src_port;
384 
385 	if (fd_data->flex_filter) {
386 		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
387 		__be16 pattern = fd_data->flex_word;
388 		u16 off = fd_data->flex_offset;
389 
390 		*((__force __be16 *)(payload + off)) = pattern;
391 	}
392 
393 	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
394 	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
395 	if (ret) {
396 		dev_info(&pf->pdev->dev,
397 			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
398 			 fd_data->pctype, fd_data->fd_id, ret);
399 		/* Free the packet buffer since it wasn't added to the ring */
400 		kfree(raw_packet);
401 		return -EOPNOTSUPP;
402 	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
403 		if (add)
404 			dev_info(&pf->pdev->dev,
405 				 "Filter OK for PCTYPE %d loc = %d\n",
406 				 fd_data->pctype, fd_data->fd_id);
407 		else
408 			dev_info(&pf->pdev->dev,
409 				 "Filter deleted for PCTYPE %d loc = %d\n",
410 				 fd_data->pctype, fd_data->fd_id);
411 	}
412 
413 	if (add)
414 		pf->fd_sctp4_filter_cnt++;
415 	else
416 		pf->fd_sctp4_filter_cnt--;
417 
418 	return 0;
419 }
420 
421 #define I40E_IP_DUMMY_PACKET_LEN 34
422 /**
423  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
424  * a specific flow spec
425  * @vsi: pointer to the targeted VSI
426  * @fd_data: the flow director data required for the FDir descriptor
427  * @add: true adds a filter, false removes it
428  *
429  * Returns 0 if the filters were successfully added or removed
430  **/
431 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
432 				  struct i40e_fdir_filter *fd_data,
433 				  bool add)
434 {
435 	struct i40e_pf *pf = vsi->back;
436 	struct iphdr *ip;
437 	u8 *raw_packet;
438 	int ret;
439 	int i;
440 	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
441 		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
442 		0, 0, 0, 0};
443 
444 	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
445 	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
446 		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
447 		if (!raw_packet)
448 			return -ENOMEM;
449 		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
450 		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
451 
452 		ip->saddr = fd_data->src_ip;
453 		ip->daddr = fd_data->dst_ip;
454 		ip->protocol = 0;
455 
456 		if (fd_data->flex_filter) {
457 			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
458 			__be16 pattern = fd_data->flex_word;
459 			u16 off = fd_data->flex_offset;
460 
461 			*((__force __be16 *)(payload + off)) = pattern;
462 		}
463 
464 		fd_data->pctype = i;
465 		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
466 		if (ret) {
467 			dev_info(&pf->pdev->dev,
468 				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
469 				 fd_data->pctype, fd_data->fd_id, ret);
470 			/* The packet buffer wasn't added to the ring so we
471 			 * need to free it now.
472 			 */
473 			kfree(raw_packet);
474 			return -EOPNOTSUPP;
475 		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
476 			if (add)
477 				dev_info(&pf->pdev->dev,
478 					 "Filter OK for PCTYPE %d loc = %d\n",
479 					 fd_data->pctype, fd_data->fd_id);
480 			else
481 				dev_info(&pf->pdev->dev,
482 					 "Filter deleted for PCTYPE %d loc = %d\n",
483 					 fd_data->pctype, fd_data->fd_id);
484 		}
485 	}
486 
487 	if (add)
488 		pf->fd_ip4_filter_cnt++;
489 	else
490 		pf->fd_ip4_filter_cnt--;
491 
492 	return 0;
493 }
494 
495 /**
496  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
497  * @vsi: pointer to the targeted VSI
498  * @input: the flow director filter to add or remove
499  * @add: true adds a filter, false removes it
500  *
501  **/
502 int i40e_add_del_fdir(struct i40e_vsi *vsi,
503 		      struct i40e_fdir_filter *input, bool add)
504 {
505 	struct i40e_pf *pf = vsi->back;
506 	int ret;
507 
508 	switch (input->flow_type & ~FLOW_EXT) {
509 	case TCP_V4_FLOW:
510 		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
511 		break;
512 	case UDP_V4_FLOW:
513 		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
514 		break;
515 	case SCTP_V4_FLOW:
516 		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
517 		break;
518 	case IP_USER_FLOW:
519 		switch (input->ip4_proto) {
520 		case IPPROTO_TCP:
521 			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
522 			break;
523 		case IPPROTO_UDP:
524 			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
525 			break;
526 		case IPPROTO_SCTP:
527 			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
528 			break;
529 		case IPPROTO_IP:
530 			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
531 			break;
532 		default:
533 			/* We cannot support masking based on protocol */
534 			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
535 				 input->ip4_proto);
536 			return -EINVAL;
537 		}
538 		break;
539 	default:
540 		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
541 			 input->flow_type);
542 		return -EINVAL;
543 	}
544 
545 	/* The buffer allocated here will normally be freed by
546 	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
547 	 * completion. In the event of an error adding the buffer to the FDIR
548 	 * ring, it will immediately be freed. It may also be freed by
549 	 * i40e_clean_tx_ring() when closing the VSI.
550 	 */
551 	return ret;
552 }
553 
554 /**
555  * i40e_fd_handle_status - check the Programming Status for FD
556  * @rx_ring: the Rx ring for this descriptor
557  * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
558  * @prog_id: the id originally used for programming
559  *
560  * This is used to verify if the FD programming or invalidation
561  * requested by SW to the HW is successful or not and take actions accordingly.
562  **/
563 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
564 				  union i40e_rx_desc *rx_desc, u8 prog_id)
565 {
566 	struct i40e_pf *pf = rx_ring->vsi->back;
567 	struct pci_dev *pdev = pf->pdev;
568 	u32 fcnt_prog, fcnt_avail;
569 	u32 error;
570 	u64 qw;
571 
572 	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
573 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
574 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
575 
576 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
577 		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
578 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
579 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
580 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
581 				 pf->fd_inv);
582 
583 		/* Check if the programming error is for ATR.
584 		 * If so, auto disable ATR and set a state for
585 		 * flush in progress. Next time we come here if flush is in
586 		 * progress do nothing, once flush is complete the state will
587 		 * be cleared.
588 		 */
589 		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
590 			return;
591 
592 		pf->fd_add_err++;
593 		/* store the current atr filter count */
594 		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
595 
596 		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
597 		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
598 			/* These set_bit() calls aren't atomic with the
599 			 * test_bit() here, but worst case we potentially
600 			 * disable ATR and queue a flush right after SB
601 			 * support is re-enabled. That shouldn't cause an
602 			 * issue in practice
603 			 */
604 			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
605 			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
606 		}
607 
608 		/* filter programming failed most likely due to table full */
609 		fcnt_prog = i40e_get_global_fd_count(pf);
610 		fcnt_avail = pf->fdir_pf_filter_count;
611 		/* If ATR is running fcnt_prog can quickly change,
612 		 * if we are very close to full, it makes sense to disable
613 		 * FD ATR/SB and then re-enable it when there is room.
614 		 */
615 		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
616 			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
617 			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
618 					      pf->state))
619 				if (I40E_DEBUG_FD & pf->hw.debug_mask)
620 					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
621 		}
622 	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
623 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
624 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
625 				 rx_desc->wb.qword0.hi_dword.fd_id);
626 	}
627 }
628 
629 /**
630  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
631  * @ring:      the ring that owns the buffer
632  * @tx_buffer: the buffer to free
633  **/
634 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
635 					    struct i40e_tx_buffer *tx_buffer)
636 {
637 	if (tx_buffer->skb) {
638 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
639 			kfree(tx_buffer->raw_buf);
640 		else if (ring_is_xdp(ring))
641 			page_frag_free(tx_buffer->raw_buf);
642 		else
643 			dev_kfree_skb_any(tx_buffer->skb);
644 		if (dma_unmap_len(tx_buffer, len))
645 			dma_unmap_single(ring->dev,
646 					 dma_unmap_addr(tx_buffer, dma),
647 					 dma_unmap_len(tx_buffer, len),
648 					 DMA_TO_DEVICE);
649 	} else if (dma_unmap_len(tx_buffer, len)) {
650 		dma_unmap_page(ring->dev,
651 			       dma_unmap_addr(tx_buffer, dma),
652 			       dma_unmap_len(tx_buffer, len),
653 			       DMA_TO_DEVICE);
654 	}
655 
656 	tx_buffer->next_to_watch = NULL;
657 	tx_buffer->skb = NULL;
658 	dma_unmap_len_set(tx_buffer, len, 0);
659 	/* tx_buffer must be completely set up in the transmit path */
660 }
661 
662 /**
663  * i40e_clean_tx_ring - Free all the Tx ring buffers
664  * @tx_ring: ring to be cleaned
665  **/
666 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
667 {
668 	unsigned long bi_size;
669 	u16 i;
670 
671 	/* ring already cleared, nothing to do */
672 	if (!tx_ring->tx_bi)
673 		return;
674 
675 	/* Free all the Tx ring sk_buffs */
676 	for (i = 0; i < tx_ring->count; i++)
677 		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
678 
679 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
680 	memset(tx_ring->tx_bi, 0, bi_size);
681 
682 	/* Zero out the descriptor ring */
683 	memset(tx_ring->desc, 0, tx_ring->size);
684 
685 	tx_ring->next_to_use = 0;
686 	tx_ring->next_to_clean = 0;
687 
688 	if (!tx_ring->netdev)
689 		return;
690 
691 	/* cleanup Tx queue statistics */
692 	netdev_tx_reset_queue(txring_txq(tx_ring));
693 }
694 
695 /**
696  * i40e_free_tx_resources - Free Tx resources per queue
697  * @tx_ring: Tx descriptor ring for a specific queue
698  *
699  * Free all transmit software resources
700  **/
701 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
702 {
703 	i40e_clean_tx_ring(tx_ring);
704 	kfree(tx_ring->tx_bi);
705 	tx_ring->tx_bi = NULL;
706 
707 	if (tx_ring->desc) {
708 		dma_free_coherent(tx_ring->dev, tx_ring->size,
709 				  tx_ring->desc, tx_ring->dma);
710 		tx_ring->desc = NULL;
711 	}
712 }
713 
714 /**
715  * i40e_get_tx_pending - how many tx descriptors not processed
716  * @ring: the ring of descriptors
717  * @in_sw: use SW variables
718  *
719  * Since there is no access to the ring head register
720  * in XL710, we need to use our local copies
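 * (e.g. head 510, tail 2 on a 512 descriptor ring gives 4 pending)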
721  **/
722 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
723 {
724 	u32 head, tail;
725 
726 	if (!in_sw) {
727 		head = i40e_get_head(ring);
728 		tail = readl(ring->tail);
729 	} else {
730 		head = ring->next_to_clean;
731 		tail = ring->next_to_use;
732 	}
733 
734 	if (head != tail)
735 		return (head < tail) ?
736 			tail - head : (tail + ring->count - head);
737 
738 	return 0;
739 }
740 
741 /**
742  * i40e_detect_recover_hung - Function to detect and recover hung queues
743  * @vsi: pointer to vsi struct with tx queues
744  *
745  * VSI has netdev and netdev has TX queues. This function checks each of
746  * those TX queues and, if one appears hung, triggers recovery by issuing a SW interrupt.
747  **/
748 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
749 {
750 	struct i40e_ring *tx_ring = NULL;
751 	struct net_device *netdev;
752 	unsigned int i;
753 	int packets;
754 
755 	if (!vsi)
756 		return;
757 
758 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
759 		return;
760 
761 	netdev = vsi->netdev;
762 	if (!netdev)
763 		return;
764 
765 	if (!netif_carrier_ok(netdev))
766 		return;
767 
768 	for (i = 0; i < vsi->num_queue_pairs; i++) {
769 		tx_ring = vsi->tx_rings[i];
770 		if (tx_ring && tx_ring->desc) {
771 			/* If packet counter has not changed the queue is
772 			 * likely stalled, so force an interrupt for this
773 			 * queue.
774 			 *
775 			 * prev_pkt_ctr would be negative if there was no
776 			 * pending work.
777 			 */
778 			packets = tx_ring->stats.packets & INT_MAX;
779 			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
780 				i40e_force_wb(vsi, tx_ring->q_vector);
781 				continue;
782 			}
783 
784 			/* Memory barrier between read of packet count and call
785 			 * to i40e_get_tx_pending()
786 			 */
787 			smp_rmb();
788 			tx_ring->tx_stats.prev_pkt_ctr =
789 			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
790 		}
791 	}
792 }
793 
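/* kick a writeback when fewer than WB_STRIDE descriptors are still pending */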
794 #define WB_STRIDE 4
795 
796 /**
797  * i40e_clean_tx_irq - Reclaim resources after transmit completes
798  * @vsi: the VSI we care about
799  * @tx_ring: Tx ring to clean
800  * @napi_budget: Used to determine if we are in netpoll
801  *
802  * Returns true if there's any budget left (e.g. the clean is finished)
803  **/
804 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
805 			      struct i40e_ring *tx_ring, int napi_budget)
806 {
807 	u16 i = tx_ring->next_to_clean;
808 	struct i40e_tx_buffer *tx_buf;
809 	struct i40e_tx_desc *tx_head;
810 	struct i40e_tx_desc *tx_desc;
811 	unsigned int total_bytes = 0, total_packets = 0;
812 	unsigned int budget = vsi->work_limit;
813 
814 	tx_buf = &tx_ring->tx_bi[i];
815 	tx_desc = I40E_TX_DESC(tx_ring, i);
816 	i -= tx_ring->count;
817 
818 	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
819 
820 	do {
821 		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
822 
823 		/* if next_to_watch is not set then there is no work pending */
824 		if (!eop_desc)
825 			break;
826 
827 		/* prevent any other reads prior to eop_desc */
828 		smp_rmb();
829 
830 		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
831 		/* we have caught up to head, no work left to do */
832 		if (tx_head == tx_desc)
833 			break;
834 
835 		/* clear next_to_watch to prevent false hangs */
836 		tx_buf->next_to_watch = NULL;
837 
838 		/* update the statistics for this packet */
839 		total_bytes += tx_buf->bytecount;
840 		total_packets += tx_buf->gso_segs;
841 
842 		/* free the skb/XDP data */
843 		if (ring_is_xdp(tx_ring))
844 			page_frag_free(tx_buf->raw_buf);
845 		else
846 			napi_consume_skb(tx_buf->skb, napi_budget);
847 
848 		/* unmap skb header data */
849 		dma_unmap_single(tx_ring->dev,
850 				 dma_unmap_addr(tx_buf, dma),
851 				 dma_unmap_len(tx_buf, len),
852 				 DMA_TO_DEVICE);
853 
854 		/* clear tx_buffer data */
855 		tx_buf->skb = NULL;
856 		dma_unmap_len_set(tx_buf, len, 0);
857 
858 		/* unmap remaining buffers */
859 		while (tx_desc != eop_desc) {
860 			i40e_trace(clean_tx_irq_unmap,
861 				   tx_ring, tx_desc, tx_buf);
862 
863 			tx_buf++;
864 			tx_desc++;
865 			i++;
866 			if (unlikely(!i)) {
867 				i -= tx_ring->count;
868 				tx_buf = tx_ring->tx_bi;
869 				tx_desc = I40E_TX_DESC(tx_ring, 0);
870 			}
871 
872 			/* unmap any remaining paged data */
873 			if (dma_unmap_len(tx_buf, len)) {
874 				dma_unmap_page(tx_ring->dev,
875 					       dma_unmap_addr(tx_buf, dma),
876 					       dma_unmap_len(tx_buf, len),
877 					       DMA_TO_DEVICE);
878 				dma_unmap_len_set(tx_buf, len, 0);
879 			}
880 		}
881 
882 		/* move us one more past the eop_desc for start of next pkt */
883 		tx_buf++;
884 		tx_desc++;
885 		i++;
886 		if (unlikely(!i)) {
887 			i -= tx_ring->count;
888 			tx_buf = tx_ring->tx_bi;
889 			tx_desc = I40E_TX_DESC(tx_ring, 0);
890 		}
891 
892 		prefetch(tx_desc);
893 
894 		/* update budget accounting */
895 		budget--;
896 	} while (likely(budget));
897 
898 	i += tx_ring->count;
899 	tx_ring->next_to_clean = i;
900 	u64_stats_update_begin(&tx_ring->syncp);
901 	tx_ring->stats.bytes += total_bytes;
902 	tx_ring->stats.packets += total_packets;
903 	u64_stats_update_end(&tx_ring->syncp);
904 	tx_ring->q_vector->tx.total_bytes += total_bytes;
905 	tx_ring->q_vector->tx.total_packets += total_packets;
906 
907 	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
908 		/* check to see if there are < 4 descriptors
909 		 * waiting to be written back, then kick the hardware to force
910 		 * them to be written back in case we stay in NAPI.
911 		 * In this mode on X722 we do not enable Interrupt.
912 		 */
913 		unsigned int j = i40e_get_tx_pending(tx_ring, false);
914 
915 		if (budget &&
916 		    ((j / WB_STRIDE) == 0) && (j > 0) &&
917 		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
918 		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
919 			tx_ring->arm_wb = true;
920 	}
921 
922 	if (ring_is_xdp(tx_ring))
923 		return !!budget;
924 
925 	/* notify netdev of completed buffers */
926 	netdev_tx_completed_queue(txring_txq(tx_ring),
927 				  total_packets, total_bytes);
928 
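/* wake the queue once there is room for at least two worst-case packets */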
929 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
930 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
931 		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
932 		/* Make sure that anybody stopping the queue after this
933 		 * sees the new next_to_clean.
934 		 */
935 		smp_mb();
936 		if (__netif_subqueue_stopped(tx_ring->netdev,
937 					     tx_ring->queue_index) &&
938 		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
939 			netif_wake_subqueue(tx_ring->netdev,
940 					    tx_ring->queue_index);
941 			++tx_ring->tx_stats.restart_queue;
942 		}
943 	}
944 
945 	return !!budget;
946 }
947 
948 /**
949  * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
950  * @vsi: the VSI we care about
951  * @q_vector: the vector on which to enable writeback
952  *
953  **/
954 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
955 				  struct i40e_q_vector *q_vector)
956 {
957 	u16 flags = q_vector->tx.ring[0].flags;
958 	u32 val;
959 
960 	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
961 		return;
962 
963 	if (q_vector->arm_wb_state)
964 		return;
965 
966 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
967 		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
968 		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
969 
970 		wr32(&vsi->back->hw,
971 		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
972 		     val);
973 	} else {
974 		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
975 		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
976 
977 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
978 	}
979 	q_vector->arm_wb_state = true;
980 }
981 
982 /**
983  * i40e_force_wb - Issue SW Interrupt so HW does a wb
984  * @vsi: the VSI we care about
985  * @q_vector: the vector on which to force writeback
986  *
987  **/
988 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
989 {
990 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
991 		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
992 			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
993 			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
994 			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
995 			  /* allow 00 to be written to the index */
996 
997 		wr32(&vsi->back->hw,
998 		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
999 	} else {
1000 		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1001 			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1002 			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1003 			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1004 			/* allow 00 to be written to the index */
1005 
1006 		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1007 	}
1008 }
1009 
1010 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1011 					struct i40e_ring_container *rc)
1012 {
1013 	return &q_vector->rx == rc;
1014 }
1015 
1016 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1017 {
1018 	unsigned int divisor;
1019 
1020 	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1021 	case I40E_LINK_SPEED_40GB:
1022 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1023 		break;
1024 	case I40E_LINK_SPEED_25GB:
1025 	case I40E_LINK_SPEED_20GB:
1026 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1027 		break;
1028 	default:
1029 	case I40E_LINK_SPEED_10GB:
1030 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1031 		break;
1032 	case I40E_LINK_SPEED_1GB:
1033 	case I40E_LINK_SPEED_100MB:
1034 		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1035 		break;
1036 	}
1037 
1038 	return divisor;
1039 }
1040 
1041 /**
1042  * i40e_update_itr - update the dynamic ITR value based on statistics
1043  * @q_vector: structure containing interrupt and ring information
1044  * @rc: structure containing ring performance data
1045  *
1046  * Stores a new ITR value based on packets and byte
1047  * counts during the last interrupt.  The advantage of per interrupt
1048  * computation is faster updates and more accurate ITR for the current
1049  * traffic pattern.  Constants in this function were computed
1050  * based on theoretical maximum wire speed and thresholds were set based
1051  * on testing data as well as attempting to minimize response time
1052  * while increasing bulk throughput.
1053  **/
1054 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1055 			    struct i40e_ring_container *rc)
1056 {
1057 	unsigned int avg_wire_size, packets, bytes, itr;
1058 	unsigned long next_update = jiffies;
1059 
1060 	/* If we don't have any rings just leave ourselves set for maximum
1061 	 * possible latency so we take ourselves out of the equation.
1062 	 */
1063 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1064 		return;
1065 
1066 	/* For Rx we want to push the delay up and default to low latency.
1067 	 * for Tx we want to pull the delay down and default to high latency.
1068 	 */
1069 	itr = i40e_container_is_rx(q_vector, rc) ?
1070 	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1071 	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1072 
1073 	/* If we didn't update within up to 1 - 2 jiffies we can assume
1074 	 * that either packets are coming in so slow there hasn't been
1075 	 * any work, or that there is so much work that NAPI is dealing
1076 	 * with interrupt moderation and we don't need to do anything.
1077 	 */
1078 	if (time_after(next_update, rc->next_update))
1079 		goto clear_counts;
1080 
1081 	/* If itr_countdown is set it means we programmed an ITR within
1082 	 * the last 4 interrupt cycles. This has a side effect of us
1083 	 * potentially firing an early interrupt. In order to work around
1084 	 * this we need to throw out any data received for a few
1085 	 * interrupts following the update.
1086 	 */
1087 	if (q_vector->itr_countdown) {
1088 		itr = rc->target_itr;
1089 		goto clear_counts;
1090 	}
1091 
1092 	packets = rc->total_packets;
1093 	bytes = rc->total_bytes;
1094 
1095 	if (i40e_container_is_rx(q_vector, rc)) {
1096 		/* For Rx, if there are 1 to 4 packets and fewer than 9000
1097 		 * bytes, assume there is insufficient data to use the bulk
1098 		 * rate limiting approach unless Tx already uses it. We
1099 		 * are likely latency driven.
1100 		 */
1101 		if (packets && packets < 4 && bytes < 9000 &&
1102 		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1103 			itr = I40E_ITR_ADAPTIVE_LATENCY;
1104 			goto adjust_by_size;
1105 		}
1106 	} else if (packets < 4) {
1107 		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
1108 		 * bulk mode and we are receiving 4 or fewer packets just
1109 		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1110 		 * that the Rx can relax.
1111 		 */
1112 		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1113 		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1114 		     I40E_ITR_ADAPTIVE_MAX_USECS)
1115 			goto clear_counts;
1116 	} else if (packets > 32) {
1117 		/* If we have processed over 32 packets in a single interrupt
1118 		 * for Tx assume we need to switch over to "bulk" mode.
1119 		 */
1120 		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1121 	}
1122 
1123 	/* We have no packets to actually measure against. This means
1124 	 * either one of the other queues on this vector is active or
1125 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
1126 	 *
1127 	 * Between 4 and 56 we can assume that our current interrupt delay
1128 	 * is only slightly too low. As such we should increase it by a small
1129 	 * fixed amount.
1130 	 */
1131 	if (packets < 56) {
1132 		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1133 		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1134 			itr &= I40E_ITR_ADAPTIVE_LATENCY;
1135 			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1136 		}
1137 		goto clear_counts;
1138 	}
1139 
1140 	if (packets <= 256) {
1141 		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1142 		itr &= I40E_ITR_MASK;
1143 
1144 		/* Between 56 and 112 is our "goldilocks" zone where we are
1145 		 * working out "just right". Just report that our current
1146 		 * ITR is good for us.
1147 		 */
1148 		if (packets <= 112)
1149 			goto clear_counts;
1150 
1151 		/* If packet count is 128 or greater we are likely looking
1152 		 * at a slight overrun of the delay we want. Try halving
1153 		 * our delay to see if that will cut the number of packets
1154 		 * in half per interrupt.
1155 		 */
1156 		itr /= 2;
1157 		itr &= I40E_ITR_MASK;
1158 		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1159 			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1160 
1161 		goto clear_counts;
1162 	}
1163 
1164 	/* The paths below assume we are dealing with a bulk ITR since
1165 	 * number of packets is greater than 256. We are just going to have
1166 	 * to compute a value and try to bring the count under control,
1167 	 * though for smaller packet sizes there isn't much we can do as
1168 	 * NAPI polling will likely be kicking in sooner rather than later.
1169 	 */
1170 	itr = I40E_ITR_ADAPTIVE_BULK;
1171 
1172 adjust_by_size:
1173 	/* If packet counts are 256 or greater we can assume we have a gross
1174 	 * overestimation of what the rate should be. Instead of trying to fine
1175 	 * tune it just use the formula below to try and dial in an exact value
1176 	 * given the current packet size of the frame.
1177 	 */
1178 	avg_wire_size = bytes / packets;
1179 
1180 	/* The following is a crude approximation of:
1181 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
1182 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1183 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1184 	 *
1185 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1186 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1187 	 * formula down to
1188 	 *
1189 	 *  (170 * (size + 24)) / (size + 640) = ITR
1190 	 *
1191 	 * We first do some math on the packet size and then finally bitshift
1192 	 * by 8 after rounding up. We also have to account for PCIe link speed
1193 	 * difference as ITR scales based on this.
1194 	 */
1195 	if (avg_wire_size <= 60) {
1196 		/* Start at 250k ints/sec */
1197 		avg_wire_size = 4096;
1198 	} else if (avg_wire_size <= 380) {
1199 		/* 250K ints/sec to 60K ints/sec */
1200 		avg_wire_size *= 40;
1201 		avg_wire_size += 1696;
1202 	} else if (avg_wire_size <= 1084) {
1203 		/* 60K ints/sec to 36K ints/sec */
1204 		avg_wire_size *= 15;
1205 		avg_wire_size += 11452;
1206 	} else if (avg_wire_size <= 1980) {
1207 		/* 36K ints/sec to 30K ints/sec */
1208 		avg_wire_size *= 5;
1209 		avg_wire_size += 22420;
1210 	} else {
1211 		/* plateau at a limit of 30K ints/sec */
1212 		avg_wire_size = 32256;
1213 	}
1214 
1215 	/* If we are in low latency mode halve our delay which doubles the
1216 	 * rate to somewhere between 100K to 16K ints/sec
1217 	 */
1218 	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1219 		avg_wire_size /= 2;
1220 
1221 	/* Resultant value is 256 times larger than it needs to be. This
1222 	 * gives us room to adjust the value as needed to either increase
1223 	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1224 	 *
1225 	 * Use addition as we have already recorded the new latency flag
1226 	 * for the ITR value.
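	 *
	 * For example, assuming I40E_ITR_ADAPTIVE_MIN_INC is 2 usecs, a 40GB
	 * link uses a divisor of 2048, so the 4096 "250K ints/sec" value
	 * computed above becomes DIV_ROUND_UP(4096, 2048) * 2 = 4 usecs.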
1227 	 */
1228 	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1229 	       I40E_ITR_ADAPTIVE_MIN_INC;
1230 
1231 	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1232 		itr &= I40E_ITR_ADAPTIVE_LATENCY;
1233 		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1234 	}
1235 
1236 clear_counts:
1237 	/* write back value */
1238 	rc->target_itr = itr;
1239 
1240 	/* next update should occur within next jiffy */
1241 	rc->next_update = next_update + 1;
1242 
1243 	rc->total_bytes = 0;
1244 	rc->total_packets = 0;
1245 }
1246 
1247 /**
1248  * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1249  * @rx_ring: rx descriptor ring to store buffers on
1250  * @old_buff: donor buffer to have page reused
1251  *
1252  * Synchronizes page for reuse by the adapter
1253  **/
1254 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1255 			       struct i40e_rx_buffer *old_buff)
1256 {
1257 	struct i40e_rx_buffer *new_buff;
1258 	u16 nta = rx_ring->next_to_alloc;
1259 
1260 	new_buff = &rx_ring->rx_bi[nta];
1261 
1262 	/* update, and store next to alloc */
1263 	nta++;
1264 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1265 
1266 	/* transfer page from old buffer to new buffer */
1267 	new_buff->dma		= old_buff->dma;
1268 	new_buff->page		= old_buff->page;
1269 	new_buff->page_offset	= old_buff->page_offset;
1270 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1271 }
1272 
1273 /**
1274  * i40e_rx_is_programming_status - check for programming status descriptor
1275  * @qw: qword representing status_error_len in CPU ordering
1276  *
1277  * The value in the descriptor length field indicates whether this
1278  * is a programming status descriptor for flow director or FCoE,
1279  * signalled by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
1280  * it is a packet descriptor.
1281  **/
1282 static inline bool i40e_rx_is_programming_status(u64 qw)
1283 {
1284 	/* The Rx filter programming status and SPH bit occupy the same
1285 	 * spot in the descriptor. Since we don't support packet split we
1286 	 * can just reuse the bit as an indication that this is a
1287 	 * programming status descriptor.
1288 	 */
1289 	return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1290 }
1291 
1292 /**
1293  * i40e_clean_programming_status - clean the programming status descriptor
1294  * @rx_ring: the rx ring that has this descriptor
1295  * @rx_desc: the rx descriptor written back by HW
1296  * @qw: qword representing status_error_len in CPU ordering
1297  *
1298  * Flow director should handle FD_FILTER_STATUS to check whether its filter
1299  * programming succeeded and take actions accordingly. FCoE should handle
1300  * its context/filter programming/invalidation status and take actions.
1301  *
1302  **/
1303 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1304 					  union i40e_rx_desc *rx_desc,
1305 					  u64 qw)
1306 {
1307 	struct i40e_rx_buffer *rx_buffer;
1308 	u32 ntc = rx_ring->next_to_clean;
1309 	u8 id;
1310 
1311 	/* fetch, update, and store next to clean */
1312 	rx_buffer = &rx_ring->rx_bi[ntc++];
1313 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1314 	rx_ring->next_to_clean = ntc;
1315 
1316 	prefetch(I40E_RX_DESC(rx_ring, ntc));
1317 
1318 	/* place unused page back on the ring */
1319 	i40e_reuse_rx_page(rx_ring, rx_buffer);
1320 	rx_ring->rx_stats.page_reuse_count++;
1321 
1322 	/* clear contents of buffer_info */
1323 	rx_buffer->page = NULL;
1324 
1325 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1326 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1327 
1328 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1329 		i40e_fd_handle_status(rx_ring, rx_desc, id);
1330 }
1331 
1332 /**
1333  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1334  * @tx_ring: the tx ring to set up
1335  *
1336  * Return 0 on success, negative on error
1337  **/
1338 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1339 {
1340 	struct device *dev = tx_ring->dev;
1341 	int bi_size;
1342 
1343 	if (!dev)
1344 		return -ENOMEM;
1345 
1346 	/* warn if we are about to overwrite the pointer */
1347 	WARN_ON(tx_ring->tx_bi);
1348 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1349 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1350 	if (!tx_ring->tx_bi)
1351 		goto err;
1352 
1353 	u64_stats_init(&tx_ring->syncp);
1354 
1355 	/* round up to nearest 4K */
1356 	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1357 	/* add a u32 for head writeback; the 4K alignment below then
1358 	 * guarantees the ring is at least one cache line in size
1359 	 */
1360 	tx_ring->size += sizeof(u32);
1361 	tx_ring->size = ALIGN(tx_ring->size, 4096);
1362 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1363 					   &tx_ring->dma, GFP_KERNEL);
1364 	if (!tx_ring->desc) {
1365 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1366 			 tx_ring->size);
1367 		goto err;
1368 	}
1369 
1370 	tx_ring->next_to_use = 0;
1371 	tx_ring->next_to_clean = 0;
1372 	tx_ring->tx_stats.prev_pkt_ctr = -1;
1373 	return 0;
1374 
1375 err:
1376 	kfree(tx_ring->tx_bi);
1377 	tx_ring->tx_bi = NULL;
1378 	return -ENOMEM;
1379 }
1380 
1381 /**
1382  * i40e_clean_rx_ring - Free Rx buffers
1383  * @rx_ring: ring to be cleaned
1384  **/
1385 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1386 {
1387 	unsigned long bi_size;
1388 	u16 i;
1389 
1390 	/* ring already cleared, nothing to do */
1391 	if (!rx_ring->rx_bi)
1392 		return;
1393 
1394 	if (rx_ring->skb) {
1395 		dev_kfree_skb(rx_ring->skb);
1396 		rx_ring->skb = NULL;
1397 	}
1398 
1399 	/* Free all the Rx ring sk_buffs */
1400 	for (i = 0; i < rx_ring->count; i++) {
1401 		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1402 
1403 		if (!rx_bi->page)
1404 			continue;
1405 
1406 		/* Invalidate cache lines that may have been written to by
1407 		 * device so that we avoid corrupting memory.
1408 		 */
1409 		dma_sync_single_range_for_cpu(rx_ring->dev,
1410 					      rx_bi->dma,
1411 					      rx_bi->page_offset,
1412 					      rx_ring->rx_buf_len,
1413 					      DMA_FROM_DEVICE);
1414 
1415 		/* free resources associated with mapping */
1416 		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1417 				     i40e_rx_pg_size(rx_ring),
1418 				     DMA_FROM_DEVICE,
1419 				     I40E_RX_DMA_ATTR);
1420 
1421 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1422 
1423 		rx_bi->page = NULL;
1424 		rx_bi->page_offset = 0;
1425 	}
1426 
1427 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1428 	memset(rx_ring->rx_bi, 0, bi_size);
1429 
1430 	/* Zero out the descriptor ring */
1431 	memset(rx_ring->desc, 0, rx_ring->size);
1432 
1433 	rx_ring->next_to_alloc = 0;
1434 	rx_ring->next_to_clean = 0;
1435 	rx_ring->next_to_use = 0;
1436 }
1437 
1438 /**
1439  * i40e_free_rx_resources - Free Rx resources
1440  * @rx_ring: ring to clean the resources from
1441  *
1442  * Free all receive software resources
1443  **/
1444 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1445 {
1446 	i40e_clean_rx_ring(rx_ring);
1447 	if (rx_ring->vsi->type == I40E_VSI_MAIN)
1448 		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1449 	rx_ring->xdp_prog = NULL;
1450 	kfree(rx_ring->rx_bi);
1451 	rx_ring->rx_bi = NULL;
1452 
1453 	if (rx_ring->desc) {
1454 		dma_free_coherent(rx_ring->dev, rx_ring->size,
1455 				  rx_ring->desc, rx_ring->dma);
1456 		rx_ring->desc = NULL;
1457 	}
1458 }
1459 
1460 /**
1461  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1462  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1463  *
1464  * Returns 0 on success, negative on failure
1465  **/
1466 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1467 {
1468 	struct device *dev = rx_ring->dev;
1469 	int err = -ENOMEM;
1470 	int bi_size;
1471 
1472 	/* warn if we are about to overwrite the pointer */
1473 	WARN_ON(rx_ring->rx_bi);
1474 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1475 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1476 	if (!rx_ring->rx_bi)
1477 		goto err;
1478 
1479 	u64_stats_init(&rx_ring->syncp);
1480 
1481 	/* Round up to nearest 4K */
1482 	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1483 	rx_ring->size = ALIGN(rx_ring->size, 4096);
1484 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1485 					   &rx_ring->dma, GFP_KERNEL);
1486 
1487 	if (!rx_ring->desc) {
1488 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1489 			 rx_ring->size);
1490 		goto err;
1491 	}
1492 
1493 	rx_ring->next_to_alloc = 0;
1494 	rx_ring->next_to_clean = 0;
1495 	rx_ring->next_to_use = 0;
1496 
1497 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
1498 	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1499 		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1500 				       rx_ring->queue_index);
1501 		if (err < 0)
1502 			goto err;
1503 	}
1504 
1505 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1506 
1507 	return 0;
1508 err:
1509 	kfree(rx_ring->rx_bi);
1510 	rx_ring->rx_bi = NULL;
1511 	return err;
1512 }
1513 
1514 /**
1515  * i40e_release_rx_desc - Store the new tail and head values
1516  * @rx_ring: ring to bump
1517  * @val: new tail index
1518  **/
1519 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1520 {
1521 	rx_ring->next_to_use = val;
1522 
1523 	/* update next to alloc since we have filled the ring */
1524 	rx_ring->next_to_alloc = val;
1525 
1526 	/* Force memory writes to complete before letting h/w
1527 	 * know there are new descriptors to fetch.  (Only
1528 	 * applicable for weak-ordered memory model archs,
1529 	 * such as IA-64).
1530 	 */
1531 	wmb();
1532 	writel(val, rx_ring->tail);
1533 }
1534 
1535 /**
1536  * i40e_rx_offset - Return expected offset into page to access data
1537  * @rx_ring: Ring we are requesting offset of
1538  *
1539  * Returns the offset value for ring into the data buffer.
1540  */
1541 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1542 {
1543 	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1544 }
1545 
1546 /**
1547  * i40e_alloc_mapped_page - recycle or make a new page
1548  * @rx_ring: ring to use
1549  * @bi: rx_buffer struct to modify
1550  *
1551  * Returns true if the page was successfully allocated or
1552  * reused.
1553  **/
1554 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1555 				   struct i40e_rx_buffer *bi)
1556 {
1557 	struct page *page = bi->page;
1558 	dma_addr_t dma;
1559 
1560 	/* since we are recycling buffers we should seldom need to alloc */
1561 	if (likely(page)) {
1562 		rx_ring->rx_stats.page_reuse_count++;
1563 		return true;
1564 	}
1565 
1566 	/* alloc new page for storage */
1567 	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1568 	if (unlikely(!page)) {
1569 		rx_ring->rx_stats.alloc_page_failed++;
1570 		return false;
1571 	}
1572 
1573 	/* map page for use */
1574 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1575 				 i40e_rx_pg_size(rx_ring),
1576 				 DMA_FROM_DEVICE,
1577 				 I40E_RX_DMA_ATTR);
1578 
1579 	/* if mapping failed free memory back to system since
1580 	 * there isn't much point in holding memory we can't use
1581 	 */
1582 	if (dma_mapping_error(rx_ring->dev, dma)) {
1583 		__free_pages(page, i40e_rx_pg_order(rx_ring));
1584 		rx_ring->rx_stats.alloc_page_failed++;
1585 		return false;
1586 	}
1587 
1588 	bi->dma = dma;
1589 	bi->page = page;
1590 	bi->page_offset = i40e_rx_offset(rx_ring);
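	/* Take a large page refcount up front and track what we hand out in
	 * pagecnt_bias so recycling a buffer usually costs only a local
	 * decrement instead of an atomic page refcount update.
	 */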
1591 	page_ref_add(page, USHRT_MAX - 1);
1592 	bi->pagecnt_bias = USHRT_MAX;
1593 
1594 	return true;
1595 }
1596 
1597 /**
1598  * i40e_receive_skb - Send a completed packet up the stack
1599  * @rx_ring:  rx ring in play
1600  * @skb: packet to send up
1601  * @vlan_tag: vlan tag for packet
1602  **/
1603 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1604 			     struct sk_buff *skb, u16 vlan_tag)
1605 {
1606 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
1607 
1608 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1609 	    (vlan_tag & VLAN_VID_MASK))
1610 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1611 
1612 	napi_gro_receive(&q_vector->napi, skb);
1613 }
1614 
1615 /**
1616  * i40e_alloc_rx_buffers - Replace used receive buffers
1617  * @rx_ring: ring to place buffers on
1618  * @cleaned_count: number of buffers to replace
1619  *
1620  * Returns false if all allocations were successful, true if any fail
1621  **/
1622 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1623 {
1624 	u16 ntu = rx_ring->next_to_use;
1625 	union i40e_rx_desc *rx_desc;
1626 	struct i40e_rx_buffer *bi;
1627 
1628 	/* do nothing if no valid netdev defined */
1629 	if (!rx_ring->netdev || !cleaned_count)
1630 		return false;
1631 
1632 	rx_desc = I40E_RX_DESC(rx_ring, ntu);
1633 	bi = &rx_ring->rx_bi[ntu];
1634 
1635 	do {
1636 		if (!i40e_alloc_mapped_page(rx_ring, bi))
1637 			goto no_buffers;
1638 
1639 		/* sync the buffer for use by the device */
1640 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1641 						 bi->page_offset,
1642 						 rx_ring->rx_buf_len,
1643 						 DMA_FROM_DEVICE);
1644 
1645 		/* Refresh the desc even if buffer_addrs didn't change
1646 		 * because each write-back erases this info.
1647 		 */
1648 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1649 
1650 		rx_desc++;
1651 		bi++;
1652 		ntu++;
1653 		if (unlikely(ntu == rx_ring->count)) {
1654 			rx_desc = I40E_RX_DESC(rx_ring, 0);
1655 			bi = rx_ring->rx_bi;
1656 			ntu = 0;
1657 		}
1658 
1659 		/* clear the status bits for the next_to_use descriptor */
1660 		rx_desc->wb.qword1.status_error_len = 0;
1661 
1662 		cleaned_count--;
1663 	} while (cleaned_count);
1664 
1665 	if (rx_ring->next_to_use != ntu)
1666 		i40e_release_rx_desc(rx_ring, ntu);
1667 
1668 	return false;
1669 
1670 no_buffers:
1671 	if (rx_ring->next_to_use != ntu)
1672 		i40e_release_rx_desc(rx_ring, ntu);
1673 
1674 	/* make sure to come back via polling to try again after
1675 	 * allocation failure
1676 	 */
1677 	return true;
1678 }
1679 
1680 /**
1681  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1682  * @vsi: the VSI we care about
1683  * @skb: skb currently being received and modified
1684  * @rx_desc: the receive descriptor
1685  **/
1686 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1687 				    struct sk_buff *skb,
1688 				    union i40e_rx_desc *rx_desc)
1689 {
1690 	struct i40e_rx_ptype_decoded decoded;
1691 	u32 rx_error, rx_status;
1692 	bool ipv4, ipv6;
1693 	u8 ptype;
1694 	u64 qword;
1695 
1696 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1697 	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1698 	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1699 		   I40E_RXD_QW1_ERROR_SHIFT;
1700 	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1701 		    I40E_RXD_QW1_STATUS_SHIFT;
1702 	decoded = decode_rx_desc_ptype(ptype);
1703 
1704 	skb->ip_summed = CHECKSUM_NONE;
1705 
1706 	skb_checksum_none_assert(skb);
1707 
1708 	/* Rx csum enabled and ip headers found? */
1709 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1710 		return;
1711 
1712 	/* did the hardware decode the packet and checksum? */
1713 	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1714 		return;
1715 
1716 	/* both known and outer_ip must be set for the below code to work */
1717 	if (!(decoded.known && decoded.outer_ip))
1718 		return;
1719 
1720 	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1721 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1722 	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1723 	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1724 
1725 	if (ipv4 &&
1726 	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1727 			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1728 		goto checksum_fail;
1729 
1730 	/* likely incorrect csum if alternate IP extension headers found */
1731 	if (ipv6 &&
1732 	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1733 		/* don't increment checksum err here, non-fatal err */
1734 		return;
1735 
1736 	/* there was some L4 error, count error and punt packet to the stack */
1737 	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1738 		goto checksum_fail;
1739 
1740 	/* handle packets that were not able to be checksummed due
1741 	 * to arrival speed, in this case the stack can compute
1742 	 * the csum.
1743 	 */
1744 	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1745 		return;
1746 
1747 	/* If there is an outer header present that might contain a checksum
1748 	 * we need to bump the checksum level by 1 to reflect the fact that
1749 	 * we are indicating we validated the inner checksum.
1750 	 */
1751 	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1752 		skb->csum_level = 1;
1753 
1754 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
1755 	switch (decoded.inner_prot) {
1756 	case I40E_RX_PTYPE_INNER_PROT_TCP:
1757 	case I40E_RX_PTYPE_INNER_PROT_UDP:
1758 	case I40E_RX_PTYPE_INNER_PROT_SCTP:
1759 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1760 		/* fall through */
1761 	default:
1762 		break;
1763 	}
1764 
1765 	return;
1766 
1767 checksum_fail:
1768 	vsi->back->hw_csum_rx_error++;
1769 }
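
/* Illustrative note, not driver code: for a tunneled frame the
 * csum_level set above tells the stack how many nested checksums were
 * verified.  A hedged sketch of what a VXLAN- or GRE-encapsulated TCP
 * segment looks like on return:
 *
 *	skb->ip_summed  == CHECKSUM_UNNECESSARY;
 *	skb->csum_level == 1;	// outer and inner checksums both covered
 *
 * so neither checksum is recomputed in software on the receive path.
 */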
1770 
1771 /**
1772  * i40e_ptype_to_htype - get a hash type
1773  * @ptype: the ptype value from the descriptor
1774  *
1775  * Returns a hash type to be used by skb_set_hash
1776  **/
1777 static inline int i40e_ptype_to_htype(u8 ptype)
1778 {
1779 	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1780 
1781 	if (!decoded.known)
1782 		return PKT_HASH_TYPE_NONE;
1783 
1784 	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1785 	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1786 		return PKT_HASH_TYPE_L4;
1787 	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1788 		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1789 		return PKT_HASH_TYPE_L3;
1790 	else
1791 		return PKT_HASH_TYPE_L2;
1792 }
1793 
1794 /**
1795  * i40e_rx_hash - set the hash value in the skb
1796  * @ring: descriptor ring
1797  * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: the packet type decoded by hardware
1798  **/
1799 static inline void i40e_rx_hash(struct i40e_ring *ring,
1800 				union i40e_rx_desc *rx_desc,
1801 				struct sk_buff *skb,
1802 				u8 rx_ptype)
1803 {
1804 	u32 hash;
1805 	const __le64 rss_mask =
1806 		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1807 			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1808 
1809 	if (!(ring->netdev->features & NETIF_F_RXHASH))
1810 		return;
1811 
1812 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1813 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1814 		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1815 	}
1816 }
1817 
1818 /**
1819  * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1820  * @rx_ring: rx descriptor ring packet is being transacted on
1821  * @rx_desc: pointer to the EOP Rx descriptor
1822  * @skb: pointer to current skb being populated
1823  * @rx_ptype: the packet type decoded by hardware
1824  *
1825  * This function checks the ring, descriptor, and packet information in
1826  * order to populate the hash, checksum, VLAN, protocol, and
1827  * other fields within the skb.
1828  **/
1829 static inline
1830 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1831 			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1832 			     u8 rx_ptype)
1833 {
1834 	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1835 	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1836 			I40E_RXD_QW1_STATUS_SHIFT;
1837 	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1838 	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1839 		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1840 
1841 	if (unlikely(tsynvalid))
1842 		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1843 
1844 	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1845 
1846 	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1847 
1848 	skb_record_rx_queue(skb, rx_ring->queue_index);
1849 
1850 	/* modifies the skb - consumes the enet header */
1851 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1852 }
1853 
1854 /**
1855  * i40e_cleanup_headers - Correct empty headers
1856  * @rx_ring: rx descriptor ring packet is being transacted on
1857  * @skb: pointer to current skb being fixed
1858  * @rx_desc: pointer to the EOP Rx descriptor
1859  *
1860  * Also address the case where we are pulling data in on pages only
1861  * and as such no data is present in the skb header.
1862  *
1863  * In addition if skb is not at least 60 bytes we need to pad it so that
1864  * it is large enough to qualify as a valid Ethernet frame.
1865  *
1866  * Returns true if an error was encountered and skb was freed.
1867  **/
1868 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1869 				 union i40e_rx_desc *rx_desc)
1870 
1871 {
1872 	/* XDP packets use error pointer so abort at this point */
1873 	if (IS_ERR(skb))
1874 		return true;
1875 
1876 	/* ERR_MASK will only have valid bits if EOP set, and
1877 	 * what we are doing here is actually checking
1878 	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1879 	 * the error field
1880 	 */
1881 	if (unlikely(i40e_test_staterr(rx_desc,
1882 				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1883 		dev_kfree_skb_any(skb);
1884 		return true;
1885 	}
1886 
1887 	/* if eth_skb_pad returns an error the skb was freed */
1888 	if (eth_skb_pad(skb))
1889 		return true;
1890 
1891 	return false;
1892 }
1893 
1894 /**
1895  * i40e_page_is_reusable - check if any reuse is possible
1896  * @page: page struct to check
1897  *
1898  * A page is not reusable if it was allocated under low memory
1899  * conditions, or it's not in the same NUMA node as this CPU.
1900  */
1901 static inline bool i40e_page_is_reusable(struct page *page)
1902 {
1903 	return (page_to_nid(page) == numa_mem_id()) &&
1904 		!page_is_pfmemalloc(page);
1905 }
1906 
1907 /**
1908  * i40e_can_reuse_rx_page - Determine if this page can be reused by
1909  * the adapter for another receive
1910  *
1911  * @rx_buffer: buffer containing the page
1912  *
1913  * If page is reusable, rx_buffer->page_offset is adjusted to point to
1914  * an unused region in the page.
1915  *
1916  * For small pages, truesize will be a constant value, half the size
1917  * of the memory at page.  We'll attempt to alternate between high and
1918  * low halves of the page, with one half ready for use by the hardware
1919  * and the other half being consumed by the stack.  We use the page
1920  * ref count to determine whether the stack has finished consuming the
1921  * portion of this page that was passed up with a previous packet.  If
1922  * the page ref count is >1, we'll assume the "other" half page is
1923  * still busy, and this page cannot be reused.
1924  *
1925  * For larger pages, truesize will be the actual space used by the
1926  * received packet (adjusted upward to an even multiple of the cache
1927  * line size).  This will advance through the page by the amount
1928  * actually consumed by the received packets while there is still
1929  * space for a buffer.  Each region of larger pages will be used at
1930  * most once, after which the page will not be reused.
1931  *
1932  * In either case, if the page is reusable its refcount is increased.
1933  **/
1934 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1935 {
1936 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1937 	struct page *page = rx_buffer->page;
1938 
1939 	/* Is any reuse possible? */
1940 	if (unlikely(!i40e_page_is_reusable(page)))
1941 		return false;
1942 
1943 #if (PAGE_SIZE < 8192)
1944 	/* if we are only owner of page we can reuse it */
1945 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
1946 		return false;
1947 #else
1948 #define I40E_LAST_OFFSET \
1949 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1950 	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1951 		return false;
1952 #endif
1953 
1954 	/* If we have drained the page fragment pool we need to update
1955 	 * the pagecnt_bias and page count so that we fully restock the
1956 	 * number of references the driver holds.
1957 	 */
1958 	if (unlikely(pagecnt_bias == 1)) {
1959 		page_ref_add(page, USHRT_MAX - 1);
1960 		rx_buffer->pagecnt_bias = USHRT_MAX;
1961 	}
1962 
1963 	return true;
1964 }
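
/* Illustrative sketch, not driver code, assuming 4K pages and 2K
 * buffers: the XOR flip used by i40e_add_rx_frag() and
 * i40e_rx_buffer_flip() ping-pongs between the two halves of the page:
 *
 *	offset = 0;
 *	offset ^= 2048;		// second half armed for hardware
 *	offset ^= 2048;		// first half armed again
 *
 * While one half is still held by an skb, page_count() exceeds
 * pagecnt_bias by one and the check above declines to re-arm the page.
 */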
1965 
1966 /**
1967  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1968  * @rx_ring: rx descriptor ring to transact packets on
1969  * @rx_buffer: buffer containing page to add
1970  * @skb: sk_buff to place the data into
1971  * @size: packet length from rx_desc
1972  *
1973  * This function will add the data contained in rx_buffer->page to the skb.
1974  * It will just attach the page as a frag to the skb.
1975  *
1976  * The function will then update the page offset.
1977  **/
1978 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1979 			     struct i40e_rx_buffer *rx_buffer,
1980 			     struct sk_buff *skb,
1981 			     unsigned int size)
1982 {
1983 #if (PAGE_SIZE < 8192)
1984 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1985 #else
1986 	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1987 #endif
1988 
1989 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1990 			rx_buffer->page_offset, size, truesize);
1991 
1992 	/* page is being used so we must update the page offset */
1993 #if (PAGE_SIZE < 8192)
1994 	rx_buffer->page_offset ^= truesize;
1995 #else
1996 	rx_buffer->page_offset += truesize;
1997 #endif
1998 }
1999 
2000 /**
2001  * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2002  * @rx_ring: rx descriptor ring to transact packets on
2003  * @size: size of buffer to add to skb
2004  *
2005  * This function will pull an Rx buffer from the ring and synchronize it
2006  * for use by the CPU.
2007  */
2008 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2009 						 const unsigned int size)
2010 {
2011 	struct i40e_rx_buffer *rx_buffer;
2012 
2013 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
2014 	prefetchw(rx_buffer->page);
2015 
2016 	/* we are reusing so sync this buffer for CPU use */
2017 	dma_sync_single_range_for_cpu(rx_ring->dev,
2018 				      rx_buffer->dma,
2019 				      rx_buffer->page_offset,
2020 				      size,
2021 				      DMA_FROM_DEVICE);
2022 
2023 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
2024 	rx_buffer->pagecnt_bias--;
2025 
2026 	return rx_buffer;
2027 }
2028 
2029 /**
2030  * i40e_construct_skb - Allocate skb and populate it
2031  * @rx_ring: rx descriptor ring to transact packets on
2032  * @rx_buffer: rx buffer to pull data from
2033  * @xdp: xdp_buff pointing to the data
2034  *
2035  * This function allocates an skb.  It then populates it with the page
2036  * data from the current receive descriptor, taking care to set up the
2037  * skb correctly.
2038  */
2039 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2040 					  struct i40e_rx_buffer *rx_buffer,
2041 					  struct xdp_buff *xdp)
2042 {
2043 	unsigned int size = xdp->data_end - xdp->data;
2044 #if (PAGE_SIZE < 8192)
2045 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2046 #else
2047 	unsigned int truesize = SKB_DATA_ALIGN(size);
2048 #endif
2049 	unsigned int headlen;
2050 	struct sk_buff *skb;
2051 
2052 	/* prefetch first cache line of first page */
2053 	prefetch(xdp->data);
2054 #if L1_CACHE_BYTES < 128
2055 	prefetch(xdp->data + L1_CACHE_BYTES);
2056 #endif
2057 
2058 	/* allocate a skb to store the frags */
2059 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2060 			       I40E_RX_HDR_SIZE,
2061 			       GFP_ATOMIC | __GFP_NOWARN);
2062 	if (unlikely(!skb))
2063 		return NULL;
2064 
2065 	/* Determine how much of the header we need to copy */
2066 	headlen = size;
2067 	if (headlen > I40E_RX_HDR_SIZE)
2068 		headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2069 
2070 	/* align pull length to size of long to optimize memcpy performance */
2071 	memcpy(__skb_put(skb, headlen), xdp->data,
2072 	       ALIGN(headlen, sizeof(long)));
2073 
2074 	/* update all of the pointers */
2075 	size -= headlen;
2076 	if (size) {
2077 		skb_add_rx_frag(skb, 0, rx_buffer->page,
2078 				rx_buffer->page_offset + headlen,
2079 				size, truesize);
2080 
2081 		/* buffer is used by skb, update page_offset */
2082 #if (PAGE_SIZE < 8192)
2083 		rx_buffer->page_offset ^= truesize;
2084 #else
2085 		rx_buffer->page_offset += truesize;
2086 #endif
2087 	} else {
2088 		/* buffer is unused, reset bias back to rx_buffer */
2089 		rx_buffer->pagecnt_bias++;
2090 	}
2091 
2092 	return skb;
2093 }
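
/* Illustrative example, not driver code: for a 1514 byte TCP frame with
 * an I40E_RX_HDR_SIZE of 256, i40e_construct_skb() behaves roughly as:
 *
 *	headlen = eth_get_headlen(xdp->data, 256);	// e.g. 54 bytes
 *	memcpy(skb->data, xdp->data, ALIGN(54, sizeof(long)));
 *	// remaining ~1460 bytes stay in the page as frag 0
 *
 * Only the protocol headers are copied into the linear area; the
 * payload is attached as a page fragment and never memcpy'd.
 */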
2094 
2095 /**
2096  * i40e_build_skb - Build skb around an existing buffer
2097  * @rx_ring: Rx descriptor ring to transact packets on
2098  * @rx_buffer: Rx buffer to pull data from
2099  * @xdp: xdp_buff pointing to the data
2100  *
2101  * This function builds an skb around an existing Rx buffer, taking care
2102  * to set up the skb correctly and avoid any memcpy overhead.
2103  */
2104 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2105 				      struct i40e_rx_buffer *rx_buffer,
2106 				      struct xdp_buff *xdp)
2107 {
2108 	unsigned int size = xdp->data_end - xdp->data;
2109 #if (PAGE_SIZE < 8192)
2110 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2111 #else
2112 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2113 				SKB_DATA_ALIGN(I40E_SKB_PAD + size);
2114 #endif
2115 	struct sk_buff *skb;
2116 
2117 	/* prefetch first cache line of first page */
2118 	prefetch(xdp->data);
2119 #if L1_CACHE_BYTES < 128
2120 	prefetch(xdp->data + L1_CACHE_BYTES);
2121 #endif
2122 	/* build an skb around the page buffer */
2123 	skb = build_skb(xdp->data_hard_start, truesize);
2124 	if (unlikely(!skb))
2125 		return NULL;
2126 
2127 	/* update pointers within the skb to store the data */
2128 	skb_reserve(skb, I40E_SKB_PAD);
2129 	__skb_put(skb, size);
2130 
2131 	/* buffer is used by skb, update page_offset */
2132 #if (PAGE_SIZE < 8192)
2133 	rx_buffer->page_offset ^= truesize;
2134 #else
2135 	rx_buffer->page_offset += truesize;
2136 #endif
2137 
2138 	return skb;
2139 }
2140 
2141 /**
2142  * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2143  * @rx_ring: rx descriptor ring to transact packets on
2144  * @rx_buffer: rx buffer to pull data from
2145  *
2146  * This function will clean up the contents of the rx_buffer.  It will
2147  * either recycle the buffer or unmap it and free the associated resources.
2148  */
2149 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2150 			       struct i40e_rx_buffer *rx_buffer)
2151 {
2152 	if (i40e_can_reuse_rx_page(rx_buffer)) {
2153 		/* hand second half of page back to the ring */
2154 		i40e_reuse_rx_page(rx_ring, rx_buffer);
2155 		rx_ring->rx_stats.page_reuse_count++;
2156 	} else {
2157 		/* we are not reusing the buffer so unmap it */
2158 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2159 				     i40e_rx_pg_size(rx_ring),
2160 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2161 		__page_frag_cache_drain(rx_buffer->page,
2162 					rx_buffer->pagecnt_bias);
2163 	}
2164 
2165 	/* clear contents of buffer_info */
2166 	rx_buffer->page = NULL;
2167 }
2168 
2169 /**
2170  * i40e_is_non_eop - process handling of non-EOP buffers
2171  * @rx_ring: Rx ring being processed
2172  * @rx_desc: Rx descriptor for current buffer
2173  * @skb: Current socket buffer containing buffer in progress
2174  *
2175  * This function updates next to clean.  If the buffer is an EOP buffer
2176  * this function exits returning false, otherwise it will place the
2177  * sk_buff in the next buffer to be chained and return true indicating
2178  * that this is in fact a non-EOP buffer.
2179  **/
2180 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2181 			    union i40e_rx_desc *rx_desc,
2182 			    struct sk_buff *skb)
2183 {
2184 	u32 ntc = rx_ring->next_to_clean + 1;
2185 
2186 	/* fetch, update, and store next to clean */
2187 	ntc = (ntc < rx_ring->count) ? ntc : 0;
2188 	rx_ring->next_to_clean = ntc;
2189 
2190 	prefetch(I40E_RX_DESC(rx_ring, ntc));
2191 
2192 	/* if we are the last buffer then there is nothing else to do */
2193 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2194 	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2195 		return false;
2196 
2197 	rx_ring->rx_stats.non_eop_descs++;
2198 
2199 	return true;
2200 }
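
/* Illustrative example, not driver code: a 9000 byte jumbo frame
 * received into 2048 byte buffers spans roughly five descriptors.  The
 * first four have EOF clear, so i40e_is_non_eop() returns true and
 * i40e_clean_rx_irq() keeps appending frags to the same skb until the
 * EOP descriptor completes the packet.
 */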
2201 
2202 #define I40E_XDP_PASS 0
2203 #define I40E_XDP_CONSUMED 1
2204 #define I40E_XDP_TX 2
2205 
2206 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
2207 			      struct i40e_ring *xdp_ring);
2208 
2209 /**
2210  * i40e_run_xdp - run an XDP program
2211  * @rx_ring: Rx ring being processed
2212  * @xdp: XDP buffer containing the frame
2213  **/
2214 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2215 				    struct xdp_buff *xdp)
2216 {
2217 	int err, result = I40E_XDP_PASS;
2218 	struct i40e_ring *xdp_ring;
2219 	struct bpf_prog *xdp_prog;
2220 	u32 act;
2221 
2222 	rcu_read_lock();
2223 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2224 
2225 	if (!xdp_prog)
2226 		goto xdp_out;
2227 
2228 	act = bpf_prog_run_xdp(xdp_prog, xdp);
2229 	switch (act) {
2230 	case XDP_PASS:
2231 		break;
2232 	case XDP_TX:
2233 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2234 		result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2235 		break;
2236 	case XDP_REDIRECT:
2237 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2238 		result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
2239 		break;
2240 	default:
2241 		bpf_warn_invalid_xdp_action(act);
		/* fall through */
2242 	case XDP_ABORTED:
2243 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2244 		/* fallthrough -- handle aborts by dropping packet */
2245 	case XDP_DROP:
2246 		result = I40E_XDP_CONSUMED;
2247 		break;
2248 	}
2249 xdp_out:
2250 	rcu_read_unlock();
2251 	return ERR_PTR(-result);
2252 }
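
/* Illustrative note, not driver code: the ERR_PTR(-result) return folds
 * all three verdicts into one pointer for the caller:
 *
 *	skb == NULL (ERR_PTR(0))	   -> XDP_PASS, build an skb
 *	PTR_ERR(skb) == -I40E_XDP_TX	   -> frame queued, flip the buffer
 *	PTR_ERR(skb) == -I40E_XDP_CONSUMED -> dropped, recycle the buffer
 */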
2253 
2254 /**
2255  * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2256  * @rx_ring: Rx ring
2257  * @rx_buffer: Rx buffer to adjust
2258  * @size: Size of adjustment
2259  **/
2260 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2261 				struct i40e_rx_buffer *rx_buffer,
2262 				unsigned int size)
2263 {
2264 #if (PAGE_SIZE < 8192)
2265 	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2266 
2267 	rx_buffer->page_offset ^= truesize;
2268 #else
2269 	unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2270 
2271 	rx_buffer->page_offset += truesize;
2272 #endif
2273 }
2274 
2275 static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2276 {
2277 	/* Force memory writes to complete before letting h/w
2278 	 * know there are new descriptors to fetch.
2279 	 */
2280 	wmb();
2281 	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2282 }
2283 
2284 /**
2285  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2286  * @rx_ring: rx descriptor ring to transact packets on
2287  * @budget: Total limit on number of packets to process
2288  *
2289  * This function provides a "bounce buffer" approach to Rx interrupt
2290  * processing.  The advantage to this is that on systems that have
2291  * expensive overhead for IOMMU access this provides a means of avoiding
2292  * it by maintaining the mapping of the page to the system.
2293  *
2294  * Returns amount of work completed
2295  **/
2296 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2297 {
2298 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2299 	struct sk_buff *skb = rx_ring->skb;
2300 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2301 	bool failure = false, xdp_xmit = false;
2302 	struct xdp_buff xdp;
2303 
2304 	xdp.rxq = &rx_ring->xdp_rxq;
2305 
2306 	while (likely(total_rx_packets < (unsigned int)budget)) {
2307 		struct i40e_rx_buffer *rx_buffer;
2308 		union i40e_rx_desc *rx_desc;
2309 		unsigned int size;
2310 		u16 vlan_tag;
2311 		u8 rx_ptype;
2312 		u64 qword;
2313 
2314 		/* return some buffers to hardware, one at a time is too slow */
2315 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2316 			failure = failure ||
2317 				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2318 			cleaned_count = 0;
2319 		}
2320 
2321 		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2322 
2323 		/* status_error_len will always be zero for unused descriptors
2324 		 * because it's cleared in cleanup, and overlaps with hdr_addr
2325 		 * which is always zero because packet split isn't used.  If the
2326 		 * hardware wrote DD then the length will be non-zero.
2327 		 */
2328 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2329 
2330 		/* This memory barrier is needed to keep us from reading
2331 		 * any other fields out of the rx_desc until we have
2332 		 * verified the descriptor has been written back.
2333 		 */
2334 		dma_rmb();
2335 
2336 		if (unlikely(i40e_rx_is_programming_status(qword))) {
2337 			i40e_clean_programming_status(rx_ring, rx_desc, qword);
2338 			cleaned_count++;
2339 			continue;
2340 		}
2341 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2342 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2343 		if (!size)
2344 			break;
2345 
2346 		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2347 		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2348 
2349 		/* retrieve a buffer from the ring */
2350 		if (!skb) {
2351 			xdp.data = page_address(rx_buffer->page) +
2352 				   rx_buffer->page_offset;
2353 			xdp_set_data_meta_invalid(&xdp);
2354 			xdp.data_hard_start = xdp.data -
2355 					      i40e_rx_offset(rx_ring);
2356 			xdp.data_end = xdp.data + size;
2357 
2358 			skb = i40e_run_xdp(rx_ring, &xdp);
2359 		}
2360 
2361 		if (IS_ERR(skb)) {
2362 			if (PTR_ERR(skb) == -I40E_XDP_TX) {
2363 				xdp_xmit = true;
2364 				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2365 			} else {
2366 				rx_buffer->pagecnt_bias++;
2367 			}
2368 			total_rx_bytes += size;
2369 			total_rx_packets++;
2370 		} else if (skb) {
2371 			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2372 		} else if (ring_uses_build_skb(rx_ring)) {
2373 			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2374 		} else {
2375 			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2376 		}
2377 
2378 		/* exit if we failed to retrieve a buffer */
2379 		if (!skb) {
2380 			rx_ring->rx_stats.alloc_buff_failed++;
2381 			rx_buffer->pagecnt_bias++;
2382 			break;
2383 		}
2384 
2385 		i40e_put_rx_buffer(rx_ring, rx_buffer);
2386 		cleaned_count++;
2387 
2388 		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2389 			continue;
2390 
2391 		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2392 			skb = NULL;
2393 			continue;
2394 		}
2395 
2396 		/* probably a little skewed due to removing CRC */
2397 		total_rx_bytes += skb->len;
2398 
2399 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2400 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2401 			   I40E_RXD_QW1_PTYPE_SHIFT;
2402 
2403 		/* populate checksum, VLAN, and protocol */
2404 		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2405 
2406 		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2407 			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2408 
2409 		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2410 		i40e_receive_skb(rx_ring, skb, vlan_tag);
2411 		skb = NULL;
2412 
2413 		/* update budget accounting */
2414 		total_rx_packets++;
2415 	}
2416 
2417 	if (xdp_xmit) {
2418 		struct i40e_ring *xdp_ring =
2419 			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2420 
2421 		i40e_xdp_ring_update_tail(xdp_ring);
2422 		xdp_do_flush_map();
2423 	}
2424 
2425 	rx_ring->skb = skb;
2426 
2427 	u64_stats_update_begin(&rx_ring->syncp);
2428 	rx_ring->stats.packets += total_rx_packets;
2429 	rx_ring->stats.bytes += total_rx_bytes;
2430 	u64_stats_update_end(&rx_ring->syncp);
2431 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
2432 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2433 
2434 	/* guarantee a trip back through this routine if there was a failure */
2435 	return failure ? budget : (int)total_rx_packets;
2436 }
2437 
2438 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2439 {
2440 	u32 val;
2441 
2442 	/* We don't bother with setting the CLEARPBA bit as the data sheet
2443 	 * points out doing so is "meaningless since it was already
2444 	 * auto-cleared". The auto-clearing happens when the interrupt is
2445 	 * asserted.
2446 	 *
2447 	 * Hardware errata 28 also indicates that writing to a
2448 	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2449 	 * an event in the PBA anyway so we need to rely on the automask
2450 	 * to hold pending events for us until the interrupt is re-enabled
2451 	 *
2452 	 * The itr value is reported in microseconds, and the register
2453 	 * value is recorded in 2 microsecond units. For this reason we
2454 	 * only need to shift by the interval shift - 1 instead of the
2455 	 * full value.
2456 	 */
2457 	itr &= I40E_ITR_MASK;
2458 
2459 	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2460 	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2461 	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2462 
2463 	return val;
2464 }
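
/* Worked example, not driver code, assuming an INTERVAL field at bit 5:
 * an ITR of 50 usecs must be programmed as 25 two-usec units, and
 *
 *	50 << (5 - 1) == 0x320 == 25 << 5
 *
 * so the single shift above folds the divide-by-two into the field
 * placement.
 */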
2465 
2466 /* a small macro to shorten up some long lines */
2467 #define INTREG I40E_PFINT_DYN_CTLN
2468 
2469 /* The act of updating the ITR will cause it to immediately trigger. In order
2470  * to prevent this from throwing off adaptive update statistics we defer the
2471  * update so that it can only happen so often. So after either Tx or Rx are
2472  * updated we make the adaptive scheme wait until either the ITR completely
2473  * expires via the next_update expiration or we have been through at least
2474  * 3 interrupts.
2475  */
2476 #define ITR_COUNTDOWN_START 3
2477 
2478 /**
2479  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2480  * @vsi: the VSI we care about
2481  * @q_vector: q_vector for which itr is being updated and interrupt enabled
2482  *
2483  **/
2484 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2485 					  struct i40e_q_vector *q_vector)
2486 {
2487 	struct i40e_hw *hw = &vsi->back->hw;
2488 	u32 intval;
2489 
2490 	/* If we don't have MSIX, then we only need to re-enable icr0 */
2491 	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2492 		i40e_irq_dynamic_enable_icr0(vsi->back);
2493 		return;
2494 	}
2495 
2496 	/* These will do nothing if dynamic updates are not enabled */
2497 	i40e_update_itr(q_vector, &q_vector->tx);
2498 	i40e_update_itr(q_vector, &q_vector->rx);
2499 
2500 	/* This block of logic allows us to get away with only updating
2501 	 * one ITR value with each interrupt. The idea is to perform a
2502 	 * pseudo-lazy update with the following criteria.
2503 	 *
2504 	 * 1. Rx is given higher priority than Tx if both are in same state
2505 	 * 2. If we must reduce an ITR, that update is given highest priority.
2506 	 * 3. We then give priority to increasing ITR based on amount.
2507 	 */
2508 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2509 		/* Rx ITR needs to be reduced, this is highest priority */
2510 		intval = i40e_buildreg_itr(I40E_RX_ITR,
2511 					   q_vector->rx.target_itr);
2512 		q_vector->rx.current_itr = q_vector->rx.target_itr;
2513 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2514 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2515 		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2516 		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2517 		/* Tx ITR needs to be reduced, this is second priority
2518 		 * Tx ITR needs to be increased more than Rx, fourth priority
2519 		 */
2520 		intval = i40e_buildreg_itr(I40E_TX_ITR,
2521 					   q_vector->tx.target_itr);
2522 		q_vector->tx.current_itr = q_vector->tx.target_itr;
2523 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2524 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2525 		/* Rx ITR needs to be increased, third priority */
2526 		intval = i40e_buildreg_itr(I40E_RX_ITR,
2527 					   q_vector->rx.target_itr);
2528 		q_vector->rx.current_itr = q_vector->rx.target_itr;
2529 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2530 	} else {
2531 		/* No ITR update, lowest priority */
2532 		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2533 		if (q_vector->itr_countdown)
2534 			q_vector->itr_countdown--;
2535 	}
2536 
2537 	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2538 		wr32(hw, INTREG(q_vector->reg_idx), intval);
2539 }
2540 
2541 /**
2542  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2543  * @napi: napi struct with our devices info in it
2544  * @budget: amount of work driver is allowed to do this pass, in packets
2545  *
2546  * This function will clean all queues associated with a q_vector.
2547  *
2548  * Returns the amount of work done
2549  **/
2550 int i40e_napi_poll(struct napi_struct *napi, int budget)
2551 {
2552 	struct i40e_q_vector *q_vector =
2553 			       container_of(napi, struct i40e_q_vector, napi);
2554 	struct i40e_vsi *vsi = q_vector->vsi;
2555 	struct i40e_ring *ring;
2556 	bool clean_complete = true;
2557 	bool arm_wb = false;
2558 	int budget_per_ring;
2559 	int work_done = 0;
2560 
2561 	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2562 		napi_complete(napi);
2563 		return 0;
2564 	}
2565 
2566 	/* Since the actual Tx work is minimal, we can give the Tx a larger
2567 	 * budget and be more aggressive about cleaning up the Tx descriptors.
2568 	 */
2569 	i40e_for_each_ring(ring, q_vector->tx) {
2570 		if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2571 			clean_complete = false;
2572 			continue;
2573 		}
2574 		arm_wb |= ring->arm_wb;
2575 		ring->arm_wb = false;
2576 	}
2577 
2578 	/* Handle case where we are called by netpoll with a budget of 0 */
2579 	if (budget <= 0)
2580 		goto tx_only;
2581 
2582 	/* We attempt to distribute budget to each Rx queue fairly, but don't
2583 	 * allow the budget to go below 1 because that would exit polling early.
2584 	 */
2585 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2586 
2587 	i40e_for_each_ring(ring, q_vector->rx) {
2588 		int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2589 
2590 		work_done += cleaned;
2591 		/* if we clean as many as budgeted, we must not be done */
2592 		if (cleaned >= budget_per_ring)
2593 			clean_complete = false;
2594 	}
2595 
2596 	/* If work not completed, return budget and polling will return */
2597 	if (!clean_complete) {
2598 		int cpu_id = smp_processor_id();
2599 
2600 		/* It is possible that the interrupt affinity has changed, but
2601 		 * if the cpu is pegged at 100%, polling will never exit while
2602 		 * traffic continues and the interrupt will be stuck on this
2603 		 * cpu.  We check to make sure affinity is correct before we
2604 		 * continue to poll, otherwise we must stop polling so the
2605 		 * interrupt can move to the correct cpu.
2606 		 */
2607 		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2608 			/* Tell napi that we are done polling */
2609 			napi_complete_done(napi, work_done);
2610 
2611 			/* Force an interrupt */
2612 			i40e_force_wb(vsi, q_vector);
2613 
2614 			/* Return budget-1 so that polling stops */
2615 			return budget - 1;
2616 		}
2617 tx_only:
2618 		if (arm_wb) {
2619 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2620 			i40e_enable_wb_on_itr(vsi, q_vector);
2621 		}
2622 		return budget;
2623 	}
2624 
2625 	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2626 		q_vector->arm_wb_state = false;
2627 
2628 	/* Work is done so exit the polling mode and re-enable the interrupt */
2629 	napi_complete_done(napi, work_done);
2630 
2631 	i40e_update_enable_itr(vsi, q_vector);
2632 
2633 	return min(work_done, budget - 1);
2634 }
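
/* Illustrative arithmetic, not driver code: with the default NAPI
 * budget of 64 and a q_vector servicing 4 ring pairs, each Rx ring gets
 * max(64 / 4, 1) = 16 packets of budget per poll.  The max() guards the
 * degenerate case where more rings than budget would otherwise round a
 * ring's share down to zero and end polling prematurely.
 */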
2635 
2636 /**
2637  * i40e_atr - Add a Flow Director ATR filter
2638  * @tx_ring:  ring to add programming descriptor to
2639  * @skb:      send buffer
2640  * @tx_flags: send tx flags
2641  **/
2642 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2643 		     u32 tx_flags)
2644 {
2645 	struct i40e_filter_program_desc *fdir_desc;
2646 	struct i40e_pf *pf = tx_ring->vsi->back;
2647 	union {
2648 		unsigned char *network;
2649 		struct iphdr *ipv4;
2650 		struct ipv6hdr *ipv6;
2651 	} hdr;
2652 	struct tcphdr *th;
2653 	unsigned int hlen;
2654 	u32 flex_ptype, dtype_cmd;
2655 	int l4_proto;
2656 	u16 i;
2657 
2658 	/* make sure ATR is enabled */
2659 	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2660 		return;
2661 
2662 	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2663 		return;
2664 
2665 	/* if sampling is disabled do nothing */
2666 	if (!tx_ring->atr_sample_rate)
2667 		return;
2668 
2669 	/* Currently only IPv4/IPv6 with TCP is supported */
2670 	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2671 		return;
2672 
2673 	/* snag network header to get L4 type and address */
2674 	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2675 		      skb_inner_network_header(skb) : skb_network_header(skb);
2676 
2677 	/* Note: tx_flags gets modified to reflect inner protocols in
2678 	 * tx_enable_csum function if encap is enabled.
2679 	 */
2680 	if (tx_flags & I40E_TX_FLAGS_IPV4) {
2681 		/* access ihl as u8 to avoid unaligned access on ia64 */
2682 		hlen = (hdr.network[0] & 0x0F) << 2;
2683 		l4_proto = hdr.ipv4->protocol;
2684 	} else {
2685 		/* find the start of the innermost ipv6 header */
2686 		unsigned int inner_hlen = hdr.network - skb->data;
2687 		unsigned int h_offset = inner_hlen;
2688 
2689 		/* this function updates h_offset to the end of the header */
2690 		l4_proto =
2691 		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2692 		/* hlen is our best estimate of the offset to the tcp header */
2693 		hlen = h_offset - inner_hlen;
2694 	}
2695 
2696 	if (l4_proto != IPPROTO_TCP)
2697 		return;
2698 
2699 	th = (struct tcphdr *)(hdr.network + hlen);
2700 
2701 	/* Due to lack of space, no more new filters can be programmed */
2702 	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2703 		return;
2704 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2705 		/* HW ATR eviction will take care of removing filters on FIN
2706 		 * and RST packets.
2707 		 */
2708 		if (th->fin || th->rst)
2709 			return;
2710 	}
2711 
2712 	tx_ring->atr_count++;
2713 
2714 	/* sample on all syn/fin/rst packets or once every atr sample rate */
2715 	if (!th->fin &&
2716 	    !th->syn &&
2717 	    !th->rst &&
2718 	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2719 		return;
2720 
2721 	tx_ring->atr_count = 0;
2722 
2723 	/* grab the next descriptor */
2724 	i = tx_ring->next_to_use;
2725 	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2726 
2727 	i++;
2728 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2729 
2730 	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2731 		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
2732 	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2733 		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2734 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2735 		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2736 		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2737 
2738 	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2739 
2740 	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2741 
2742 	dtype_cmd |= (th->fin || th->rst) ?
2743 		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2744 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2745 		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2746 		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2747 
2748 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2749 		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2750 
2751 	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2752 		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2753 
2754 	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2755 	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2756 		dtype_cmd |=
2757 			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2758 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2759 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2760 	else
2761 		dtype_cmd |=
2762 			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2763 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2764 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2765 
2766 	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2767 		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2768 
2769 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2770 	fdir_desc->rsvd = cpu_to_le32(0);
2771 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2772 	fdir_desc->fd_id = cpu_to_le32(0);
2773 }
2774 
2775 /**
2776  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2777  * @skb:     send buffer
2778  * @tx_ring: ring to send buffer on
2779  * @flags:   the tx flags to be set
2780  *
2781  * Checks the skb and set up correspondingly several generic transmit flags
2782  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2783  *
2784  * Returns an error code to indicate the frame should be dropped upon error,
2785  * otherwise returns 0 to indicate the flags have been set properly.
2786  **/
2787 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2788 					     struct i40e_ring *tx_ring,
2789 					     u32 *flags)
2790 {
2791 	__be16 protocol = skb->protocol;
2792 	u32  tx_flags = 0;
2793 
2794 	if (protocol == htons(ETH_P_8021Q) &&
2795 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2796 		/* When HW VLAN acceleration is turned off by the user the
2797 		 * stack sets the protocol to 8021q so that the driver
2798 		 * can take any steps required to support the SW only
2799 		 * VLAN handling.  In our case the driver doesn't need
2800 		 * to take any further steps so just set the protocol
2801 		 * to the encapsulated ethertype.
2802 		 */
2803 		skb->protocol = vlan_get_protocol(skb);
2804 		goto out;
2805 	}
2806 
2807 	/* if we have a HW VLAN tag being added, default to the HW one */
2808 	if (skb_vlan_tag_present(skb)) {
2809 		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2810 		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2811 	/* else if it is a SW VLAN, check the next protocol and store the tag */
2812 	} else if (protocol == htons(ETH_P_8021Q)) {
2813 		struct vlan_hdr *vhdr, _vhdr;
2814 
2815 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2816 		if (!vhdr)
2817 			return -EINVAL;
2818 
2819 		protocol = vhdr->h_vlan_encapsulated_proto;
2820 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2821 		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2822 	}
2823 
2824 	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2825 		goto out;
2826 
2827 	/* Insert 802.1p priority into VLAN header */
2828 	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2829 	    (skb->priority != TC_PRIO_CONTROL)) {
2830 		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2831 		tx_flags |= (skb->priority & 0x7) <<
2832 				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2833 		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2834 			struct vlan_ethhdr *vhdr;
2835 			int rc;
2836 
2837 			rc = skb_cow_head(skb, 0);
2838 			if (rc < 0)
2839 				return rc;
2840 			vhdr = (struct vlan_ethhdr *)skb->data;
2841 			vhdr->h_vlan_TCI = htons(tx_flags >>
2842 						 I40E_TX_FLAGS_VLAN_SHIFT);
2843 		} else {
2844 			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2845 		}
2846 	}
2847 
2848 out:
2849 	*flags = tx_flags;
2850 	return 0;
2851 }
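
/* Worked example, not driver code: for a frame on VLAN 100 with
 * priority 3, the TCI stored in the upper bits of tx_flags is
 *
 *	(3 << VLAN_PRIO_SHIFT) | 100 == (3 << 13) | 100 == 0x6064
 *
 * and I40E_TX_FLAGS_HW_VLAN tells i40e_tx_map() to emit the tag through
 * the descriptor's L2TAG1 field instead of editing the packet data.
 */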
2852 
2853 /**
2854  * i40e_tso - set up the tso context descriptor
2855  * @first:    pointer to first Tx buffer for xmit
2856  * @hdr_len:  ptr to the size of the packet header
2857  * @cd_type_cmd_tso_mss: Quad Word 1
2858  *
2859  * Returns 0 if no TSO can happen, 1 if TSO is going, or a negative error code
2860  **/
2861 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2862 		    u64 *cd_type_cmd_tso_mss)
2863 {
2864 	struct sk_buff *skb = first->skb;
2865 	u64 cd_cmd, cd_tso_len, cd_mss;
2866 	union {
2867 		struct iphdr *v4;
2868 		struct ipv6hdr *v6;
2869 		unsigned char *hdr;
2870 	} ip;
2871 	union {
2872 		struct tcphdr *tcp;
2873 		struct udphdr *udp;
2874 		unsigned char *hdr;
2875 	} l4;
2876 	u32 paylen, l4_offset;
2877 	u16 gso_segs, gso_size;
2878 	int err;
2879 
2880 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2881 		return 0;
2882 
2883 	if (!skb_is_gso(skb))
2884 		return 0;
2885 
2886 	err = skb_cow_head(skb, 0);
2887 	if (err < 0)
2888 		return err;
2889 
2890 	ip.hdr = skb_network_header(skb);
2891 	l4.hdr = skb_transport_header(skb);
2892 
2893 	/* initialize outer IP header fields */
2894 	if (ip.v4->version == 4) {
2895 		ip.v4->tot_len = 0;
2896 		ip.v4->check = 0;
2897 	} else {
2898 		ip.v6->payload_len = 0;
2899 	}
2900 
2901 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2902 					 SKB_GSO_GRE_CSUM |
2903 					 SKB_GSO_IPXIP4 |
2904 					 SKB_GSO_IPXIP6 |
2905 					 SKB_GSO_UDP_TUNNEL |
2906 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
2907 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2908 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2909 			l4.udp->len = 0;
2910 
2911 			/* determine offset of outer transport header */
2912 			l4_offset = l4.hdr - skb->data;
2913 
2914 			/* remove payload length from outer checksum */
2915 			paylen = skb->len - l4_offset;
2916 			csum_replace_by_diff(&l4.udp->check,
2917 					     (__force __wsum)htonl(paylen));
2918 		}
2919 
2920 		/* reset pointers to inner headers */
2921 		ip.hdr = skb_inner_network_header(skb);
2922 		l4.hdr = skb_inner_transport_header(skb);
2923 
2924 		/* initialize inner IP header fields */
2925 		if (ip.v4->version == 4) {
2926 			ip.v4->tot_len = 0;
2927 			ip.v4->check = 0;
2928 		} else {
2929 			ip.v6->payload_len = 0;
2930 		}
2931 	}
2932 
2933 	/* determine offset of inner transport header */
2934 	l4_offset = l4.hdr - skb->data;
2935 
2936 	/* remove payload length from inner checksum */
2937 	paylen = skb->len - l4_offset;
2938 	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2939 
2940 	/* compute length of segmentation header */
2941 	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
2942 
2943 	/* pull values out of skb_shinfo */
2944 	gso_size = skb_shinfo(skb)->gso_size;
2945 	gso_segs = skb_shinfo(skb)->gso_segs;
2946 
2947 	/* update GSO size and bytecount with header size */
2948 	first->gso_segs = gso_segs;
2949 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
2950 
2951 	/* find the field values */
2952 	cd_cmd = I40E_TX_CTX_DESC_TSO;
2953 	cd_tso_len = skb->len - *hdr_len;
2954 	cd_mss = gso_size;
2955 	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2956 				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2957 				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2958 	return 1;
2959 }
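
/* Illustrative example, not driver code: csum_replace_by_diff() above
 * backs the length-dependent part out of the pseudo-header checksum so
 * hardware can fold per-segment lengths back in.  For a 64240 byte
 * payload with an mss of 1460, roughly:
 *
 *	gso_segs = 44;				// 64240 / 1460
 *	first->bytecount += (44 - 1) * *hdr_len;	// replicated headers
 *
 * since every segment after the first carries a fresh copy of the
 * protocol headers.
 */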
2960 
2961 /**
2962  * i40e_tsyn - set up the tsyn context descriptor
2963  * @tx_ring:  ptr to the ring to send
2964  * @skb:      ptr to the skb we're sending
2965  * @tx_flags: the collected send information
2966  * @cd_type_cmd_tso_mss: Quad Word 1
2967  *
2968  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2969  **/
2970 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2971 		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2972 {
2973 	struct i40e_pf *pf;
2974 
2975 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2976 		return 0;
2977 
2978 	/* Tx timestamps cannot be sampled when doing TSO */
2979 	if (tx_flags & I40E_TX_FLAGS_TSO)
2980 		return 0;
2981 
2982 	/* only timestamp the outbound packet if the user has requested it and
2983 	 * we are not already transmitting a packet to be timestamped
2984 	 */
2985 	pf = i40e_netdev_to_pf(tx_ring->netdev);
2986 	if (!(pf->flags & I40E_FLAG_PTP))
2987 		return 0;
2988 
2989 	if (pf->ptp_tx &&
2990 	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
2991 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2992 		pf->ptp_tx_start = jiffies;
2993 		pf->ptp_tx_skb = skb_get(skb);
2994 	} else {
2995 		pf->tx_hwtstamp_skipped++;
2996 		return 0;
2997 	}
2998 
2999 	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3000 				I40E_TXD_CTX_QW1_CMD_SHIFT;
3001 
3002 	return 1;
3003 }
3004 
3005 /**
3006  * i40e_tx_enable_csum - Enable Tx checksum offloads
3007  * @skb: send buffer
3008  * @tx_flags: pointer to Tx flags currently set
3009  * @td_cmd: Tx descriptor command bits to set
3010  * @td_offset: Tx descriptor header offsets to set
3011  * @tx_ring: Tx descriptor ring
3012  * @cd_tunneling: ptr to context desc bits
3013  **/
3014 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3015 			       u32 *td_cmd, u32 *td_offset,
3016 			       struct i40e_ring *tx_ring,
3017 			       u32 *cd_tunneling)
3018 {
3019 	union {
3020 		struct iphdr *v4;
3021 		struct ipv6hdr *v6;
3022 		unsigned char *hdr;
3023 	} ip;
3024 	union {
3025 		struct tcphdr *tcp;
3026 		struct udphdr *udp;
3027 		unsigned char *hdr;
3028 	} l4;
3029 	unsigned char *exthdr;
3030 	u32 offset, cmd = 0;
3031 	__be16 frag_off;
3032 	u8 l4_proto = 0;
3033 
3034 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3035 		return 0;
3036 
3037 	ip.hdr = skb_network_header(skb);
3038 	l4.hdr = skb_transport_header(skb);
3039 
3040 	/* compute outer L2 header size */
3041 	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3042 
3043 	if (skb->encapsulation) {
3044 		u32 tunnel = 0;
3045 		/* define outer network header type */
3046 		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3047 			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3048 				  I40E_TX_CTX_EXT_IP_IPV4 :
3049 				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3050 
3051 			l4_proto = ip.v4->protocol;
3052 		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3053 			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3054 
3055 			exthdr = ip.hdr + sizeof(*ip.v6);
3056 			l4_proto = ip.v6->nexthdr;
3057 			if (l4.hdr != exthdr)
3058 				ipv6_skip_exthdr(skb, exthdr - skb->data,
3059 						 &l4_proto, &frag_off);
3060 		}
3061 
3062 		/* define outer transport */
3063 		switch (l4_proto) {
3064 		case IPPROTO_UDP:
3065 			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3066 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3067 			break;
3068 		case IPPROTO_GRE:
3069 			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3070 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3071 			break;
3072 		case IPPROTO_IPIP:
3073 		case IPPROTO_IPV6:
3074 			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3075 			l4.hdr = skb_inner_network_header(skb);
3076 			break;
3077 		default:
3078 			if (*tx_flags & I40E_TX_FLAGS_TSO)
3079 				return -1;
3080 
3081 			skb_checksum_help(skb);
3082 			return 0;
3083 		}
3084 
3085 		/* compute outer L3 header size */
3086 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3087 			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3088 
3089 		/* switch IP header pointer from outer to inner header */
3090 		ip.hdr = skb_inner_network_header(skb);
3091 
3092 		/* compute tunnel header size */
3093 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3094 			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3095 
3096 		/* indicate if we need to offload outer UDP header */
3097 		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3098 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3099 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3100 			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3101 
3102 		/* record tunnel offload values */
3103 		*cd_tunneling |= tunnel;
3104 
3105 		/* switch L4 header pointer from outer to inner */
3106 		l4.hdr = skb_inner_transport_header(skb);
3107 		l4_proto = 0;
3108 
3109 		/* reset type as we transition from outer to inner headers */
3110 		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3111 		if (ip.v4->version == 4)
3112 			*tx_flags |= I40E_TX_FLAGS_IPV4;
3113 		if (ip.v6->version == 6)
3114 			*tx_flags |= I40E_TX_FLAGS_IPV6;
3115 	}
3116 
3117 	/* Enable IP checksum offloads */
3118 	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3119 		l4_proto = ip.v4->protocol;
3120 		/* the stack computes the IP header already, the only time we
3121 		 * need the hardware to recompute it is in the case of TSO.
3122 		 */
3123 		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3124 		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3125 		       I40E_TX_DESC_CMD_IIPT_IPV4;
3126 	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3127 		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3128 
3129 		exthdr = ip.hdr + sizeof(*ip.v6);
3130 		l4_proto = ip.v6->nexthdr;
3131 		if (l4.hdr != exthdr)
3132 			ipv6_skip_exthdr(skb, exthdr - skb->data,
3133 					 &l4_proto, &frag_off);
3134 	}
3135 
3136 	/* compute inner L3 header size */
3137 	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3138 
3139 	/* Enable L4 checksum offloads */
3140 	switch (l4_proto) {
3141 	case IPPROTO_TCP:
3142 		/* enable checksum offloads */
3143 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3144 		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3145 		break;
3146 	case IPPROTO_SCTP:
3147 		/* enable SCTP checksum offload */
3148 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3149 		offset |= (sizeof(struct sctphdr) >> 2) <<
3150 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3151 		break;
3152 	case IPPROTO_UDP:
3153 		/* enable UDP checksum offload */
3154 		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3155 		offset |= (sizeof(struct udphdr) >> 2) <<
3156 			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3157 		break;
3158 	default:
3159 		if (*tx_flags & I40E_TX_FLAGS_TSO)
3160 			return -1;
3161 		skb_checksum_help(skb);
3162 		return 0;
3163 	}
3164 
3165 	*td_cmd |= cmd;
3166 	*td_offset |= offset;
3167 
3168 	return 1;
3169 }
3170 
3171 /**
3172  * i40e_create_tx_ctx - Build the Tx context descriptor
3173  * @tx_ring:  ring to create the descriptor on
3174  * @cd_type_cmd_tso_mss: Quad Word 1
3175  * @cd_tunneling: Quad Word 0 - bits 0-31
3176  * @cd_l2tag2: Quad Word 0 - bits 32-63
3177  **/
3178 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3179 			       const u64 cd_type_cmd_tso_mss,
3180 			       const u32 cd_tunneling, const u32 cd_l2tag2)
3181 {
3182 	struct i40e_tx_context_desc *context_desc;
3183 	int i = tx_ring->next_to_use;
3184 
3185 	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3186 	    !cd_tunneling && !cd_l2tag2)
3187 		return;
3188 
3189 	/* grab the next descriptor */
3190 	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3191 
3192 	i++;
3193 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3194 
3195 	/* cpu_to_le32 and assign to struct fields */
3196 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3197 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3198 	context_desc->rsvd = cpu_to_le16(0);
3199 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3200 }
3201 
3202 /**
3203  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3204  * @tx_ring: the ring to be checked
3205  * @size:    the number of descriptors we want to assure is available
3206  *
3207  * Returns -EBUSY if a stop is needed, else 0
3208  **/
3209 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3210 {
3211 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3212 	/* Memory barrier before checking head and tail */
3213 	smp_mb();
3214 
3215 	/* Check again in a case another CPU has just made room available. */
3216 	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3217 		return -EBUSY;
3218 
3219 	/* A reprieve! - use start_queue because it doesn't call schedule */
3220 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3221 	++tx_ring->tx_stats.restart_queue;
3222 	return 0;
3223 }
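
/* Illustrative note, not driver code: the stop-then-recheck above pairs
 * with the wake side in the Tx cleanup path.  A hedged sketch:
 *
 *	// xmit path (here)		// cleanup path
 *	netif_stop_subqueue();		frees descriptors
 *	smp_mb();			smp_mb();
 *	recheck I40E_DESC_UNUSED();	if (stopped && enough room)
 *						netif_wake_subqueue();
 *
 * Without the barrier and recheck, the wake could slip between the
 * space check and the stop, leaving the queue stopped forever.
 */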
3224 
3225 /**
3226  * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3227  * @skb:      send buffer
3228  *
3229  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3230  * and so we need to figure out the cases where we need to linearize the skb.
3231  *
3232  * For TSO we need to count the TSO header and segment payload separately.
3233  * As such we need to check cases where we have 7 fragments or more as we
3234  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3235  * the segment payload in the first descriptor, and another 7 for the
3236  * fragments.
3237  **/
3238 bool __i40e_chk_linearize(struct sk_buff *skb)
3239 {
3240 	const struct skb_frag_struct *frag, *stale;
3241 	int nr_frags, sum;
3242 
3243 	/* no need to check if number of frags is less than 7 */
3244 	nr_frags = skb_shinfo(skb)->nr_frags;
3245 	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3246 		return false;
3247 
3248 	/* We need to walk through the list and validate that each group
3249 	 * of 6 fragments totals at least gso_size.
3250 	 */
3251 	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3252 	frag = &skb_shinfo(skb)->frags[0];
3253 
3254 	/* Initialize sum to one minus gso_size (a negative value).  We
3255 	 * use this as the worst case scenario in which the frag ahead
3256 	 * of us only provides one byte which is why we are limited to 6
3257 	 * descriptors for a single transmit as the header and previous
3258 	 * fragment are already consuming 2 descriptors.
3259 	 */
3260 	sum = 1 - skb_shinfo(skb)->gso_size;
3261 
3262 	/* Add size of frags 0 through 4 to create our initial sum */
3263 	sum += skb_frag_size(frag++);
3264 	sum += skb_frag_size(frag++);
3265 	sum += skb_frag_size(frag++);
3266 	sum += skb_frag_size(frag++);
3267 	sum += skb_frag_size(frag++);
3268 
3269 	/* Walk through fragments adding latest fragment, testing it, and
3270 	 * then removing stale fragments from the sum.
3271 	 */
3272 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3273 		int stale_size = skb_frag_size(stale);
3274 
3275 		sum += skb_frag_size(frag++);
3276 
3277 		/* The stale fragment may present us with a smaller
3278 		 * descriptor than the actual fragment size. To account
3279 		 * for that we need to remove all the data on the front and
3280 		 * figure out what the remainder would be in the last
3281 		 * descriptor associated with the fragment.
3282 		 */
3283 		if (stale_size > I40E_MAX_DATA_PER_TXD) {
3284 			int align_pad = -(stale->page_offset) &
3285 					(I40E_MAX_READ_REQ_SIZE - 1);
3286 
3287 			sum -= align_pad;
3288 			stale_size -= align_pad;
3289 
3290 			do {
3291 				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3292 				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3293 			} while (stale_size > I40E_MAX_DATA_PER_TXD);
3294 		}
3295 
3296 		/* if sum is negative we failed to make sufficient progress */
3297 		if (sum < 0)
3298 			return true;
3299 
3300 		if (!nr_frags--)
3301 			break;
3302 
3303 		sum -= stale_size;
3304 	}
3305 
3306 	return false;
3307 }
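
/* Worked example, not driver code: with a gso_size of 7000 and eight
 * 1000 byte fragments, the first six fragments cover only 6000 bytes,
 * so sum == 1 - 7000 + 6000 == -999 goes negative and the function
 * returns true: one segment could require more data descriptors than
 * the hardware's limit of eight, so the skb must be linearized.
 */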
3308 
3309 /**
3310  * i40e_tx_map - Build the Tx descriptor
3311  * @tx_ring:  ring to send buffer on
3312  * @skb:      send buffer
3313  * @first:    first buffer info buffer to use
3314  * @tx_flags: collected send information
3315  * @hdr_len:  size of the packet header
3316  * @td_cmd:   the command field in the descriptor
3317  * @td_offset: offset for checksum or crc
3318  *
3319  * Returns 0 on success, -1 on failure to DMA
3320  **/
3321 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3322 			      struct i40e_tx_buffer *first, u32 tx_flags,
3323 			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
3324 {
3325 	unsigned int data_len = skb->data_len;
3326 	unsigned int size = skb_headlen(skb);
3327 	struct skb_frag_struct *frag;
3328 	struct i40e_tx_buffer *tx_bi;
3329 	struct i40e_tx_desc *tx_desc;
3330 	u16 i = tx_ring->next_to_use;
3331 	u32 td_tag = 0;
3332 	dma_addr_t dma;
3333 	u16 desc_count = 1;
3334 
3335 	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3336 		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3337 		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3338 			 I40E_TX_FLAGS_VLAN_SHIFT;
3339 	}
3340 
3341 	first->tx_flags = tx_flags;
3342 
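	/* map the linear head of the skb first; page frags are mapped one at
	 * a time inside the loop below
	 */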
3343 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3344 
3345 	tx_desc = I40E_TX_DESC(tx_ring, i);
3346 	tx_bi = first;
3347 
3348 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3349 		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3350 
3351 		if (dma_mapping_error(tx_ring->dev, dma))
3352 			goto dma_error;
3353 
3354 		/* record length and DMA address */
3355 		dma_unmap_len_set(tx_bi, len, size);
3356 		dma_unmap_addr_set(tx_bi, dma, dma);
3357 
3358 		/* align the first chunk to end on a max read request boundary */
3359 		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3360 		tx_desc->buffer_addr = cpu_to_le64(dma);
3361 
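		/* a buffer larger than I40E_MAX_DATA_PER_TXD must be split
		 * across several descriptors, each capped at the aligned max
		 */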
3362 		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3363 			tx_desc->cmd_type_offset_bsz =
3364 				build_ctob(td_cmd, td_offset,
3365 					   max_data, td_tag);
3366 
3367 			tx_desc++;
3368 			i++;
3369 			desc_count++;
3370 
3371 			if (i == tx_ring->count) {
3372 				tx_desc = I40E_TX_DESC(tx_ring, 0);
3373 				i = 0;
3374 			}
3375 
3376 			dma += max_data;
3377 			size -= max_data;
3378 
3379 			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3380 			tx_desc->buffer_addr = cpu_to_le64(dma);
3381 		}
3382 
3383 		if (likely(!data_len))
3384 			break;
3385 
3386 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3387 							  size, td_tag);
3388 
3389 		tx_desc++;
3390 		i++;
3391 		desc_count++;
3392 
3393 		if (i == tx_ring->count) {
3394 			tx_desc = I40E_TX_DESC(tx_ring, 0);
3395 			i = 0;
3396 		}
3397 
3398 		size = skb_frag_size(frag);
3399 		data_len -= size;
3400 
3401 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3402 				       DMA_TO_DEVICE);
3403 
3404 		tx_bi = &tx_ring->tx_bi[i];
3405 	}
3406 
3407 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3408 
3409 	i++;
3410 	if (i == tx_ring->count)
3411 		i = 0;
3412 
3413 	tx_ring->next_to_use = i;
3414 
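	/* stop the queue early if there is not enough room left for another
	 * worst-case fragmented packet
	 */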
3415 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3416 
3417 	/* write last descriptor with EOP bit */
3418 	td_cmd |= I40E_TX_DESC_CMD_EOP;
3419 
3420 	/* We OR these values together to check both against 4 (WB_STRIDE)
3421 	 * below. This is safe since we don't re-use desc_count afterwards.
3422 	 */
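	/* Illustrative example: a 2-descriptor packet sent as the 4th packet
	 * since the last RS gives 2 | 4 = 6 >= WB_STRIDE, so RS is set and
	 * the stride resets; a packet using WB_STRIDE or more descriptors
	 * always requests a write-back on its own.
	 */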
3423 	desc_count |= ++tx_ring->packet_stride;
3424 
3425 	if (desc_count >= WB_STRIDE) {
3426 		/* write last descriptor with RS bit set */
3427 		td_cmd |= I40E_TX_DESC_CMD_RS;
3428 		tx_ring->packet_stride = 0;
3429 	}
3430 
3431 	tx_desc->cmd_type_offset_bsz =
3432 			build_ctob(td_cmd, td_offset, size, td_tag);
3433 
3434 	/* Force memory writes to complete before letting h/w know there
3435 	 * are new descriptors to fetch.
3436 	 *
3437 	 * We also use this memory barrier to make certain all of the
3438 	 * status bits have been updated before next_to_watch is written.
3439 	 */
3440 	wmb();
3441 
3442 	/* set next_to_watch value indicating a packet is present */
3443 	first->next_to_watch = tx_desc;
3444 
3445 	/* notify HW of packet */
3446 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3447 		writel(i, tx_ring->tail);
3448 
3449 		/* we need this if more than one processor can write to our tail
3450 		 * at a time; it synchronizes IO on IA64/Altix systems
3451 		 */
3452 		mmiowb();
3453 	}
3454 
3455 	return 0;
3456 
3457 dma_error:
3458 	dev_info(tx_ring->dev, "TX DMA map failed\n");
3459 
3460 	/* clear dma mappings for failed tx_bi map */
3461 	for (;;) {
3462 		tx_bi = &tx_ring->tx_bi[i];
3463 		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3464 		if (tx_bi == first)
3465 			break;
3466 		if (i == 0)
3467 			i = tx_ring->count;
3468 		i--;
3469 	}
3470 
3471 	tx_ring->next_to_use = i;
3472 
3473 	return -1;
3474 }
3475 
3476 /**
3477  * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3478  * @xdp: data to transmit
3479  * @xdp_ring: XDP Tx ring
3480  **/
3481 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3482 			      struct i40e_ring *xdp_ring)
3483 {
3484 	u32 size = xdp->data_end - xdp->data;
3485 	u16 i = xdp_ring->next_to_use;
3486 	struct i40e_tx_buffer *tx_bi;
3487 	struct i40e_tx_desc *tx_desc;
3488 	dma_addr_t dma;
3489 
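	/* each XDP buffer consumes exactly one descriptor; drop the frame if
	 * the ring has no free slots
	 */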
3490 	if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3491 		xdp_ring->tx_stats.tx_busy++;
3492 		return I40E_XDP_CONSUMED;
3493 	}
3494 
3495 	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
3496 	if (dma_mapping_error(xdp_ring->dev, dma))
3497 		return I40E_XDP_CONSUMED;
3498 
3499 	tx_bi = &xdp_ring->tx_bi[i];
3500 	tx_bi->bytecount = size;
3501 	tx_bi->gso_segs = 1;
3502 	tx_bi->raw_buf = xdp->data;
3503 
3504 	/* record length and DMA address */
3505 	dma_unmap_len_set(tx_bi, len, size);
3506 	dma_unmap_addr_set(tx_bi, dma, dma);
3507 
3508 	tx_desc = I40E_TX_DESC(xdp_ring, i);
3509 	tx_desc->buffer_addr = cpu_to_le64(dma);
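	/* I40E_TXD_CMD sets EOP and RS, marking the end of the packet and
	 * requesting a write-back; ICRC has the hardware insert the CRC
	 */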
3510 	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3511 						  | I40E_TXD_CMD,
3512 						  0, size, 0);
3513 
3514 	/* Make certain all of the status bits have been updated
3515 	 * before next_to_watch is written.
3516 	 */
3517 	smp_wmb();
3518 
3519 	i++;
3520 	if (i == xdp_ring->count)
3521 		i = 0;
3522 
3523 	tx_bi->next_to_watch = tx_desc;
3524 	xdp_ring->next_to_use = i;
3525 
3526 	return I40E_XDP_TX;
3527 }
3528 
3529 /**
3530  * i40e_xmit_frame_ring - Sends buffer on Tx ring
3531  * @skb:     send buffer
3532  * @tx_ring: ring to send buffer on
3533  *
3534  * Returns NETDEV_TX_OK if sent, else an error code
3535  **/
3536 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3537 					struct i40e_ring *tx_ring)
3538 {
3539 	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3540 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
3541 	struct i40e_tx_buffer *first;
3542 	u32 td_offset = 0;
3543 	u32 tx_flags = 0;
3544 	__be16 protocol;
3545 	u32 td_cmd = 0;
3546 	u8 hdr_len = 0;
3547 	int tso, count;
3548 	int tsyn;
3549 
3550 	/* prefetch the data, we'll need it later */
3551 	prefetch(skb->data);
3552 
3553 	i40e_trace(xmit_frame_ring, skb, tx_ring);
3554 
3555 	count = i40e_xmit_descriptor_count(skb);
3556 	if (i40e_chk_linearize(skb, count)) {
3557 		if (__skb_linearize(skb)) {
3558 			dev_kfree_skb_any(skb);
3559 			return NETDEV_TX_OK;
3560 		}
3561 		count = i40e_txd_use_count(skb->len);
3562 		tx_ring->tx_stats.tx_linearize++;
3563 	}
3564 
3565 	/* need: 1 descriptor per PAGE_SIZE/I40E_MAX_DATA_PER_TXD chunk of each page,
3566 	 *       + 1 desc per skb_headlen()/I40E_MAX_DATA_PER_TXD chunk of the head,
3567 	 *       + 4 desc gap to avoid the cache line where head is,
3568 	 *       + 1 desc for the context descriptor,
3569 	 * otherwise try again next time
3570 	 */
3571 	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3572 		tx_ring->tx_stats.tx_busy++;
3573 		return NETDEV_TX_BUSY;
3574 	}
3575 
3576 	/* record the location of the first descriptor for this packet */
3577 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
3578 	first->skb = skb;
3579 	first->bytecount = skb->len;
3580 	first->gso_segs = 1;
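	/* for TSO frames, i40e_tso() below updates gso_segs and bytecount
	 * on @first to reflect the post-segmentation totals
	 */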
3581 
3582 	/* prepare the xmit flags */
3583 	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3584 		goto out_drop;
3585 
3586 	/* obtain protocol of skb */
3587 	protocol = vlan_get_protocol(skb);
3588 
3589 	/* setup IPv4/IPv6 offloads */
3590 	if (protocol == htons(ETH_P_IP))
3591 		tx_flags |= I40E_TX_FLAGS_IPV4;
3592 	else if (protocol == htons(ETH_P_IPV6))
3593 		tx_flags |= I40E_TX_FLAGS_IPV6;
3594 
3595 	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3596 
3597 	if (tso < 0)
3598 		goto out_drop;
3599 	else if (tso)
3600 		tx_flags |= I40E_TX_FLAGS_TSO;
3601 
3602 	/* Always offload the checksum, since it's in the data descriptor */
3603 	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3604 				  tx_ring, &cd_tunneling);
3605 	if (tso < 0)
3606 		goto out_drop;
3607 
3608 	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3609 
3610 	if (tsyn)
3611 		tx_flags |= I40E_TX_FLAGS_TSYN;
3612 
3613 	skb_tx_timestamp(skb);
3614 
3615 	/* always enable CRC insertion offload */
3616 	td_cmd |= I40E_TX_DESC_CMD_ICRC;
3617 
3618 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3619 			   cd_tunneling, cd_l2tag2);
3620 
3621 	/* Add Flow Director ATR if it's enabled.
3622 	 *
3623 	 * NOTE: this must always be directly before the data descriptor.
3624 	 */
3625 	i40e_atr(tx_ring, skb, tx_flags);
3626 
3627 	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3628 			td_cmd, td_offset))
3629 		goto cleanup_tx_tstamp;
3630 
3631 	return NETDEV_TX_OK;
3632 
3633 out_drop:
3634 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3635 	dev_kfree_skb_any(first->skb);
3636 	first->skb = NULL;
3637 cleanup_tx_tstamp:
3638 	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3639 		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3640 
3641 		dev_kfree_skb_any(pf->ptp_tx_skb);
3642 		pf->ptp_tx_skb = NULL;
3643 		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3644 	}
3645 
3646 	return NETDEV_TX_OK;
3647 }
3648 
3649 /**
3650  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3651  * @skb:    send buffer
3652  * @netdev: network interface device structure
3653  *
3654  * Returns NETDEV_TX_OK if sent, else an error code
3655  **/
3656 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3657 {
3658 	struct i40e_netdev_priv *np = netdev_priv(netdev);
3659 	struct i40e_vsi *vsi = np->vsi;
3660 	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3661 
3662 	/* hardware can't handle really short frames, hardware padding works
3663 	/* hardware can't handle really short frames; hardware padding works
3664 	 */
3665 	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3666 		return NETDEV_TX_OK;
3667 
3668 	return i40e_xmit_frame_ring(skb, tx_ring);
3669 }
3670 
3671 /**
3672  * i40e_xdp_xmit - Implements ndo_xdp_xmit
3673  * @dev: netdev
3674  * @xdp: XDP buffer
3675  *
3676  * Returns zero if sent, else an error code
3677  **/
3678 int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
3679 {
3680 	struct i40e_netdev_priv *np = netdev_priv(dev);
3681 	unsigned int queue_index = smp_processor_id();
3682 	struct i40e_vsi *vsi = np->vsi;
3683 	int err;
3684 
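	/* the XDP Tx ring is selected by the current CPU id; the check below
	 * guards against setups with fewer rings than CPUs
	 */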
3685 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
3686 		return -ENETDOWN;
3687 
3688 	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3689 		return -ENXIO;
3690 
3691 	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
3692 	if (err != I40E_XDP_TX)
3693 		return -ENOSPC;
3694 
3695 	return 0;
3696 }
3697 
3698 /**
3699  * i40e_xdp_flush - Implements ndo_xdp_flush
3700  * @dev: netdev
3701  **/
3702 void i40e_xdp_flush(struct net_device *dev)
3703 {
3704 	struct i40e_netdev_priv *np = netdev_priv(dev);
3705 	unsigned int queue_index = smp_processor_id();
3706 	struct i40e_vsi *vsi = np->vsi;
3707 
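	/* i40e_xmit_xdp_ring() only posts descriptors; the tail bump is
	 * deferred to this flush so several transmits can be batched
	 */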
3708 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
3709 		return;
3710 
3711 	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3712 		return;
3713 
3714 	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
3715 }
3716