xref: /linux/drivers/net/ethernet/wangxun/libwx/wx_lib.c (revision 0e50474fa514822e9d990874e554bf8043a201d7)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/etherdevice.h>
5 #include <net/ip6_checksum.h>
6 #include <net/page_pool/helpers.h>
7 #include <net/inet_ecn.h>
8 #include <linux/workqueue.h>
9 #include <linux/iopoll.h>
10 #include <linux/sctp.h>
11 #include <linux/pci.h>
12 #include <net/tcp.h>
13 #include <net/ip.h>
14 
15 #include "wx_type.h"
16 #include "wx_lib.h"
17 #include "wx_ptp.h"
18 #include "wx_hw.h"
19 #include "wx_vf_lib.h"
20 
21 /* Lookup table mapping the HW PTYPE to the bit field for decoding */
22 static struct wx_dec_ptype wx_ptype_lookup[256] = {
23 	/* L2: mac */
24 	[0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
25 	[0x12] = WX_PTT(L2, NONE, NONE, NONE, TS,   PAY2),
26 	[0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
27 	[0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
28 	[0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
29 	[0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
30 	[0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
31 
32 	/* L2: ethertype filter */
33 	[0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
34 
35 	/* L3: ip non-tunnel */
36 	[0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3),
37 	[0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3),
38 	[0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP,  PAY4),
39 	[0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP,  PAY4),
40 	[0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4),
41 	[0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3),
42 	[0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3),
43 	[0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP,  PAY3),
44 	[0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP,  PAY4),
45 	[0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4),
46 
47 	/* L2: fcoe */
48 	[0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
49 	[0x38 ... 0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
50 
51 	/* IPv4 --> IPv4/IPv6 */
52 	[0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3),
53 	[0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3),
54 	[0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP,  PAY4),
55 	[0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP,  PAY4),
56 	[0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4),
57 	[0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3),
58 	[0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3),
59 	[0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP,  PAY4),
60 	[0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP,  PAY4),
61 	[0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4),
62 
63 	/* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */
64 	[0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3),
65 	[0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3),
66 	[0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3),
67 	[0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP,  PAY4),
68 	[0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP,  PAY4),
69 	[0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4),
70 	[0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3),
71 	[0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3),
72 	[0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP,  PAY4),
73 	[0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP,  PAY4),
74 	[0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4),
75 
76 	/* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
77 	[0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3),
78 	[0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3),
79 	[0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3),
80 	[0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP,  PAY4),
81 	[0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP,  PAY4),
82 	[0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4),
83 	[0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3),
84 	[0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3),
85 	[0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP,  PAY4),
86 	[0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP,  PAY4),
87 	[0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4),
88 
89 	/* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
90 	[0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3),
91 	[0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3),
92 	[0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3),
93 	[0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP,  PAY4),
94 	[0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP,  PAY4),
95 	[0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4),
96 	[0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3),
97 	[0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3),
98 	[0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP,  PAY4),
99 	[0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP,  PAY4),
100 	[0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4),
101 
102 	/* IPv6 --> IPv4/IPv6 */
103 	[0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3),
104 	[0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3),
105 	[0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP,  PAY4),
106 	[0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP,  PAY4),
107 	[0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4),
108 	[0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3),
109 	[0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3),
110 	[0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP,  PAY4),
111 	[0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP,  PAY4),
112 	[0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4),
113 
114 	/* IPv6 --> GRE/NAT -> NONE/IPv4/IPv6 */
115 	[0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3),
116 	[0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3),
117 	[0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3),
118 	[0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP,  PAY4),
119 	[0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP,  PAY4),
120 	[0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4),
121 	[0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3),
122 	[0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3),
123 	[0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP,  PAY4),
124 	[0xDC] = WX_PTT(IP, IPV6, IG, IPV6, TCP,  PAY4),
125 	[0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4),
126 
127 	/* IPv6 --> GRE/NAT -> MAC -> NONE/IPv4/IPv6 */
128 	[0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3),
129 	[0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3),
130 	[0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3),
131 	[0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP,  PAY4),
132 	[0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP,  PAY4),
133 	[0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4),
134 	[0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3),
135 	[0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3),
136 	[0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP,  PAY4),
137 	[0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP,  PAY4),
138 	[0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4),
139 
140 	/* IPv6 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
141 	[0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3),
142 	[0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3),
143 	[0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3),
144 	[0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP,  PAY4),
145 	[0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP,  PAY4),
146 	[0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4),
147 	[0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3),
148 	[0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3),
149 	[0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP,  PAY4),
150 	[0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP,  PAY4),
151 	[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
152 };
153 
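/* wx_decode_ptype - return the decoded fields for a hardware packet type */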
154 struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
155 {
156 	return wx_ptype_lookup[ptype];
157 }
158 EXPORT_SYMBOL(wx_decode_ptype);
159 
160 /* wx_test_staterr - tests bits in Rx descriptor status and error fields */
161 static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
162 			      const u32 stat_err_bits)
163 {
164 	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
165 }
166 
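/* sync the header fragment of an in-progress skb for CPU access */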
167 static void wx_dma_sync_frag(struct wx_ring *rx_ring,
168 			     struct wx_rx_buffer *rx_buffer)
169 {
170 	struct sk_buff *skb = rx_buffer->skb;
171 	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
172 
173 	dma_sync_single_range_for_cpu(rx_ring->dev,
174 				      WX_CB(skb)->dma,
175 				      skb_frag_off(frag),
176 				      skb_frag_size(frag),
177 				      DMA_FROM_DEVICE);
178 }
179 
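/* fetch the buffer for the next_to_clean descriptor and sync the
 * received data for CPU use
 */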
180 static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
181 					     union wx_rx_desc *rx_desc,
182 					     struct sk_buff **skb,
183 					     int *rx_buffer_pgcnt)
184 {
185 	struct wx_rx_buffer *rx_buffer;
186 	unsigned int size;
187 
188 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
189 	size = le16_to_cpu(rx_desc->wb.upper.length);
190 
191 #if (PAGE_SIZE < 8192)
192 	*rx_buffer_pgcnt = page_count(rx_buffer->page);
193 #else
194 	*rx_buffer_pgcnt = 0;
195 #endif
196 
197 	prefetchw(rx_buffer->page);
198 	*skb = rx_buffer->skb;
199 
200 	/* Delay unmapping of the first packet. It carries the header
201 	 * information; HW may still access the header after the writeback.
202 	 * Only unmap it when EOP is reached
203 	 */
204 	if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) {
205 		if (!*skb)
206 			goto skip_sync;
207 	} else {
208 		if (*skb)
209 			wx_dma_sync_frag(rx_ring, rx_buffer);
210 	}
211 
212 	/* we are reusing so sync this buffer for CPU use */
213 	dma_sync_single_range_for_cpu(rx_ring->dev,
214 				      rx_buffer->dma,
215 				      rx_buffer->page_offset,
216 				      size,
217 				      DMA_FROM_DEVICE);
218 skip_sync:
219 	return rx_buffer;
220 }
221 
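/* the page is owned by the page pool, so only clear the ring's references */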
222 static void wx_put_rx_buffer(struct wx_ring *rx_ring,
223 			     struct wx_rx_buffer *rx_buffer,
224 			     struct sk_buff *skb,
225 			     int rx_buffer_pgcnt)
226 {
227 	/* clear contents of rx_buffer */
228 	rx_buffer->page = NULL;
229 	rx_buffer->skb = NULL;
230 }
231 
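/* build an skb from the Rx buffer: copy small packets into the skb head,
 * otherwise attach the page as a frag and advance the offset for reuse
 */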
232 static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
233 				    struct wx_rx_buffer *rx_buffer,
234 				    union wx_rx_desc *rx_desc)
235 {
236 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
237 #if (PAGE_SIZE < 8192)
238 	unsigned int truesize = wx_rx_pg_size(rx_ring) / 2;
239 #else
240 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
241 #endif
242 	struct sk_buff *skb = rx_buffer->skb;
243 
244 	if (!skb) {
245 		void *page_addr = page_address(rx_buffer->page) +
246 				  rx_buffer->page_offset;
247 
248 		/* prefetch first cache line of first page */
249 		net_prefetch(page_addr);
250 
251 		/* allocate a skb to store the frags */
252 		skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
253 		if (unlikely(!skb))
254 			return NULL;
255 
256 		/* we will be copying header into skb->data in
257 		 * pskb_may_pull so it is in our interest to prefetch
258 		 * it now to avoid a possible cache miss
259 		 */
260 		prefetchw(skb->data);
261 
262 		if (size <= WX_RXBUFFER_256) {
263 			memcpy(__skb_put(skb, size), page_addr,
264 			       ALIGN(size, sizeof(long)));
265 			page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true);
266 			return skb;
267 		}
268 
269 		skb_mark_for_recycle(skb);
270 
271 		if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
272 			WX_CB(skb)->dma = rx_buffer->dma;
273 
274 		skb_add_rx_frag(skb, 0, rx_buffer->page,
275 				rx_buffer->page_offset,
276 				size, truesize);
277 		goto out;
278 
279 	} else {
280 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
281 				rx_buffer->page_offset, size, truesize);
282 	}
283 
284 out:
285 #if (PAGE_SIZE < 8192)
286 	/* flip page offset to other buffer */
287 	rx_buffer->page_offset ^= truesize;
288 #else
289 	/* move offset up to the next cache line */
290 	rx_buffer->page_offset += truesize;
291 #endif
292 
293 	return skb;
294 }
295 
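/* make sure the buffer has a DMA-mapped page, allocating a new one
 * from the page pool if needed
 */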
296 static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
297 				 struct wx_rx_buffer *bi)
298 {
299 	struct page *page = bi->page;
300 	dma_addr_t dma;
301 
302 	/* since we are recycling buffers we should seldom need to alloc */
303 	if (likely(page))
304 		return true;
305 
306 	page = page_pool_dev_alloc_pages(rx_ring->page_pool);
307 	if (unlikely(!page))
308 		return false;
309 	dma = page_pool_get_dma_addr(page);
310 
311 	bi->dma = dma;
312 	bi->page = page;
313 	bi->page_offset = 0;
314 
315 	return true;
316 }
317 
318 /**
319  * wx_alloc_rx_buffers - Replace used receive buffers
320  * @rx_ring: ring to place buffers on
321  * @cleaned_count: number of buffers to replace
322  **/
323 void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
324 {
325 	u16 i = rx_ring->next_to_use;
326 	union wx_rx_desc *rx_desc;
327 	struct wx_rx_buffer *bi;
328 
329 	/* nothing to do */
330 	if (!cleaned_count)
331 		return;
332 
333 	rx_desc = WX_RX_DESC(rx_ring, i);
334 	bi = &rx_ring->rx_buffer_info[i];
335 	i -= rx_ring->count;
336 
337 	do {
338 		if (!wx_alloc_mapped_page(rx_ring, bi))
339 			break;
340 
341 		/* sync the buffer for use by the device */
342 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
343 						 bi->page_offset,
344 						 rx_ring->rx_buf_len,
345 						 DMA_FROM_DEVICE);
346 
347 		rx_desc->read.pkt_addr =
348 			cpu_to_le64(bi->dma + bi->page_offset);
349 
350 		rx_desc++;
351 		bi++;
352 		i++;
353 		if (unlikely(!i)) {
354 			rx_desc = WX_RX_DESC(rx_ring, 0);
355 			bi = rx_ring->rx_buffer_info;
356 			i -= rx_ring->count;
357 		}
358 
359 		/* clear the status bits for the next_to_use descriptor */
360 		rx_desc->wb.upper.status_error = 0;
361 		/* clear the length for the next_to_use descriptor */
362 		rx_desc->wb.upper.length = 0;
363 
364 		cleaned_count--;
365 	} while (cleaned_count);
366 
367 	i += rx_ring->count;
368 
369 	if (rx_ring->next_to_use != i) {
370 		rx_ring->next_to_use = i;
371 		/* update next to alloc since we have filled the ring */
372 		rx_ring->next_to_alloc = i;
373 
374 		/* Force memory writes to complete before letting h/w
375 		 * know there are new descriptors to fetch.  (Only
376 		 * applicable for weak-ordered memory model archs,
377 		 * such as IA-64).
378 		 */
379 		wmb();
380 		writel(i, rx_ring->tail);
381 	}
382 }
383 
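/* wx_desc_unused - return the number of unused descriptors in the ring */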
384 u16 wx_desc_unused(struct wx_ring *ring)
385 {
386 	u16 ntc = ring->next_to_clean;
387 	u16 ntu = ring->next_to_use;
388 
389 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
390 }
391 
392 /**
393  * wx_is_non_eop - process handling of non-EOP buffers
394  * @rx_ring: Rx ring being processed
395  * @rx_desc: Rx descriptor for current buffer
396  * @skb: Current socket buffer containing buffer in progress
397  *
398  * This function updates next to clean. If the buffer is an EOP buffer
399  * this function exits returning false, otherwise it will place the
400  * sk_buff in the next buffer to be chained and return true indicating
401  * that this is in fact a non-EOP buffer.
402  **/
403 static bool wx_is_non_eop(struct wx_ring *rx_ring,
404 			  union wx_rx_desc *rx_desc,
405 			  struct sk_buff *skb)
406 {
407 	struct wx *wx = rx_ring->q_vector->wx;
408 	u32 ntc = rx_ring->next_to_clean + 1;
409 
410 	/* fetch, update, and store next to clean */
411 	ntc = (ntc < rx_ring->count) ? ntc : 0;
412 	rx_ring->next_to_clean = ntc;
413 
414 	prefetch(WX_RX_DESC(rx_ring, ntc));
415 
416 	/* update RSC append count if present */
417 	if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags)) {
418 		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
419 				     cpu_to_le32(WX_RXD_RSCCNT_MASK);
420 
421 		if (unlikely(rsc_enabled)) {
422 			u32 rsc_cnt = le32_to_cpu(rsc_enabled);
423 
424 			rsc_cnt >>= WX_RXD_RSCCNT_SHIFT;
425 			WX_CB(skb)->append_cnt += rsc_cnt - 1;
426 
427 			/* update ntc based on RSC value */
428 			ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
429 			ntc &= WX_RXD_NEXTP_MASK;
430 			ntc >>= WX_RXD_NEXTP_SHIFT;
431 		}
432 	}
433 
434 	/* if we are the last buffer then there is nothing else to do */
435 	if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
436 		return false;
437 
438 	rx_ring->rx_buffer_info[ntc].skb = skb;
439 	rx_ring->rx_stats.non_eop_descs++;
440 
441 	return true;
442 }
443 
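/* pull the packet header out of the first fragment into the skb linear area */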
444 static void wx_pull_tail(struct sk_buff *skb)
445 {
446 	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
447 	unsigned int pull_len;
448 	unsigned char *va;
449 
450 	/* it is valid to use page_address instead of kmap since we are
451 	 * working with pages allocated out of the lowmem pool per
452 	 * alloc_page(GFP_ATOMIC)
453 	 */
454 	va = skb_frag_address(frag);
455 
456 	/* we need the header to contain the greater of either ETH_HLEN or
457 	 * 60 bytes if the skb->len is less than 60 for skb_pad.
458 	 */
459 	pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);
460 
461 	/* align pull length to size of long to optimize memcpy performance */
462 	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
463 
464 	/* update all of the pointers */
465 	skb_frag_size_sub(frag, pull_len);
466 	skb_frag_off_add(frag, pull_len);
467 	skb->data_len -= pull_len;
468 	skb->tail += pull_len;
469 }
470 
471 /**
472  * wx_cleanup_headers - Correct corrupted or empty headers
473  * @rx_ring: rx descriptor ring packet is being transacted on
474  * @rx_desc: pointer to the EOP Rx descriptor
475  * @skb: pointer to current skb being fixed
476  *
477  * Check for corrupted packet headers caused by senders on the local L2
478  * embedded NIC switch not setting up their Tx Descriptors right.  These
479  * should be very rare.
480  *
481  * Also address the case where we are pulling data in on pages only
482  * and as such no data is present in the skb header.
483  *
484  * In addition if skb is not at least 60 bytes we need to pad it so that
485  * it is large enough to qualify as a valid Ethernet frame.
486  *
487  * Returns true if an error was encountered and skb was freed.
488  **/
489 static bool wx_cleanup_headers(struct wx_ring *rx_ring,
490 			       union wx_rx_desc *rx_desc,
491 			       struct sk_buff *skb)
492 {
493 	struct net_device *netdev = rx_ring->netdev;
494 
495 	/* verify that the packet does not have any known errors */
496 	if (!netdev ||
497 	    unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
498 		     !(netdev->features & NETIF_F_RXALL))) {
499 		dev_kfree_skb_any(skb);
500 		return true;
501 	}
502 
503 	/* place header in linear portion of buffer */
504 	if (!skb_headlen(skb))
505 		wx_pull_tail(skb);
506 
507 	/* if eth_skb_pad returns an error the skb was freed */
508 	if (eth_skb_pad(skb))
509 		return true;
510 
511 	return false;
512 }
513 
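/* wx_rx_hash - set the skb hash from the RSS field of the Rx descriptor */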
514 static void wx_rx_hash(struct wx_ring *ring,
515 		       union wx_rx_desc *rx_desc,
516 		       struct sk_buff *skb)
517 {
518 	u16 rss_type;
519 
520 	if (!(ring->netdev->features & NETIF_F_RXHASH))
521 		return;
522 
523 	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
524 			       WX_RXD_RSSTYPE_MASK;
525 
526 	if (!rss_type)
527 		return;
528 
529 	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
530 		     (WX_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
531 		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
532 }
533 
534 /**
535  * wx_rx_checksum - indicate in skb if hw indicated a good cksum
536  * @ring: structure containing ring specific data
537  * @rx_desc: current Rx descriptor being processed
538  * @skb: skb currently being received and modified
539  **/
540 static void wx_rx_checksum(struct wx_ring *ring,
541 			   union wx_rx_desc *rx_desc,
542 			   struct sk_buff *skb)
543 {
544 	struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc));
545 
546 	skb_checksum_none_assert(skb);
547 	/* Rx csum disabled */
548 	if (!(ring->netdev->features & NETIF_F_RXCSUM))
549 		return;
550 
551 	/* if IPv4 header checksum error */
552 	if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) &&
553 	     wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) ||
554 	    (wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) &&
555 	     wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) {
556 		ring->rx_stats.csum_err++;
557 		return;
558 	}
559 
560 	/* L4 checksum offload flag must be set for the below code to work */
561 	if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS))
562 		return;
563 
564 	/* Hardware can't guarantee csum if IPv6 Dest Header found */
565 	if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP &&
566 	    wx_test_staterr(rx_desc, WX_RXD_STAT_IPV6EX))
567 		return;
568 
569 	/* if L4 checksum error */
570 	if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) {
571 		ring->rx_stats.csum_err++;
572 		return;
573 	}
574 
575 	/* It must be a TCP or UDP or SCTP packet with a valid checksum */
576 	skb->ip_summed = CHECKSUM_UNNECESSARY;
577 
578 	/* If there is an outer header present that might contain a checksum
579 	 * we need to bump the checksum level by 1 to reflect the fact that
580 	 * we are indicating we validated the inner checksum.
581 	 */
582 	if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG)
583 		__skb_incr_checksum_unnecessary(skb);
584 	ring->rx_stats.csum_good_cnt++;
585 }
586 
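/* wx_rx_vlan - copy the hardware-stripped VLAN tag into the skb */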
587 static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc,
588 		       struct sk_buff *skb)
589 {
590 	u16 ethertype;
591 	u8 idx = 0;
592 
593 	if ((ring->netdev->features &
594 	     (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) &&
595 	    wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) {
596 		idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
597 		       0x1c0) >> 6;
598 		ethertype = ring->q_vector->wx->tpid[idx];
599 		__vlan_hwaccel_put_tag(skb, htons(ethertype),
600 				       le16_to_cpu(rx_desc->wb.upper.vlan));
601 	}
602 }
603 
604 static void wx_set_rsc_gso_size(struct wx_ring *ring,
605 				struct sk_buff *skb)
606 {
607 	u16 hdr_len = skb_headlen(skb);
608 
609 	/* set gso_size to avoid messing up TCP MSS */
610 	skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
611 						 WX_CB(skb)->append_cnt);
612 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
613 }
614 
615 static void wx_update_rsc_stats(struct wx_ring *rx_ring,
616 				struct sk_buff *skb)
617 {
618 	/* if append_cnt is 0 then frame is not RSC */
619 	if (!WX_CB(skb)->append_cnt)
620 		return;
621 
622 	rx_ring->rx_stats.rsc_count += WX_CB(skb)->append_cnt;
623 	rx_ring->rx_stats.rsc_flush++;
624 
625 	wx_set_rsc_gso_size(rx_ring, skb);
626 
627 	/* gso_size is computed using append_cnt so always clear it last */
628 	WX_CB(skb)->append_cnt = 0;
629 }
630 
631 /**
632  * wx_process_skb_fields - Populate skb header fields from Rx descriptor
633  * @rx_ring: rx descriptor ring packet is being transacted on
634  * @rx_desc: pointer to the EOP Rx descriptor
635  * @skb: pointer to current skb being populated
636  *
637  * This function checks the ring, descriptor, and packet information in
638  * order to populate the hash, checksum, protocol, and
639  * other fields within the skb.
640  **/
641 static void wx_process_skb_fields(struct wx_ring *rx_ring,
642 				  union wx_rx_desc *rx_desc,
643 				  struct sk_buff *skb)
644 {
645 	struct wx *wx = netdev_priv(rx_ring->netdev);
646 
647 	if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
648 		wx_update_rsc_stats(rx_ring, skb);
649 
650 	wx_rx_hash(rx_ring, rx_desc, skb);
651 	wx_rx_checksum(rx_ring, rx_desc, skb);
652 
653 	if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) &&
654 	    unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) {
655 		wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);
656 		rx_ring->last_rx_timestamp = jiffies;
657 	}
658 
659 	wx_rx_vlan(rx_ring, rx_desc, skb);
660 	skb_record_rx_queue(skb, rx_ring->queue_index);
661 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
662 }
663 
664 /**
665  * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
666  * @q_vector: structure containing interrupt and ring information
667  * @rx_ring: rx descriptor ring to transact packets on
668  * @budget: Total limit on number of packets to process
669  *
670  * This function provides a "bounce buffer" approach to Rx interrupt
671  * processing.  The advantage to this is that on systems that have
672  * expensive overhead for IOMMU access this provides a means of avoiding
673  * it by maintaining the mapping of the page to the system.
674  *
675  * Returns amount of work completed.
676  **/
677 static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
678 			   struct wx_ring *rx_ring,
679 			   int budget)
680 {
681 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
682 	u16 cleaned_count = wx_desc_unused(rx_ring);
683 
684 	do {
685 		struct wx_rx_buffer *rx_buffer;
686 		union wx_rx_desc *rx_desc;
687 		struct sk_buff *skb;
688 		int rx_buffer_pgcnt;
689 
690 		/* return some buffers to hardware, one at a time is too slow */
691 		if (cleaned_count >= WX_RX_BUFFER_WRITE) {
692 			wx_alloc_rx_buffers(rx_ring, cleaned_count);
693 			cleaned_count = 0;
694 		}
695 
696 		rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
697 		if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD))
698 			break;
699 
700 		/* This memory barrier is needed to keep us from reading
701 		 * any other fields out of the rx_desc until we know the
702 		 * descriptor has been written back
703 		 */
704 		dma_rmb();
705 
706 		rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
707 
708 		/* retrieve a buffer from the ring */
709 		skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
710 
711 		/* exit if we failed to retrieve a buffer */
712 		if (!skb) {
713 			rx_ring->rx_stats.alloc_rx_buff_failed++;
714 			break;
715 		}
716 
717 		wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
718 		cleaned_count++;
719 
720 		/* place incomplete frames back on ring for completion */
721 		if (wx_is_non_eop(rx_ring, rx_desc, skb))
722 			continue;
723 
724 		/* verify the packet layout is correct */
725 		if (wx_cleanup_headers(rx_ring, rx_desc, skb))
726 			continue;
727 
728 		/* probably a little skewed due to removing CRC */
729 		total_rx_bytes += skb->len;
730 
731 		/* populate checksum, timestamp, VLAN, and protocol */
732 		wx_process_skb_fields(rx_ring, rx_desc, skb);
733 		napi_gro_receive(&q_vector->napi, skb);
734 
735 		/* update budget accounting */
736 		total_rx_packets++;
737 	} while (likely(total_rx_packets < budget));
738 
739 	u64_stats_update_begin(&rx_ring->syncp);
740 	rx_ring->stats.packets += total_rx_packets;
741 	rx_ring->stats.bytes += total_rx_bytes;
742 	u64_stats_update_end(&rx_ring->syncp);
743 	q_vector->rx.total_packets += total_rx_packets;
744 	q_vector->rx.total_bytes += total_rx_bytes;
745 
746 	return total_rx_packets;
747 }
748 
749 static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring)
750 {
751 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
752 }
753 
754 /**
755  * wx_clean_tx_irq - Reclaim resources after transmit completes
756  * @q_vector: structure containing interrupt and ring information
757  * @tx_ring: tx ring to clean
758  * @napi_budget: Used to determine if we are in netpoll
759  **/
760 static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
761 			    struct wx_ring *tx_ring, int napi_budget)
762 {
763 	unsigned int budget = q_vector->wx->tx_work_limit;
764 	unsigned int total_bytes = 0, total_packets = 0;
765 	struct wx *wx = netdev_priv(tx_ring->netdev);
766 	unsigned int i = tx_ring->next_to_clean;
767 	struct wx_tx_buffer *tx_buffer;
768 	union wx_tx_desc *tx_desc;
769 
770 	if (!netif_carrier_ok(tx_ring->netdev))
771 		return true;
772 
773 	tx_buffer = &tx_ring->tx_buffer_info[i];
774 	tx_desc = WX_TX_DESC(tx_ring, i);
775 	i -= tx_ring->count;
776 
777 	do {
778 		union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;
779 
780 		/* if next_to_watch is not set then there is no work pending */
781 		if (!eop_desc)
782 			break;
783 
784 		/* prevent any other reads prior to eop_desc */
785 		smp_rmb();
786 
787 		if (tx_ring->headwb_mem) {
788 			u32 head = *tx_ring->headwb_mem;
789 
790 			if (head == tx_ring->next_to_clean)
791 				break;
792 			else if (head > tx_ring->next_to_clean &&
793 				 !(tx_buffer->next_eop >= tx_ring->next_to_clean &&
794 				   tx_buffer->next_eop < head))
795 				break;
796 			else if (!(tx_buffer->next_eop >= tx_ring->next_to_clean ||
797 				   tx_buffer->next_eop < head))
798 				break;
799 		} else if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD))) {
800 			/* if DD is not set pending work has not been completed */
801 			break;
802 		}
803 
804 		/* clear next_to_watch to prevent false hangs */
805 		tx_buffer->next_to_watch = NULL;
806 
807 		/* update the statistics for this packet */
808 		total_bytes += tx_buffer->bytecount;
809 		total_packets += tx_buffer->gso_segs;
810 
811 		/* schedule check for Tx timestamp */
812 		if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) &&
813 		    skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)
814 			ptp_schedule_worker(wx->ptp_clock, 0);
815 
816 		/* free the skb */
817 		napi_consume_skb(tx_buffer->skb, napi_budget);
818 
819 		/* unmap skb header data */
820 		dma_unmap_single(tx_ring->dev,
821 				 dma_unmap_addr(tx_buffer, dma),
822 				 dma_unmap_len(tx_buffer, len),
823 				 DMA_TO_DEVICE);
824 
825 		/* clear tx_buffer data */
826 		dma_unmap_len_set(tx_buffer, len, 0);
827 
828 		/* unmap remaining buffers */
829 		while (tx_desc != eop_desc) {
830 			tx_buffer++;
831 			tx_desc++;
832 			i++;
833 			if (unlikely(!i)) {
834 				i -= tx_ring->count;
835 				tx_buffer = tx_ring->tx_buffer_info;
836 				tx_desc = WX_TX_DESC(tx_ring, 0);
837 			}
838 
839 			/* unmap any remaining paged data */
840 			if (dma_unmap_len(tx_buffer, len)) {
841 				dma_unmap_page(tx_ring->dev,
842 					       dma_unmap_addr(tx_buffer, dma),
843 					       dma_unmap_len(tx_buffer, len),
844 					       DMA_TO_DEVICE);
845 				dma_unmap_len_set(tx_buffer, len, 0);
846 			}
847 		}
848 
849 		/* move us one more past the eop_desc for start of next pkt */
850 		tx_buffer++;
851 		tx_desc++;
852 		i++;
853 		if (unlikely(!i)) {
854 			i -= tx_ring->count;
855 			tx_buffer = tx_ring->tx_buffer_info;
856 			tx_desc = WX_TX_DESC(tx_ring, 0);
857 		}
858 
859 		/* issue prefetch for next Tx descriptor */
860 		prefetch(tx_desc);
861 
862 		/* update budget accounting */
863 		budget--;
864 	} while (likely(budget));
865 
866 	i += tx_ring->count;
867 	tx_ring->next_to_clean = i;
868 	u64_stats_update_begin(&tx_ring->syncp);
869 	tx_ring->stats.bytes += total_bytes;
870 	tx_ring->stats.packets += total_packets;
871 	u64_stats_update_end(&tx_ring->syncp);
872 	q_vector->tx.total_bytes += total_bytes;
873 	q_vector->tx.total_packets += total_packets;
874 
875 	netdev_tx_completed_queue(wx_txring_txq(tx_ring),
876 				  total_packets, total_bytes);
877 
878 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
879 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
880 		     (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
881 		/* Make sure that anybody stopping the queue after this
882 		 * sees the new next_to_clean.
883 		 */
884 		smp_mb();
885 
886 		if (__netif_subqueue_stopped(tx_ring->netdev,
887 					     tx_ring->queue_index) &&
888 		    netif_running(tx_ring->netdev)) {
889 			netif_wake_subqueue(tx_ring->netdev,
890 					    tx_ring->queue_index);
891 			++tx_ring->tx_stats.restart_queue;
892 		}
893 	}
894 
895 	return !!budget;
896 }
897 
898 static void wx_update_rx_dim_sample(struct wx_q_vector *q_vector)
899 {
900 	struct dim_sample sample = {};
901 
902 	dim_update_sample(q_vector->total_events,
903 			  q_vector->rx.total_packets,
904 			  q_vector->rx.total_bytes,
905 			  &sample);
906 
907 	net_dim(&q_vector->rx.dim, &sample);
908 }
909 
910 static void wx_update_tx_dim_sample(struct wx_q_vector *q_vector)
911 {
912 	struct dim_sample sample = {};
913 
914 	dim_update_sample(q_vector->total_events,
915 			  q_vector->tx.total_packets,
916 			  q_vector->tx.total_bytes,
917 			  &sample);
918 
919 	net_dim(&q_vector->tx.dim, &sample);
920 }
921 
922 static void wx_update_dim_sample(struct wx_q_vector *q_vector)
923 {
924 	wx_update_rx_dim_sample(q_vector);
925 	wx_update_tx_dim_sample(q_vector);
926 }
927 
928 /**
929  * wx_poll - NAPI polling RX/TX cleanup routine
930  * @napi: napi struct with our devices info in it
931  * @budget: amount of work driver is allowed to do this pass, in packets
932  *
933  * This function will clean all queues associated with a q_vector.
934  **/
935 static int wx_poll(struct napi_struct *napi, int budget)
936 {
937 	struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi);
938 	int per_ring_budget, work_done = 0;
939 	struct wx *wx = q_vector->wx;
940 	bool clean_complete = true;
941 	struct wx_ring *ring;
942 
943 	wx_for_each_ring(ring, q_vector->tx) {
944 		if (!wx_clean_tx_irq(q_vector, ring, budget))
945 			clean_complete = false;
946 	}
947 
948 	/* Exit if we are called by netpoll */
949 	if (budget <= 0)
950 		return budget;
951 
952 	/* attempt to distribute budget to each queue fairly, but don't allow
953 	 * the budget to go below 1 because we'll exit polling
954 	 */
955 	if (q_vector->rx.count > 1)
956 		per_ring_budget = max(budget / q_vector->rx.count, 1);
957 	else
958 		per_ring_budget = budget;
959 
960 	wx_for_each_ring(ring, q_vector->rx) {
961 		int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);
962 
963 		work_done += cleaned;
964 		if (cleaned >= per_ring_budget)
965 			clean_complete = false;
966 	}
967 
968 	/* If all work not completed, return budget and keep polling */
969 	if (!clean_complete)
970 		return budget;
971 
972 	/* all work done, exit the polling mode */
973 	if (likely(napi_complete_done(napi, work_done))) {
974 		if (wx->adaptive_itr)
975 			wx_update_dim_sample(q_vector);
976 		if (netif_running(wx->netdev))
977 			wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
978 	}
979 
980 	return min(work_done, budget - 1);
981 }
982 
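/* stop the Tx queue when descriptors run low, re-checking after a
 * memory barrier in case the cleanup path just freed some
 */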
983 static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
984 {
985 	if (likely(wx_desc_unused(tx_ring) >= size))
986 		return 0;
987 
988 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
989 
990 	/* For the next check */
991 	smp_mb();
992 
993 	/* We need to check again in a case another CPU has just
994 	 * made room available.
995 	 */
996 	if (likely(wx_desc_unused(tx_ring) < size))
997 		return -EBUSY;
998 
999 	/* A reprieve! - use start_queue because it doesn't call schedule */
1000 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
1001 	++tx_ring->tx_stats.restart_queue;
1002 
1003 	return 0;
1004 }
1005 
1006 static u32 wx_tx_cmd_type(u32 tx_flags)
1007 {
1008 	/* set type for advanced descriptor with frame checksum insertion */
1009 	u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
1010 
1011 	/* set HW vlan bit if vlan is present */
1012 	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE);
1013 	/* set segmentation enable bits for TSO/FSO */
1014 	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE);
1015 	/* set timestamp bit if present */
1016 	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP);
1017 	cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC);
1018 
1019 	return cmd_type;
1020 }
1021 
1022 static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
1023 				u32 tx_flags, unsigned int paylen)
1024 {
1025 	u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT;
1026 
1027 	/* enable L4 checksum for TSO and TX checksum offload */
1028 	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS);
1029 	/* enable IPv4 checksum for TSO */
1030 	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS);
1031 	/* enable outer IPv4 checksum for TSO */
1032 	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4,
1033 				     WX_TXD_EIPCS);
1034 	/* Check Context must be set if Tx switch is enabled, which it
1035 	 * always is for the case where virtual functions are running
1036 	 */
1037 	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC);
1038 	olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC,
1039 				     WX_TXD_IPSEC);
1040 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1041 }
1042 
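/* wx_tx_map - DMA-map the skb head and frags and write the data
 * descriptors, unwinding all mappings if a mapping fails
 */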
1043 static int wx_tx_map(struct wx_ring *tx_ring,
1044 		     struct wx_tx_buffer *first,
1045 		     const u8 hdr_len)
1046 {
1047 	struct sk_buff *skb = first->skb;
1048 	struct wx_tx_buffer *tx_buffer;
1049 	u32 tx_flags = first->tx_flags;
1050 	u16 i = tx_ring->next_to_use;
1051 	unsigned int data_len, size;
1052 	union wx_tx_desc *tx_desc;
1053 	skb_frag_t *frag;
1054 	dma_addr_t dma;
1055 	u32 cmd_type;
1056 
1057 	cmd_type = wx_tx_cmd_type(tx_flags);
1058 	tx_desc = WX_TX_DESC(tx_ring, i);
1059 	wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
1060 
1061 	size = skb_headlen(skb);
1062 	data_len = skb->data_len;
1063 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1064 
1065 	tx_buffer = first;
1066 
1067 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1068 		if (dma_mapping_error(tx_ring->dev, dma))
1069 			goto dma_error;
1070 
1071 		/* record length, and DMA address */
1072 		dma_unmap_len_set(tx_buffer, len, size);
1073 		dma_unmap_addr_set(tx_buffer, dma, dma);
1074 
1075 		tx_desc->read.buffer_addr = cpu_to_le64(dma);
1076 
1077 		while (unlikely(size > WX_MAX_DATA_PER_TXD)) {
1078 			tx_desc->read.cmd_type_len =
1079 				cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD);
1080 
1081 			i++;
1082 			tx_desc++;
1083 			if (i == tx_ring->count) {
1084 				tx_desc = WX_TX_DESC(tx_ring, 0);
1085 				i = 0;
1086 			}
1087 			tx_desc->read.olinfo_status = 0;
1088 
1089 			dma += WX_MAX_DATA_PER_TXD;
1090 			size -= WX_MAX_DATA_PER_TXD;
1091 
1092 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
1093 		}
1094 
1095 		if (likely(!data_len))
1096 			break;
1097 
1098 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1099 
1100 		i++;
1101 		tx_desc++;
1102 		if (i == tx_ring->count) {
1103 			tx_desc = WX_TX_DESC(tx_ring, 0);
1104 			i = 0;
1105 		}
1106 		tx_desc->read.olinfo_status = 0;
1107 
1108 		size = skb_frag_size(frag);
1109 
1110 		data_len -= size;
1111 
1112 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1113 				       DMA_TO_DEVICE);
1114 
1115 		tx_buffer = &tx_ring->tx_buffer_info[i];
1116 	}
1117 
1118 	/* write last descriptor with RS and EOP bits */
1119 	cmd_type |= size | WX_TXD_EOP | WX_TXD_RS;
1120 	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1121 
1122 	netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
1123 
1124 	/* set the timestamp */
1125 	first->time_stamp = jiffies;
1126 	skb_tx_timestamp(skb);
1127 
1128 	/* Force memory writes to complete before letting h/w know there
1129 	 * are new descriptors to fetch.  (Only applicable for weak-ordered
1130 	 * memory model archs, such as IA-64).
1131 	 *
1132 	 * We also need this memory barrier to make certain all of the
1133 	 * status bits have been updated before next_to_watch is written.
1134 	 */
1135 	wmb();
1136 
1137 	/* set next_to_watch value indicating a packet is present */
1138 	first->next_to_watch = tx_desc;
1139 
1140 	/* set next_eop for amlite tx head wb */
1141 	if (tx_ring->headwb_mem)
1142 		first->next_eop = i;
1143 
1144 	i++;
1145 	if (i == tx_ring->count)
1146 		i = 0;
1147 
1148 	tx_ring->next_to_use = i;
1149 
1150 	wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
1151 
1152 	if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
1153 		writel(i, tx_ring->tail);
1154 
1155 	return 0;
1156 dma_error:
1157 	dev_err(tx_ring->dev, "TX DMA map failed\n");
1158 
1159 	/* clear dma mappings for failed tx_buffer_info map */
1160 	for (;;) {
1161 		tx_buffer = &tx_ring->tx_buffer_info[i];
1162 		if (dma_unmap_len(tx_buffer, len))
1163 			dma_unmap_page(tx_ring->dev,
1164 				       dma_unmap_addr(tx_buffer, dma),
1165 				       dma_unmap_len(tx_buffer, len),
1166 				       DMA_TO_DEVICE);
1167 		dma_unmap_len_set(tx_buffer, len, 0);
1168 		if (tx_buffer == first)
1169 			break;
1170 		if (i == 0)
1171 			i += tx_ring->count;
1172 		i--;
1173 	}
1174 
1175 	dev_kfree_skb_any(first->skb);
1176 	first->skb = NULL;
1177 
1178 	tx_ring->next_to_use = i;
1179 
1180 	return -ENOMEM;
1181 }
1182 
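/* write a Tx context descriptor carrying offload parameters for the
 * data descriptors that follow
 */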
1183 static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
1184 			   u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
1185 {
1186 	struct wx_tx_context_desc *context_desc;
1187 	u16 i = tx_ring->next_to_use;
1188 
1189 	context_desc = WX_TX_CTXTDESC(tx_ring, i);
1190 	i++;
1191 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1192 
1193 	/* set bits to identify this as an advanced context descriptor */
1194 	type_tucmd |= WX_TXD_DTYP_CTXT;
1195 	context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
1196 	context_desc->seqnum_seed       = cpu_to_le32(fcoe_sof_eof);
1197 	context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
1198 	context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
1199 }
1200 
1201 union network_header {
1202 	struct iphdr *ipv4;
1203 	struct ipv6hdr *ipv6;
1204 	void *raw;
1205 };
1206 
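/* derive the hardware packet type encoding from the skb headers,
 * taking tunnel encapsulation into account
 */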
1207 static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
1208 {
1209 	u8 tun_prot = 0, l4_prot = 0, ptype = 0;
1210 	struct sk_buff *skb = first->skb;
1211 	unsigned char *exthdr, *l4_hdr;
1212 	__be16 frag_off;
1213 
1214 	if (skb->encapsulation) {
1215 		union network_header hdr;
1216 
1217 		switch (first->protocol) {
1218 		case htons(ETH_P_IP):
1219 			tun_prot = ip_hdr(skb)->protocol;
1220 			ptype = WX_PTYPE_TUN_IPV4;
1221 			break;
1222 		case htons(ETH_P_IPV6):
1223 			l4_hdr = skb_transport_header(skb);
1224 			exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
1225 			tun_prot = ipv6_hdr(skb)->nexthdr;
1226 			if (l4_hdr != exthdr)
1227 				ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
1228 			ptype = WX_PTYPE_TUN_IPV6;
1229 			break;
1230 		default:
1231 			return ptype;
1232 		}
1233 
1234 		if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) {
1235 			hdr.raw = (void *)inner_ip_hdr(skb);
1236 			ptype |= WX_PTYPE_PKT_IPIP;
1237 		} else if (tun_prot == IPPROTO_UDP) {
1238 			hdr.raw = (void *)inner_ip_hdr(skb);
1239 			if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1240 			    skb->inner_protocol != htons(ETH_P_TEB)) {
1241 				ptype |= WX_PTYPE_PKT_IG;
1242 			} else {
1243 				if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
1244 				     == htons(ETH_P_8021Q))
1245 					ptype |= WX_PTYPE_PKT_IGMV;
1246 				else
1247 					ptype |= WX_PTYPE_PKT_IGM;
1248 			}
1249 
1250 		} else if (tun_prot == IPPROTO_GRE) {
1251 			hdr.raw = (void *)inner_ip_hdr(skb);
1252 			if (skb->inner_protocol ==  htons(ETH_P_IP) ||
1253 			    skb->inner_protocol ==  htons(ETH_P_IPV6)) {
1254 				ptype |= WX_PTYPE_PKT_IG;
1255 			} else {
1256 				if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
1257 				    == htons(ETH_P_8021Q))
1258 					ptype |= WX_PTYPE_PKT_IGMV;
1259 				else
1260 					ptype |= WX_PTYPE_PKT_IGM;
1261 			}
1262 		} else {
1263 			return ptype;
1264 		}
1265 
1266 		switch (hdr.ipv4->version) {
1267 		case IPVERSION:
1268 			l4_prot = hdr.ipv4->protocol;
1269 			break;
1270 		case 6:
1271 			l4_hdr = skb_inner_transport_header(skb);
1272 			exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr);
1273 			l4_prot = inner_ipv6_hdr(skb)->nexthdr;
1274 			if (l4_hdr != exthdr)
1275 				ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
1276 			ptype |= WX_PTYPE_PKT_IPV6;
1277 			break;
1278 		default:
1279 			return ptype;
1280 		}
1281 	} else {
1282 		switch (first->protocol) {
1283 		case htons(ETH_P_IP):
1284 			l4_prot = ip_hdr(skb)->protocol;
1285 			ptype = WX_PTYPE_PKT_IP;
1286 			break;
1287 		case htons(ETH_P_IPV6):
1288 			l4_hdr = skb_transport_header(skb);
1289 			exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
1290 			l4_prot = ipv6_hdr(skb)->nexthdr;
1291 			if (l4_hdr != exthdr)
1292 				ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
1293 			ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
1294 			break;
1295 		default:
1296 			return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC;
1297 		}
1298 	}
1299 	switch (l4_prot) {
1300 	case IPPROTO_TCP:
1301 		ptype |= WX_PTYPE_TYP_TCP;
1302 		break;
1303 	case IPPROTO_UDP:
1304 		ptype |= WX_PTYPE_TYP_UDP;
1305 		break;
1306 	case IPPROTO_SCTP:
1307 		ptype |= WX_PTYPE_TYP_SCTP;
1308 		break;
1309 	default:
1310 		ptype |= WX_PTYPE_TYP_IP;
1311 		break;
1312 	}
1313 
1314 	return ptype;
1315 }
1316 
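/* wx_tso - set up a TSO context descriptor
 * Returns 1 when TSO is used, 0 when it is not needed, or a negative
 * errno if the header could not be prepared.
 */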
1317 static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
1318 		  u8 *hdr_len, u8 ptype)
1319 {
1320 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1321 	struct net_device *netdev = tx_ring->netdev;
1322 	u32 l4len, tunhdr_eiplen_tunlen = 0;
1323 	struct sk_buff *skb = first->skb;
1324 	bool enc = skb->encapsulation;
1325 	struct ipv6hdr *ipv6h;
1326 	struct tcphdr *tcph;
1327 	struct iphdr *iph;
1328 	u8 tun_prot = 0;
1329 	int err;
1330 
1331 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1332 		return 0;
1333 
1334 	if (!skb_is_gso(skb))
1335 		return 0;
1336 
1337 	err = skb_cow_head(skb, 0);
1338 	if (err < 0)
1339 		return err;
1340 
1341 	/* indicates the inner headers in the skbuff are valid. */
1342 	iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
1343 	if (iph->version == 4) {
1344 		tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1345 		iph->tot_len = 0;
1346 		iph->check = 0;
1347 		tcph->check = ~csum_tcpudp_magic(iph->saddr,
1348 						 iph->daddr, 0,
1349 						 IPPROTO_TCP, 0);
1350 		first->tx_flags |= WX_TX_FLAGS_TSO |
1351 				   WX_TX_FLAGS_CSUM |
1352 				   WX_TX_FLAGS_IPV4 |
1353 				   WX_TX_FLAGS_CC;
1354 	} else if (iph->version == 6 && skb_is_gso_v6(skb)) {
1355 		ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1356 		tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1357 		ipv6h->payload_len = 0;
1358 		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1359 					       &ipv6h->daddr, 0,
1360 					       IPPROTO_TCP, 0);
1361 		first->tx_flags |= WX_TX_FLAGS_TSO |
1362 				   WX_TX_FLAGS_CSUM |
1363 				   WX_TX_FLAGS_CC;
1364 	}
1365 
1366 	/* compute header lengths */
1367 	l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1368 	*hdr_len = enc ? skb_inner_transport_offset(skb) :
1369 			 skb_transport_offset(skb);
1370 	*hdr_len += l4len;
1371 
1372 	/* update gso size and bytecount with header size */
1373 	first->gso_segs = skb_shinfo(skb)->gso_segs;
1374 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
1375 
1376 	/* mss_l4len_idx: use 0 as index for TSO */
1377 	mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT;
1378 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT;
1379 
1380 	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
1381 	if (enc) {
1382 		unsigned char *exthdr, *l4_hdr;
1383 		__be16 frag_off;
1384 
1385 		switch (first->protocol) {
1386 		case htons(ETH_P_IP):
1387 			tun_prot = ip_hdr(skb)->protocol;
1388 			first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
1389 			break;
1390 		case htons(ETH_P_IPV6):
1391 			l4_hdr = skb_transport_header(skb);
1392 			exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
1393 			tun_prot = ipv6_hdr(skb)->nexthdr;
1394 			if (l4_hdr != exthdr)
1395 				ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
1396 			break;
1397 		default:
1398 			break;
1399 		}
1400 		switch (tun_prot) {
1401 		case IPPROTO_UDP:
1402 			tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
1403 			tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
1404 						 WX_TXD_OUTER_IPLEN_SHIFT) |
1405 						(((skb_inner_mac_header(skb) -
1406 						skb_transport_header(skb)) >> 1) <<
1407 						WX_TXD_TUNNEL_LEN_SHIFT);
1408 			break;
1409 		case IPPROTO_GRE:
1410 			tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
1411 			tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
1412 						 WX_TXD_OUTER_IPLEN_SHIFT) |
1413 						(((skb_inner_mac_header(skb) -
1414 						skb_transport_header(skb)) >> 1) <<
1415 						WX_TXD_TUNNEL_LEN_SHIFT);
1416 			break;
1417 		case IPPROTO_IPIP:
1418 		case IPPROTO_IPV6:
1419 			tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
1420 						(char *)ip_hdr(skb)) >> 2) <<
1421 						WX_TXD_OUTER_IPLEN_SHIFT;
1422 			break;
1423 		default:
1424 			break;
1425 		}
1426 		vlan_macip_lens = skb_inner_network_header_len(skb) >> 1;
1427 	} else {
1428 		vlan_macip_lens = skb_network_header_len(skb) >> 1;
1429 	}
1430 
1431 	vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT;
1432 	vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;
1433 
1434 	type_tucmd = ptype << 24;
1435 	if (skb->vlan_proto == htons(ETH_P_8021AD) &&
1436 	    netdev->features & NETIF_F_HW_VLAN_STAG_TX)
1437 		type_tucmd |= WX_SET_FLAG(first->tx_flags,
1438 					  WX_TX_FLAGS_HW_VLAN,
1439 					  0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
1440 	wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
1441 		       type_tucmd, mss_l4len_idx);
1442 
1443 	return 1;
1444 }
1445 
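/* wx_tx_csum - set up the Tx checksum offload context for non-TSO packets */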
1446 static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
1447 		       u8 ptype)
1448 {
1449 	u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0;
1450 	struct net_device *netdev = tx_ring->netdev;
1451 	u32 mss_l4len_idx = 0, type_tucmd;
1452 	struct sk_buff *skb = first->skb;
1453 	u8 tun_prot = 0;
1454 
1455 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
1456 csum_failed:
1457 		if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
1458 		    !(first->tx_flags & WX_TX_FLAGS_CC))
1459 			return;
1460 		vlan_macip_lens = skb_network_offset(skb) <<
1461 				  WX_TXD_MACLEN_SHIFT;
1462 	} else {
1463 		unsigned char *exthdr, *l4_hdr;
1464 		__be16 frag_off;
1465 		u8 l4_prot = 0;
1466 		union {
1467 			struct iphdr *ipv4;
1468 			struct ipv6hdr *ipv6;
1469 			u8 *raw;
1470 		} network_hdr;
1471 		union {
1472 			struct tcphdr *tcphdr;
1473 			u8 *raw;
1474 		} transport_hdr;
1475 
1476 		if (skb->encapsulation) {
1477 			network_hdr.raw = skb_inner_network_header(skb);
1478 			transport_hdr.raw = skb_inner_transport_header(skb);
1479 			vlan_macip_lens = skb_network_offset(skb) <<
1480 					  WX_TXD_MACLEN_SHIFT;
1481 			switch (first->protocol) {
1482 			case htons(ETH_P_IP):
1483 				tun_prot = ip_hdr(skb)->protocol;
1484 				break;
1485 			case htons(ETH_P_IPV6):
1486 				l4_hdr = skb_transport_header(skb);
1487 				exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
1488 				tun_prot = ipv6_hdr(skb)->nexthdr;
1489 				if (l4_hdr != exthdr)
1490 					ipv6_skip_exthdr(skb, exthdr - skb->data,
1491 							 &tun_prot, &frag_off);
1492 				break;
1493 			default:
1494 				return;
1495 			}
1496 			switch (tun_prot) {
1497 			case IPPROTO_UDP:
1498 				tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
1499 				tunhdr_eiplen_tunlen |=
1500 					((skb_network_header_len(skb) >> 2) <<
1501 					WX_TXD_OUTER_IPLEN_SHIFT) |
1502 					(((skb_inner_mac_header(skb) -
1503 					skb_transport_header(skb)) >> 1) <<
1504 					WX_TXD_TUNNEL_LEN_SHIFT);
1505 				break;
1506 			case IPPROTO_GRE:
1507 				tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
1508 				tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
1509 							 WX_TXD_OUTER_IPLEN_SHIFT) |
1510 							 (((skb_inner_mac_header(skb) -
1511 							    skb_transport_header(skb)) >> 1) <<
1512 							  WX_TXD_TUNNEL_LEN_SHIFT);
1513 				break;
1514 			case IPPROTO_IPIP:
1515 			case IPPROTO_IPV6:
1516 				tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
1517 							(char *)ip_hdr(skb)) >> 2) <<
1518 							WX_TXD_OUTER_IPLEN_SHIFT;
1519 				break;
1520 			default:
1521 				break;
1522 			}
1523 
1524 		} else {
1525 			network_hdr.raw = skb_network_header(skb);
1526 			transport_hdr.raw = skb_transport_header(skb);
1527 			vlan_macip_lens = skb_network_offset(skb) <<
1528 					  WX_TXD_MACLEN_SHIFT;
1529 		}
1530 
1531 		switch (network_hdr.ipv4->version) {
1532 		case IPVERSION:
1533 			vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
1534 			l4_prot = network_hdr.ipv4->protocol;
1535 			break;
1536 		case 6:
1537 			vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
1538 			exthdr = network_hdr.raw + sizeof(struct ipv6hdr);
1539 			l4_prot = network_hdr.ipv6->nexthdr;
1540 			if (transport_hdr.raw != exthdr)
1541 				ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
1542 			break;
1543 		default:
1544 			break;
1545 		}
1546 
1547 		switch (l4_prot) {
1548 		case IPPROTO_TCP:
1549 			mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
1550 					WX_TXD_L4LEN_SHIFT;
1551 			break;
1552 		case IPPROTO_SCTP:
1553 			mss_l4len_idx = sizeof(struct sctphdr) <<
1554 					WX_TXD_L4LEN_SHIFT;
1555 			break;
1556 		case IPPROTO_UDP:
1557 			mss_l4len_idx = sizeof(struct udphdr) <<
1558 					WX_TXD_L4LEN_SHIFT;
1559 			break;
1560 		default:
1561 			skb_checksum_help(skb);
1562 			goto csum_failed;
1563 		}
1564 
1565 		/* update TX checksum flag */
1566 		first->tx_flags |= WX_TX_FLAGS_CSUM;
1567 	}
1568 	first->tx_flags |= WX_TX_FLAGS_CC;
1569 	/* vlan_macip_lens: MACLEN, VLAN tag */
1570 	vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;
1571 
1572 	type_tucmd = ptype << 24;
1573 	if (skb->vlan_proto == htons(ETH_P_8021AD) &&
1574 	    netdev->features & NETIF_F_HW_VLAN_STAG_TX)
1575 		type_tucmd |= WX_SET_FLAG(first->tx_flags,
1576 					  WX_TX_FLAGS_HW_VLAN,
1577 					  0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
1578 	wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
1579 		       type_tucmd, mss_l4len_idx);
1580 }
1581 
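/* per-ring transmit path: reserve descriptors, record the skb, apply
 * TSO/checksum offloads and hand the buffers to wx_tx_map()
 */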
1582 static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
1583 				      struct wx_ring *tx_ring)
1584 {
1585 	struct wx *wx = netdev_priv(tx_ring->netdev);
1586 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
1587 	struct wx_tx_buffer *first;
1588 	u8 hdr_len = 0, ptype;
1589 	unsigned short f;
1590 	u32 tx_flags = 0;
1591 	int tso;
1592 
1593 	/* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
1594 	 *       + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
1595 	 *       + 2 desc gap to keep tail from touching head,
1596 	 *       + 1 desc for context descriptor,
1597 	 * otherwise try next time
1598 	 */
1599 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1600 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
1601 						     frags[f]));
1602 
1603 	if (wx_maybe_stop_tx(tx_ring, count + 3)) {
1604 		tx_ring->tx_stats.tx_busy++;
1605 		return NETDEV_TX_BUSY;
1606 	}
1607 
1608 	/* record the location of the first descriptor for this packet */
1609 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1610 	first->skb = skb;
1611 	first->bytecount = skb->len;
1612 	first->gso_segs = 1;
1613 
1614 	/* if we have a HW VLAN tag being added, default to the HW one */
1615 	if (skb_vlan_tag_present(skb)) {
1616 		tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT;
1617 		tx_flags |= WX_TX_FLAGS_HW_VLAN;
1618 	}
1619 
1620 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1621 	    wx->ptp_clock) {
1622 		if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
1623 		    !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS,
1624 					   wx->state)) {
1625 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1626 			tx_flags |= WX_TX_FLAGS_TSTAMP;
1627 			wx->ptp_tx_skb = skb_get(skb);
1628 			wx->ptp_tx_start = jiffies;
1629 		} else {
1630 			wx->tx_hwtstamp_skipped++;
1631 		}
1632 	}
1633 
1634 	/* record initial flags and protocol */
1635 	first->tx_flags = tx_flags;
1636 	first->protocol = vlan_get_protocol(skb);
1637 
1638 	ptype = wx_encode_tx_desc_ptype(first);
1639 
1640 	tso = wx_tso(tx_ring, first, &hdr_len, ptype);
1641 	if (tso < 0)
1642 		goto out_drop;
1643 	else if (!tso)
1644 		wx_tx_csum(tx_ring, first, ptype);
1645 
1646 	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
1647 		wx->atr(tx_ring, first, ptype);
1648 
1649 	if (wx_tx_map(tx_ring, first, hdr_len))
1650 		goto cleanup_tx_tstamp;
1651 
1652 	return NETDEV_TX_OK;
1653 out_drop:
1654 	dev_kfree_skb_any(first->skb);
1655 	first->skb = NULL;
1656 cleanup_tx_tstamp:
1657 	if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) {
1658 		dev_kfree_skb_any(wx->ptp_tx_skb);
1659 		wx->ptp_tx_skb = NULL;
1660 		wx->tx_hwtstamp_errors++;
1661 		clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
1662 	}
1663 
1664 	return NETDEV_TX_OK;
1665 }
1666 
1667 netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
1668 			  struct net_device *netdev)
1669 {
1670 	unsigned int r_idx = skb->queue_mapping;
1671 	struct wx *wx = netdev_priv(netdev);
1672 	struct wx_ring *tx_ring;
1673 
1674 	if (!netif_carrier_ok(netdev)) {
1675 		dev_kfree_skb_any(skb);
1676 		return NETDEV_TX_OK;
1677 	}
1678 
1679 	/* The minimum packet size for olinfo paylen is 17 so pad the skb
1680 	 * in order to meet this minimum size requirement.
1681 	 */
1682 	if (skb_put_padto(skb, 17))
1683 		return NETDEV_TX_OK;
1684 
1685 	if (r_idx >= wx->num_tx_queues)
1686 		r_idx = r_idx % wx->num_tx_queues;
1687 	tx_ring = wx->tx_ring[r_idx];
1688 
1689 	return wx_xmit_frame_ring(skb, tx_ring);
1690 }
1691 EXPORT_SYMBOL(wx_xmit_frame);
1692 
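/* program the ITR value chosen by DIM into hardware, using the VF
 * register path when running as a virtual function
 */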
1693 static void wx_set_itr(struct wx_q_vector *q_vector)
1694 {
1695 	struct wx *wx = q_vector->wx;
1696 	u32 new_itr;
1697 
1698 	if (!wx->adaptive_itr)
1699 		return;
1700 
1701 	/* use the smallest value of new ITR delay calculations */
1702 	new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
1703 	new_itr <<= 2;
1704 
1705 	if (new_itr != q_vector->itr) {
1706 		/* save the algorithm value here */
1707 		q_vector->itr = new_itr;
1708 
1709 		if (wx->pdev->is_virtfn)
1710 			wx_write_eitr_vf(q_vector);
1711 		else
1712 			wx_write_eitr(q_vector);
1713 	}
1714 }
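/* Example trace (illustrative only): with adaptive ITR enabled and DIM
 * reporting rx.itr = 50 and tx.itr = 20, the smaller value is taken and
 * scaled to new_itr = 20 << 2 = 80; the EITR register is rewritten via
 * wx_write_eitr()/wx_write_eitr_vf() only when this differs from the
 * currently programmed q_vector->itr.
 */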
1715 
1716 static void wx_rx_dim_work(struct work_struct *work)
1717 {
1718 	struct dim *dim = container_of(work, struct dim, work);
1719 	struct dim_cq_moder rx_moder;
1720 	struct wx_ring_container *rx;
1721 	struct wx_q_vector *q_vector;
1722 
1723 	rx = container_of(dim, struct wx_ring_container, dim);
1724 
1725 	rx_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1726 	rx->itr = rx_moder.usec;
1727 
1728 	q_vector = container_of(rx, struct wx_q_vector, rx);
1729 	wx_set_itr(q_vector);
1730 
1731 	dim->state = DIM_START_MEASURE;
1732 }
1733 
1734 static void wx_tx_dim_work(struct work_struct *work)
1735 {
1736 	struct dim *dim = container_of(work, struct dim, work);
1737 	struct dim_cq_moder tx_moder;
1738 	struct wx_ring_container *tx;
1739 	struct wx_q_vector *q_vector;
1740 
1741 	tx = container_of(dim, struct wx_ring_container, dim);
1742 
1743 	tx_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
1744 	tx->itr = tx_moder.usec;
1745 
1746 	q_vector = container_of(tx, struct wx_q_vector, tx);
1747 	wx_set_itr(q_vector);
1748 
1749 	dim->state = DIM_START_MEASURE;
1750 }
1751 
1752 void wx_napi_enable_all(struct wx *wx)
1753 {
1754 	struct wx_q_vector *q_vector;
1755 	int q_idx;
1756 
1757 	for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
1758 		q_vector = wx->q_vector[q_idx];
1759 
1760 		INIT_WORK(&q_vector->rx.dim.work, wx_rx_dim_work);
1761 		INIT_WORK(&q_vector->tx.dim.work, wx_tx_dim_work);
1762 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1763 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1764 		napi_enable(&q_vector->napi);
1765 	}
1766 }
1767 EXPORT_SYMBOL(wx_napi_enable_all);
1768 
1769 void wx_napi_disable_all(struct wx *wx)
1770 {
1771 	struct wx_q_vector *q_vector;
1772 	int q_idx;
1773 
1774 	for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
1775 		q_vector = wx->q_vector[q_idx];
1776 		napi_disable(&q_vector->napi);
1777 		disable_work_sync(&q_vector->rx.dim.work);
1778 		disable_work_sync(&q_vector->tx.dim.work);
1779 	}
1780 }
1781 EXPORT_SYMBOL(wx_napi_disable_all);
1782 
1783 static bool wx_set_vmdq_queues(struct wx *wx)
1784 {
1785 	u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit;
1786 	u16 rss_i = wx->ring_feature[RING_F_RSS].limit;
1787 	u16 rss_m = WX_RSS_DISABLED_MASK;
1788 	u16 vmdq_m = 0;
1789 
1790 	/* only proceed if VMDq is enabled */
1791 	if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
1792 		return false;
1793 	/* Add starting offset to total pool count */
1794 	vmdq_i += wx->ring_feature[RING_F_VMDQ].offset;
1795 
1796 	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
1797 		/* double check we are limited to maximum pools */
1798 		vmdq_i = min_t(u16, 64, vmdq_i);
1799 
1800 		/* 64 pool mode with 2 queues per pool, or
1801 		 * 16/32/64 pool mode with 1 queue per pool
1802 		 */
1803 		if (vmdq_i > 32 || rss_i < 4) {
1804 			vmdq_m = WX_VMDQ_2Q_MASK;
1805 			rss_m = WX_RSS_2Q_MASK;
1806 			rss_i = min_t(u16, rss_i, 2);
1807 		/* 32 pool mode with 4 queues per pool */
1808 		} else {
1809 			vmdq_m = WX_VMDQ_4Q_MASK;
1810 			rss_m = WX_RSS_4Q_MASK;
1811 			rss_i = 4;
1812 		}
1813 	} else {
1814 		/* double check we are limited to maximum pools */
1815 		vmdq_i = min_t(u16, 8, vmdq_i);
1816 
1817 		/* when VMDQ on, disable RSS */
1818 		rss_i = 1;
1819 	}
1820 
1821 	/* remove the starting offset from the pool count */
1822 	vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset;
1823 
1824 	/* save features for later use */
1825 	wx->ring_feature[RING_F_VMDQ].indices = vmdq_i;
1826 	wx->ring_feature[RING_F_VMDQ].mask = vmdq_m;
1827 
1828 	/* limit RSS based on user input and save for later use */
1829 	wx->ring_feature[RING_F_RSS].indices = rss_i;
1830 	wx->ring_feature[RING_F_RSS].mask = rss_m;
1831 
1832 	wx->queues_per_pool = rss_i; /* may be the same as num_rx_queues_per_pool */
1833 	wx->num_rx_pools = vmdq_i;
1834 	wx->num_rx_queues_per_pool = rss_i;
1835 
1836 	wx->num_rx_queues = vmdq_i * rss_i;
1837 	wx->num_tx_queues = vmdq_i * rss_i;
1838 
1839 	return true;
1840 }
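/* Worked example (illustrative limits only): on a MULTI_64_FUNC device with
 * a VMDq limit of 16 pools (offset 0) and an RSS limit of 4, the else branch
 * above picks the 4-queues-per-pool layout (WX_VMDQ_4Q_MASK, rss_i = 4), so
 * num_rx_queues = num_tx_queues = 16 * 4 = 64.  Requesting more than 32
 * pools instead forces the 2-queue masks and clamps rss_i to 2.
 */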
1841 
1842 /**
1843  * wx_set_rss_queues - Allocate queues for RSS
1844  * @wx: board private structure to initialize
1845  *
1846  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
1847  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1848  *
1849  **/
1850 static void wx_set_rss_queues(struct wx *wx)
1851 {
1852 	struct wx_ring_feature *f;
1853 
1854 	/* set RSS mask based on the 64 or 8 queue limit */
1855 	f = &wx->ring_feature[RING_F_RSS];
1856 	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
1857 		f->mask = WX_RSS_64Q_MASK;
1858 	else
1859 		f->mask = WX_RSS_8Q_MASK;
1860 	f->indices = f->limit;
1861 
1862 	if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
1863 		goto out;
1864 
1865 	clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
1866 
1867 	wx->ring_feature[RING_F_FDIR].indices = 1;
1868 	/* Use Flow Director in addition to RSS to ensure the best
1869 	 * distribution of flows across cores, even when an FDIR flow
1870 	 * isn't matched.
1871 	 */
1872 	if (f->indices > 1) {
1873 		f = &wx->ring_feature[RING_F_FDIR];
1874 
1875 		f->indices = f->limit;
1876 
1877 		if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
1878 			set_bit(WX_FLAG_FDIR_HASH, wx->flags);
1879 	}
1880 
1881 out:
1882 	wx->num_rx_queues = f->indices;
1883 	wx->num_tx_queues = f->indices;
1884 }
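/* Example (assuming an FDIR-capable NIC): with an RSS limit of 16 and perfect
 * filters not configured, the function above redirects f to RING_F_FDIR, sets
 * its indices to the FDIR limit and enables WX_FLAG_FDIR_HASH, so the final
 * num_rx_queues/num_tx_queues follow the Flow Director indices rather than
 * the plain RSS count.
 */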
1885 
1886 static void wx_set_num_queues(struct wx *wx)
1887 {
1888 	/* Start with base case */
1889 	wx->num_rx_queues = 1;
1890 	wx->num_tx_queues = 1;
1891 	wx->queues_per_pool = 1;
1892 
1893 	if (wx_set_vmdq_queues(wx))
1894 		return;
1895 
1896 	wx_set_rss_queues(wx);
1897 }
1898 
1899 /**
1900  * wx_acquire_msix_vectors - acquire MSI-X vectors
1901  * @wx: board private structure
1902  *
1903  * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
1904  * return a negative error code if unable to acquire MSI-X vectors for any
1905  * reason.
1906  */
1907 static int wx_acquire_msix_vectors(struct wx *wx)
1908 {
1909 	struct irq_affinity affd = { .post_vectors = 1 };
1910 	int nvecs, i;
1911 
1912 	/* We start by asking for one vector per queue pair */
1913 	nvecs = max(wx->num_rx_queues, wx->num_tx_queues);
1914 	nvecs = min_t(int, nvecs, num_online_cpus());
1915 	nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors);
1916 
1917 	wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry),
1918 				     GFP_KERNEL);
1919 	if (!wx->msix_q_entries)
1920 		return -ENOMEM;
1921 
1922 	/* One for non-queue interrupts */
1923 	nvecs += 1;
1924 
1925 	wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
1926 				 GFP_KERNEL);
1927 	if (!wx->msix_entry) {
1928 		kfree(wx->msix_q_entries);
1929 		wx->msix_q_entries = NULL;
1930 		return -ENOMEM;
1931 	}
1932 
1933 	nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
1934 					       nvecs,
1935 					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
1936 					       &affd);
1937 	if (nvecs < 0) {
1938 		wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
1939 		kfree(wx->msix_q_entries);
1940 		wx->msix_q_entries = NULL;
1941 		kfree(wx->msix_entry);
1942 		wx->msix_entry = NULL;
1943 		return nvecs;
1944 	}
1945 
1946 	nvecs -= 1;
1947 	for (i = 0; i < nvecs; i++) {
1948 		wx->msix_q_entries[i].entry = i;
1949 		wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i);
1950 	}
1951 
1952 	wx->num_q_vectors = nvecs;
1953 
1954 	wx->msix_entry->entry = nvecs;
1955 	wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs);
1956 
1957 	if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags)) {
1958 		wx->msix_entry->entry = 0;
1959 		wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0);
1960 		wx->msix_q_entries[0].entry = 0;
1961 		wx->msix_q_entries[0].vector = pci_irq_vector(wx->pdev, 1);
1962 	}
1963 
1964 	return 0;
1965 }
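/* Illustrative layout (not a guarantee): with 8 queue pairs on a 4-CPU system
 * and ample max_msix_vectors, 4 queue vectors plus 1 misc vector are needed,
 * so exactly 5 MSI-X vectors are requested; entries 0..3 serve the queue
 * vectors and entry 4 carries the miscellaneous (e.g. link/mailbox) interrupt,
 * unless WX_FLAG_IRQ_VECTOR_SHARED folds the misc interrupt onto vector 0.
 */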
1966 
1967 /**
1968  * wx_set_interrupt_capability - set MSI-X or MSI if supported
1969  * @wx: board private structure to initialize
1970  *
1971  * Attempt to configure the interrupts using the best available
1972  * capabilities of the hardware and the kernel.
1973  **/
1974 static int wx_set_interrupt_capability(struct wx *wx)
1975 {
1976 	struct pci_dev *pdev = wx->pdev;
1977 	int nvecs, ret;
1978 
1979 	/* We will try to get MSI-X interrupts first */
1980 	ret = wx_acquire_msix_vectors(wx);
1981 	if (ret == 0 || (ret == -ENOMEM) || pdev->is_virtfn)
1982 		return ret;
1983 
1984 	/* Disable VMDq support */
1985 	dev_warn(&wx->pdev->dev, "Disabling VMDq support\n");
1986 	clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
1987 
1988 	/* Disable RSS */
1989 	dev_warn(&wx->pdev->dev, "Disabling RSS support\n");
1990 	wx->ring_feature[RING_F_RSS].limit = 1;
1991 
1992 	wx_set_num_queues(wx);
1993 
1994 	/* minimum of one vector, shared by queue and misc interrupts */
1995 	nvecs = 1;
1996 	nvecs = pci_alloc_irq_vectors(pdev, nvecs,
1997 				      nvecs, PCI_IRQ_MSI | PCI_IRQ_INTX);
1998 	if (nvecs == 1) {
1999 		if (pdev->msi_enabled)
2000 			wx_err(wx, "Fallback to MSI.\n");
2001 		else
2002 			wx_err(wx, "Fallback to INTx.\n");
2003 	} else {
2004 		wx_err(wx, "Failed to allocate MSI/INTx interrupts. Error: %d\n", nvecs);
2005 		return nvecs;
2006 	}
2007 
2008 	pdev->irq = pci_irq_vector(pdev, 0);
2009 	wx->num_q_vectors = 1;
2010 
2011 	return 0;
2012 }
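/* Fallback sketch: when MSI-X allocation fails on a PF with anything other
 * than -ENOMEM, VMDq and RSS are switched off, the queue counts are
 * recomputed (typically down to a single Tx/Rx queue) and one MSI vector is
 * requested, with INTx as the last resort; in either case a single q_vector
 * remains.
 */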
2013 
2014 static bool wx_cache_ring_vmdq(struct wx *wx)
2015 {
2016 	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
2017 	struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS];
2018 	u16 reg_idx;
2019 	int i;
2020 
2021 	/* only proceed if VMDq is enabled */
2022 	if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
2023 		return false;
2024 
2025 	if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
2026 		/* start at VMDq register offset for SR-IOV enabled setups */
2027 		reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
2028 		for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) {
2029 			/* If we are greater than indices move to next pool */
2030 			if ((reg_idx & ~vmdq->mask) >= rss->indices)
2031 				reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
2032 			wx->rx_ring[i]->reg_idx = reg_idx;
2033 		}
2034 		reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
2035 		for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) {
2036 			/* If we are greater than indices move to next pool */
2037 			if ((reg_idx & rss->mask) >= rss->indices)
2038 				reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
2039 			wx->tx_ring[i]->reg_idx = reg_idx;
2040 		}
2041 	} else {
2042 		/* start at VMDq register offset for SR-IOV enabled setups */
2043 		reg_idx = vmdq->offset;
2044 		for (i = 0; i < wx->num_rx_queues; i++)
2045 			/* rings map 1:1 starting from the pool offset */
2046 			wx->rx_ring[i]->reg_idx = reg_idx + i;
2047 
2048 		reg_idx = vmdq->offset;
2049 		for (i = 0; i < wx->num_tx_queues; i++)
2050 			/* rings map 1:1 starting from the pool offset */
2051 			wx->tx_ring[i]->reg_idx = reg_idx + i;
2052 	}
2053 
2054 	return true;
2055 }
2056 
2057 /**
2058  * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
2059  * @wx: board private structure to initialize
2060  *
2061  * Cache the descriptor ring offsets for RSS and VMDq/SR-IOV.
2062  *
2063  **/
2064 static void wx_cache_ring_rss(struct wx *wx)
2065 {
2066 	u16 i;
2067 
2068 	if (wx_cache_ring_vmdq(wx))
2069 		return;
2070 
2071 	for (i = 0; i < wx->num_rx_queues; i++)
2072 		wx->rx_ring[i]->reg_idx = i;
2073 
2074 	for (i = 0; i < wx->num_tx_queues; i++)
2075 		wx->tx_ring[i]->reg_idx = i;
2076 }
2077 
2078 static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
2079 {
2080 	ring->next = head->ring;
2081 	head->ring = ring;
2082 	head->count++;
2083 }
2084 
2085 /**
2086  * wx_alloc_q_vector - Allocate memory for a single interrupt vector
2087  * @wx: board private structure to initialize
2088  * @v_count: q_vectors allocated on wx, used for ring interleaving
2089  * @v_idx: index of vector in wx struct
2090  * @txr_count: total number of Tx rings to allocate
2091  * @txr_idx: index of first Tx ring to allocate
2092  * @rxr_count: total number of Rx rings to allocate
2093  * @rxr_idx: index of first Rx ring to allocate
2094  *
2095  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
2096  **/
2097 static int wx_alloc_q_vector(struct wx *wx,
2098 			     unsigned int v_count, unsigned int v_idx,
2099 			     unsigned int txr_count, unsigned int txr_idx,
2100 			     unsigned int rxr_count, unsigned int rxr_idx)
2101 {
2102 	struct wx_q_vector *q_vector;
2103 	int ring_count, default_itr;
2104 	struct wx_ring *ring;
2105 
2106 	/* note this will allocate space for the ring structure as well! */
2107 	ring_count = txr_count + rxr_count;
2108 
2109 	q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
2110 			   GFP_KERNEL);
2111 	if (!q_vector)
2112 		return -ENOMEM;
2113 
2114 	/* initialize NAPI */
2115 	netif_napi_add(wx->netdev, &q_vector->napi,
2116 		       wx_poll);
2117 
2118 	/* tie q_vector and wx together */
2119 	wx->q_vector[v_idx] = q_vector;
2120 	q_vector->wx = wx;
2121 	q_vector->v_idx = v_idx;
2122 	if (cpu_online(v_idx))
2123 		q_vector->numa_node = cpu_to_node(v_idx);
2124 
2125 	/* initialize pointer to rings */
2126 	ring = q_vector->ring;
2127 
2128 	switch (wx->mac.type) {
2129 	case wx_mac_sp:
2130 	case wx_mac_aml:
2131 	case wx_mac_aml40:
2132 		default_itr = WX_12K_ITR;
2133 		break;
2134 	default:
2135 		default_itr = WX_7K_ITR;
2136 		break;
2137 	}
2138 
2139 	/* initialize ITR */
2140 	if (txr_count && !rxr_count)
2141 		/* tx only vector */
2142 		q_vector->itr = wx->tx_itr_setting ?
2143 				default_itr : wx->tx_itr_setting;
2144 	else
2145 		/* rx or rx/tx vector */
2146 		q_vector->itr = wx->rx_itr_setting ?
2147 				default_itr : wx->rx_itr_setting;
2148 
2149 	while (txr_count) {
2150 		/* assign generic ring traits */
2151 		ring->dev = &wx->pdev->dev;
2152 		ring->netdev = wx->netdev;
2153 
2154 		/* configure backlink on ring */
2155 		ring->q_vector = q_vector;
2156 
2157 		/* update q_vector Tx values */
2158 		wx_add_ring(ring, &q_vector->tx);
2159 
2160 		/* apply Tx specific ring traits */
2161 		ring->count = wx->tx_ring_count;
2162 
2163 		ring->queue_index = txr_idx;
2164 
2165 		/* assign ring to wx */
2166 		wx->tx_ring[txr_idx] = ring;
2167 
2168 		/* update count and index */
2169 		txr_count--;
2170 		txr_idx += v_count;
2171 
2172 		/* push pointer to next ring */
2173 		ring++;
2174 	}
2175 
2176 	while (rxr_count) {
2177 		/* assign generic ring traits */
2178 		ring->dev = &wx->pdev->dev;
2179 		ring->netdev = wx->netdev;
2180 
2181 		/* configure backlink on ring */
2182 		ring->q_vector = q_vector;
2183 
2184 		/* update q_vector Rx values */
2185 		wx_add_ring(ring, &q_vector->rx);
2186 
2187 		/* apply Rx specific ring traits */
2188 		ring->count = wx->rx_ring_count;
2189 		ring->queue_index = rxr_idx;
2190 
2191 		/* assign ring to wx */
2192 		wx->rx_ring[rxr_idx] = ring;
2193 
2194 		/* update count and index */
2195 		rxr_count--;
2196 		rxr_idx += v_count;
2197 
2198 		/* push pointer to next ring */
2199 		ring++;
2200 	}
2201 
2202 	return 0;
2203 }
2204 
2205 /**
2206  * wx_free_q_vector - Free memory allocated for specific interrupt vector
2207  * @wx: board private structure to initialize
2208  * @v_idx: Index of vector to be freed
2209  *
2210  * This function frees the memory allocated to the q_vector.  In addition if
2211  * NAPI is enabled it will delete any references to the NAPI struct prior
2212  * to freeing the q_vector.
2213  **/
2214 static void wx_free_q_vector(struct wx *wx, int v_idx)
2215 {
2216 	struct wx_q_vector *q_vector = wx->q_vector[v_idx];
2217 	struct wx_ring *ring;
2218 
2219 	wx_for_each_ring(ring, q_vector->tx)
2220 		wx->tx_ring[ring->queue_index] = NULL;
2221 
2222 	wx_for_each_ring(ring, q_vector->rx)
2223 		wx->rx_ring[ring->queue_index] = NULL;
2224 
2225 	wx->q_vector[v_idx] = NULL;
2226 	netif_napi_del(&q_vector->napi);
2227 	kfree_rcu(q_vector, rcu);
2228 }
2229 
2230 /**
2231  * wx_alloc_q_vectors - Allocate memory for interrupt vectors
2232  * @wx: board private structure to initialize
2233  *
2234  * We allocate one q_vector per queue interrupt.  If allocation fails we
2235  * return -ENOMEM.
2236  **/
2237 static int wx_alloc_q_vectors(struct wx *wx)
2238 {
2239 	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
2240 	unsigned int rxr_remaining = wx->num_rx_queues;
2241 	unsigned int txr_remaining = wx->num_tx_queues;
2242 	unsigned int q_vectors = wx->num_q_vectors;
2243 	int rqpv, tqpv;
2244 	int err;
2245 
2246 	for (; v_idx < q_vectors; v_idx++) {
2247 		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
2248 		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
2249 		err = wx_alloc_q_vector(wx, q_vectors, v_idx,
2250 					tqpv, txr_idx,
2251 					rqpv, rxr_idx);
2252 
2253 		if (err)
2254 			goto err_out;
2255 
2256 		/* update counts and index */
2257 		rxr_remaining -= rqpv;
2258 		txr_remaining -= tqpv;
2259 		rxr_idx++;
2260 		txr_idx++;
2261 	}
2262 
2263 	return 0;
2264 
2265 err_out:
2266 	wx->num_tx_queues = 0;
2267 	wx->num_rx_queues = 0;
2268 	wx->num_q_vectors = 0;
2269 
2270 	while (v_idx--)
2271 		wx_free_q_vector(wx, v_idx);
2272 
2273 	return -ENOMEM;
2274 }
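/* Distribution example: with 4 q_vectors and 6 Rx/6 Tx queues, the
 * DIV_ROUND_UP() split above assigns 2, 2, 1 and 1 rings of each kind to
 * vectors 0..3, and ring indices are interleaved with a stride of q_vectors
 * (v_count) inside wx_alloc_q_vector().
 */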
2275 
2276 /**
2277  * wx_free_q_vectors - Free memory allocated for interrupt vectors
2278  * @wx: board private structure to initialize
2279  *
2280  * This function frees the memory allocated to the q_vectors.  In addition if
2281  * NAPI is enabled it will delete any references to the NAPI struct prior
2282  * to freeing the q_vector.
2283  **/
2284 static void wx_free_q_vectors(struct wx *wx)
2285 {
2286 	int v_idx = wx->num_q_vectors;
2287 
2288 	wx->num_tx_queues = 0;
2289 	wx->num_rx_queues = 0;
2290 	wx->num_q_vectors = 0;
2291 
2292 	while (v_idx--)
2293 		wx_free_q_vector(wx, v_idx);
2294 }
2295 
2296 void wx_reset_interrupt_capability(struct wx *wx)
2297 {
2298 	struct pci_dev *pdev = wx->pdev;
2299 
2300 	if (!pdev->msi_enabled && !pdev->msix_enabled)
2301 		return;
2302 
2303 	if (pdev->msix_enabled) {
2304 		kfree(wx->msix_q_entries);
2305 		wx->msix_q_entries = NULL;
2306 		kfree(wx->msix_entry);
2307 		wx->msix_entry = NULL;
2308 	}
2309 	pci_free_irq_vectors(wx->pdev);
2310 }
2311 EXPORT_SYMBOL(wx_reset_interrupt_capability);
2312 
2313 /**
2314  * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
2315  * @wx: board private structure to clear interrupt scheme on
2316  *
2317  * We go through and clear interrupt specific resources and reset the structure
2318  * to pre-load conditions
2319  **/
2320 void wx_clear_interrupt_scheme(struct wx *wx)
2321 {
2322 	wx_free_q_vectors(wx);
2323 	wx_reset_interrupt_capability(wx);
2324 }
2325 EXPORT_SYMBOL(wx_clear_interrupt_scheme);
2326 
2327 int wx_init_interrupt_scheme(struct wx *wx)
2328 {
2329 	int ret;
2330 
2331 	/* Number of supported queues */
2332 	if (wx->pdev->is_virtfn) {
2333 		if (wx->set_num_queues)
2334 			wx->set_num_queues(wx);
2335 	} else {
2336 		wx_set_num_queues(wx);
2337 	}
2338 
2339 	/* Set interrupt mode */
2340 	ret = wx_set_interrupt_capability(wx);
2341 	if (ret) {
2342 		wx_err(wx, "Failed to allocate IRQ vectors.\n");
2343 		return ret;
2344 	}
2345 
2346 	/* Allocate memory for queues */
2347 	ret = wx_alloc_q_vectors(wx);
2348 	if (ret) {
2349 		wx_err(wx, "Unable to allocate memory for queue vectors.\n");
2350 		wx_reset_interrupt_capability(wx);
2351 		return ret;
2352 	}
2353 
2354 	wx_cache_ring_rss(wx);
2355 
2356 	return 0;
2357 }
2358 EXPORT_SYMBOL(wx_init_interrupt_scheme);
2359 
2360 irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
2361 {
2362 	struct wx_q_vector *q_vector = data;
2363 
2364 	/* EIAM disabled interrupts (on this vector) for us */
2365 	if (q_vector->rx.ring || q_vector->tx.ring) {
2366 		napi_schedule_irqoff(&q_vector->napi);
2367 		q_vector->total_events++;
2368 	}
2369 
2370 	return IRQ_HANDLED;
2371 }
2372 EXPORT_SYMBOL(wx_msix_clean_rings);
2373 
2374 void wx_free_irq(struct wx *wx)
2375 {
2376 	struct pci_dev *pdev = wx->pdev;
2377 	int vector;
2378 
2379 	if (!(pdev->msix_enabled)) {
2380 		if (!wx->misc_irq_domain)
2381 			free_irq(pdev->irq, wx);
2382 		return;
2383 	}
2384 
2385 	for (vector = 0; vector < wx->num_q_vectors; vector++) {
2386 		struct wx_q_vector *q_vector = wx->q_vector[vector];
2387 		struct msix_entry *entry = &wx->msix_q_entries[vector];
2388 
2389 		/* free only the irqs that were actually requested */
2390 		if (!q_vector->rx.ring && !q_vector->tx.ring)
2391 			continue;
2392 
2393 		free_irq(entry->vector, q_vector);
2394 	}
2395 
2396 	if (!wx->misc_irq_domain)
2397 		free_irq(wx->msix_entry->vector, wx);
2398 }
2399 EXPORT_SYMBOL(wx_free_irq);
2400 
2401 /**
2402  * wx_setup_isb_resources - allocate interrupt status resources
2403  * @wx: board private structure
2404  *
2405  * Return 0 on success, negative on failure
2406  **/
2407 int wx_setup_isb_resources(struct wx *wx)
2408 {
2409 	struct pci_dev *pdev = wx->pdev;
2410 
2411 	if (wx->isb_mem)
2412 		return 0;
2413 
2414 	wx->isb_mem = dma_alloc_coherent(&pdev->dev,
2415 					 sizeof(u32) * 4,
2416 					 &wx->isb_dma,
2417 					 GFP_KERNEL);
2418 	if (!wx->isb_mem) {
2419 		wx_err(wx, "Alloc isb_mem failed\n");
2420 		return -ENOMEM;
2421 	}
2422 
2423 	return 0;
2424 }
2425 EXPORT_SYMBOL(wx_setup_isb_resources);
2426 
2427 /**
2428  * wx_free_isb_resources - free interrupt status resources
2429  * @wx: board private structure
2430  *
2431  * Free the DMA memory allocated by wx_setup_isb_resources().
2432  **/
2433 void wx_free_isb_resources(struct wx *wx)
2434 {
2435 	struct pci_dev *pdev = wx->pdev;
2436 
2437 	dma_free_coherent(&pdev->dev, sizeof(u32) * 4,
2438 			  wx->isb_mem, wx->isb_dma);
2439 	wx->isb_mem = NULL;
2440 }
2441 EXPORT_SYMBOL(wx_free_isb_resources);
2442 
2443 u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx)
2444 {
2445 	u32 cur_tag = 0;
2446 
2447 	cur_tag = wx->isb_mem[WX_ISB_HEADER];
2448 	wx->isb_tag[idx] = cur_tag;
2449 
2450 	return (__force u32)cpu_to_le32(wx->isb_mem[idx]);
2451 }
2452 EXPORT_SYMBOL(wx_misc_isb);
2453 
2454 /**
2455  * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
2456  * @wx: pointer to wx struct
2457  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
2458  * @queue: queue to map the corresponding interrupt to
2459  * @msix_vector: the vector to map to the corresponding queue
2460  *
2461  **/
2462 static void wx_set_ivar(struct wx *wx, s8 direction,
2463 			u16 queue, u16 msix_vector)
2464 {
2465 	u32 ivar, index;
2466 
2467 	if (direction == -1) {
2468 		/* other causes */
2469 		if (test_bit(WX_FLAG_IRQ_VECTOR_SHARED, wx->flags))
2470 			msix_vector = 0;
2471 		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
2472 		index = 0;
2473 		ivar = rd32(wx, WX_PX_MISC_IVAR);
2474 		ivar &= ~(0xFF << index);
2475 		ivar |= (msix_vector << index);
2476 		wr32(wx, WX_PX_MISC_IVAR, ivar);
2477 	} else {
2478 		/* tx or rx causes */
2479 		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
2480 		index = ((16 * (queue & 1)) + (8 * direction));
2481 		ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
2482 		ivar &= ~(0xFF << index);
2483 		ivar |= (msix_vector << index);
2484 		wr32(wx, WX_PX_IVAR(queue >> 1), ivar);
2485 	}
2486 }
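/* IVAR mapping example: each WX_PX_IVAR register holds four 8-bit entries
 * covering one queue pair.  For Rx queue 5 (direction 0) the bit offset is
 * 16 * (5 & 1) + 8 * 0 = 16 and for Tx queue 5 (direction 1) it is 24, both
 * within WX_PX_IVAR(2); the value written is the MSI-X vector index with
 * WX_PX_IVAR_ALLOC_VAL set.
 */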
2487 
2488 /**
2489  * wx_write_eitr - write EITR register in hardware specific way
2490  * @q_vector: structure containing interrupt and ring information
2491  *
2492  * This function is made to be called by ethtool and by the driver
2493  * when it needs to update EITR registers at runtime.  Hardware
2494  * specific quirks/differences are taken care of here.
2495  */
2496 void wx_write_eitr(struct wx_q_vector *q_vector)
2497 {
2498 	struct wx *wx = q_vector->wx;
2499 	int v_idx = q_vector->v_idx;
2500 	u32 itr_reg;
2501 
2502 	switch (wx->mac.type) {
2503 	case wx_mac_sp:
2504 		itr_reg = q_vector->itr & WX_SP_MAX_EITR;
2505 		break;
2506 	case wx_mac_aml:
2507 	case wx_mac_aml40:
2508 		itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
2509 		break;
2510 	default:
2511 		itr_reg = q_vector->itr & WX_EM_MAX_EITR;
2512 		break;
2513 	}
2514 
2515 	itr_reg |= WX_PX_ITR_CNT_WDIS;
2516 
2517 	wr32(wx, WX_PX_ITR(v_idx), itr_reg);
2518 }
2519 
2520 /**
2521  * wx_configure_vectors - Configure vectors for hardware
2522  * @wx: board private structure
2523  *
2524  * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/INTx
2525  * interrupts.
2526  **/
2527 void wx_configure_vectors(struct wx *wx)
2528 {
2529 	struct pci_dev *pdev = wx->pdev;
2530 	u32 eitrsel = 0;
2531 	u16 v_idx, i;
2532 
2533 	if (pdev->msix_enabled) {
2534 		/* Populate MSIX to EITR Select */
2535 		if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
2536 			if (wx->num_vfs >= 32)
2537 				eitrsel = BIT(wx->num_vfs % 32) - 1;
2538 		} else {
2539 			for (i = 0; i < wx->num_vfs; i++)
2540 				eitrsel |= BIT(i);
2541 		}
2542 		wr32(wx, WX_PX_ITRSEL, eitrsel);
2543 		/* use EIAM to auto-mask when MSI-X interrupt is asserted
2544 		 * this saves a register write for every interrupt
2545 		 */
2546 		wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL);
2547 	} else {
2548 		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
2549 		 * specifically only auto mask tx and rx interrupts.
2550 		 */
2551 		wr32(wx, WX_PX_GPIE, 0);
2552 	}
2553 
2554 	/* Populate the IVAR table and set the ITR values to the
2555 	 * corresponding register.
2556 	 */
2557 	for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
2558 		struct wx_q_vector *q_vector = wx->q_vector[v_idx];
2559 		struct wx_ring *ring;
2560 
2561 		wx_for_each_ring(ring, q_vector->rx)
2562 			wx_set_ivar(wx, 0, ring->reg_idx, v_idx);
2563 
2564 		wx_for_each_ring(ring, q_vector->tx)
2565 			wx_set_ivar(wx, 1, ring->reg_idx, v_idx);
2566 
2567 		wx_write_eitr(q_vector);
2568 	}
2569 
2570 	wx_set_ivar(wx, -1, 0, v_idx);
2571 	if (pdev->msix_enabled)
2572 		wr32(wx, WX_PX_ITR(v_idx), 1950);
2573 }
2574 EXPORT_SYMBOL(wx_configure_vectors);
2575 
2576 /**
2577  * wx_clean_rx_ring - Free Rx Buffers per Queue
2578  * @rx_ring: ring to free buffers from
2579  **/
2580 static void wx_clean_rx_ring(struct wx_ring *rx_ring)
2581 {
2582 	struct wx_rx_buffer *rx_buffer;
2583 	u16 i = rx_ring->next_to_clean;
2584 
2585 	rx_buffer = &rx_ring->rx_buffer_info[i];
2586 
2587 	/* Free all the Rx ring sk_buffs */
2588 	while (i != rx_ring->next_to_alloc) {
2589 		if (rx_buffer->skb) {
2590 			struct sk_buff *skb = rx_buffer->skb;
2591 
2592 			dev_kfree_skb(skb);
2593 		}
2594 
2595 		/* Invalidate cache lines that may have been written to by
2596 		 * device so that we avoid corrupting memory.
2597 		 */
2598 		dma_sync_single_range_for_cpu(rx_ring->dev,
2599 					      rx_buffer->dma,
2600 					      rx_buffer->page_offset,
2601 					      rx_ring->rx_buf_len,
2602 					      DMA_FROM_DEVICE);
2603 
2604 		/* free resources associated with mapping */
2605 		page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
2606 
2607 		i++;
2608 		rx_buffer++;
2609 		if (i == rx_ring->count) {
2610 			i = 0;
2611 			rx_buffer = rx_ring->rx_buffer_info;
2612 		}
2613 	}
2614 
2615 	/* Zero out the descriptor ring */
2616 	memset(rx_ring->desc, 0, rx_ring->size);
2617 
2618 	rx_ring->next_to_alloc = 0;
2619 	rx_ring->next_to_clean = 0;
2620 	rx_ring->next_to_use = 0;
2621 }
2622 
2623 /**
2624  * wx_clean_all_rx_rings - Free Rx Buffers for all queues
2625  * @wx: board private structure
2626  **/
2627 void wx_clean_all_rx_rings(struct wx *wx)
2628 {
2629 	int i;
2630 
2631 	for (i = 0; i < wx->num_rx_queues; i++)
2632 		wx_clean_rx_ring(wx->rx_ring[i]);
2633 }
2634 EXPORT_SYMBOL(wx_clean_all_rx_rings);
2635 
2636 /**
2637  * wx_free_rx_resources - Free Rx Resources
2638  * @rx_ring: ring to clean the resources from
2639  *
2640  * Free all receive software resources
2641  **/
2642 static void wx_free_rx_resources(struct wx_ring *rx_ring)
2643 {
2644 	wx_clean_rx_ring(rx_ring);
2645 	kvfree(rx_ring->rx_buffer_info);
2646 	rx_ring->rx_buffer_info = NULL;
2647 
2648 	/* if not set, then don't free */
2649 	if (!rx_ring->desc)
2650 		return;
2651 
2652 	dma_free_coherent(rx_ring->dev, rx_ring->size,
2653 			  rx_ring->desc, rx_ring->dma);
2654 
2655 	rx_ring->desc = NULL;
2656 
2657 	if (rx_ring->page_pool) {
2658 		page_pool_destroy(rx_ring->page_pool);
2659 		rx_ring->page_pool = NULL;
2660 	}
2661 }
2662 
2663 /**
2664  * wx_free_all_rx_resources - Free Rx Resources for All Queues
2665  * @wx: pointer to hardware structure
2666  *
2667  * Free all receive software resources
2668  **/
2669 static void wx_free_all_rx_resources(struct wx *wx)
2670 {
2671 	int i;
2672 
2673 	for (i = 0; i < wx->num_rx_queues; i++)
2674 		wx_free_rx_resources(wx->rx_ring[i]);
2675 }
2676 
2677 /**
2678  * wx_clean_tx_ring - Free Tx Buffers
2679  * @tx_ring: ring to be cleaned
2680  **/
2681 static void wx_clean_tx_ring(struct wx_ring *tx_ring)
2682 {
2683 	struct wx_tx_buffer *tx_buffer;
2684 	u16 i = tx_ring->next_to_clean;
2685 
2686 	tx_buffer = &tx_ring->tx_buffer_info[i];
2687 
2688 	while (i != tx_ring->next_to_use) {
2689 		union wx_tx_desc *eop_desc, *tx_desc;
2690 
2691 		/* Free all the Tx ring sk_buffs */
2692 		dev_kfree_skb_any(tx_buffer->skb);
2693 
2694 		/* unmap skb header data */
2695 		dma_unmap_single(tx_ring->dev,
2696 				 dma_unmap_addr(tx_buffer, dma),
2697 				 dma_unmap_len(tx_buffer, len),
2698 				 DMA_TO_DEVICE);
2699 
2700 		/* check for eop_desc to determine the end of the packet */
2701 		eop_desc = tx_buffer->next_to_watch;
2702 		tx_desc = WX_TX_DESC(tx_ring, i);
2703 
2704 		/* unmap remaining buffers */
2705 		while (tx_desc != eop_desc) {
2706 			tx_buffer++;
2707 			tx_desc++;
2708 			i++;
2709 			if (unlikely(i == tx_ring->count)) {
2710 				i = 0;
2711 				tx_buffer = tx_ring->tx_buffer_info;
2712 				tx_desc = WX_TX_DESC(tx_ring, 0);
2713 			}
2714 
2715 			/* unmap any remaining paged data */
2716 			if (dma_unmap_len(tx_buffer, len))
2717 				dma_unmap_page(tx_ring->dev,
2718 					       dma_unmap_addr(tx_buffer, dma),
2719 					       dma_unmap_len(tx_buffer, len),
2720 					       DMA_TO_DEVICE);
2721 		}
2722 
2723 		/* move us one more past the eop_desc for start of next pkt */
2724 		tx_buffer++;
2725 		i++;
2726 		if (unlikely(i == tx_ring->count)) {
2727 			i = 0;
2728 			tx_buffer = tx_ring->tx_buffer_info;
2729 		}
2730 	}
2731 
2732 	netdev_tx_reset_queue(wx_txring_txq(tx_ring));
2733 
2734 	/* reset next_to_use and next_to_clean */
2735 	tx_ring->next_to_use = 0;
2736 	tx_ring->next_to_clean = 0;
2737 }
2738 
2739 /**
2740  * wx_clean_all_tx_rings - Free Tx Buffers for all queues
2741  * @wx: board private structure
2742  **/
2743 void wx_clean_all_tx_rings(struct wx *wx)
2744 {
2745 	int i;
2746 
2747 	for (i = 0; i < wx->num_tx_queues; i++)
2748 		wx_clean_tx_ring(wx->tx_ring[i]);
2749 }
2750 EXPORT_SYMBOL(wx_clean_all_tx_rings);
2751 
2752 static void wx_free_headwb_resources(struct wx_ring *tx_ring)
2753 {
2754 	if (!tx_ring->headwb_mem)
2755 		return;
2756 
2757 	dma_free_coherent(tx_ring->dev, sizeof(u32),
2758 			  tx_ring->headwb_mem, tx_ring->headwb_dma);
2759 	tx_ring->headwb_mem = NULL;
2760 }
2761 
2762 /**
2763  * wx_free_tx_resources - Free Tx Resources per Queue
2764  * @tx_ring: Tx descriptor ring for a specific queue
2765  *
2766  * Free all transmit software resources
2767  **/
2768 static void wx_free_tx_resources(struct wx_ring *tx_ring)
2769 {
2770 	wx_clean_tx_ring(tx_ring);
2771 	kvfree(tx_ring->tx_buffer_info);
2772 	tx_ring->tx_buffer_info = NULL;
2773 
2774 	/* if not set, then don't free */
2775 	if (!tx_ring->desc)
2776 		return;
2777 
2778 	dma_free_coherent(tx_ring->dev, tx_ring->size,
2779 			  tx_ring->desc, tx_ring->dma);
2780 	tx_ring->desc = NULL;
2781 
2782 	wx_free_headwb_resources(tx_ring);
2783 }
2784 
2785 /**
2786  * wx_free_all_tx_resources - Free Tx Resources for All Queues
2787  * @wx: pointer to hardware structure
2788  *
2789  * Free all transmit software resources
2790  **/
2791 static void wx_free_all_tx_resources(struct wx *wx)
2792 {
2793 	int i;
2794 
2795 	for (i = 0; i < wx->num_tx_queues; i++)
2796 		wx_free_tx_resources(wx->tx_ring[i]);
2797 }
2798 
2799 void wx_free_resources(struct wx *wx)
2800 {
2801 	wx_free_all_rx_resources(wx);
2802 	wx_free_all_tx_resources(wx);
2803 }
2804 EXPORT_SYMBOL(wx_free_resources);
2805 
2806 static int wx_alloc_page_pool(struct wx_ring *rx_ring)
2807 {
2808 	int ret = 0;
2809 
2810 	struct page_pool_params pp_params = {
2811 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
2812 		.order = wx_rx_pg_order(rx_ring),
2813 		.pool_size = rx_ring->count * rx_ring->rx_buf_len /
2814 			     wx_rx_pg_size(rx_ring),
2815 		.nid = dev_to_node(rx_ring->dev),
2816 		.dev = rx_ring->dev,
2817 		.dma_dir = DMA_FROM_DEVICE,
2818 		.offset = 0,
2819 		.max_len = wx_rx_pg_size(rx_ring),
2820 	};
2821 
2822 	rx_ring->page_pool = page_pool_create(&pp_params);
2823 	if (IS_ERR(rx_ring->page_pool)) {
2824 		ret = PTR_ERR(rx_ring->page_pool);
2825 		rx_ring->page_pool = NULL;
2826 	}
2827 
2828 	return ret;
2829 }
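/* Sizing sketch (example values only): with 1024 descriptors, a 2048-byte
 * rx_buf_len and order-0 (4 KiB) pages, pool_size works out to
 * 1024 * 2048 / 4096 = 512 pages, i.e. two receive buffers are expected to
 * share each recycled page.
 */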
2830 
2831 /**
2832  * wx_setup_rx_resources - allocate Rx resources (Descriptors)
2833  * @rx_ring: rx descriptor ring (for a specific queue) to setup
2834  *
2835  * Returns 0 on success, negative on failure
2836  **/
2837 static int wx_setup_rx_resources(struct wx_ring *rx_ring)
2838 {
2839 	struct device *dev = rx_ring->dev;
2840 	int orig_node = dev_to_node(dev);
2841 	int numa_node = NUMA_NO_NODE;
2842 	int size, ret;
2843 
2844 	size = sizeof(struct wx_rx_buffer) * rx_ring->count;
2845 
2846 	if (rx_ring->q_vector)
2847 		numa_node = rx_ring->q_vector->numa_node;
2848 
2849 	rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
2850 	if (!rx_ring->rx_buffer_info)
2851 		rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
2852 	if (!rx_ring->rx_buffer_info)
2853 		goto err;
2854 
2855 	/* Round up to nearest 4K */
2856 	rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
2857 	rx_ring->size = ALIGN(rx_ring->size, 4096);
2858 
2859 	set_dev_node(dev, numa_node);
2860 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
2861 					   &rx_ring->dma, GFP_KERNEL);
2862 	if (!rx_ring->desc) {
2863 		set_dev_node(dev, orig_node);
2864 		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
2865 						   &rx_ring->dma, GFP_KERNEL);
2866 	}
2867 
2868 	if (!rx_ring->desc)
2869 		goto err;
2870 
2871 	rx_ring->next_to_clean = 0;
2872 	rx_ring->next_to_use = 0;
2873 
2874 	ret = wx_alloc_page_pool(rx_ring);
2875 	if (ret < 0) {
2876 		dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
2877 		goto err_desc;
2878 	}
2879 
2880 	return 0;
2881 
2882 err_desc:
2883 	dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2884 err:
2885 	kvfree(rx_ring->rx_buffer_info);
2886 	rx_ring->rx_buffer_info = NULL;
2887 	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
2888 	return -ENOMEM;
2889 }
2890 
2891 /**
2892  * wx_setup_all_rx_resources - allocate all queues Rx resources
2893  * @wx: pointer to hardware structure
2894  *
2895  * If this function returns with an error, then it's possible one or
2896  * more of the rings is populated (while the rest are not).  It is the
2897  * caller's duty to clean up those orphaned rings.
2898  *
2899  * Return 0 on success, negative on failure
2900  **/
2901 static int wx_setup_all_rx_resources(struct wx *wx)
2902 {
2903 	int i, err = 0;
2904 
2905 	for (i = 0; i < wx->num_rx_queues; i++) {
2906 		err = wx_setup_rx_resources(wx->rx_ring[i]);
2907 		if (!err)
2908 			continue;
2909 
2910 		wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
2911 		goto err_setup_rx;
2912 	}
2913 
2914 	return 0;
2915 err_setup_rx:
2916 	/* rewind the index freeing the rings as we go */
2917 	while (i--)
2918 		wx_free_rx_resources(wx->rx_ring[i]);
2919 	return err;
2920 }
2921 
2922 static void wx_setup_headwb_resources(struct wx_ring *tx_ring)
2923 {
2924 	struct wx *wx = netdev_priv(tx_ring->netdev);
2925 
2926 	if (!test_bit(WX_FLAG_TXHEAD_WB_ENABLED, wx->flags))
2927 		return;
2928 
2929 	if (!tx_ring->q_vector)
2930 		return;
2931 
2932 	tx_ring->headwb_mem = dma_alloc_coherent(tx_ring->dev,
2933 						 sizeof(u32),
2934 						 &tx_ring->headwb_dma,
2935 						 GFP_KERNEL);
2936 	if (!tx_ring->headwb_mem)
2937 		dev_info(tx_ring->dev, "Failed to allocate headwb memory, disabling it\n");
2938 }
2939 
2940 /**
2941  * wx_setup_tx_resources - allocate Tx resources (Descriptors)
2942  * @tx_ring: tx descriptor ring (for a specific queue) to setup
2943  *
2944  * Return 0 on success, negative on failure
2945  **/
2946 static int wx_setup_tx_resources(struct wx_ring *tx_ring)
2947 {
2948 	struct device *dev = tx_ring->dev;
2949 	int orig_node = dev_to_node(dev);
2950 	int numa_node = NUMA_NO_NODE;
2951 	int size;
2952 
2953 	size = sizeof(struct wx_tx_buffer) * tx_ring->count;
2954 
2955 	if (tx_ring->q_vector)
2956 		numa_node = tx_ring->q_vector->numa_node;
2957 
2958 	tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
2959 	if (!tx_ring->tx_buffer_info)
2960 		tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
2961 	if (!tx_ring->tx_buffer_info)
2962 		goto err;
2963 
2964 	/* round up to nearest 4K */
2965 	tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
2966 	tx_ring->size = ALIGN(tx_ring->size, 4096);
2967 
2968 	set_dev_node(dev, numa_node);
2969 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2970 					   &tx_ring->dma, GFP_KERNEL);
2971 	if (!tx_ring->desc) {
2972 		set_dev_node(dev, orig_node);
2973 		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2974 						   &tx_ring->dma, GFP_KERNEL);
2975 	}
2976 
2977 	if (!tx_ring->desc)
2978 		goto err;
2979 
2980 	wx_setup_headwb_resources(tx_ring);
2981 
2982 	tx_ring->next_to_use = 0;
2983 	tx_ring->next_to_clean = 0;
2984 
2985 	return 0;
2986 
2987 err:
2988 	kvfree(tx_ring->tx_buffer_info);
2989 	tx_ring->tx_buffer_info = NULL;
2990 	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
2991 	return -ENOMEM;
2992 }
2993 
2994 /**
2995  * wx_setup_all_tx_resources - allocate all queues Tx resources
2996  * @wx: pointer to private structure
2997  *
2998  * If this function returns with an error, then it's possible one or
2999  * more of the rings is populated (while the rest are not).  It is the
3000  * caller's duty to clean up those orphaned rings.
3001  *
3002  * Return 0 on success, negative on failure
3003  **/
3004 static int wx_setup_all_tx_resources(struct wx *wx)
3005 {
3006 	int i, err = 0;
3007 
3008 	for (i = 0; i < wx->num_tx_queues; i++) {
3009 		err = wx_setup_tx_resources(wx->tx_ring[i]);
3010 		if (!err)
3011 			continue;
3012 
3013 		wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
3014 		goto err_setup_tx;
3015 	}
3016 
3017 	return 0;
3018 err_setup_tx:
3019 	/* rewind the index freeing the rings as we go */
3020 	while (i--)
3021 		wx_free_tx_resources(wx->tx_ring[i]);
3022 	return err;
3023 }
3024 
3025 int wx_setup_resources(struct wx *wx)
3026 {
3027 	int err;
3028 
3029 	/* allocate transmit descriptors */
3030 	err = wx_setup_all_tx_resources(wx);
3031 	if (err)
3032 		return err;
3033 
3034 	/* allocate receive descriptors */
3035 	err = wx_setup_all_rx_resources(wx);
3036 	if (err)
3037 		goto err_free_tx;
3038 
3039 	err = wx_setup_isb_resources(wx);
3040 	if (err)
3041 		goto err_free_rx;
3042 
3043 	return 0;
3044 
3045 err_free_rx:
3046 	wx_free_all_rx_resources(wx);
3047 err_free_tx:
3048 	wx_free_all_tx_resources(wx);
3049 
3050 	return err;
3051 }
3052 EXPORT_SYMBOL(wx_setup_resources);
3053 
3054 /**
3055  * wx_get_stats64 - Get System Network Statistics
3056  * @netdev: network interface device structure
3057  * @stats: storage space for 64bit statistics
3058  */
3059 void wx_get_stats64(struct net_device *netdev,
3060 		    struct rtnl_link_stats64 *stats)
3061 {
3062 	struct wx *wx = netdev_priv(netdev);
3063 	struct wx_hw_stats *hwstats;
3064 	int i;
3065 
3066 	wx_update_stats(wx);
3067 
3068 	rcu_read_lock();
3069 	for (i = 0; i < wx->num_rx_queues; i++) {
3070 		struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]);
3071 		u64 bytes, packets;
3072 		unsigned int start;
3073 
3074 		if (ring) {
3075 			do {
3076 				start = u64_stats_fetch_begin(&ring->syncp);
3077 				packets = ring->stats.packets;
3078 				bytes   = ring->stats.bytes;
3079 			} while (u64_stats_fetch_retry(&ring->syncp, start));
3080 			stats->rx_packets += packets;
3081 			stats->rx_bytes   += bytes;
3082 		}
3083 	}
3084 
3085 	for (i = 0; i < wx->num_tx_queues; i++) {
3086 		struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
3087 		u64 bytes, packets;
3088 		unsigned int start;
3089 
3090 		if (ring) {
3091 			do {
3092 				start = u64_stats_fetch_begin(&ring->syncp);
3093 				packets = ring->stats.packets;
3094 				bytes   = ring->stats.bytes;
3095 			} while (u64_stats_fetch_retry(&ring->syncp,
3096 						       start));
3097 			stats->tx_packets += packets;
3098 			stats->tx_bytes   += bytes;
3099 		}
3100 	}
3101 
3102 	rcu_read_unlock();
3103 
3104 	hwstats = &wx->stats;
3105 	stats->rx_errors = hwstats->crcerrs + hwstats->rlec;
3106 	stats->multicast = hwstats->qmprc;
3107 	stats->rx_length_errors = hwstats->rlec;
3108 	stats->rx_crc_errors = hwstats->crcerrs;
3109 }
3110 EXPORT_SYMBOL(wx_get_stats64);
3111 
3112 int wx_set_features(struct net_device *netdev, netdev_features_t features)
3113 {
3114 	netdev_features_t changed = netdev->features ^ features;
3115 	struct wx *wx = netdev_priv(netdev);
3116 	bool need_reset = false;
3117 
3118 	wx->rss_enabled = !!(features & NETIF_F_RXHASH);
3119 	wx_enable_rss(wx, wx->rss_enabled);
3120 
3121 	netdev->features = features;
3122 
3123 	if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset)
3124 		wx->do_reset(netdev);
3125 	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
3126 		wx_set_rx_mode(netdev);
3127 
3128 	if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
3129 		if (!(features & NETIF_F_LRO)) {
3130 			if (test_bit(WX_FLAG_RSC_ENABLED, wx->flags))
3131 				need_reset = true;
3132 			clear_bit(WX_FLAG_RSC_ENABLED, wx->flags);
3133 		} else if (!(test_bit(WX_FLAG_RSC_ENABLED, wx->flags))) {
3134 			if (wx->rx_itr_setting == 1 ||
3135 			    wx->rx_itr_setting > WX_MIN_RSC_ITR) {
3136 				set_bit(WX_FLAG_RSC_ENABLED, wx->flags);
3137 				need_reset = true;
3138 			} else if (changed & NETIF_F_LRO) {
3139 				dev_info(&wx->pdev->dev,
3140 					 "rx-usecs set too low, disable RSC\n");
3141 			}
3142 		}
3143 	}
3144 
3145 	if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
3146 		goto out;
3147 
3148 	/* Check if Flow Director n-tuple support was enabled or disabled.  If
3149 	 * the state changed, we need to reset.
3150 	 */
3151 	switch (features & NETIF_F_NTUPLE) {
3152 	case NETIF_F_NTUPLE:
3153 		/* turn off ATR, enable perfect filters and reset */
3154 		if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
3155 			need_reset = true;
3156 
3157 		clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
3158 		break;
3159 	default:
3160 		/* turn off perfect filters, enable ATR and reset */
3161 		if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
3162 			need_reset = true;
3163 
3164 		/* We cannot enable ATR if RSS is disabled */
3165 		if (wx->ring_feature[RING_F_RSS].limit <= 1)
3166 			break;
3167 
3168 		set_bit(WX_FLAG_FDIR_HASH, wx->flags);
3169 		break;
3170 	}
3171 
3172 out:
3173 	if (need_reset && wx->do_reset)
3174 		wx->do_reset(netdev);
3175 
3176 	return 0;
3177 }
3178 EXPORT_SYMBOL(wx_set_features);
3179 
3180 #define NETIF_VLAN_STRIPPING_FEATURES	(NETIF_F_HW_VLAN_CTAG_RX | \
3181 					 NETIF_F_HW_VLAN_STAG_RX)
3182 
3183 #define NETIF_VLAN_INSERTION_FEATURES	(NETIF_F_HW_VLAN_CTAG_TX | \
3184 					 NETIF_F_HW_VLAN_STAG_TX)
3185 
3186 #define NETIF_VLAN_FILTERING_FEATURES	(NETIF_F_HW_VLAN_CTAG_FILTER | \
3187 					 NETIF_F_HW_VLAN_STAG_FILTER)
3188 
3189 netdev_features_t wx_fix_features(struct net_device *netdev,
3190 				  netdev_features_t features)
3191 {
3192 	netdev_features_t changed = netdev->features ^ features;
3193 	struct wx *wx = netdev_priv(netdev);
3194 
3195 	if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
3196 		if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
3197 		    (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
3198 			features &= ~NETIF_VLAN_STRIPPING_FEATURES;
3199 			features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
3200 			wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
3201 		}
3202 	}
3203 
3204 	if (changed & NETIF_VLAN_INSERTION_FEATURES) {
3205 		if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
3206 		    (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
3207 			features &= ~NETIF_VLAN_INSERTION_FEATURES;
3208 			features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
3209 			wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
3210 		}
3211 	}
3212 
3213 	if (changed & NETIF_VLAN_FILTERING_FEATURES) {
3214 		if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
3215 		    (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
3216 			features &= ~NETIF_VLAN_FILTERING_FEATURES;
3217 			features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
3218 			wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
3219 		}
3220 	}
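	/* Example of the pairing rule above: if both CTAG and STAG stripping
	 * are currently on and a request clears only the CTAG bit, the mixed
	 * state is rejected and both stripping bits are restored from
	 * netdev->features.
	 */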
3221 
3222 	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
3223 	if (!(features & NETIF_F_RXCSUM))
3224 		features &= ~NETIF_F_LRO;
3225 
3226 	/* Turn off LRO if not RSC capable */
3227 	if (!test_bit(WX_FLAG_RSC_CAPABLE, wx->flags))
3228 		features &= ~NETIF_F_LRO;
3229 
3230 	return features;
3231 }
3232 EXPORT_SYMBOL(wx_fix_features);
3233 
3234 #define WX_MAX_TUNNEL_HDR_LEN	80
3235 netdev_features_t wx_features_check(struct sk_buff *skb,
3236 				    struct net_device *netdev,
3237 				    netdev_features_t features)
3238 {
3239 	struct wx *wx = netdev_priv(netdev);
3240 
3241 	if (!skb->encapsulation)
3242 		return features;
3243 
3244 	if (wx->mac.type == wx_mac_em)
3245 		return features & ~NETIF_F_CSUM_MASK;
3246 
3247 	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
3248 		     WX_MAX_TUNNEL_HDR_LEN))
3249 		return features & ~NETIF_F_CSUM_MASK;
3250 
3251 	if (skb->inner_protocol_type == ENCAP_TYPE_ETHER &&
3252 	    skb->inner_protocol != htons(ETH_P_IP) &&
3253 	    skb->inner_protocol != htons(ETH_P_IPV6) &&
3254 	    skb->inner_protocol != htons(ETH_P_TEB))
3255 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3256 
3257 	return features;
3258 }
3259 EXPORT_SYMBOL(wx_features_check);
3260 
3261 void wx_set_ring(struct wx *wx, u32 new_tx_count,
3262 		 u32 new_rx_count, struct wx_ring *temp_ring)
3263 {
3264 	int i, err = 0;
3265 
3266 	/* Setup new Tx resources and free the old Tx resources in that order.
3267 	 * We can then assign the new resources to the rings via a memcpy.
3268 	 * The advantage to this approach is that we are guaranteed to still
3269 	 * have resources even in the case of an allocation failure.
3270 	 */
3271 	if (new_tx_count != wx->tx_ring_count) {
3272 		for (i = 0; i < wx->num_tx_queues; i++) {
3273 			memcpy(&temp_ring[i], wx->tx_ring[i],
3274 			       sizeof(struct wx_ring));
3275 
3276 			temp_ring[i].count = new_tx_count;
3277 			err = wx_setup_tx_resources(&temp_ring[i]);
3278 			if (err) {
3279 				wx_err(wx, "setup new tx resources failed, keep using the old config\n");
3280 				while (i) {
3281 					i--;
3282 					wx_free_tx_resources(&temp_ring[i]);
3283 				}
3284 				return;
3285 			}
3286 		}
3287 
3288 		for (i = 0; i < wx->num_tx_queues; i++) {
3289 			wx_free_tx_resources(wx->tx_ring[i]);
3290 
3291 			memcpy(wx->tx_ring[i], &temp_ring[i],
3292 			       sizeof(struct wx_ring));
3293 		}
3294 
3295 		wx->tx_ring_count = new_tx_count;
3296 	}
3297 
3298 	/* Repeat the process for the Rx rings if needed */
3299 	if (new_rx_count != wx->rx_ring_count) {
3300 		for (i = 0; i < wx->num_rx_queues; i++) {
3301 			memcpy(&temp_ring[i], wx->rx_ring[i],
3302 			       sizeof(struct wx_ring));
3303 
3304 			temp_ring[i].count = new_rx_count;
3305 			err = wx_setup_rx_resources(&temp_ring[i]);
3306 			if (err) {
3307 				wx_err(wx, "setup new rx resources failed, keep using the old config\n");
3308 				while (i) {
3309 					i--;
3310 					wx_free_rx_resources(&temp_ring[i]);
3311 				}
3312 				return;
3313 			}
3314 		}
3315 
3316 		for (i = 0; i < wx->num_rx_queues; i++) {
3317 			wx_free_rx_resources(wx->rx_ring[i]);
3318 			memcpy(wx->rx_ring[i], &temp_ring[i],
3319 			       sizeof(struct wx_ring));
3320 		}
3321 
3322 		wx->rx_ring_count = new_rx_count;
3323 	}
3324 }
3325 EXPORT_SYMBOL(wx_set_ring);
3326 
3327 void wx_service_event_schedule(struct wx *wx)
3328 {
3329 	if (!test_and_set_bit(WX_STATE_SERVICE_SCHED, wx->state))
3330 		queue_work(system_power_efficient_wq, &wx->service_task);
3331 }
3332 EXPORT_SYMBOL(wx_service_event_schedule);
3333 
3334 void wx_service_event_complete(struct wx *wx)
3335 {
3336 	if (WARN_ON(!test_bit(WX_STATE_SERVICE_SCHED, wx->state)))
3337 		return;
3338 
3339 	/* flush memory to make sure state is correct before next watchdog */
3340 	smp_mb__before_atomic();
3341 	clear_bit(WX_STATE_SERVICE_SCHED, wx->state);
3342 }
3343 EXPORT_SYMBOL(wx_service_event_complete);
3344 
3345 void wx_service_timer(struct timer_list *t)
3346 {
3347 	struct wx *wx = timer_container_of(wx, t, service_timer);
3348 	unsigned long next_event_offset = HZ * 2;
3349 
3350 	/* Reset the timer */
3351 	mod_timer(&wx->service_timer, next_event_offset + jiffies);
3352 
3353 	wx_service_event_schedule(wx);
3354 }
3355 EXPORT_SYMBOL(wx_service_timer);
3356 
3357 MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers.");
3358 MODULE_LICENSE("GPL");
3359