// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */

#include <linux/string.h>
#include <linux/types.h>
#include <linux/pci.h>

#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_fdir.h"

/* These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling TXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15.
 */
#define TXGBE_ATR_COMMON_HASH_KEY \
		(TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY)
#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
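
/* Each iteration n of the macro above inspects key bits n and (n + 16).
 * TXGBE_ATR_COMMON_HASH_KEY is the AND of the bucket and signature keys,
 * so a bit set in both keys is folded into common_hash once and applied
 * to both results afterwards, instead of being folded twice.  For
 * example, if bit 5 is set in the bucket key only, iteration 5 XORs
 * (lo_hash_dword >> 5) into bucket_hash and leaves the other
 * accumulators untouched.
 */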

/**
 *  txgbe_atr_compute_sig_hash - Compute the signature hash
 *  @input: input bitstream to compute the hash on
 *  @common: compressed common input dword
 *  @hash: pointer to the computed hash
 *
 *  This function is an unrolled version of the generic ATR hash
 *  computation: all of the loops are unwound, the compiler resolves the
 *  conditional ifs at build time since the keys are static defines, and
 *  two keys are computed at once since the hashed dword stream is the
 *  same for both keys.
 **/
static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
				       union txgbe_atr_hash_dword common,
				       u32 *hash)
{
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 i;

	/* record the flow_vm_vlan bits as they are a key part of the hash */
	flow_vm_vlan = ntohl(input.dword);

	/* generate common hash dword */
	hi_hash_dword = ntohl(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	TXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/* apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this had
	 * to be delayed because bit 0 of the stream should not be processed
	 * with the VLAN applied, so the VLAN is only added after bit 0 has
	 * been processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		TXGBE_COMPUTE_SIG_HASH_ITERATION(i);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= TXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= TXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	*hash = sig_hash ^ bucket_hash;
}
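
/* The combined result above carries the signature hash in the upper
 * 16 bits and the bucket hash in the lower bits, matching the layout
 * txgbe_fdir_add_signature_filter() writes to the FDIRHASH register.
 */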

#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)
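
/* Bucket-only variant of TXGBE_COMPUTE_SIG_HASH_ITERATION: only the
 * bucket key is consulted, since perfect filters are matched on the
 * bucket hash alone and need no signature hash.
 */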

/**
 *  txgbe_atr_compute_perfect_hash - Compute the perfect filter hash
 *  @input: input bitstream to compute the hash on
 *  @input_mask: mask for the input bitstream
 *
 *  This function serves two main purposes.  First, it applies the
 *  input_mask to the atr_input, resulting in a cleaned up atr_input
 *  data stream.  Second, it computes the hash and stores it in the
 *  bkt_hash field at the end of the input byte stream.  This way it
 *  will be available for future use without needing to recompute the
 *  hash.
 **/
void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
				    union txgbe_atr_input *input_mask)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	__be32 hi_dword = 0;
	u32 i = 0;

	/* Apply masks to input data */
	for (i = 0; i < 11; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part of the hash */
	flow_vm_vlan = ntohl(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 10; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = ntohl(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	TXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/* apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this had
	 * to be delayed because bit 0 of the stream should not be processed
	 * with the VLAN applied, so the VLAN is only added after bit 0 has
	 * been processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process the remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		TXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/* Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
}
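
/* The loops above walk the 11 dwords of union txgbe_atr_input.  Dword 0
 * is the flow_vm_vlan dword (vm_pool/flow_type/vlan_id), which is why it
 * is hashed separately while dwords 1-10 are folded into the common hash
 * dword.
 */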

static int txgbe_fdir_check_cmd_complete(struct wx *wx)
{
	u32 val;

	return read_poll_timeout_atomic(rd32, val,
					!(val & TXGBE_RDB_FDIR_CMD_CMD_MASK),
					10, 100, false,
					wx, TXGBE_RDB_FDIR_CMD);
}
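
/* The helper above polls TXGBE_RDB_FDIR_CMD every 10 us for at most
 * 100 us until the command field clears, returning 0 on completion or
 * -ETIMEDOUT if the hardware never acknowledges the command.
 */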

/**
 *  txgbe_fdir_add_signature_filter - Adds a signature hash filter
 *  @wx: pointer to hardware structure
 *  @input: unique input dword
 *  @common: compressed common input dword
 *  @queue: queue index to direct traffic to
 *
 *  Return: 0 on success and negative on failure
 **/
static int txgbe_fdir_add_signature_filter(struct wx *wx,
					   union txgbe_atr_hash_dword input,
					   union txgbe_atr_hash_dword common,
					   u8 queue)
{
	u32 fdirhashcmd, fdircmd;
	u8 flow_type;
	int err;

	/* Get the flow_type in order to program FDIRCMD properly:
	 * the lowest 2 bits are FDIRCMD.L4TYPE, the third lowest bit is
	 * FDIRCMD.IPV6, and the fifth is FDIRCMD.TUNNEL_FILTER
	 */
	flow_type = input.formatted.flow_type;
	switch (flow_type) {
	case TXGBE_ATR_FLOW_TYPE_TCPV4:
	case TXGBE_ATR_FLOW_TYPE_UDPV4:
	case TXGBE_ATR_FLOW_TYPE_SCTPV4:
	case TXGBE_ATR_FLOW_TYPE_TCPV6:
	case TXGBE_ATR_FLOW_TYPE_UDPV6:
	case TXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		wx_err(wx, "Error on flow type input\n");
		return -EINVAL;
	}

	/* configure FDIRCMD register */
	fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
		  TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
		  TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
	fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type);
	fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);

	txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd);
	fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID;
	wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd);
	wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);

	wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	err = txgbe_fdir_check_cmd_complete(wx);
	if (err)
		wx_err(wx, "Flow Director command did not complete!\n");

	return err;
}
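
/* txgbe_atr() below is the only caller: it runs in the transmit fast
 * path and relies on the FDIRHASH write landing before the FDIRCMD
 * write that kicks off the filter add.
 */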

void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
{
	union txgbe_atr_hash_dword common = { .dword = 0 };
	union txgbe_atr_hash_dword input = { .dword = 0 };
	struct wx_q_vector *q_vector = ring->q_vector;
	struct wx_dec_ptype dptype;
	union network_header {
		struct ipv6hdr *ipv6;
		struct iphdr *ipv4;
		void *raw;
	} hdr;
	struct tcphdr *th;

	/* if the ring doesn't have an interrupt vector, we cannot perform ATR */
	if (!q_vector)
		return;

	ring->atr_count++;
	dptype = wx_decode_ptype(ptype);
	if (dptype.etype) {
		if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
			return;
		hdr.raw = (void *)skb_inner_network_header(first->skb);
		th = inner_tcp_hdr(first->skb);
	} else {
		if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP ||
		    WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
			return;
		hdr.raw = (void *)skb_network_header(first->skb);
		th = tcp_hdr(first->skb);
	}

	/* skip this packet since it is invalid or the socket is closing */
	if (!th || th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && ring->atr_count < ring->atr_sample_rate)
		return;

	/* reset sample count */
	ring->atr_count = 0;

	/* src and dst are inverted; think of how the receiver sees them
	 *
	 * The input is broken into two sections: a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type, and a compressed
	 * section.  The rest of the data is XORed together and stored in
	 * the compressed dword.
	 */
	input.formatted.vlan_id = htons((u16)ptype);

	/* since src port and flex bytes occupy the same word, XOR them
	 * together and write the value to the source port portion of the
	 * compressed dword
	 */
	if (first->tx_flags & WX_TX_FLAGS_SW_VLAN)
		common.port.src ^= th->dest ^ first->skb->protocol;
	else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN)
		common.port.src ^= th->dest ^ first->skb->vlan_proto;
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) {
		input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
	} else {
		input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
	}

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	txgbe_fdir_add_signature_filter(q_vector->wx, input, common,
					ring->queue_index);
}
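
/* ATR samples a flow on every TCP SYN and then once per atr_sample_rate
 * packets.  Each sample (re)installs a signature filter that steers the
 * receive side of the flow to the queue paired with the transmitting
 * ring, keeping both directions of the connection on the same CPU.
 */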

int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
{
	u32 fdirm = 0, fdirtcpm = 0, flex = 0;

	/* Program the relevant mask registers.  If src/dst_port or
	 * src/dst_addr are zero, then assume a full mask for that field.
	 * Also assume that a VLAN of 0 is unspecified, so mask that out
	 * as well.  L4type cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at
	 * this point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		wx_dbg(wx, "bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
		break;
	case 0x7F:
		break;
	default:
		wx_err(wx, "Error on vm pool mask\n");
		return -EINVAL;
	}

	switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			wx_err(wx, "Error on src/dst port mask\n");
			return -EINVAL;
		}
		break;
	case TXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		wx_err(wx, "Error on flow type mask\n");
		return -EINVAL;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);

	flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
	flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
	flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
		 TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));

	switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
		break;
	case 0xFFFF:
		break;
	default:
		wx_err(wx, "Error on flexible byte mask\n");
		return -EINVAL;
	}
	wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ntohs(input_mask->formatted.dst_port);
	fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
	fdirtcpm |= ntohs(input_mask->formatted.src_port);

	/* write all three the same so that TCP, UDP and SCTP use the
	 * same mask
	 */
	wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
	wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
	wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);

	/* store source and destination IP masks (little-endian) */
	wr32(wx, TXGBE_RDB_FDIR_SA4_MSK,
	     ntohl(~input_mask->formatted.src_ip[0]));
	wr32(wx, TXGBE_RDB_FDIR_DA4_MSK,
	     ntohl(~input_mask->formatted.dst_ip[0]));

	return 0;
}
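
/* Illustrative only: to accept any VM pool and flex bytes but match the
 * L4 ports exactly, a caller would hand in a mask along these lines:
 *
 *	union txgbe_atr_input mask;
 *
 *	memset(&mask, 0, sizeof(mask));
 *	mask.formatted.flow_type = TXGBE_ATR_L4TYPE_MASK;
 *	mask.formatted.src_port = htons(0xFFFF);
 *	mask.formatted.dst_port = htons(0xFFFF);
 *	txgbe_fdir_set_input_mask(wx, &mask);
 *
 * Zeroed fields are treated as fully masked (match anything), per the
 * switch statements above.
 */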

int txgbe_fdir_write_perfect_filter(struct wx *wx,
				    union txgbe_atr_input *input,
				    u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	int err = 0;

	/* currently IPv6 is not supported, must be programmed with 0 */
	wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0]));
	wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1]));
	wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2]));

	/* record the source address (little-endian) */
	wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0]));

	/* record the first 32 bits of the destination address
	 * (little-endian)
	 */
	wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0]));

	/* record source and destination port (little-endian) */
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport);

	/* record packet type and flex_bytes (little-endian) */
	fdirvlan = ntohs(input->formatted.flex_bytes);
	fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash |
		   TXGBE_RDB_FDIR_HASH_BUCKET_VALID |
		   TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
	wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);

	/* flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	WX_WRITE_FLUSH(wx);

	/* configure FDIRCMD register */
	fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
		  TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
		  TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
	if (queue == TXGBE_RDB_FDIR_DROP_QUEUE)
		fdircmd |= TXGBE_RDB_FDIR_CMD_DROP;
	fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type);
	fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);
	fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool);

	wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);
	err = txgbe_fdir_check_cmd_complete(wx);
	if (err)
		wx_err(wx, "Flow Director command did not complete!\n");

	return err;
}
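
/* The soft_id programmed into FDIRHASH above doubles as the software
 * handle for the filter: txgbe_fdir_erase_perfect_filter() writes the
 * same TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX value to locate the entry it
 * is asked to remove.
 */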

int txgbe_fdir_erase_perfect_filter(struct wx *wx,
				    union txgbe_atr_input *input,
				    u16 soft_id)
{
	u32 fdirhash, fdircmd;
	int err = 0;

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash;
	fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
	wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);

	/* flush hash to HW */
	WX_WRITE_FLUSH(wx);

	/* Query if filter is present */
	wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT);

	err = txgbe_fdir_check_cmd_complete(wx);
	if (err) {
		wx_err(wx, "Flow Director command did not complete!\n");
		return err;
	}

	fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD);
	/* if filter exists in hardware then remove it */
	if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) {
		wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
		WX_WRITE_FLUSH(wx);
		wr32(wx, TXGBE_RDB_FDIR_CMD,
		     TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW);
	}

	return 0;
}
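
/* Removal is a two-step handshake: QUERY_REM_FILT latches the filter
 * state back into FDIRCMD, and REMOVE_FLOW is only issued when
 * FILTER_VALID is reported, so erasing a filter that was never written
 * is a harmless no-op.
 */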

/**
 *  txgbe_fdir_enable - Initialize Flow Director control registers
 *  @wx: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
{
	u32 val;
	int ret;

	/* Prime the keys for hashing */
	wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY);
	wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);

	wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl);
	WX_WRITE_FLUSH(wx);
	ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE,
				1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL);

	if (ret < 0)
		wx_dbg(wx, "Flow Director poll time exceeded!\n");
}
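
/* Unlike the atomic command poll, initialization may sleep:
 * read_poll_timeout() above rechecks TXGBE_RDB_FDIR_CTL_INIT_DONE every
 * 1 ms for up to 10 ms while the hardware initializes its filter tables.
 */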

/**
 *  txgbe_init_fdir_signature - Initialize Flow Director sig filters
 *  @wx: pointer to hardware structure
 **/
static void txgbe_init_fdir_signature(struct wx *wx)
{
	u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
	u32 flex = 0;

	/* Move the flexible bytes to use the ethertype - shift 6 words */
	flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
	flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;

	flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
		 TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
	wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);

	/* Continue setup of fdirctrl register bits:
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
		    TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
		    TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);

	/* write hashes and fdirctrl register, poll for completion */
	txgbe_fdir_enable(wx, fdirctrl);
}

/**
 *  txgbe_init_fdir_perfect - Initialize Flow Director perfect filters
 *  @wx: pointer to hardware structure
 **/
static void txgbe_init_fdir_perfect(struct wx *wx)
{
	u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;

	/* Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH |
		    TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) |
		    TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
		    TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
		    TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);

	/* write hashes and fdirctrl register, poll for completion */
	txgbe_fdir_enable(wx, fdirctrl);
}

static void txgbe_fdir_filter_restore(struct wx *wx)
{
	struct txgbe_fdir_filter *filter;
	struct txgbe *txgbe = wx->priv;
	struct hlist_node *node;
	u8 queue = 0;
	int ret = 0;

	spin_lock(&txgbe->fdir_perfect_lock);

	if (!hlist_empty(&txgbe->fdir_filter_list))
		ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask);

	if (ret)
		goto unlock;

	hlist_for_each_entry_safe(filter, node,
				  &txgbe->fdir_filter_list, fdir_node) {
		if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) {
			queue = TXGBE_RDB_FDIR_DROP_QUEUE;
		} else {
			u32 ring = ethtool_get_flow_spec_ring(filter->action);

			if (ring >= wx->num_rx_queues) {
				wx_err(wx, "FDIR restore failed, ring:%u\n",
				       ring);
				continue;
			}

			/* Map the ring onto the absolute queue index */
			queue = wx->rx_ring[ring]->reg_idx;
		}

		ret = txgbe_fdir_write_perfect_filter(wx,
						      &filter->filter,
						      filter->sw_idx,
						      queue);
		if (ret)
			wx_err(wx, "FDIR restore failed, index:%u\n",
			       filter->sw_idx);
	}

unlock:
	spin_unlock(&txgbe->fdir_perfect_lock);
}

void txgbe_configure_fdir(struct wx *wx)
{
	wx_disable_sec_rx_path(wx);

	if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) {
		txgbe_init_fdir_signature(wx);
	} else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) {
		txgbe_init_fdir_perfect(wx);
		txgbe_fdir_filter_restore(wx);
	}

	wx_enable_sec_rx_path(wx);
}
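
/* The Rx security path is quiesced across the reconfiguration above so
 * that no received traffic can race the reprogramming of the Flow
 * Director tables; signature (hash) and perfect modes are mutually
 * exclusive, and stored perfect filters are replayed after a reinit.
 */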

void txgbe_fdir_filter_exit(struct wx *wx)
{
	struct txgbe_fdir_filter *filter;
	struct txgbe *txgbe = wx->priv;
	struct hlist_node *node;

	spin_lock(&txgbe->fdir_perfect_lock);

	hlist_for_each_entry_safe(filter, node,
				  &txgbe->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	txgbe->fdir_filter_count = 0;

	spin_unlock(&txgbe->fdir_perfect_lock);
}
644