/* Applied Micro X-Gene SoC Ethernet Classifier structures
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Authors: Khuong Dinh <kdinh@apm.com>
 *          Tanmay Inamdar <tinamdar@apm.com>
 *          Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"

/* interfaces to convert structures to HW recognized bit formats */
static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
				  enum xgene_cle_prot_type type, u32 len,
				  u32 *reg)
{
	*reg =  SET_VAL(SB_IPFRAG, frag) |
		SET_VAL(SB_IPPROT, type) |
		SET_VAL(SB_IPVER, ver) |
		SET_VAL(SB_HDRLEN, len);
}

static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
				u32 nfpsel, u32 *idt_reg)
{
	*idt_reg =  SET_VAL(IDT_DSTQID, dstqid) |
		    SET_VAL(IDT_FPSEL, fpsel) |
		    SET_VAL(IDT_NFPSEL, nfpsel);
}

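/* The destination queue id is wider than the CLE_DSTQIDL field, so it is
 * split across two result-database words: the low bits go into buf[4] and
 * the remaining high bits into buf[5].
 */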
static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
				  struct xgene_cle_dbptr *dbptr, u32 *buf)
{
	buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
		 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);

	buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
		 SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
}

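/* Key-node entries are 16 bits (priority + result pointer); even-indexed
 * keys fill the lower half of a 32-bit word and odd-indexed keys the
 * upper half.
 */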
static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
{
	u32 i, j = 0;
	u32 data;

	buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
	for (i = 0; i < kn->num_keys; i++) {
		struct xgene_cle_ptree_key *key = &kn->key[i];

		if (!(i % 2)) {
			buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
				 SET_VAL(CLE_KN_RPTR, key->result_pointer);
		} else {
			data = SET_VAL(CLE_KN_PRIO, key->priority) |
			       SET_VAL(CLE_KN_RPTR, key->result_pointer);
			buf[j++] |= (data << 16);
		}
	}
}

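/* Serialize a decision node as one config word plus two words per branch.
 * Absolute jump targets inside packet RAM are shifted by 'jb' so they
 * stay valid after the start-of-packet pointer is advanced by the jump
 * bytes.
 */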
static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn,
			       u32 *buf, u32 jb)
{
	struct xgene_cle_ptree_branch *br;
	u32 i, j = 0;
	u32 npp;

	buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
		   SET_VAL(CLE_DN_LASTN, dn->last_node) |
		   SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
		   SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
		   SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
		   SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
		   SET_VAL(CLE_DN_RPTR, dn->result_pointer);

	for (i = 0; i < dn->num_branches; i++) {
		br = &dn->branch[i];
		npp = br->next_packet_pointer;

		if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
			npp += jb;

		buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
			   SET_VAL(CLE_BR_NPPTR, npp) |
			   SET_VAL(CLE_BR_JB, br->jump_bw) |
			   SET_VAL(CLE_BR_JR, br->jump_rel) |
			   SET_VAL(CLE_BR_OP, br->operation) |
			   SET_VAL(CLE_BR_NNODE, br->next_node) |
			   SET_VAL(CLE_BR_NBR, br->next_branch);

		buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
			   SET_VAL(CLE_BR_MASK, br->mask);
	}
}

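/* Poll INDCMD_STATUS until the indirect command completes; give up after
 * 10 iterations (roughly 10-20 ms) and return -EBUSY on timeout.
 */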
static int xgene_cle_poll_cmd_done(void __iomem *base,
				   enum xgene_cle_cmd_type cmd)
{
	u32 status, loop = 10;
	int ret = -EBUSY;

	while (loop--) {
		status = ioread32(base + INDCMD_STATUS);
		if (status & cmd) {
			ret = 0;
			break;
		}
		usleep_range(1000, 2000);
	}

	return ret;
}

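/* Indirect write to classifier DRAM: latch the region and index into
 * INDADDR, stage the data words in the DATA_RAM registers, then kick off
 * the command via INDCMD and poll for completion.  Regions below
 * PTREE_RAM are per-parser and get written once for each active parser.
 */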
static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
			     u32 index, enum xgene_cle_dram_type type,
			     enum xgene_cle_cmd_type cmd)
{
	enum xgene_cle_parser parser = cle->active_parser;
	void __iomem *base = cle->base;
	u32 i, j, ind_addr;
	u8 port, nparsers;
	int ret = 0;

	/* PTREE_RAM onwards, DRAM regions are common for all parsers */
	nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;

	for (i = 0; i < nparsers; i++) {
		port = i;
		if ((type < PTREE_RAM) && (parser != PARSER_ALL))
			port = parser;

		ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
		iowrite32(ind_addr, base + INDADDR);
		for (j = 0; j < nregs; j++)
			iowrite32(data[j], base + DATA_RAM0 + (j * 4));
		iowrite32(cmd, base + INDCMD);

		ret = xgene_cle_poll_cmd_done(base, cmd);
		if (ret)
			break;
	}

	return ret;
}

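/* Turn the parse tree on by programming each parser's start node and
 * start-of-packet pointer (SNPTR0/SPPTR0).
 */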
static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
				   struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	void __iomem *addr, *base = cle->base;
	u32 offset = CLE_PORT_OFFSET;
	u32 i;

	/* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
	ptree->start_pkt += cle->jump_bytes;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			addr = base + cle->active_parser * offset;
		else
			addr = base + (i * offset);

		iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
		iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
	}
}

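/* Write the result-database pointers into DB_RAM, one six-word entry per
 * dbptr starting at the tree's first database index.
 */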
static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	u32 buf[CLE_DRAM_REGS];
	u32 i;
	int ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dbptr; i++) {
		xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
		ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
					DB_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

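/* Load the parse tree into PTREE_RAM as 17-word entries: decision nodes
 * first, then key nodes at the indices that follow.
 */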
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
				struct xgene_enet_cle *cle)
{
	struct xgene_cle_ptree *ptree = &cle->ptree;
	struct xgene_cle_ptree_ewdn *dn = ptree->dn;
	struct xgene_cle_ptree_kn *kn = ptree->kn;
	u32 buf[CLE_DRAM_REGS];
	int i, j, ret;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < ptree->num_dn; i++) {
		xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
		ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	/* continue node index for key node */
	memset(buf, 0, sizeof(buf));
	for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) {
		xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf);
		ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
					PTREE_RAM, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	return 0;
}

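/* Program the full parse tree: nodes, then the result database, and
 * finally point the parsers at it.
 */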
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_cle *cle)
{
	int ret;

	ret = xgene_cle_setup_node(pdata, cle);
	if (ret)
		return ret;

	ret = xgene_cle_setup_dbptr(pdata, cle);
	if (ret)
		return ret;

	xgene_cle_enable_ptree(pdata, cle);

	return 0;
}

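/* Install the default classification result on each parser port: copy
 * the encoded dbptr into DFCLSRESDB00 and program DFCLSRESDBPTR0 with
 * the given priority and database index.
 */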
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
				      struct xgene_enet_cle *enet_cle,
				      struct xgene_cle_dbptr *dbptr,
				      u32 index, u8 priority)
{
	void __iomem *base = enet_cle->base;
	void __iomem *base_addr;
	u32 buf[CLE_DRAM_REGS];
	u32 def_cls, offset;
	u32 i, j;

	memset(buf, 0, sizeof(buf));
	xgene_cle_dbptr_to_hw(pdata, dbptr, buf);

	for (i = 0; i < enet_cle->parsers; i++) {
		if (enet_cle->active_parser != PARSER_ALL) {
			offset = enet_cle->active_parser *
				CLE_PORT_OFFSET;
		} else {
			offset = i * CLE_PORT_OFFSET;
		}

		base_addr = base + DFCLSRESDB00 + offset;
		for (j = 0; j < 6; j++)
			iowrite32(buf[j], base_addr + (j * 4));

		def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
		iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
	}
}

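/* Program the RSS sideband entries into PKT_RAM just past the packet-data
 * region.  Each 16-bit entry packs the fragment flag, protocol, IP
 * version and header length; hdr_len carries the MAC header length in
 * its upper bits and the IPv4 IHL (in 32-bit words) in the low five
 * bits.  Two entries share one 32-bit PKT_RAM word.
 */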
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
	u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
	u32 mac_hdr_len = ETH_HLEN;
	u32 sband, reg = 0;
	u32 ipv4_ihl = 5;
	u32 hdr_len;
	int ret;

	/* Sideband: IPv4/TCP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
	sband = reg;

	/* Sideband: IPv4/UDP packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	/* Sideband: IPv4/RAW packets */
	hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband = reg;

	/* Sideband: Ethernet II/RAW packets */
	hdr_len = (mac_hdr_len << 5);
	xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
			      hdr_len, &reg);
	sband |= (reg << 16);

	ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
	if (ret)
		return ret;

	return 0;
}

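/* Seed the RSS hash function with a random 16-byte secret key. */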
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
	u32 secret_key_ipv4[4];  /* 16 bytes */
	int ret = 0;

	get_random_bytes(secret_key_ipv4, 16);
	ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
				RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
	return ret;
}

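/* Populate the RSS indirection table by striping the rx rings across its
 * entries; each entry encodes a ring's destination queue id and buffer
 * pool select.  Finish by programming the secret key.
 */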
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
	int i, ret = 0;
	u16 pool_id;

	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
		idx = i % pdata->rxq_cnt;
		pool_id = pdata->rx_ring[idx]->buf_pool->id;
		fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
		nfpsel = 0;
		idt_reg = 0;

		xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg);
		ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
					RSS_IDT, CLE_CMD_WR);
		if (ret)
			return ret;
	}

	ret = xgene_cle_set_rss_skeys(&pdata->cle);
	if (ret)
		return ret;

	return 0;
}

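/* Enable RSS on every parser port using the RSS_IPV4_12B hash scheme,
 * then set up the sideband data and the indirection table.
 */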
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *cle = &pdata->cle;
	void __iomem *base = cle->base;
	u32 offset, val = 0;
	int i, ret = 0;

	offset = CLE_PORT_OFFSET;
	for (i = 0; i < cle->parsers; i++) {
		if (cle->active_parser != PARSER_ALL)
			offset = cle->active_parser * CLE_PORT_OFFSET;
		else
			offset = i * CLE_PORT_OFFSET;

		/* enable RSS */
		val = (RSS_IPV4_12B << 1) | 0x1;
		writel(val, base + RSS_CTRL0 + offset);
	}

	/* setup sideband data */
	ret = xgene_cle_set_rss_sband(cle);
	if (ret)
		return ret;

	/* setup indirection table */
	ret = xgene_cle_set_rss_idt(pdata);
	if (ret)
		return ret;

	return 0;
}

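/* Build the classifier configuration: define the parse tree, enable RSS
 * on XGMII (10GbE) ports, set up the accept/drop/default result-database
 * entries and load everything into hardware.
 */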
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
	struct xgene_cle_ptree_branch *br;
	u32 def_qid, def_fpsel, pool_id;
	struct xgene_cle_ptree *ptree;
	struct xgene_cle_ptree_kn kn;
	int ret;
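	/* Decision-node table: PKT_TYPE_NODE classifies on the ethertype,
	 * PKT_PROT_NODE on the IPv4 protocol (TCP/UDP/other), and the RSS
	 * nodes step through the source/destination addresses and L4
	 * ports before terminating in LAST_NODE.
	 */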
	struct xgene_cle_ptree_ewdn ptree_dn[] = {
		{
			/* PKT_TYPE_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 2,
			.branch = {
				{
					/* IPV4 */
					.valid = 0,
					.next_packet_pointer = 22,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = PKT_PROT_NODE,
					.next_branch = 0,
					.data = 0x8,
					.mask = 0xffff
				},
				{
					.valid = 0,
					.next_packet_pointer = 262,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			},
		},
		{
			/* PKT_PROT_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 3,
			.branch = {
				{
					/* TCP */
					.valid = 1,
					.next_packet_pointer = 26,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 0,
					.data = 0x0600,
					.mask = 0xffff
				},
				{
					/* UDP */
					.valid = 1,
					.next_packet_pointer = 26,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 0,
					.data = 0x1100,
					.mask = 0xffff
				},
				{
					.valid = 0,
					.next_packet_pointer = 260,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* RSS_IPV4_TCP_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = BOTH_BYTES,
			.result_pointer = DB_RES_DROP,
			.num_branches = 6,
			.branch = {
				{
					/* SRC IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 28,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 1,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* SRC IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 30,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 2,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 32,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 3,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 34,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 4,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* TCP SRC Port */
					.valid = 0,
					.next_packet_pointer = 36,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_TCP_NODE,
					.next_branch = 5,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* TCP DST Port */
					.valid = 0,
					.next_packet_pointer = 256,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* RSS_IPV4_UDP_NODE */
			.node_type = EWDN,
			.last_node = 0,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = BOTH_BYTES,
			.result_pointer = DB_RES_DROP,
			.num_branches = 6,
			.branch = {
				{
					/* SRC IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 28,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 1,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* SRC IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 30,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 2,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B01 */
					.valid = 0,
					.next_packet_pointer = 32,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 3,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* DST IPV4 B23 */
					.valid = 0,
					.next_packet_pointer = 34,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 4,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* UDP SRC Port */
					.valid = 0,
					.next_packet_pointer = 36,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = RSS_IPV4_UDP_NODE,
					.next_branch = 5,
					.data = 0x0,
					.mask = 0xffff
				},
				{
					/* UDP DST Port */
					.valid = 0,
					.next_packet_pointer = 256,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = LAST_NODE,
					.next_branch = 0,
					.data = 0x0,
					.mask = 0xffff
				}
			}
		},
		{
			/* LAST NODE */
			.node_type = EWDN,
			.last_node = 1,
			.hdr_len_store = 1,
			.hdr_extn = NO_BYTE,
			.byte_store = NO_BYTE,
			.search_byte_store = NO_BYTE,
			.result_pointer = DB_RES_DROP,
			.num_branches = 1,
			.branch = {
				{
					.valid = 0,
					.next_packet_pointer = 0,
					.jump_bw = JMP_FW,
					.jump_rel = JMP_ABS,
					.operation = EQT,
					.next_node = MAX_NODES,
					.next_branch = 0,
					.data = 0,
					.mask = 0xffff
				}
			}
		}
	};

	ptree = &enet_cle->ptree;
	ptree->start_pkt = 12; /* Ethertype */
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ret = xgene_cle_setup_rss(pdata);
		if (ret) {
			netdev_err(pdata->ndev, "RSS initialization failed\n");
			return ret;
		}
	} else {
		br = &ptree_dn[PKT_PROT_NODE].branch[0];
		br->valid = 0;
		br->next_packet_pointer = 260;
		br->next_node = LAST_NODE;
		br->data = 0x0000;
		br->mask = 0xffff;
	}

	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	pool_id = pdata->rx_ring[0]->buf_pool->id;
	def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;

	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
	dbptr[DB_RES_ACCEPT].fpsel =  def_fpsel;
	dbptr[DB_RES_ACCEPT].dstqid = def_qid;
	dbptr[DB_RES_ACCEPT].cle_priority = 1;

	dbptr[DB_RES_DEF].fpsel = def_fpsel;
	dbptr[DB_RES_DEF].dstqid = def_qid;
	dbptr[DB_RES_DEF].cle_priority = 7;
	xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
				  DB_RES_ACCEPT, 7);

	dbptr[DB_RES_DROP].drop = 1;

	memset(&kn, 0, sizeof(kn));
	kn.node_type = KN;
	kn.num_keys = 1;
	kn.key[0].priority = 0;
	kn.key[0].result_pointer = DB_RES_ACCEPT;

	ptree->dn = ptree_dn;
	ptree->kn = &kn;
	ptree->dbptr = dbptr;
	ptree->num_dn = MAX_NODES;
	ptree->num_kn = 1;
	ptree->num_dbptr = DB_MAX_PTRS;

	return xgene_cle_setup_ptree(pdata, enet_cle);
}

struct xgene_cle_ops xgene_cle3in_ops = {
	.cle_init = xgene_enet_cle_init,
};
735