xref: /linux/drivers/net/ethernet/rocker/rocker_ofdpa.c (revision 63307d015b91e626c97bb82e88054af3d0b74643)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
4  *					        implementation
5  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
6  * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
7  */
8 
9 #include <linux/kernel.h>
10 #include <linux/types.h>
11 #include <linux/spinlock.h>
12 #include <linux/hashtable.h>
13 #include <linux/crc32.h>
14 #include <linux/netdevice.h>
15 #include <linux/inetdevice.h>
16 #include <linux/if_vlan.h>
17 #include <linux/if_bridge.h>
18 #include <net/neighbour.h>
19 #include <net/switchdev.h>
20 #include <net/ip_fib.h>
21 #include <net/arp.h>
22 
23 #include "rocker.h"
24 #include "rocker_tlv.h"
25 
/* Flow table entry key.  @priority and @tbl_id are common to all
 * OF-DPA tables; which union member is valid is selected by @tbl_id
 * (see the dispatch in ofdpa_cmd_flow_tbl_add()).
 */
struct ofdpa_flow_tbl_key {
	u32 priority;
	enum rocker_of_dpa_table_id tbl_id;
	union {
		/* ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		/* ROCKER_OF_DPA_TABLE_ID_VLAN */
		struct {
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;	/* VLAN assigned when untagged */
		} vlan;
		/* ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		/* ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING */
		struct {
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		/* ROCKER_OF_DPA_TABLE_ID_BRIDGING */
		struct {
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;	/* eth_dst[] is valid */
			int has_eth_dst_mask;	/* eth_dst_mask[] is valid */
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		/* ROCKER_OF_DPA_TABLE_ID_ACL_POLICY */
		struct {
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;	/* DSCP in low 6 bits, ECN in top 2 */
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
90 
/* A flow table entry as tracked in software, hashed by the CRC32 of
 * its key (see ofdpa_flow_tbl_add()/ofdpa_flow_tbl_find()).
 */
struct ofdpa_flow_tbl_entry {
	struct hlist_node entry;	/* node in ofdpa->flow_tbl */
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_{ADD,MOD,DEL} */
	u64 cookie;	/* per-entry id sent to hw, from flow_tbl_next_cookie */
	struct ofdpa_flow_tbl_key key;
	size_t key_len;	/* bytes of key compared/CRCed; 0 means whole key */
	u32 key_crc32; /* key */
	struct fib_info *fi;
};
100 
/* A group table entry; the valid union member is selected by the
 * group type encoded in @group_id (ROCKER_GROUP_TYPE_GET(), see
 * ofdpa_cmd_group_tbl_add()).
 */
struct ofdpa_group_tbl_entry {
	struct hlist_node entry;	/* node in ofdpa->group_tbl */
	u32 cmd;	/* ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_* */
	u32 group_id; /* key */
	u16 group_count;	/* number of ids in group_ids[] */
	u32 *group_ids;	/* for L2 flood/mcast groups */
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;	/* next (lower) group in the chain */
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;	/* next (lower) group in the chain */
		} l3_unicast;
	};
};
126 
/* Software FDB entry, hashed by the CRC32 of (port, addr, vlan). */
struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;	/* node in ofdpa->fdb_tbl */
	u32 key_crc32; /* key */
	bool learned;	/* learned from traffic (vs. statically added) */
	unsigned long touched;	/* last-activity time, for ageing */
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
138 
/* Refcounted mapping from a netdev ifindex to an internal VLAN ID. */
struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;	/* node in ofdpa->internal_vlan_tbl */
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
145 
/* Refcounted IPv4 neighbour entry, keyed by IP address. */
struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;	/* node in ofdpa->neigh_tbl */
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;	/* from ofdpa->neigh_tbl_next_index */
	u8 eth_dst[ETH_ALEN];	/* neighbour's MAC address */
	bool ttl_check;
};
155 
/* Indexes into a port's ctrls[] array of enabled control-traffic
 * ACL classes.
 */
enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};
165 
/* Internal VLAN IDs are allocated from a 255-entry pool starting at
 * 0x0f00; VID 0 denotes untagged traffic.
 */
#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0
171 
/* Per-switch OF-DPA state: software shadows of the flow, group, FDB,
 * internal-VLAN and neighbour tables, each protected by its own
 * spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;	/* next per-flow hw cookie to hand out */
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;	/* ages out learned FDB entries */
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
	bool fib_aborted;
};
191 
/* Per-port OF-DPA state, associated with one rocker_port/netdev. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;	/* physical port number on the switch */
	struct net_device *bridge_dev;	/* master device when enslaved */
	__be16 internal_vlan_id;	/* VLAN used for untagged traffic */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];	/* enabled control-traffic classes */
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};
205 
/* Well-known MAC addresses and match masks used when building
 * termination-MAC and ACL entries.
 */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
215 
216 /* Rocker priority levels for flow table entries.  Higher
217  * priority match takes precedence over lower priority match.
218  */
219 
/* Priorities are only compared within a single table; values repeat
 * across tables.
 */
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};
236 
237 static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
238 {
239 	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
240 	u16 end = 0xffe;
241 	u16 _vlan_id = ntohs(vlan_id);
242 
243 	return (_vlan_id >= start && _vlan_id <= end);
244 }
245 
246 static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
247 				     u16 vid, bool *pop_vlan)
248 {
249 	__be16 vlan_id;
250 
251 	if (pop_vlan)
252 		*pop_vlan = false;
253 	vlan_id = htons(vid);
254 	if (!vlan_id) {
255 		vlan_id = ofdpa_port->internal_vlan_id;
256 		if (pop_vlan)
257 			*pop_vlan = true;
258 	}
259 
260 	return vlan_id;
261 }
262 
263 static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
264 				  __be16 vlan_id)
265 {
266 	if (ofdpa_vlan_id_is_internal(vlan_id))
267 		return 0;
268 
269 	return ntohs(vlan_id);
270 }
271 
272 static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
273 				const char *kind)
274 {
275 	return ofdpa_port->bridge_dev &&
276 		!strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
277 }
278 
/* True when the port is a member of a Linux bridge. */
static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}
283 
/* True when the port is a member of an Open vSwitch datapath. */
static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}
288 
/* Flags modifying table operations (the "flags" argument threaded
 * through the flow/group/fdb helpers below).
 */
#define OFDPA_OP_FLAG_REMOVE		BIT(0)
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)
293 
294 static bool ofdpa_flags_nowait(int flags)
295 {
296 	return flags & OFDPA_OP_FLAG_NOWAIT;
297 }
298 
299 /*************************************************************
300  * Flow, group, FDB, internal VLAN and neigh command prepares
301  *************************************************************/
302 
/* Emit the TLVs for an INGRESS_PORT table entry: in_pport (+mask) and
 * the goto table.  Returns 0 or -EMSGSIZE if @desc_info is full.
 */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}
319 
/* Emit the TLVs for a VLAN table entry.  NEW_VLAN_ID is only sent for
 * untagged-traffic rules (VLAN assignment).  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
			    const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}
343 
/* Emit the TLVs for a TERMINATION_MAC table entry: port, ethertype,
 * dst MAC and VLAN matches (all with masks) plus the goto table.
 * COPY_CPU_ACTION is only sent when requested.  Returns 0 or
 * -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
379 
/* Emit the TLVs for a UNICAST_ROUTING table entry: ethertype, IPv4
 * destination (+mask), goto table and group.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}
402 
/* Emit the TLVs for a BRIDGING table entry.  dst MAC (+mask), VLAN
 * and tunnel id are optional matches; goto table and group id are
 * always sent; COPY_CPU_ACTION only when requested.  Returns 0 or
 * -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
436 
/* Emit the TLVs for an ACL_POLICY table entry.  For IPv4/IPv6
 * ethertypes the IP protocol and TOS are also matched, with TOS split
 * into DSCP (low 6 bits) and ECN (top 2 bits) TLVs.  The group id is
 * only sent when it is not ROCKER_GROUP_NONE.  Returns 0 or
 * -EMSGSIZE.
 */
static int
ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			   const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		/* L3 matches: protocol, DSCP, ECN (each with mask) */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
503 
/* Prepare a flow add/mod command descriptor: the cmd type, then a
 * CMD_INFO nest holding table id, priority, hard-time (0), cookie and
 * the per-table match/action TLVs dispatched on key.tbl_id.  @priv is
 * the struct ofdpa_flow_tbl_entry being installed.  Returns 0,
 * -EMSGSIZE if the descriptor is full, or -ENOTSUPP for an unknown
 * table id.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
560 
/* Prepare a flow delete command descriptor; the entry's cookie alone
 * identifies the flow to the hw.  Returns 0 or -EMSGSIZE.
 */
static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
580 
/* Emit the TLVs for an L2 interface group: the output pport (decoded
 * from the group id) and the pop-VLAN flag.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
				     struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
			       ROCKER_GROUP_PORT_GET(entry->group_id)))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
			      entry->l2_interface.pop_vlan))
		return -EMSGSIZE;

	return 0;
}
594 
/* Emit the TLVs for an L2 rewrite group: the chained (lower) group id
 * plus optional src/dst MAC and VLAN rewrites (zero values are
 * omitted).  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
617 
/* Emit the member-group TLVs for L2 flood/mcast groups: a count
 * followed by a nest of group ids indexed from 1.  Returns 0 or
 * -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
				  const struct ofdpa_group_tbl_entry *entry)
{
	int i;
	struct rocker_tlv *group_ids;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}
643 
/* Emit the TLVs for an L3 unicast group: optional src/dst MAC and
 * VLAN rewrites (zero values omitted), the TTL-check flag and the
 * chained (lower) group id.  Returns 0 or -EMSGSIZE.
 */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
669 
/* Prepare a group add/mod command descriptor: cmd type, a CMD_INFO
 * nest with the group id, then per-group-type TLVs dispatched on the
 * type encoded in the group id.  @priv is the struct
 * ofdpa_group_tbl_entry.  Returns 0, -EMSGSIZE, or -ENOTSUPP for an
 * unknown group type.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
714 
/* Prepare a group delete command descriptor; the group id alone
 * identifies the group to the hw.  Returns 0 or -EMSGSIZE.
 */
static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
734 
735 /***************************************************
736  * Flow, group, FDB, internal VLAN and neigh tables
737  ***************************************************/
738 
/* Look up a flow entry matching @match's key (compared over
 * match->key_len bytes, or the whole key when key_len is 0), using
 * match->key_crc32 as the hash.  Callers hold ofdpa->flow_tbl_lock.
 * Returns the entry or NULL.
 */
static struct ofdpa_flow_tbl_entry *
ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
		    const struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);

	hash_for_each_possible(ofdpa->flow_tbl, found,
			       entry, match->key_crc32) {
		if (memcmp(&found->key, &match->key, key_len) == 0)
			return found;
	}

	return NULL;
}
754 
/* Insert (or replace) @match in the software flow table and issue the
 * corresponding FLOW_ADD/FLOW_MOD command to the hw.  Ownership of
 * @match transfers to the table: if an entry with the same key
 * exists, it is freed and @match takes over its cookie (FLOW_MOD);
 * otherwise a fresh cookie is assigned (FLOW_ADD).  The entry is
 * published in the table before the hw command is executed.
 */
static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* same key already installed: replace it, keep its cookie */
		match->cookie = found->cookie;
		hash_del(&found->entry);
		kfree(found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	return rocker_cmd_exec(ofdpa_port->rocker_port,
			       ofdpa_flags_nowait(flags),
			       ofdpa_cmd_flow_tbl_add,
			       found, NULL, NULL);
}
789 
/* Remove the flow entry matching @match's key from the software table
 * and issue a FLOW_DEL command to the hw.  @match is only a lookup
 * template and is always freed here; the found entry is freed after
 * the hw command completes.  Returns 0 when no matching entry exists.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      int flags, struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	kfree(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_flow_tbl_del,
				      found, NULL, NULL);
		kfree(found);
	}

	return err;
}
824 
825 static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
826 			     struct ofdpa_flow_tbl_entry *entry)
827 {
828 	if (flags & OFDPA_OP_FLAG_REMOVE)
829 		return ofdpa_flow_tbl_del(ofdpa_port, flags, entry);
830 	else
831 		return ofdpa_flow_tbl_add(ofdpa_port, flags, entry);
832 }
833 
834 static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, int flags,
835 				  u32 in_pport, u32 in_pport_mask,
836 				  enum rocker_of_dpa_table_id goto_tbl)
837 {
838 	struct ofdpa_flow_tbl_entry *entry;
839 
840 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
841 	if (!entry)
842 		return -ENOMEM;
843 
844 	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
845 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
846 	entry->key.ig_port.in_pport = in_pport;
847 	entry->key.ig_port.in_pport_mask = in_pport_mask;
848 	entry->key.ig_port.goto_tbl = goto_tbl;
849 
850 	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
851 }
852 
/* Build and install (or remove) a VLAN table entry for @in_pport.
 * @untagged requests assignment of @new_vlan_id to untagged frames.
 */
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
	entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;

	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
878 
/* Build and install (or remove) a termination-MAC table entry.
 * Multicast destinations route to the multicast-routing table at
 * mcast priority; unicast destinations to the unicast-routing table.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
914 
/* Build and install (or remove) a bridging table entry.  The priority
 * is derived from three properties of the match: VLAN vs tenant
 * (tunnel) bridging, default (no exact dst MAC) vs exact, and
 * wildcarded (masked) vs full dst MAC.  Uses GFP_ATOMIC — presumably
 * callers may run in atomic context; NOTE(review): confirm against
 * callers.
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 int flags, const u8 *eth_dst,
				 const u8 *eth_dst_mask,  __be16 vlan_id,
				 u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	/* "default" rule: no dst MAC at all, or dst MAC with a mask */
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
968 
/* Build and install (or remove) an IPv4 unicast-routing table entry.
 * key_len stops at ucast_routing.group_id, so the group id (and @fi)
 * are excluded from the lookup key — routes to the same prefix match
 * the same entry regardless of nexthop group.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
}
995 
996 static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, int flags,
997 			      u32 in_pport, u32 in_pport_mask,
998 			      const u8 *eth_src, const u8 *eth_src_mask,
999 			      const u8 *eth_dst, const u8 *eth_dst_mask,
1000 			      __be16 eth_type, __be16 vlan_id,
1001 			      __be16 vlan_id_mask, u8 ip_proto,
1002 			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
1003 			      u32 group_id)
1004 {
1005 	u32 priority;
1006 	struct ofdpa_flow_tbl_entry *entry;
1007 
1008 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1009 	if (!entry)
1010 		return -ENOMEM;
1011 
1012 	priority = OFDPA_PRIORITY_ACL_NORMAL;
1013 	if (eth_dst && eth_dst_mask) {
1014 		if (ether_addr_equal(eth_dst_mask, mcast_mac))
1015 			priority = OFDPA_PRIORITY_ACL_DFLT;
1016 		else if (is_link_local_ether_addr(eth_dst))
1017 			priority = OFDPA_PRIORITY_ACL_CTRL;
1018 	}
1019 
1020 	entry->key.priority = priority;
1021 	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1022 	entry->key.acl.in_pport = in_pport;
1023 	entry->key.acl.in_pport_mask = in_pport_mask;
1024 
1025 	if (eth_src)
1026 		ether_addr_copy(entry->key.acl.eth_src, eth_src);
1027 	if (eth_src_mask)
1028 		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
1029 	if (eth_dst)
1030 		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
1031 	if (eth_dst_mask)
1032 		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
1033 
1034 	entry->key.acl.eth_type = eth_type;
1035 	entry->key.acl.vlan_id = vlan_id;
1036 	entry->key.acl.vlan_id_mask = vlan_id_mask;
1037 	entry->key.acl.ip_proto = ip_proto;
1038 	entry->key.acl.ip_proto_mask = ip_proto_mask;
1039 	entry->key.acl.ip_tos = ip_tos;
1040 	entry->key.acl.ip_tos_mask = ip_tos_mask;
1041 	entry->key.acl.group_id = group_id;
1042 
1043 	return ofdpa_flow_tbl_do(ofdpa_port, flags, entry);
1044 }
1045 
1046 static struct ofdpa_group_tbl_entry *
1047 ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
1048 		     const struct ofdpa_group_tbl_entry *match)
1049 {
1050 	struct ofdpa_group_tbl_entry *found;
1051 
1052 	hash_for_each_possible(ofdpa->group_tbl, found,
1053 			       entry, match->group_id) {
1054 		if (found->group_id == match->group_id)
1055 			return found;
1056 	}
1057 
1058 	return NULL;
1059 }
1060 
1061 static void ofdpa_group_tbl_entry_free(struct ofdpa_group_tbl_entry *entry)
1062 {
1063 	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
1064 	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
1065 	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
1066 		kfree(entry->group_ids);
1067 		break;
1068 	default:
1069 		break;
1070 	}
1071 	kfree(entry);
1072 }
1073 
1074 static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, int flags,
1075 			       struct ofdpa_group_tbl_entry *match)
1076 {
1077 	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1078 	struct ofdpa_group_tbl_entry *found;
1079 	unsigned long lock_flags;
1080 
1081 	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);
1082 
1083 	found = ofdpa_group_tbl_find(ofdpa, match);
1084 
1085 	if (found) {
1086 		hash_del(&found->entry);
1087 		ofdpa_group_tbl_entry_free(found);
1088 		found = match;
1089 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
1090 	} else {
1091 		found = match;
1092 		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
1093 	}
1094 
1095 	hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
1096 
1097 	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
1098 
1099 	return rocker_cmd_exec(ofdpa_port->rocker_port,
1100 			       ofdpa_flags_nowait(flags),
1101 			       ofdpa_cmd_group_tbl_add,
1102 			       found, NULL, NULL);
1103 }
1104 
/* Remove a group table entry and issue the hardware delete command.
 *
 * "match" only identifies the entry by group_id and is always freed
 * here, whether or not a matching entry existed.  The hardware command
 * is sent outside the spinlock using the unhashed "found" entry, which
 * is freed after the command completes.  Returns 0 when no entry was
 * found.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		/* Unhash under the lock; the entry is now exclusively ours */
		hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* The caller's lookup key is no longer needed */
	ofdpa_group_tbl_entry_free(match);

	if (found) {
		err = rocker_cmd_exec(ofdpa_port->rocker_port,
				      ofdpa_flags_nowait(flags),
				      ofdpa_cmd_group_tbl_del,
				      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(found);
	}

	return err;
}
1136 
1137 static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, int flags,
1138 			      struct ofdpa_group_tbl_entry *entry)
1139 {
1140 	if (flags & OFDPA_OP_FLAG_REMOVE)
1141 		return ofdpa_group_tbl_del(ofdpa_port, flags, entry);
1142 	else
1143 		return ofdpa_group_tbl_add(ofdpa_port, flags, entry);
1144 }
1145 
1146 static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
1147 				    int flags, __be16 vlan_id,
1148 				    u32 out_pport, int pop_vlan)
1149 {
1150 	struct ofdpa_group_tbl_entry *entry;
1151 
1152 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1153 	if (!entry)
1154 		return -ENOMEM;
1155 
1156 	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1157 	entry->l2_interface.pop_vlan = pop_vlan;
1158 
1159 	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1160 }
1161 
1162 static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
1163 				  int flags, u8 group_count,
1164 				  const u32 *group_ids, u32 group_id)
1165 {
1166 	struct ofdpa_group_tbl_entry *entry;
1167 
1168 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1169 	if (!entry)
1170 		return -ENOMEM;
1171 
1172 	entry->group_id = group_id;
1173 	entry->group_count = group_count;
1174 
1175 	entry->group_ids = kcalloc(group_count, sizeof(u32), GFP_KERNEL);
1176 	if (!entry->group_ids) {
1177 		kfree(entry);
1178 		return -ENOMEM;
1179 	}
1180 	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
1181 
1182 	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1183 }
1184 
1185 static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
1186 				int flags, __be16 vlan_id,
1187 				u8 group_count,	const u32 *group_ids,
1188 				u32 group_id)
1189 {
1190 	return ofdpa_group_l2_fan_out(ofdpa_port, flags,
1191 				      group_count, group_ids,
1192 				      group_id);
1193 }
1194 
1195 static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, int flags,
1196 				  u32 index, const u8 *src_mac, const u8 *dst_mac,
1197 				  __be16 vlan_id, bool ttl_check, u32 pport)
1198 {
1199 	struct ofdpa_group_tbl_entry *entry;
1200 
1201 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1202 	if (!entry)
1203 		return -ENOMEM;
1204 
1205 	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
1206 	if (src_mac)
1207 		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
1208 	if (dst_mac)
1209 		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
1210 	entry->l3_unicast.vlan_id = vlan_id;
1211 	entry->l3_unicast.ttl_check = ttl_check;
1212 	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
1213 
1214 	return ofdpa_group_tbl_do(ofdpa_port, flags, entry);
1215 }
1216 
1217 static struct ofdpa_neigh_tbl_entry *
1218 ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
1219 {
1220 	struct ofdpa_neigh_tbl_entry *found;
1221 
1222 	hash_for_each_possible(ofdpa->neigh_tbl, found,
1223 			       entry, be32_to_cpu(ip_addr))
1224 		if (found->ip_addr == ip_addr)
1225 			return found;
1226 
1227 	return NULL;
1228 }
1229 
/* Insert a new neighbor entry into the neigh table, assigning it the
 * next free index (used as the L3 unicast group index) and taking the
 * first reference.  Callers hold ofdpa->neigh_tbl_lock.
 */
static void ofdpa_neigh_add(struct ofdpa *ofdpa,
			    struct ofdpa_neigh_tbl_entry *entry)
{
	entry->index = ofdpa->neigh_tbl_next_index++;
	entry->ref_count++;
	/* hashed by host-order IP, matching ofdpa_neigh_tbl_find() */
	hash_add(ofdpa->neigh_tbl, &entry->entry,
		 be32_to_cpu(entry->ip_addr));
}
1238 
1239 static void ofdpa_neigh_del(struct ofdpa_neigh_tbl_entry *entry)
1240 {
1241 	if (--entry->ref_count == 0) {
1242 		hash_del(&entry->entry);
1243 		kfree(entry);
1244 	}
1245 }
1246 
1247 static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
1248 			       const u8 *eth_dst, bool ttl_check)
1249 {
1250 	if (eth_dst) {
1251 		ether_addr_copy(entry->eth_dst, eth_dst);
1252 		entry->ttl_check = ttl_check;
1253 	} else {
1254 		entry->ref_count++;
1255 	}
1256 }
1257 
/* Add, update, or remove (per flags) the hardware state for an IPv4
 * neighbor: an L3 unicast group and, on add/remove, a /32 unicast route
 * pointing at that group.
 *
 * A scratch entry is always allocated up front so the table update can
 * happen under the spinlock without sleeping.  On add, the entry is
 * inserted into the neigh table and ownership passes to it; otherwise
 * the entry is only a snapshot used to program hardware after the lock
 * is dropped, and is freed at err_out.
 */
static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
				 int flags, __be32 ip_addr, const u8 *eth_dst)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	__be16 eth_type = htons(ETH_P_IP);
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	u32 priority = 0;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	int err = 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* Reduce (found, adding) to exactly one of add/remove/update */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ether_addr_copy(entry->eth_dst, eth_dst);
		entry->ttl_check = true;
		ofdpa_neigh_add(ofdpa, entry);
	} else if (removing) {
		/* snapshot before the ref drop may free "found" */
		memcpy(entry, found, sizeof(*entry));
		ofdpa_neigh_del(found);
	} else if (updating) {
		ofdpa_neigh_update(found, eth_dst, true);
		memcpy(entry, found, sizeof(*entry));
	} else {
		/* remove requested but no such neighbor */
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	if (err)
		goto err_out;

	/* For each active neighbor, we have an L3 unicast group and
	 * a /32 route to the neighbor, which uses the L3 unicast
	 * group.  The L3 unicast group can also be referred to by
	 * other routes' nexthops.
	 */

	err = ofdpa_group_l3_unicast(ofdpa_port, flags,
				     entry->index,
				     ofdpa_port->dev->dev_addr,
				     entry->eth_dst,
				     ofdpa_port->internal_vlan_id,
				     entry->ttl_check,
				     ofdpa_port->pport);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
			   err, entry->index);
		goto err_out;
	}

	if (adding || removing) {
		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
		err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port,
						    eth_type, ip_addr,
						    inet_make_mask(32),
						    priority, goto_tbl,
						    group_id, NULL, flags);

		if (err)
			netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
				   err, &entry->ip_addr, group_id);
	}

err_out:
	/* on add, the table now owns "entry"; otherwise it was a snapshot */
	if (!adding)
		kfree(entry);

	return err;
}
1346 
/* Look up (or create) the kernel neighbour entry for ip_addr on this
 * port's netdev.  If it is already resolved (NUD_VALID), program the
 * neighbor into the hardware tables; otherwise trigger resolution via
 * neigh_event_send() and install nothing yet.
 */
static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
				   __be32 ip_addr)
{
	struct net_device *dev = ofdpa_port->dev;
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
	int err = 0;

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
	}

	/* If the neigh is already resolved, then go ahead and
	 * install the entry, otherwise start the ARP process to
	 * resolve the neigh.
	 */

	/* NOTE(review): nud_state is read without the neigh lock here —
	 * confirm whether a READ_ONCE()/locked read is required.
	 */
	if (n->nud_state & NUD_VALID)
		err = ofdpa_port_ipv4_neigh(ofdpa_port, 0,
					    ip_addr, n->ha);
	else
		neigh_event_send(n, NULL);

	neigh_release(n);
	return err;
}
1374 
/* Map a nexthop IP to an L3 unicast group index (*index), creating or
 * refcounting the neigh table entry as needed.  For a brand-new entry
 * the MAC is still unknown, so ARP resolution is started after the
 * lock is dropped.  *index is set in every successful case.
 */
static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
			      int flags, __be32 ip_addr, u32 *index)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_neigh_tbl_entry *entry;
	struct ofdpa_neigh_tbl_entry *found;
	unsigned long lock_flags;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	bool updating;
	bool removing;
	bool resolved = true;
	int err = 0;

	/* Pre-allocate outside the spinlock; freed below if unused */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);

	found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);

	/* Reduce (found, adding) to exactly one of add/remove/update */
	updating = found && adding;
	removing = found && !adding;
	adding = !found && adding;

	if (adding) {
		entry->ip_addr = ip_addr;
		entry->dev = ofdpa_port->dev;
		ofdpa_neigh_add(ofdpa, entry);
		*index = entry->index;
		/* new entry: MAC unknown until ARP completes */
		resolved = false;
	} else if (removing) {
		*index = found->index;
		ofdpa_neigh_del(found);
	} else if (updating) {
		/* NULL MAC: just take another reference */
		ofdpa_neigh_update(found, NULL, false);
		resolved = !is_zero_ether_addr(found->eth_dst);
		*index = found->index;
	} else {
		err = -ENOENT;
	}

	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);

	/* on add, the table now owns "entry" */
	if (!adding)
		kfree(entry);

	if (err)
		return err;

	/* Resolved means neigh ip_addr is resolved to neigh mac. */

	if (!resolved)
		err = ofdpa_port_ipv4_resolve(ofdpa_port, ip_addr);

	return err;
}
1432 
1433 static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
1434 					 int port_index)
1435 {
1436 	struct rocker_port *rocker_port;
1437 
1438 	rocker_port = ofdpa->rocker->ports[port_index];
1439 	return rocker_port ? rocker_port->wpriv : NULL;
1440 }
1441 
1442 static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
1443 				       int flags, __be16 vlan_id)
1444 {
1445 	struct ofdpa_port *p;
1446 	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1447 	unsigned int port_count = ofdpa->rocker->port_count;
1448 	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1449 	u32 *group_ids;
1450 	u8 group_count = 0;
1451 	int err = 0;
1452 	int i;
1453 
1454 	group_ids = kcalloc(port_count, sizeof(u32), GFP_KERNEL);
1455 	if (!group_ids)
1456 		return -ENOMEM;
1457 
1458 	/* Adjust the flood group for this VLAN.  The flood group
1459 	 * references an L2 interface group for each port in this
1460 	 * VLAN.
1461 	 */
1462 
1463 	for (i = 0; i < port_count; i++) {
1464 		p = ofdpa_port_get(ofdpa, i);
1465 		if (!p)
1466 			continue;
1467 		if (!ofdpa_port_is_bridged(p))
1468 			continue;
1469 		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
1470 			group_ids[group_count++] =
1471 				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
1472 		}
1473 	}
1474 
1475 	/* If there are no bridged ports in this VLAN, we're done */
1476 	if (group_count == 0)
1477 		goto no_ports_in_vlan;
1478 
1479 	err = ofdpa_group_l2_flood(ofdpa_port, flags, vlan_id,
1480 				   group_count, group_ids, group_id);
1481 	if (err)
1482 		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
1483 
1484 no_ports_in_vlan:
1485 	kfree(group_ids);
1486 	return err;
1487 }
1488 
1489 static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, int flags,
1490 				     __be16 vlan_id, bool pop_vlan)
1491 {
1492 	const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
1493 	unsigned int port_count = ofdpa->rocker->port_count;
1494 	struct ofdpa_port *p;
1495 	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
1496 	u32 out_pport;
1497 	int ref = 0;
1498 	int err;
1499 	int i;
1500 
1501 	/* An L2 interface group for this port in this VLAN, but
1502 	 * only when port STP state is LEARNING|FORWARDING.
1503 	 */
1504 
1505 	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
1506 	    ofdpa_port->stp_state == BR_STATE_FORWARDING) {
1507 		out_pport = ofdpa_port->pport;
1508 		err = ofdpa_group_l2_interface(ofdpa_port, flags,
1509 					       vlan_id, out_pport, pop_vlan);
1510 		if (err) {
1511 			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
1512 				   err, out_pport);
1513 			return err;
1514 		}
1515 	}
1516 
1517 	/* An L2 interface group for this VLAN to CPU port.
1518 	 * Add when first port joins this VLAN and destroy when
1519 	 * last port leaves this VLAN.
1520 	 */
1521 
1522 	for (i = 0; i < port_count; i++) {
1523 		p = ofdpa_port_get(ofdpa, i);
1524 		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
1525 			ref++;
1526 	}
1527 
1528 	if ((!adding || ref != 1) && (adding || ref != 0))
1529 		return 0;
1530 
1531 	out_pport = 0;
1532 	err = ofdpa_group_l2_interface(ofdpa_port, flags,
1533 				       vlan_id, out_pport, pop_vlan);
1534 	if (err) {
1535 		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
1536 		return err;
1537 	}
1538 
1539 	return 0;
1540 }
1541 
/* Specs for the control traffic the switch must trap, terminate, or
 * flood.  Indexed by OFDPA_CTRL_*; exactly one of the acl/bridge/term
 * flags selects which flow table handles the match (dispatched in
 * ofdpa_port_ctrl_vlan()).
 */
static struct ofdpa_ctrl {
	const u8 *eth_dst;		/* dst MAC to match */
	const u8 *eth_dst_mask;		/* mask applied to eth_dst */
	__be16 eth_type;		/* ethertype match (0 = none) */
	bool acl;			/* install via ACL table */
	bool bridge;			/* install via bridging table */
	bool term;			/* install via termination-MAC table */
	bool copy_to_cpu;		/* also deliver matched pkts to CPU */
} ofdpa_ctrls[] = {
	[OFDPA_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[OFDPA_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[OFDPA_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[OFDPA_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
1592 
1593 static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, int flags,
1594 				    const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1595 {
1596 	u32 in_pport = ofdpa_port->pport;
1597 	u32 in_pport_mask = 0xffffffff;
1598 	u32 out_pport = 0;
1599 	const u8 *eth_src = NULL;
1600 	const u8 *eth_src_mask = NULL;
1601 	__be16 vlan_id_mask = htons(0xffff);
1602 	u8 ip_proto = 0;
1603 	u8 ip_proto_mask = 0;
1604 	u8 ip_tos = 0;
1605 	u8 ip_tos_mask = 0;
1606 	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
1607 	int err;
1608 
1609 	err = ofdpa_flow_tbl_acl(ofdpa_port, flags,
1610 				 in_pport, in_pport_mask,
1611 				 eth_src, eth_src_mask,
1612 				 ctrl->eth_dst, ctrl->eth_dst_mask,
1613 				 ctrl->eth_type,
1614 				 vlan_id, vlan_id_mask,
1615 				 ip_proto, ip_proto_mask,
1616 				 ip_tos, ip_tos_mask,
1617 				 group_id);
1618 
1619 	if (err)
1620 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
1621 
1622 	return err;
1623 }
1624 
1625 static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
1626 				       int flags, const struct ofdpa_ctrl *ctrl,
1627 				       __be16 vlan_id)
1628 {
1629 	enum rocker_of_dpa_table_id goto_tbl =
1630 			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
1631 	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
1632 	u32 tunnel_id = 0;
1633 	int err;
1634 
1635 	if (!ofdpa_port_is_bridged(ofdpa_port))
1636 		return 0;
1637 
1638 	err = ofdpa_flow_tbl_bridge(ofdpa_port, flags,
1639 				    ctrl->eth_dst, ctrl->eth_dst_mask,
1640 				    vlan_id, tunnel_id,
1641 				    goto_tbl, group_id, ctrl->copy_to_cpu);
1642 
1643 	if (err)
1644 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
1645 
1646 	return err;
1647 }
1648 
1649 static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, int flags,
1650 				     const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
1651 {
1652 	u32 in_pport_mask = 0xffffffff;
1653 	__be16 vlan_id_mask = htons(0xffff);
1654 	int err;
1655 
1656 	if (ntohs(vlan_id) == 0)
1657 		vlan_id = ofdpa_port->internal_vlan_id;
1658 
1659 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport, in_pport_mask,
1660 				      ctrl->eth_type, ctrl->eth_dst,
1661 				      ctrl->eth_dst_mask, vlan_id,
1662 				      vlan_id_mask, ctrl->copy_to_cpu,
1663 				      flags);
1664 
1665 	if (err)
1666 		netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
1667 
1668 	return err;
1669 }
1670 
/* Install (or remove, per flags) one control spec for one VLAN,
 * dispatching on which flow table the spec targets.  Exactly one of
 * acl/bridge/term is expected to be set in the spec.
 */
static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, int flags,
				const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
{
	if (ctrl->acl)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, flags,
						ctrl, vlan_id);
	if (ctrl->bridge)
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, flags,
						   ctrl, vlan_id);

	if (ctrl->term)
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, flags,
						 ctrl, vlan_id);

	return -EOPNOTSUPP;
}
1687 
1688 static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, int flags,
1689 				    __be16 vlan_id)
1690 {
1691 	int err = 0;
1692 	int i;
1693 
1694 	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
1695 		if (ofdpa_port->ctrls[i]) {
1696 			err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1697 						   &ofdpa_ctrls[i], vlan_id);
1698 			if (err)
1699 				return err;
1700 		}
1701 	}
1702 
1703 	return err;
1704 }
1705 
1706 static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, int flags,
1707 			   const struct ofdpa_ctrl *ctrl)
1708 {
1709 	u16 vid;
1710 	int err = 0;
1711 
1712 	for (vid = 1; vid < VLAN_N_VID; vid++) {
1713 		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
1714 			continue;
1715 		err = ofdpa_port_ctrl_vlan(ofdpa_port, flags,
1716 					   ctrl, htons(vid));
1717 		if (err)
1718 			break;
1719 	}
1720 
1721 	return err;
1722 }
1723 
/* Add or remove (per flags) a VLAN on a port: the control-traffic
 * entries, per-port and CPU L2 interface groups, the VLAN flood group,
 * and the VLAN flow table entry mapping the wire VID to the internal
 * VLAN.  The port's vlan_bitmap bit is toggled first and toggled back
 * on failure.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, int flags,
			   u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	/* The bitmap tracks membership by internal VLAN id */
	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
			goto err_vlan_add;
		}
	}

	err = ofdpa_port_vlan_l2_groups(ofdpa_port, flags,
					internal_vlan_id, untagged);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
		goto err_vlan_l2_groups;
	}

	err = ofdpa_port_vlan_flood_group(ofdpa_port, flags,
					  internal_vlan_id);
	if (err) {
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
		goto err_flood_group;
	}

	err = ofdpa_flow_tbl_vlan(ofdpa_port, flags,
				  in_pport, vlan_id, vlan_id_mask,
				  goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);

	/* NOTE(review): an error from ofdpa_flow_tbl_vlan() is logged
	 * but not returned, and the bitmap is not rolled back for it —
	 * confirm this is intentional.
	 */
	return 0;

err_vlan_add:
err_vlan_l2_groups:
err_flood_group:
	/* undo the membership toggle done above */
	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
	return err;
}
1785 
1786 static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, int flags)
1787 {
1788 	enum rocker_of_dpa_table_id goto_tbl;
1789 	u32 in_pport;
1790 	u32 in_pport_mask;
1791 	int err;
1792 
1793 	/* Normal Ethernet Frames.  Matches pkts from any local physical
1794 	 * ports.  Goto VLAN tbl.
1795 	 */
1796 
1797 	in_pport = 0;
1798 	in_pport_mask = 0xffff0000;
1799 	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
1800 
1801 	err = ofdpa_flow_tbl_ig_port(ofdpa_port, flags,
1802 				     in_pport, in_pport_mask,
1803 				     goto_tbl);
1804 	if (err)
1805 		netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
1806 
1807 	return err;
1808 }
1809 
/* Deferred FDB notification: captures everything the workqueue handler
 * needs to call the switchdev notifier chain in process context.
 */
struct ofdpa_fdb_learn_work {
	struct work_struct work;
	struct ofdpa_port *ofdpa_port;
	int flags;			/* OFDPA_OP_FLAG_* */
	u8 addr[ETH_ALEN];		/* MAC address learned/aged out */
	u16 vid;
};
1817 
1818 static void ofdpa_port_fdb_learn_work(struct work_struct *work)
1819 {
1820 	const struct ofdpa_fdb_learn_work *lw =
1821 		container_of(work, struct ofdpa_fdb_learn_work, work);
1822 	bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
1823 	bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
1824 	struct switchdev_notifier_fdb_info info;
1825 
1826 	info.addr = lw->addr;
1827 	info.vid = lw->vid;
1828 
1829 	rtnl_lock();
1830 	if (learned && removing)
1831 		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
1832 					 lw->ofdpa_port->dev, &info.info, NULL);
1833 	else if (learned && !removing)
1834 		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
1835 					 lw->ofdpa_port->dev, &info.info, NULL);
1836 	rtnl_unlock();
1837 
1838 	kfree(work);
1839 }
1840 
/* Program (or remove, per flags) the bridging flow for a learned
 * address and, when the port is bridged, schedule deferred work to
 * notify the bridge driver.  With OFDPA_OP_FLAG_REFRESH set only the
 * notification is sent; the hardware entry is left untouched.
 */
static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
				int flags, const u8 *addr, __be16 vlan_id)
{
	struct ofdpa_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = ofdpa_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool copy_to_cpu = false;
	int err;

	if (ofdpa_port_is_bridged(ofdpa_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
		err = ofdpa_flow_tbl_bridge(ofdpa_port, flags, addr,
					    NULL, vlan_id, tunnel_id, goto_tbl,
					    group_id, copy_to_cpu);
		if (err)
			return err;
	}

	/* Only bridged ports report learn events upstream */
	if (!ofdpa_port_is_bridged(ofdpa_port))
		return 0;

	/* GFP_ATOMIC: may run in atomic context (e.g. called under
	 * fdb_tbl_lock from ofdpa_port_fdb_flush())
	 */
	lw = kzalloc(sizeof(*lw), GFP_ATOMIC);
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);

	lw->ofdpa_port = ofdpa_port;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);

	/* lw is freed by the work handler */
	schedule_work(&lw->work);
	return 0;
}
1881 
1882 static struct ofdpa_fdb_tbl_entry *
1883 ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
1884 		   const struct ofdpa_fdb_tbl_entry *match)
1885 {
1886 	struct ofdpa_fdb_tbl_entry *found;
1887 
1888 	hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
1889 		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
1890 			return found;
1891 
1892 	return NULL;
1893 }
1894 
/* Add, refresh, or remove (per flags) a software FDB entry and push the
 * corresponding bridging flow / learn notification.  A new entry is
 * always allocated up front so the hash insert can happen under the
 * spinlock without sleeping; ownership of "fdb" depends on which case
 * is hit below.
 */
static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
			  const unsigned char *addr,
			  __be16 vlan_id, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *fdb;
	struct ofdpa_fdb_tbl_entry *found;
	bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.ofdpa_port = ofdpa_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	/* CRC of the whole key is both hash bucket and fast compare */
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	found = ofdpa_fdb_tbl_find(ofdpa, fdb);

	if (found) {
		found->touched = jiffies;
		if (removing) {
			/* the duplicate is not needed; unhash the original */
			kfree(fdb);
			hash_del(&found->entry);
		}
	} else if (!removing) {
		hash_add(ofdpa->fdb_tbl, &fdb->entry,
			 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		/* fdb was not inserted into the table in these cases */
		kfree(fdb);
		if (!found && removing)
			return 0;
		/* Refreshing existing to update aging timers */
		flags |= OFDPA_OP_FLAG_REFRESH;
	}

	return ofdpa_port_fdb_learn(ofdpa_port, flags, addr, vlan_id);
}
1944 
/* Remove all learned FDB entries for this port, both from the software
 * table and (asynchronously, NOWAIT) from hardware.  Skipped entirely
 * while the port's STP state is still LEARNING or FORWARDING.
 */
static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, int flags)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
	    ofdpa_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	/* NOWAIT because we iterate under a spinlock below */
	flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
		/* only this port's dynamically learned entries */
		if (found->key.ofdpa_port != ofdpa_port)
			continue;
		if (!found->learned)
			continue;
		err = ofdpa_port_fdb_learn(ofdpa_port, flags,
					   found->key.addr,
					   found->key.vlan_id);
		if (err)
			goto err_out;
		hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	return err;
}
1980 
/* Periodic timer: age out learned FDB entries whose last activity is
 * older than the owning port's ageing_time, then re-arm the timer for
 * the earliest remaining expiry (or a full ageing interval if none).
 */
static void ofdpa_fdb_cleanup(struct timer_list *t)
{
	struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + ofdpa->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
		    OFDPA_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
		/* static (non-learned) entries never age out */
		if (!entry->learned)
			continue;
		ofdpa_port = entry->key.ofdpa_port;
		expires = entry->touched + ofdpa_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			ofdpa_port_fdb_learn(ofdpa_port, flags,
					     entry->key.addr,
					     entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			/* track the soonest remaining expiry */
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);

	mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
2015 
2016 static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
2017 				 int flags, __be16 vlan_id)
2018 {
2019 	u32 in_pport_mask = 0xffffffff;
2020 	__be16 eth_type;
2021 	const u8 *dst_mac_mask = ff_mac;
2022 	__be16 vlan_id_mask = htons(0xffff);
2023 	bool copy_to_cpu = false;
2024 	int err;
2025 
2026 	if (ntohs(vlan_id) == 0)
2027 		vlan_id = ofdpa_port->internal_vlan_id;
2028 
2029 	eth_type = htons(ETH_P_IP);
2030 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2031 				      in_pport_mask, eth_type,
2032 				      ofdpa_port->dev->dev_addr,
2033 				      dst_mac_mask, vlan_id, vlan_id_mask,
2034 				      copy_to_cpu, flags);
2035 	if (err)
2036 		return err;
2037 
2038 	eth_type = htons(ETH_P_IPV6);
2039 	err = ofdpa_flow_tbl_term_mac(ofdpa_port, ofdpa_port->pport,
2040 				      in_pport_mask, eth_type,
2041 				      ofdpa_port->dev->dev_addr,
2042 				      dst_mac_mask, vlan_id, vlan_id_mask,
2043 				      copy_to_cpu, flags);
2044 
2045 	return err;
2046 }
2047 
2048 static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, int flags)
2049 {
2050 	bool pop_vlan;
2051 	u32 out_pport;
2052 	__be16 vlan_id;
2053 	u16 vid;
2054 	int err;
2055 
2056 	/* Port will be forwarding-enabled if its STP state is LEARNING
2057 	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
2058 	 * port STP state.  Use L2 interface group on port VLANs as a way
2059 	 * to toggle port forwarding: if forwarding is disabled, L2
2060 	 * interface group will not exist.
2061 	 */
2062 
2063 	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2064 	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2065 		flags |= OFDPA_OP_FLAG_REMOVE;
2066 
2067 	out_pport = ofdpa_port->pport;
2068 	for (vid = 1; vid < VLAN_N_VID; vid++) {
2069 		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
2070 			continue;
2071 		vlan_id = htons(vid);
2072 		pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
2073 		err = ofdpa_group_l2_interface(ofdpa_port, flags,
2074 					       vlan_id, out_pport, pop_vlan);
2075 		if (err) {
2076 			netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
2077 				   err, out_pport);
2078 			return err;
2079 		}
2080 	}
2081 
2082 	return 0;
2083 }
2084 
/* Move the port to a new STP state: reprogram the control-traffic ACL
 * entries the state requires, flush learned FDB entries, and toggle
 * forwarding.  On any failure the previous ctrls[] set and STP state are
 * restored (note: hardware entries programmed before the failure are not
 * individually rolled back here).
 */
static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
				 int flags, u8 state)
{
	bool want[OFDPA_CTRL_MAX] = { 0, };
	bool prev_ctrls[OFDPA_CTRL_MAX];
	u8 prev_state;
	int err;
	int i;

	/* Snapshot current state up front so the error path below can
	 * restore it.
	 */
	memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
	prev_state = ofdpa_port->stp_state;

	/* No transition, nothing to do. */
	if (ofdpa_port->stp_state == state)
		return 0;

	ofdpa_port->stp_state = state;

	/* Build the desired set of control-traffic trap entries for the
	 * new state.
	 */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
		want[OFDPA_CTRL_IPV4_MCAST] = true;
		want[OFDPA_CTRL_IPV6_MCAST] = true;
		if (ofdpa_port_is_bridged(ofdpa_port))
			want[OFDPA_CTRL_DFLT_BRIDGING] = true;
		else if (ofdpa_port_is_ovsed(ofdpa_port))
			want[OFDPA_CTRL_DFLT_OVS] = true;
		else
			want[OFDPA_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Apply only the deltas between current and desired ctrl sets. */
	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (want[i] != ofdpa_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
			err = ofdpa_port_ctrl(ofdpa_port, ctrl_flags,
					      &ofdpa_ctrls[i]);
			if (err)
				goto err_port_ctrl;
			ofdpa_port->ctrls[i] = want[i];
		}
	}

	/* Learned entries are stale after a state change (unless the new
	 * state still permits learning; the flush helper handles that).
	 */
	err = ofdpa_port_fdb_flush(ofdpa_port, flags);
	if (err)
		goto err_fdb_flush;

	err = ofdpa_port_fwding(ofdpa_port, flags);
	if (err)
		goto err_port_fwding;

	return 0;

err_port_ctrl:
err_fdb_flush:
err_port_fwding:
	/* Restore the software view captured above. */
	memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
	ofdpa_port->stp_state = prev_state;
	return err;
}
2154 
2155 static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
2156 {
2157 	if (ofdpa_port_is_bridged(ofdpa_port))
2158 		/* bridge STP will enable port */
2159 		return 0;
2160 
2161 	/* port is not bridged, so simulate going to FORWARDING state */
2162 	return ofdpa_port_stp_update(ofdpa_port, flags,
2163 				     BR_STATE_FORWARDING);
2164 }
2165 
2166 static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
2167 {
2168 	if (ofdpa_port_is_bridged(ofdpa_port))
2169 		/* bridge STP will disable port */
2170 		return 0;
2171 
2172 	/* port is not bridged, so simulate going to DISABLED state */
2173 	return ofdpa_port_stp_update(ofdpa_port, flags,
2174 				     BR_STATE_DISABLED);
2175 }
2176 
/* Add VLAN @vid to the port: install the VLAN table entry, then the
 * router (termination MAC) entries for that VLAN.  If the latter fails,
 * the VLAN table entry is rolled back.  @flags is currently unused (see
 * XXX below).
 */
static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
			       u16 vid, u16 flags)
{
	int err;

	/* XXX deal with flags for PVID and untagged */

	err = ofdpa_port_vlan(ofdpa_port, 0, vid);
	if (err)
		return err;

	err = ofdpa_port_router_mac(ofdpa_port, 0, htons(vid));
	if (err)
		/* Undo the VLAN entry added above; its own error (if any)
		 * is ignored in favour of the original one.
		 */
		ofdpa_port_vlan(ofdpa_port,
				OFDPA_OP_FLAG_REMOVE, vid);

	return err;
}
2195 
2196 static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
2197 			       u16 vid, u16 flags)
2198 {
2199 	int err;
2200 
2201 	err = ofdpa_port_router_mac(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2202 				    htons(vid));
2203 	if (err)
2204 		return err;
2205 
2206 	return ofdpa_port_vlan(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
2207 			       vid);
2208 }
2209 
2210 static struct ofdpa_internal_vlan_tbl_entry *
2211 ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
2212 {
2213 	struct ofdpa_internal_vlan_tbl_entry *found;
2214 
2215 	hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
2216 			       entry, ifindex) {
2217 		if (found->ifindex == ifindex)
2218 			return found;
2219 	}
2220 
2221 	return NULL;
2222 }
2223 
/* Get (and reference) the internal VLAN ID assigned to @ifindex,
 * allocating a fresh one from the internal-VLAN bitmap on first use.
 * Returns the VLAN ID in network byte order, or 0 on allocation failure.
 * NOTE(review): a candidate entry is speculatively allocated before
 * taking the lock because kzalloc(GFP_KERNEL) may sleep; it is freed if
 * an entry already exists.
 */
static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
					      int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *entry;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (found) {
		/* Lost the race (or already assigned): reuse existing. */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);

	/* Claim the first free internal VLAN ID. */
	for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	/* NOTE(review): on exhaustion the entry stays hashed with
	 * vlan_id == 0 and still gets a reference below — presumably
	 * callers treat a zero VLAN ID as failure; verify.
	 */
	netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
2265 
/* Install (or remove, per @flags) a unicast IPv4 route in the OF-DPA
 * unicast routing table.  Routes with an on-port gateway nexthop are
 * resolved to an L3 unicast group; everything else is punted to the CPU
 * via the internal-VLAN L2 interface group.
 */
static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,  __be32 dst,
			       int dst_len, struct fib_info *fi, u32 tb_id,
			       int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	/* Only the first nexthop is considered (no ECMP, see above). */
	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == ofdpa_port->dev);
	has_gw = !!nh->fib_nh_gw4;

	if (has_gw && nh_on_port) {
		/* Resolve (or create) the neighbour entry and use its L3
		 * unicast group for forwarding.
		 */
		err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
					 nh->fib_nh_gw4, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, eth_type, dst,
					    dst_mask, priority, goto_tbl,
					    group_id, fi, flags);
	if (err)
		netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
2310 
/* Drop a reference on the internal VLAN assigned to @ifindex.  When the
 * last reference goes away, the VLAN ID is returned to the bitmap and
 * the table entry is freed.
 */
static void
ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
				int ifindex)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);

	found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
	if (!found) {
		/* Put without a matching get: log and bail. */
		netdev_err(ofdpa_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* Last user: recycle the VLAN ID and free the entry. */
		bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, ofdpa->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
}
2340 
2341 /**********************************
2342  * Rocker world ops implementation
2343  **********************************/
2344 
2345 static int ofdpa_init(struct rocker *rocker)
2346 {
2347 	struct ofdpa *ofdpa = rocker->wpriv;
2348 
2349 	ofdpa->rocker = rocker;
2350 
2351 	hash_init(ofdpa->flow_tbl);
2352 	spin_lock_init(&ofdpa->flow_tbl_lock);
2353 
2354 	hash_init(ofdpa->group_tbl);
2355 	spin_lock_init(&ofdpa->group_tbl_lock);
2356 
2357 	hash_init(ofdpa->fdb_tbl);
2358 	spin_lock_init(&ofdpa->fdb_tbl_lock);
2359 
2360 	hash_init(ofdpa->internal_vlan_tbl);
2361 	spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
2362 
2363 	hash_init(ofdpa->neigh_tbl);
2364 	spin_lock_init(&ofdpa->neigh_tbl_lock);
2365 
2366 	timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
2367 	mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
2368 
2369 	ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
2370 
2371 	return 0;
2372 }
2373 
/* World teardown: stop the ageing timer, drain in-flight work, then
 * unhash every remaining software table entry.
 * NOTE(review): entries are only unhashed, not freed here — presumably
 * ownership/freeing is handled elsewhere (or intentionally leaked at
 * driver teardown); verify.
 */
static void ofdpa_fini(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;

	unsigned long flags;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct ofdpa_group_tbl_entry *group_entry;
	struct ofdpa_fdb_tbl_entry *fdb_entry;
	struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
	struct ofdpa_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	/* No new timer firings or queued work may touch the tables after
	 * this point.
	 */
	del_timer_sync(&ofdpa->fdb_cleanup_timer);
	flush_workqueue(rocker->rocker_owq);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
	hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
	hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
	hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
}
2416 
2417 static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
2418 {
2419 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2420 
2421 	ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
2422 	ofdpa_port->rocker_port = rocker_port;
2423 	ofdpa_port->dev = rocker_port->dev;
2424 	ofdpa_port->pport = rocker_port->pport;
2425 	ofdpa_port->brport_flags = BR_LEARNING;
2426 	ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
2427 	return 0;
2428 }
2429 
/* Per-port init: program hardware learning, install the ingress port
 * table entry, pick an internal VLAN for the standalone port, and add
 * the untagged VLAN.  On failure the ingress entry is rolled back.
 */
static int ofdpa_port_init(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	int err;

	rocker_port_set_learning(rocker_port,
				 !!(ofdpa_port->brport_flags & BR_LEARNING));

	err = ofdpa_port_ig_tbl(ofdpa_port, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install ig port table failed\n");
		return err;
	}

	/* NOTE(review): a 0 return (allocation failure) from
	 * ofdpa_port_internal_vlan_id_get() is not checked here; verify
	 * downstream handling of a zero internal VLAN ID.
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err) {
		netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
	return err;
}
2459 
2460 static void ofdpa_port_fini(struct rocker_port *rocker_port)
2461 {
2462 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2463 
2464 	ofdpa_port_ig_tbl(ofdpa_port, OFDPA_OP_FLAG_REMOVE);
2465 }
2466 
2467 static int ofdpa_port_open(struct rocker_port *rocker_port)
2468 {
2469 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2470 
2471 	return ofdpa_port_fwd_enable(ofdpa_port, 0);
2472 }
2473 
2474 static void ofdpa_port_stop(struct rocker_port *rocker_port)
2475 {
2476 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2477 
2478 	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
2479 }
2480 
2481 static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
2482 					 u8 state)
2483 {
2484 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2485 
2486 	return ofdpa_port_stp_update(ofdpa_port, 0, state);
2487 }
2488 
/* switchdev attr hook: set bridge port flags.  Only BR_LEARNING has a
 * hardware effect.  During the switchdev prepare phase the change is
 * validated but the original flags are restored so commit re-applies it.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	/* Only touch hardware when learning actually changed, and only
	 * in the commit phase.
	 */
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	/* Prepare phase must leave the software state untouched. */
	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}
2509 
2510 static int
2511 ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
2512 					 rocker_port,
2513 					 unsigned long *
2514 					 p_brport_flags_support)
2515 {
2516 	*p_brport_flags_support = BR_LEARNING;
2517 	return 0;
2518 }
2519 
2520 static int
2521 ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
2522 				       u32 ageing_time,
2523 				       struct switchdev_trans *trans)
2524 {
2525 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2526 	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
2527 
2528 	if (!switchdev_trans_ph_prepare(trans)) {
2529 		ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
2530 		if (ofdpa_port->ageing_time < ofdpa->ageing_time)
2531 			ofdpa->ageing_time = ofdpa_port->ageing_time;
2532 		mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
2533 	}
2534 
2535 	return 0;
2536 }
2537 
2538 static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
2539 				   const struct switchdev_obj_port_vlan *vlan)
2540 {
2541 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2542 	u16 vid;
2543 	int err;
2544 
2545 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2546 		err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
2547 		if (err)
2548 			return err;
2549 	}
2550 
2551 	return 0;
2552 }
2553 
2554 static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
2555 				   const struct switchdev_obj_port_vlan *vlan)
2556 {
2557 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2558 	u16 vid;
2559 	int err;
2560 
2561 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
2562 		err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
2563 		if (err)
2564 			return err;
2565 	}
2566 
2567 	return 0;
2568 }
2569 
/* switchdev obj hook: add a static FDB entry.  Only valid on bridged
 * ports; @vid is translated to the port's VLAN (internal VLAN for 0).
 */
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, 0);
}
2581 
/* switchdev obj hook: remove a static FDB entry.  Only valid on bridged
 * ports; @vid is translated to the port's VLAN (internal VLAN for 0).
 */
static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
				  u16 vid, const unsigned char *addr)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	__be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, NULL);
	int flags = OFDPA_OP_FLAG_REMOVE;

	if (!ofdpa_port_is_bridged(ofdpa_port))
		return -EINVAL;

	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
}
2594 
/* Enslave the port to @bridge: swap the port's internal VLAN for the
 * bridge's internal VLAN, re-adding the untagged VLAN around the swap.
 */
static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
				  struct net_device *bridge)
{
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->dev->ifindex);
	/* NOTE(review): a 0 return (allocation failure) from
	 * ofdpa_port_internal_vlan_id_get() is not checked — same as in
	 * ofdpa_port_init(); verify.
	 */
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);

	ofdpa_port->bridge_dev = bridge;

	return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
}
2619 
/* Release the port from its bridge: switch back from the bridge's
 * internal VLAN to the port's own, then re-enable forwarding if the
 * netdev is up (the port now runs standalone STP emulation).
 */
static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
{
	int err;

	err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	ofdpa_port_internal_vlan_id_put(ofdpa_port,
					ofdpa_port->bridge_dev->ifindex);
	ofdpa_port->internal_vlan_id =
		ofdpa_port_internal_vlan_id_get(ofdpa_port,
						ofdpa_port->dev->ifindex);

	/* Must be cleared before fwd_enable() below so the port is seen
	 * as unbridged.
	 */
	ofdpa_port->bridge_dev = NULL;

	err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
	if (err)
		return err;

	if (ofdpa_port->dev->flags & IFF_UP)
		err = ofdpa_port_fwd_enable(ofdpa_port, 0);

	return err;
}
2645 
2646 static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
2647 				  struct net_device *master)
2648 {
2649 	int err;
2650 
2651 	ofdpa_port->bridge_dev = master;
2652 
2653 	err = ofdpa_port_fwd_disable(ofdpa_port, 0);
2654 	if (err)
2655 		return err;
2656 	err = ofdpa_port_fwd_enable(ofdpa_port, 0);
2657 
2658 	return err;
2659 }
2660 
2661 static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
2662 				    struct net_device *master)
2663 {
2664 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2665 	int err = 0;
2666 
2667 	if (netif_is_bridge_master(master))
2668 		err = ofdpa_port_bridge_join(ofdpa_port, master);
2669 	else if (netif_is_ovs_master(master))
2670 		err = ofdpa_port_ovs_changed(ofdpa_port, master);
2671 	return err;
2672 }
2673 
2674 static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
2675 				      struct net_device *master)
2676 {
2677 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2678 	int err = 0;
2679 
2680 	if (ofdpa_port_is_bridged(ofdpa_port))
2681 		err = ofdpa_port_bridge_leave(ofdpa_port);
2682 	else if (ofdpa_port_is_ovsed(ofdpa_port))
2683 		err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
2684 	return err;
2685 }
2686 
2687 static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
2688 				   struct neighbour *n)
2689 {
2690 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2691 	int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
2692 						    OFDPA_OP_FLAG_NOWAIT;
2693 	__be32 ip_addr = *(__be32 *) n->primary_key;
2694 
2695 	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2696 }
2697 
2698 static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
2699 				    struct neighbour *n)
2700 {
2701 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2702 	int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
2703 	__be32 ip_addr = *(__be32 *) n->primary_key;
2704 
2705 	return ofdpa_port_ipv4_neigh(ofdpa_port, flags, ip_addr, n->ha);
2706 }
2707 
2708 static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
2709 				       const unsigned char *addr,
2710 				       __be16 vlan_id)
2711 {
2712 	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
2713 	int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
2714 
2715 	if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
2716 	    ofdpa_port->stp_state != BR_STATE_FORWARDING)
2717 		return 0;
2718 
2719 	return ofdpa_port_fdb(ofdpa_port, addr, vlan_id, flags);
2720 }
2721 
2722 static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev,
2723 						    struct rocker *rocker)
2724 {
2725 	struct rocker_port *rocker_port;
2726 
2727 	rocker_port = rocker_port_dev_lower_find(dev, rocker);
2728 	return rocker_port ? rocker_port->wpriv : NULL;
2729 }
2730 
2731 static int ofdpa_fib4_add(struct rocker *rocker,
2732 			  const struct fib_entry_notifier_info *fen_info)
2733 {
2734 	struct ofdpa *ofdpa = rocker->wpriv;
2735 	struct ofdpa_port *ofdpa_port;
2736 	int err;
2737 
2738 	if (ofdpa->fib_aborted)
2739 		return 0;
2740 	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
2741 	if (!ofdpa_port)
2742 		return 0;
2743 	err = ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
2744 				  fen_info->dst_len, fen_info->fi,
2745 				  fen_info->tb_id, 0);
2746 	if (err)
2747 		return err;
2748 	fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
2749 	return 0;
2750 }
2751 
/* FIB notifier: remove a previously offloaded IPv4 route and clear its
 * nexthop's RTNH_F_OFFLOAD flag.  Ignored after an abort or when the
 * route's device is not (under) a rocker port.
 */
static int ofdpa_fib4_del(struct rocker *rocker,
			  const struct fib_entry_notifier_info *fen_info)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;

	if (ofdpa->fib_aborted)
		return 0;
	ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
	if (!ofdpa_port)
		return 0;
	fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
				   fen_info->dst_len, fen_info->fi,
				   fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
}
2768 
/* FIB notifier abort: the kernel could not keep hardware in sync, so
 * withdraw every offloaded unicast route (clearing RTNH_F_OFFLOAD) and
 * stop offloading further routes until the world is reinitialized.
 */
static void ofdpa_fib4_abort(struct rocker *rocker)
{
	struct ofdpa *ofdpa = rocker->wpriv;
	struct ofdpa_port *ofdpa_port;
	struct ofdpa_flow_tbl_entry *flow_entry;
	struct hlist_node *tmp;
	unsigned long flags;
	int bkt;

	/* Abort is idempotent. */
	if (ofdpa->fib_aborted)
		return;

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
	hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) {
		/* Only unicast-routing entries are routes. */
		if (flow_entry->key.tbl_id !=
		    ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING)
			continue;
		ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev,
						       rocker);
		if (!ofdpa_port)
			continue;
		flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
		ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
				   flow_entry);
	}
	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
	ofdpa->fib_aborted = true;
}
2797 
/* World ops vector wiring the OF-DPA implementation above into the
 * generic rocker driver.
 */
struct rocker_world_ops rocker_ofdpa_ops = {
	.kind = "ofdpa",
	.priv_size = sizeof(struct ofdpa),
	.port_priv_size = sizeof(struct ofdpa_port),
	.mode = ROCKER_PORT_MODE_OF_DPA,
	.init = ofdpa_init,
	.fini = ofdpa_fini,
	.port_pre_init = ofdpa_port_pre_init,
	.port_init = ofdpa_port_init,
	.port_fini = ofdpa_port_fini,
	.port_open = ofdpa_port_open,
	.port_stop = ofdpa_port_stop,
	.port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
	.port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
	.port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get,
	.port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
	.port_obj_vlan_add = ofdpa_port_obj_vlan_add,
	.port_obj_vlan_del = ofdpa_port_obj_vlan_del,
	.port_obj_fdb_add = ofdpa_port_obj_fdb_add,
	.port_obj_fdb_del = ofdpa_port_obj_fdb_del,
	.port_master_linked = ofdpa_port_master_linked,
	.port_master_unlinked = ofdpa_port_master_unlinked,
	.port_neigh_update = ofdpa_port_neigh_update,
	.port_neigh_destroy = ofdpa_port_neigh_destroy,
	.port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
	.fib4_add = ofdpa_fib4_add,
	.fib4_del = ofdpa_fib4_del,
	.fib4_abort = ofdpa_fib4_abort,
};
2827