1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DPAA2 Ethernet Switch driver
4  *
5  * Copyright 2014-2016 Freescale Semiconductor Inc.
6  * Copyright 2017-2021 NXP
7  *
8  */
9 
10 #include <linux/module.h>
11 
12 #include <linux/interrupt.h>
13 #include <linux/kthread.h>
14 #include <linux/workqueue.h>
15 #include <linux/iommu.h>
16 #include <net/pkt_cls.h>
17 
18 #include <linux/fsl/mc.h>
19 
20 #include "dpaa2-switch.h"
21 
22 /* Minimal supported DPSW version */
23 #define DPSW_MIN_VER_MAJOR		8
24 #define DPSW_MIN_VER_MINOR		9
25 
26 #define DEFAULT_VLAN_ID			1
27 
28 static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
29 {
30 	return port_priv->fdb->fdb_id;
31 }
32 
33 static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
34 {
35 	int i;
36 
37 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
38 		if (!ethsw->fdbs[i].in_use)
39 			return &ethsw->fdbs[i];
40 	return NULL;
41 }
42 
43 static struct dpaa2_switch_filter_block *
44 dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
45 {
46 	int i;
47 
48 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
49 		if (!ethsw->filter_blocks[i].in_use)
50 			return &ethsw->filter_blocks[i];
51 	return NULL;
52 }
53 
54 static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
55 				     struct net_device *bridge_dev)
56 {
57 	struct ethsw_port_priv *other_port_priv = NULL;
58 	struct dpaa2_switch_fdb *fdb;
59 	struct net_device *other_dev;
60 	struct list_head *iter;
61 
62 	/* If we leave a bridge (bridge_dev is NULL), find an unused
63 	 * FDB and use that.
64 	 */
65 	if (!bridge_dev) {
66 		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);
67 
68 		/* If there is no unused FDB, we must be the last port that
69 		 * leaves the last bridge; all the others are standalone. We
70 		 * can just keep the FDB that we already have.
71 		 */
72 
73 		if (!fdb) {
74 			port_priv->fdb->bridge_dev = NULL;
75 			return 0;
76 		}
77 
78 		port_priv->fdb = fdb;
79 		port_priv->fdb->in_use = true;
80 		port_priv->fdb->bridge_dev = NULL;
81 		return 0;
82 	}
83 
84 	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
85 	 * being held. Assert on it so that it's easier to catch new code
86 	 * paths that reach this point without the RTNL lock.
87 	 */
88 	ASSERT_RTNL();
89 
90 	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
91 	 * to be present in that bridge
92 	 */
93 	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
94 		if (!dpaa2_switch_port_dev_check(other_dev))
95 			continue;
96 
97 		if (other_dev == port_priv->netdev)
98 			continue;
99 
100 		other_port_priv = netdev_priv(other_dev);
101 		break;
102 	}
103 
104 	/* The current port is about to change its FDB to the one used by the
105 	 * first port that joined the bridge.
106 	 */
107 	if (other_port_priv) {
108 		/* The previous FDB is about to become unused, since the
109 		 * interface is no longer standalone.
110 		 */
111 		port_priv->fdb->in_use = false;
112 		port_priv->fdb->bridge_dev = NULL;
113 
114 		/* Get a reference to the new FDB */
115 		port_priv->fdb = other_port_priv->fdb;
116 	}
117 
118 	/* Keep track of the new upper bridge device */
119 	port_priv->fdb->bridge_dev = bridge_dev;
120 
121 	return 0;
122 }
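
/* Illustrative walk-through of the FDB handover above (port and FDB indices
 * are hypothetical): on a 4-port switch, each port starts out with its own
 * private FDB. If port 0 is the first to join br0, it keeps FDB 0; when
 * port 1 joins br0 as well, FDB 1 is marked unused and port 1 starts
 * sharing FDB 0, so all bridged ports learn from and look up in the same
 * table. When port 1 later leaves br0, it picks up any unused FDB (FDB 1
 * here) and becomes standalone again.
 */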
123 
124 static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
125 					   enum dpsw_flood_type type,
126 					   struct dpsw_egress_flood_cfg *cfg)
127 {
128 	int i = 0, j;
129 
130 	memset(cfg, 0, sizeof(*cfg));
131 
132 	/* Add all the DPAA2 switch ports found in the same bridging domain to
133 	 * the egress flooding domain
134 	 */
135 	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
136 		if (!ethsw->ports[j])
137 			continue;
138 		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
139 			continue;
140 
141 		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
142 			cfg->if_id[i++] = ethsw->ports[j]->idx;
143 		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
144 			cfg->if_id[i++] = ethsw->ports[j]->idx;
145 	}
146 
147 	/* Add the CTRL interface to the egress flooding domain */
148 	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;
149 
150 	cfg->fdb_id = fdb_id;
151 	cfg->flood_type = type;
152 	cfg->num_ifs = i;
153 }
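
/* Worked example for the helper above (indices are hypothetical): on a
 * switch with sw_attr.num_ifs = 4, if ports 0 and 2 share the given fdb_id
 * and both have bcast_flood enabled, a DPSW_BROADCAST query fills in
 * if_id = {0, 2, 4} and num_ifs = 3, where id 4 is the control interface
 * (its id always equals sw_attr.num_ifs).
 */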
154 
155 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
156 {
157 	struct dpsw_egress_flood_cfg flood_cfg;
158 	int err;
159 
160 	/* Setup broadcast flooding domain */
161 	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
162 	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
163 				    &flood_cfg);
164 	if (err) {
165 		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
166 		return err;
167 	}
168 
169 	/* Setup unknown flooding domain */
170 	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
171 	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
172 				    &flood_cfg);
173 	if (err) {
174 		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
175 		return err;
176 	}
177 
178 	return 0;
179 }
180 
181 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
182 				dma_addr_t iova_addr)
183 {
184 	phys_addr_t phys_addr;
185 
186 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
187 
188 	return phys_to_virt(phys_addr);
189 }
190 
191 static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
192 {
193 	struct ethsw_core *ethsw = port_priv->ethsw_data;
194 	struct dpsw_vlan_cfg vcfg = {0};
195 	int err;
196 
197 	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
198 	err = dpsw_vlan_add(ethsw->mc_io, 0,
199 			    ethsw->dpsw_handle, vid, &vcfg);
200 	if (err) {
201 		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
202 		return err;
203 	}
204 	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
205 
206 	return 0;
207 }
208 
209 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
210 {
211 	struct net_device *netdev = port_priv->netdev;
212 	struct dpsw_link_state state;
213 	int err;
214 
215 	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
216 				     port_priv->ethsw_data->dpsw_handle,
217 				     port_priv->idx, &state);
218 	if (err) {
219 		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
220 		return true;
221 	}
222 
223 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
224 
225 	return !!state.up;
226 }
227 
228 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
229 {
230 	struct ethsw_core *ethsw = port_priv->ethsw_data;
231 	struct net_device *netdev = port_priv->netdev;
232 	struct dpsw_tci_cfg tci_cfg = { 0 };
233 	bool up;
234 	int err, ret;
235 
236 	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
237 			      port_priv->idx, &tci_cfg);
238 	if (err) {
239 		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
240 		return err;
241 	}
242 
243 	tci_cfg.vlan_id = pvid;
244 
245 	/* Interface needs to be down to change PVID */
246 	up = dpaa2_switch_port_is_up(port_priv);
247 	if (up) {
248 		err = dpsw_if_disable(ethsw->mc_io, 0,
249 				      ethsw->dpsw_handle,
250 				      port_priv->idx);
251 		if (err) {
252 			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
253 			return err;
254 		}
255 	}
256 
257 	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
258 			      port_priv->idx, &tci_cfg);
259 	if (err) {
260 		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
261 		goto set_tci_error;
262 	}
263 
264 	/* Delete previous PVID info and mark the new one */
265 	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
266 	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
267 	port_priv->pvid = pvid;
268 
269 set_tci_error:
270 	if (up) {
271 		ret = dpsw_if_enable(ethsw->mc_io, 0,
272 				     ethsw->dpsw_handle,
273 				     port_priv->idx);
274 		if (ret) {
275 			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
276 			return ret;
277 		}
278 	}
279 
280 	return err;
281 }
282 
283 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
284 				      u16 vid, u16 flags)
285 {
286 	struct ethsw_core *ethsw = port_priv->ethsw_data;
287 	struct net_device *netdev = port_priv->netdev;
288 	struct dpsw_vlan_if_cfg vcfg = {0};
289 	int err;
290 
291 	if (port_priv->vlans[vid]) {
292 		netdev_warn(netdev, "VLAN %d already configured\n", vid);
293 		return -EEXIST;
294 	}
295 
296 	/* If hit, this VLAN rule will lead the packet into the FDB table
297 	 * specified in the vlan configuration below
298 	 */
299 	vcfg.num_ifs = 1;
300 	vcfg.if_id[0] = port_priv->idx;
301 	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
302 	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
303 	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
304 	if (err) {
305 		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
306 		return err;
307 	}
308 
309 	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
310 
311 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
312 		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
313 						ethsw->dpsw_handle,
314 						vid, &vcfg);
315 		if (err) {
316 			netdev_err(netdev,
317 				   "dpsw_vlan_add_if_untagged err %d\n", err);
318 			return err;
319 		}
320 		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
321 	}
322 
323 	if (flags & BRIDGE_VLAN_INFO_PVID) {
324 		err = dpaa2_switch_port_set_pvid(port_priv, vid);
325 		if (err)
326 			return err;
327 	}
328 
329 	return 0;
330 }
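
/* Usage sketch for the path above (interface name is an example): a
 * command such as
 *
 *	bridge vlan add dev swp0 vid 10 pvid untagged
 *
 * ends up here with BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED in
 * 'flags', so the port joins VLAN 10, sends it out untagged and has its
 * PVID moved to 10 (which, if the port is up, briefly toggles it down and
 * back up in dpaa2_switch_port_set_pvid()).
 */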
331 
332 static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
333 {
334 	switch (state) {
335 	case BR_STATE_DISABLED:
336 		return DPSW_STP_STATE_DISABLED;
337 	case BR_STATE_LISTENING:
338 		return DPSW_STP_STATE_LISTENING;
339 	case BR_STATE_LEARNING:
340 		return DPSW_STP_STATE_LEARNING;
341 	case BR_STATE_FORWARDING:
342 		return DPSW_STP_STATE_FORWARDING;
343 	case BR_STATE_BLOCKING:
344 		return DPSW_STP_STATE_BLOCKING;
345 	default:
346 		return DPSW_STP_STATE_DISABLED;
347 	}
348 }
349 
350 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
351 {
352 	struct dpsw_stp_cfg stp_cfg = {0};
353 	int err;
354 	u16 vid;
355 
356 	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
357 		return 0;	/* Nothing to do */
358 
359 	stp_cfg.state = br_stp_state_to_dpsw(state);
360 	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
361 		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
362 			stp_cfg.vlan_id = vid;
363 			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
364 					      port_priv->ethsw_data->dpsw_handle,
365 					      port_priv->idx, &stp_cfg);
366 			if (err) {
367 				netdev_err(port_priv->netdev,
368 					   "dpsw_if_set_stp err %d\n", err);
369 				return err;
370 			}
371 		}
372 	}
373 
374 	port_priv->stp_state = state;
375 
376 	return 0;
377 }
378 
379 static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
380 {
381 	struct ethsw_port_priv *ppriv_local = NULL;
382 	int i, err;
383 
384 	if (!ethsw->vlans[vid])
385 		return -ENOENT;
386 
387 	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
388 	if (err) {
389 		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
390 		return err;
391 	}
392 	ethsw->vlans[vid] = 0;
393 
394 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
395 		ppriv_local = ethsw->ports[i];
396 		if (ppriv_local)
397 			ppriv_local->vlans[vid] = 0;
398 	}
399 
400 	return 0;
401 }
402 
403 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
404 					const unsigned char *addr)
405 {
406 	struct dpsw_fdb_unicast_cfg entry = {0};
407 	u16 fdb_id;
408 	int err;
409 
410 	entry.if_egress = port_priv->idx;
411 	entry.type = DPSW_FDB_ENTRY_STATIC;
412 	ether_addr_copy(entry.mac_addr, addr);
413 
414 	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
415 	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
416 				   port_priv->ethsw_data->dpsw_handle,
417 				   fdb_id, &entry);
418 	if (err)
419 		netdev_err(port_priv->netdev,
420 			   "dpsw_fdb_add_unicast err %d\n", err);
421 	return err;
422 }
423 
424 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
425 					const unsigned char *addr)
426 {
427 	struct dpsw_fdb_unicast_cfg entry = {0};
428 	u16 fdb_id;
429 	int err;
430 
431 	entry.if_egress = port_priv->idx;
432 	entry.type = DPSW_FDB_ENTRY_STATIC;
433 	ether_addr_copy(entry.mac_addr, addr);
434 
435 	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
436 	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
437 				      port_priv->ethsw_data->dpsw_handle,
438 				      fdb_id, &entry);
439 	/* Silently discard the error when the del command is called multiple times */
440 	if (err && err != -ENXIO)
441 		netdev_err(port_priv->netdev,
442 			   "dpsw_fdb_remove_unicast err %d\n", err);
443 	return err;
444 }
445 
446 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
447 					const unsigned char *addr)
448 {
449 	struct dpsw_fdb_multicast_cfg entry = {0};
450 	u16 fdb_id;
451 	int err;
452 
453 	ether_addr_copy(entry.mac_addr, addr);
454 	entry.type = DPSW_FDB_ENTRY_STATIC;
455 	entry.num_ifs = 1;
456 	entry.if_id[0] = port_priv->idx;
457 
458 	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
459 	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
460 				     port_priv->ethsw_data->dpsw_handle,
461 				     fdb_id, &entry);
462 	/* Silently discard the error when the add command is called multiple times */
463 	if (err && err != -ENXIO)
464 		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
465 			   err);
466 	return err;
467 }
468 
469 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
470 					const unsigned char *addr)
471 {
472 	struct dpsw_fdb_multicast_cfg entry = {0};
473 	u16 fdb_id;
474 	int err;
475 
476 	ether_addr_copy(entry.mac_addr, addr);
477 	entry.type = DPSW_FDB_ENTRY_STATIC;
478 	entry.num_ifs = 1;
479 	entry.if_id[0] = port_priv->idx;
480 
481 	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
482 	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
483 					port_priv->ethsw_data->dpsw_handle,
484 					fdb_id, &entry);
485 	/* Silently discard the error when the del command is called multiple times */
486 	if (err && err != -ENAVAIL)
487 		netdev_err(port_priv->netdev,
488 			   "dpsw_fdb_remove_multicast err %d\n", err);
489 	return err;
490 }
491 
492 static void dpaa2_switch_port_get_stats(struct net_device *netdev,
493 					struct rtnl_link_stats64 *stats)
494 {
495 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
496 	u64 tmp;
497 	int err;
498 
499 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
500 				  port_priv->ethsw_data->dpsw_handle,
501 				  port_priv->idx,
502 				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
503 	if (err)
504 		goto error;
505 
506 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
507 				  port_priv->ethsw_data->dpsw_handle,
508 				  port_priv->idx,
509 				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
510 	if (err)
511 		goto error;
512 
513 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
514 				  port_priv->ethsw_data->dpsw_handle,
515 				  port_priv->idx,
516 				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
517 	if (err)
518 		goto error;
519 
520 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
521 				  port_priv->ethsw_data->dpsw_handle,
522 				  port_priv->idx,
523 				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
524 	if (err)
525 		goto error;
526 
527 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
528 				  port_priv->ethsw_data->dpsw_handle,
529 				  port_priv->idx,
530 				  DPSW_CNT_ING_FRAME_DISCARD,
531 				  &stats->rx_dropped);
532 	if (err)
533 		goto error;
534 
535 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
536 				  port_priv->ethsw_data->dpsw_handle,
537 				  port_priv->idx,
538 				  DPSW_CNT_ING_FLTR_FRAME,
539 				  &tmp);
540 	if (err)
541 		goto error;
542 	stats->rx_dropped += tmp;
543 
544 	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
545 				  port_priv->ethsw_data->dpsw_handle,
546 				  port_priv->idx,
547 				  DPSW_CNT_EGR_FRAME_DISCARD,
548 				  &stats->tx_dropped);
549 	if (err)
550 		goto error;
551 
552 	return;
553 
554 error:
555 	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
556 }
557 
558 static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
559 						int attr_id)
560 {
561 	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
562 }
563 
564 static int dpaa2_switch_port_get_offload_stats(int attr_id,
565 					       const struct net_device *netdev,
566 					       void *sp)
567 {
568 	switch (attr_id) {
569 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
570 		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
571 		return 0;
572 	}
573 
574 	return -EINVAL;
575 }
576 
577 static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
578 {
579 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
580 	int err;
581 
582 	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
583 					   0,
584 					   port_priv->ethsw_data->dpsw_handle,
585 					   port_priv->idx,
586 					   (u16)ETHSW_L2_MAX_FRM(mtu));
587 	if (err) {
588 		netdev_err(netdev,
589 			   "dpsw_if_set_max_frame_length() err %d\n", err);
590 		return err;
591 	}
592 
593 	netdev->mtu = mtu;
594 	return 0;
595 }
596 
597 static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
598 {
599 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
600 	struct dpsw_link_state state;
601 	int err;
602 
603 	/* When we manage the MAC/PHY using phylink there is no need
604 	 * to manually update the netif_carrier.
605 	 */
606 	if (dpaa2_switch_port_is_type_phy(port_priv))
607 		return 0;
608 
609 	/* Link state interrupts arrive even when nobody issued an 'ifconfig
610 	 * up' on the switch interface; ignore these link state interrupts.
611 	 */
612 	if (!netif_running(netdev))
613 		return 0;
614 
615 	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
616 				     port_priv->ethsw_data->dpsw_handle,
617 				     port_priv->idx, &state);
618 	if (err) {
619 		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
620 		return err;
621 	}
622 
623 	WARN_ONCE(state.up > 1, "Garbage read into link_state");
624 
625 	if (state.up != port_priv->link_state) {
626 		if (state.up) {
627 			netif_carrier_on(netdev);
628 			netif_tx_start_all_queues(netdev);
629 		} else {
630 			netif_carrier_off(netdev);
631 			netif_tx_stop_all_queues(netdev);
632 		}
633 		port_priv->link_state = state.up;
634 	}
635 
636 	return 0;
637 }
638 
639 /* Manage all NAPI instances for the control interface.
640  *
641  * We only have one RX queue and one Tx Conf queue for all
642  * switch ports. Therefore, we only need to enable the NAPI instance once, the
643  * first time one of the switch ports runs .dev_open().
644  */
645 
646 static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
647 {
648 	int i;
649 
650 	/* Access to the ethsw->napi_users relies on the RTNL lock */
651 	ASSERT_RTNL();
652 
653 	/* a new interface is using the NAPI instance */
654 	ethsw->napi_users++;
655 
656 	/* if there is already a user of the instance, return */
657 	if (ethsw->napi_users > 1)
658 		return;
659 
660 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
661 		napi_enable(&ethsw->fq[i].napi);
662 }
663 
664 static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
665 {
666 	int i;
667 
668 	/* Access to the ethsw->napi_users relies on the RTNL lock */
669 	ASSERT_RTNL();
670 
671 	/* If we are not the last interface using the NAPI, return */
672 	ethsw->napi_users--;
673 	if (ethsw->napi_users)
674 		return;
675 
676 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
677 		napi_disable(&ethsw->fq[i].napi);
678 }
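
/* Sketch of the refcounting done by the two helpers above (interface names
 * are examples): 'ip link set swp0 up' takes napi_users from 0 to 1 and
 * enables the shared NAPI instances; a subsequent 'ip link set swp1 up'
 * only bumps the count to 2. The instances are disabled again only when
 * the last port goes down and the count drops back to 0.
 */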
679 
680 static int dpaa2_switch_port_open(struct net_device *netdev)
681 {
682 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
683 	struct ethsw_core *ethsw = port_priv->ethsw_data;
684 	int err;
685 
686 	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
687 		/* Explicitly set carrier off, otherwise
688 		 * netif_carrier_ok() will return true and cause 'ip link show'
689 		 * to report the LOWER_UP flag, even though the link
690 		 * notification wasn't even received.
691 		 */
692 		netif_carrier_off(netdev);
693 	}
694 
695 	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
696 			     port_priv->ethsw_data->dpsw_handle,
697 			     port_priv->idx);
698 	if (err) {
699 		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
700 		return err;
701 	}
702 
703 	dpaa2_switch_enable_ctrl_if_napi(ethsw);
704 
705 	if (dpaa2_switch_port_is_type_phy(port_priv)) {
706 		dpaa2_mac_start(port_priv->mac);
707 		phylink_start(port_priv->mac->phylink);
708 	}
709 
710 	return 0;
711 }
712 
713 static int dpaa2_switch_port_stop(struct net_device *netdev)
714 {
715 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
716 	struct ethsw_core *ethsw = port_priv->ethsw_data;
717 	int err;
718 
719 	if (dpaa2_switch_port_is_type_phy(port_priv)) {
720 		phylink_stop(port_priv->mac->phylink);
721 		dpaa2_mac_stop(port_priv->mac);
722 	} else {
723 		netif_tx_stop_all_queues(netdev);
724 		netif_carrier_off(netdev);
725 	}
726 
727 	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
728 			      port_priv->ethsw_data->dpsw_handle,
729 			      port_priv->idx);
730 	if (err) {
731 		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
732 		return err;
733 	}
734 
735 	dpaa2_switch_disable_ctrl_if_napi(ethsw);
736 
737 	return 0;
738 }
739 
740 static int dpaa2_switch_port_parent_id(struct net_device *dev,
741 				       struct netdev_phys_item_id *ppid)
742 {
743 	struct ethsw_port_priv *port_priv = netdev_priv(dev);
744 
745 	ppid->id_len = 1;
746 	ppid->id[0] = port_priv->ethsw_data->dev_id;
747 
748 	return 0;
749 }
750 
751 static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
752 					   size_t len)
753 {
754 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
755 	int err;
756 
757 	err = snprintf(name, len, "p%d", port_priv->idx);
758 	if (err >= len)
759 		return -EINVAL;
760 
761 	return 0;
762 }
763 
764 struct ethsw_dump_ctx {
765 	struct net_device *dev;
766 	struct sk_buff *skb;
767 	struct netlink_callback *cb;
768 	int idx;
769 };
770 
771 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
772 				    struct ethsw_dump_ctx *dump)
773 {
774 	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
775 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
776 	u32 seq = dump->cb->nlh->nlmsg_seq;
777 	struct nlmsghdr *nlh;
778 	struct ndmsg *ndm;
779 
780 	if (dump->idx < dump->cb->args[2])
781 		goto skip;
782 
783 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
784 			sizeof(*ndm), NLM_F_MULTI);
785 	if (!nlh)
786 		return -EMSGSIZE;
787 
788 	ndm = nlmsg_data(nlh);
789 	ndm->ndm_family  = AF_BRIDGE;
790 	ndm->ndm_pad1    = 0;
791 	ndm->ndm_pad2    = 0;
792 	ndm->ndm_flags   = NTF_SELF;
793 	ndm->ndm_type    = 0;
794 	ndm->ndm_ifindex = dump->dev->ifindex;
795 	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;
796 
797 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
798 		goto nla_put_failure;
799 
800 	nlmsg_end(dump->skb, nlh);
801 
802 skip:
803 	dump->idx++;
804 	return 0;
805 
806 nla_put_failure:
807 	nlmsg_cancel(dump->skb, nlh);
808 	return -EMSGSIZE;
809 }
810 
811 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
812 					     struct ethsw_port_priv *port_priv)
813 {
814 	int idx = port_priv->idx;
815 	int valid;
816 
817 	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
818 		valid = entry->if_info == port_priv->idx;
819 	else
820 		valid = entry->if_mask[idx / 8] & BIT(idx % 8);
821 
822 	return valid;
823 }
824 
825 static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
826 				    dpaa2_switch_fdb_cb_t cb, void *data)
827 {
828 	struct net_device *net_dev = port_priv->netdev;
829 	struct ethsw_core *ethsw = port_priv->ethsw_data;
830 	struct device *dev = net_dev->dev.parent;
831 	struct fdb_dump_entry *fdb_entries;
832 	struct fdb_dump_entry fdb_entry;
833 	dma_addr_t fdb_dump_iova;
834 	u16 num_fdb_entries;
835 	u32 fdb_dump_size;
836 	int err = 0, i;
837 	u8 *dma_mem;
838 	u16 fdb_id;
839 
840 	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
841 	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
842 	if (!dma_mem)
843 		return -ENOMEM;
844 
845 	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
846 				       DMA_FROM_DEVICE);
847 	if (dma_mapping_error(dev, fdb_dump_iova)) {
848 		netdev_err(net_dev, "dma_map_single() failed\n");
849 		err = -ENOMEM;
850 		goto err_map;
851 	}
852 
853 	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
854 	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
855 			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
856 	if (err) {
857 		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
858 		goto err_dump;
859 	}
860 
861 	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
862 
863 	fdb_entries = (struct fdb_dump_entry *)dma_mem;
864 	for (i = 0; i < num_fdb_entries; i++) {
865 		fdb_entry = fdb_entries[i];
866 
867 		err = cb(port_priv, &fdb_entry, data);
868 		if (err)
869 			goto end;
870 	}
871 
872 end:
873 	kfree(dma_mem);
874 
875 	return 0;
876 
877 err_dump:
878 	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_TO_DEVICE);
879 err_map:
880 	kfree(dma_mem);
881 	return err;
882 }
883 
884 static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
885 				       struct fdb_dump_entry *fdb_entry,
886 				       void *data)
887 {
888 	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
889 		return 0;
890 
891 	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
892 }
893 
894 static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
895 				      struct net_device *net_dev,
896 				      struct net_device *filter_dev, int *idx)
897 {
898 	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
899 	struct ethsw_dump_ctx dump = {
900 		.dev = net_dev,
901 		.skb = skb,
902 		.cb = cb,
903 		.idx = *idx,
904 	};
905 	int err;
906 
907 	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
908 	*idx = dump.idx;
909 
910 	return err;
911 }
912 
913 static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
914 					   struct fdb_dump_entry *fdb_entry,
915 					   void *data __always_unused)
916 {
917 	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
918 		return 0;
919 
920 	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
921 		return 0;
922 
923 	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
924 		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
925 	else
926 		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);
927 
928 	return 0;
929 }
930 
931 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
932 {
933 	dpaa2_switch_fdb_iterate(port_priv,
934 				 dpaa2_switch_fdb_entry_fast_age, NULL);
935 }
936 
937 static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
938 				      u16 vid)
939 {
940 	struct switchdev_obj_port_vlan vlan = {
941 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
942 		.vid = vid,
943 		.obj.orig_dev = netdev,
944 		/* This API only allows programming tagged, non-PVID VIDs */
945 		.flags = 0,
946 	};
947 
948 	return dpaa2_switch_port_vlans_add(netdev, &vlan);
949 }
950 
951 static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
952 				       u16 vid)
953 {
954 	struct switchdev_obj_port_vlan vlan = {
955 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
956 		.vid = vid,
957 		.obj.orig_dev = netdev,
958 		/* This API only allows programming tagged, non-PVID VIDs */
959 		.flags = 0,
960 	};
961 
962 	return dpaa2_switch_port_vlans_del(netdev, &vlan);
963 }
964 
965 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
966 {
967 	struct ethsw_core *ethsw = port_priv->ethsw_data;
968 	struct net_device *net_dev = port_priv->netdev;
969 	struct device *dev = net_dev->dev.parent;
970 	u8 mac_addr[ETH_ALEN];
971 	int err;
972 
973 	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
974 		return 0;
975 
976 	/* Get firmware address, if any */
977 	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
978 					port_priv->idx, mac_addr);
979 	if (err) {
980 		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
981 		return err;
982 	}
983 
984 	/* First check if firmware has any address configured by bootloader */
985 	if (!is_zero_ether_addr(mac_addr)) {
986 		eth_hw_addr_set(net_dev, mac_addr);
987 	} else {
988 		/* No MAC address configured, fill in net_dev->dev_addr
989 		 * with a random one
990 		 */
991 		eth_hw_addr_random(net_dev);
992 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
993 
994 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
995 		 * practical purposes, this will be our "permanent" mac address,
996 		 * at least until the next reboot. This move will also permit
997 		 * register_netdevice() to properly fill up net_dev->perm_addr.
998 		 */
999 		net_dev->addr_assign_type = NET_ADDR_PERM;
1000 	}
1001 
1002 	return 0;
1003 }
1004 
1005 static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
1006 				 const struct dpaa2_fd *fd)
1007 {
1008 	struct device *dev = ethsw->dev;
1009 	unsigned char *buffer_start;
1010 	struct sk_buff **skbh, *skb;
1011 	dma_addr_t fd_addr;
1012 
1013 	fd_addr = dpaa2_fd_get_addr(fd);
1014 	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);
1015 
1016 	skb = *skbh;
1017 	buffer_start = (unsigned char *)skbh;
1018 
1019 	dma_unmap_single(dev, fd_addr,
1020 			 skb_tail_pointer(skb) - buffer_start,
1021 			 DMA_TO_DEVICE);
1022 
1023 	/* Move on with skb release */
1024 	dev_kfree_skb(skb);
1025 }
1026 
1027 static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
1028 					struct sk_buff *skb,
1029 					struct dpaa2_fd *fd)
1030 {
1031 	struct device *dev = ethsw->dev;
1032 	struct sk_buff **skbh;
1033 	dma_addr_t addr;
1034 	u8 *buff_start;
1035 	void *hwa;
1036 
1037 	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
1038 			       DPAA2_SWITCH_TX_BUF_ALIGN,
1039 			       DPAA2_SWITCH_TX_BUF_ALIGN);
1040 
1041 	/* Clear FAS to have consistent values for TX confirmation. It is
1042 	 * located in the first 8 bytes of the buffer's hardware annotation
1043 	 * area
1044 	 */
1045 	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
1046 	memset(hwa, 0, 8);
1047 
1048 	/* Store a backpointer to the skb at the beginning of the buffer
1049 	 * (in the private data area) such that we can release it
1050 	 * on Tx confirm
1051 	 */
1052 	skbh = (struct sk_buff **)buff_start;
1053 	*skbh = skb;
1054 
1055 	addr = dma_map_single(dev, buff_start,
1056 			      skb_tail_pointer(skb) - buff_start,
1057 			      DMA_TO_DEVICE);
1058 	if (unlikely(dma_mapping_error(dev, addr)))
1059 		return -ENOMEM;
1060 
1061 	/* Setup the FD fields */
1062 	memset(fd, 0, sizeof(*fd));
1063 
1064 	dpaa2_fd_set_addr(fd, addr);
1065 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
1066 	dpaa2_fd_set_len(fd, skb->len);
1067 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
1068 
1069 	return 0;
1070 }
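
/* Rough layout of the Tx buffer built above (a sketch; the exact offsets
 * come from the driver-defined constants used in the function):
 *
 *	buff_start                                    skb->data
 *	|                                             |
 *	v                                             v
 *	+--------------+---------------------+-------+------------+
 *	| SWA:         | HWA (first 8 bytes  | head- | frame data |
 *	| skb pointer  | = FAS, cleared)     | room  | (skb->len) |
 *	+--------------+---------------------+-------+------------+
 *	<- SWA_SIZE -->
 *
 * The FD gets addr = buff_start and offset = skb->data - buff_start, and
 * the skb backpointer stored in the SWA is what dpaa2_switch_free_fd()
 * uses to release the skb on Tx confirmation.
 */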
1071 
1072 static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
1073 					struct net_device *net_dev)
1074 {
1075 	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
1076 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1077 	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
1078 	struct dpaa2_fd fd;
1079 	int err;
1080 
1081 	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
1082 		struct sk_buff *ns;
1083 
1084 		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
1085 		if (unlikely(!ns)) {
1086 			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
1087 			goto err_free_skb;
1088 		}
1089 		dev_consume_skb_any(skb);
1090 		skb = ns;
1091 	}
1092 
1093 	/* We'll be holding a back-reference to the skb until Tx confirmation */
1094 	skb = skb_unshare(skb, GFP_ATOMIC);
1095 	if (unlikely(!skb)) {
1096 		/* skb_unshare() has already freed the skb */
1097 		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
1098 		goto err_exit;
1099 	}
1100 
1101 	/* At this stage, we do not support non-linear skbs so just try to
1102 	 * linearize the skb and if that's not working, just drop the packet.
1103 	 */
1104 	err = skb_linearize(skb);
1105 	if (err) {
1106 		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
1107 		goto err_free_skb;
1108 	}
1109 
1110 	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
1111 	if (unlikely(err)) {
1112 		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
1113 		goto err_free_skb;
1114 	}
1115 
1116 	do {
1117 		err = dpaa2_io_service_enqueue_qd(NULL,
1118 						  port_priv->tx_qdid,
1119 						  8, 0, &fd);
1120 		retries--;
1121 	} while (err == -EBUSY && retries);
1122 
1123 	if (unlikely(err < 0)) {
1124 		dpaa2_switch_free_fd(ethsw, &fd);
1125 		goto err_exit;
1126 	}
1127 
1128 	return NETDEV_TX_OK;
1129 
1130 err_free_skb:
1131 	dev_kfree_skb(skb);
1132 err_exit:
1133 	return NETDEV_TX_OK;
1134 }
1135 
1136 static int
1137 dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
1138 				 struct flow_cls_offload *f)
1139 {
1140 	switch (f->command) {
1141 	case FLOW_CLS_REPLACE:
1142 		return dpaa2_switch_cls_flower_replace(filter_block, f);
1143 	case FLOW_CLS_DESTROY:
1144 		return dpaa2_switch_cls_flower_destroy(filter_block, f);
1145 	default:
1146 		return -EOPNOTSUPP;
1147 	}
1148 }
1149 
1150 static int
1151 dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
1152 				   struct tc_cls_matchall_offload *f)
1153 {
1154 	switch (f->command) {
1155 	case TC_CLSMATCHALL_REPLACE:
1156 		return dpaa2_switch_cls_matchall_replace(block, f);
1157 	case TC_CLSMATCHALL_DESTROY:
1158 		return dpaa2_switch_cls_matchall_destroy(block, f);
1159 	default:
1160 		return -EOPNOTSUPP;
1161 	}
1162 }
1163 
1164 static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
1165 						  void *type_data,
1166 						  void *cb_priv)
1167 {
1168 	switch (type) {
1169 	case TC_SETUP_CLSFLOWER:
1170 		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
1171 	case TC_SETUP_CLSMATCHALL:
1172 		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
1173 	default:
1174 		return -EOPNOTSUPP;
1175 	}
1176 }
1177 
1178 static LIST_HEAD(dpaa2_switch_block_cb_list);
1179 
1180 static int
1181 dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
1182 			       struct dpaa2_switch_filter_block *block)
1183 {
1184 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1185 	struct net_device *netdev = port_priv->netdev;
1186 	struct dpsw_acl_if_cfg acl_if_cfg;
1187 	int err;
1188 
1189 	if (port_priv->filter_block)
1190 		return -EINVAL;
1191 
1192 	acl_if_cfg.if_id[0] = port_priv->idx;
1193 	acl_if_cfg.num_ifs = 1;
1194 	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1195 			      block->acl_id, &acl_if_cfg);
1196 	if (err) {
1197 		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
1198 		return err;
1199 	}
1200 
1201 	block->ports |= BIT(port_priv->idx);
1202 	port_priv->filter_block = block;
1203 
1204 	return 0;
1205 }
1206 
1207 static int
1208 dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
1209 				 struct dpaa2_switch_filter_block *block)
1210 {
1211 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1212 	struct net_device *netdev = port_priv->netdev;
1213 	struct dpsw_acl_if_cfg acl_if_cfg;
1214 	int err;
1215 
1216 	if (port_priv->filter_block != block)
1217 		return -EINVAL;
1218 
1219 	acl_if_cfg.if_id[0] = port_priv->idx;
1220 	acl_if_cfg.num_ifs = 1;
1221 	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1222 				 block->acl_id, &acl_if_cfg);
1223 	if (err) {
1224 		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
1225 		return err;
1226 	}
1227 
1228 	block->ports &= ~BIT(port_priv->idx);
1229 	port_priv->filter_block = NULL;
1230 	return 0;
1231 }
1232 
1233 static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
1234 					struct dpaa2_switch_filter_block *block)
1235 {
1236 	struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
1237 	int err;
1238 
1239 	/* Offload all the mirror entries found in the block onto this new
1240 	 * port which is joining it.
1241 	 */
1242 	err = dpaa2_switch_block_offload_mirror(block, port_priv);
1243 	if (err)
1244 		return err;
1245 
1246 	/* If the port is already bound to this ACL table then do nothing. This
1247 	 * can happen when this port is the first one to join a tc block
1248 	 */
1249 	if (port_priv->filter_block == block)
1250 		return 0;
1251 
1252 	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
1253 	if (err)
1254 		return err;
1255 
1256 	/* Mark the previous ACL table as being unused if this was the last
1257 	 * port that was using it.
1258 	 */
1259 	if (old_block->ports == 0)
1260 		old_block->in_use = false;
1261 
1262 	return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
1263 }
1264 
1265 static int
1266 dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
1267 			       struct dpaa2_switch_filter_block *block)
1268 {
1269 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1270 	struct dpaa2_switch_filter_block *new_block;
1271 	int err;
1272 
1273 	/* Unoffload all the mirror entries found in the block from the
1274 	 * port leaving it.
1275 	 */
1276 	err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
1277 	if (err)
1278 		return err;
1279 
1280 	/* If we are the last port left in the block (i.e. the ACL table),
1281 	 * just continue to use it.
1282 	 */
1283 	if (block->ports == BIT(port_priv->idx))
1284 		return 0;
1285 
1286 	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
1287 	if (err)
1288 		return err;
1289 
1290 	if (block->ports == 0)
1291 		block->in_use = false;
1292 
1293 	new_block = dpaa2_switch_filter_block_get_unused(ethsw);
1294 	new_block->in_use = true;
1295 	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
1296 }
1297 
1298 static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
1299 					    struct flow_block_offload *f)
1300 {
1301 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1302 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1303 	struct dpaa2_switch_filter_block *filter_block;
1304 	struct flow_block_cb *block_cb;
1305 	bool register_block = false;
1306 	int err;
1307 
1308 	block_cb = flow_block_cb_lookup(f->block,
1309 					dpaa2_switch_port_setup_tc_block_cb_ig,
1310 					ethsw);
1311 
1312 	if (!block_cb) {
1313 		/* If the filter block is not already known, then this port
1314 		 * must be the first to join it. In this case, we can just
1315 		 * continue to use our private table
1316 		 */
1317 		filter_block = port_priv->filter_block;
1318 
1319 		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
1320 					       ethsw, filter_block, NULL);
1321 		if (IS_ERR(block_cb))
1322 			return PTR_ERR(block_cb);
1323 
1324 		register_block = true;
1325 	} else {
1326 		filter_block = flow_block_cb_priv(block_cb);
1327 	}
1328 
1329 	flow_block_cb_incref(block_cb);
1330 	err = dpaa2_switch_port_block_bind(port_priv, filter_block);
1331 	if (err)
1332 		goto err_block_bind;
1333 
1334 	if (register_block) {
1335 		flow_block_cb_add(block_cb, f);
1336 		list_add_tail(&block_cb->driver_list,
1337 			      &dpaa2_switch_block_cb_list);
1338 	}
1339 
1340 	return 0;
1341 
1342 err_block_bind:
1343 	if (!flow_block_cb_decref(block_cb))
1344 		flow_block_cb_free(block_cb);
1345 	return err;
1346 }
1347 
1348 static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
1349 					       struct flow_block_offload *f)
1350 {
1351 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1352 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1353 	struct dpaa2_switch_filter_block *filter_block;
1354 	struct flow_block_cb *block_cb;
1355 	int err;
1356 
1357 	block_cb = flow_block_cb_lookup(f->block,
1358 					dpaa2_switch_port_setup_tc_block_cb_ig,
1359 					ethsw);
1360 	if (!block_cb)
1361 		return;
1362 
1363 	filter_block = flow_block_cb_priv(block_cb);
1364 	err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
1365 	if (!err && !flow_block_cb_decref(block_cb)) {
1366 		flow_block_cb_remove(block_cb, f);
1367 		list_del(&block_cb->driver_list);
1368 	}
1369 }
1370 
1371 static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
1372 				       struct flow_block_offload *f)
1373 {
1374 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1375 		return -EOPNOTSUPP;
1376 
1377 	f->driver_block_list = &dpaa2_switch_block_cb_list;
1378 
1379 	switch (f->command) {
1380 	case FLOW_BLOCK_BIND:
1381 		return dpaa2_switch_setup_tc_block_bind(netdev, f);
1382 	case FLOW_BLOCK_UNBIND:
1383 		dpaa2_switch_setup_tc_block_unbind(netdev, f);
1384 		return 0;
1385 	default:
1386 		return -EOPNOTSUPP;
1387 	}
1388 }
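
/* Usage sketch for the tc block plumbing above (device names and block id
 * are examples): a shared ingress block can be bound to several switch
 * ports so that filters are added once for all of them, e.g.
 *
 *	tc qdisc add dev swp0 ingress_block 1 clsact
 *	tc qdisc add dev swp1 ingress_block 1 clsact
 *	tc filter add block 1 flower dst_mac 02:00:00:00:00:01 action drop
 *
 * Each FLOW_BLOCK_BIND lands in dpaa2_switch_setup_tc_block_bind(), which
 * ties the port to the shared filter block (and, underneath, to a shared
 * ACL table).
 */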
1389 
1390 static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
1391 				      enum tc_setup_type type,
1392 				      void *type_data)
1393 {
1394 	switch (type) {
1395 	case TC_SETUP_BLOCK:
1396 		return dpaa2_switch_setup_tc_block(netdev, type_data);
1397 	default:
1398 		return -EOPNOTSUPP;
1399 	}
1403 }
1404 
1405 static const struct net_device_ops dpaa2_switch_port_ops = {
1406 	.ndo_open		= dpaa2_switch_port_open,
1407 	.ndo_stop		= dpaa2_switch_port_stop,
1408 
1409 	.ndo_set_mac_address	= eth_mac_addr,
1410 	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
1411 	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
1412 	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
1413 	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
1414 	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
1415 	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
1416 	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,
1417 
1418 	.ndo_start_xmit		= dpaa2_switch_port_tx,
1419 	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
1420 	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
1421 	.ndo_setup_tc		= dpaa2_switch_port_setup_tc,
1422 };
1423 
1424 bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
1425 {
1426 	return netdev->netdev_ops == &dpaa2_switch_port_ops;
1427 }
1428 
1429 static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
1430 {
1431 	struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
1432 	struct dpaa2_mac *mac;
1433 	int err;
1434 
1435 	dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
1436 	dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);
1437 
1438 	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
1439 		return PTR_ERR(dpmac_dev);
1440 
1441 	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
1442 		return 0;
1443 
1444 	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
1445 	if (!mac)
1446 		return -ENOMEM;
1447 
1448 	mac->mc_dev = dpmac_dev;
1449 	mac->mc_io = port_priv->ethsw_data->mc_io;
1450 	mac->net_dev = port_priv->netdev;
1451 
1452 	err = dpaa2_mac_open(mac);
1453 	if (err)
1454 		goto err_free_mac;
1455 	port_priv->mac = mac;
1456 
1457 	if (dpaa2_switch_port_is_type_phy(port_priv)) {
1458 		err = dpaa2_mac_connect(mac);
1459 		if (err) {
1460 			netdev_err(port_priv->netdev,
1461 				   "Error connecting to the MAC endpoint %pe\n",
1462 				   ERR_PTR(err));
1463 			goto err_close_mac;
1464 		}
1465 	}
1466 
1467 	return 0;
1468 
1469 err_close_mac:
1470 	dpaa2_mac_close(mac);
1471 	port_priv->mac = NULL;
1472 err_free_mac:
1473 	kfree(mac);
1474 	return err;
1475 }
1476 
1477 static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
1478 {
1479 	if (dpaa2_switch_port_is_type_phy(port_priv))
1480 		dpaa2_mac_disconnect(port_priv->mac);
1481 
1482 	if (!dpaa2_switch_port_has_mac(port_priv))
1483 		return;
1484 
1485 	dpaa2_mac_close(port_priv->mac);
1486 	kfree(port_priv->mac);
1487 	port_priv->mac = NULL;
1488 }
1489 
1490 static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
1491 {
1492 	struct device *dev = (struct device *)arg;
1493 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1494 	struct ethsw_port_priv *port_priv;
1495 	u32 status = ~0;
1496 	int err, if_id;
1497 
1498 	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
1499 				  DPSW_IRQ_INDEX_IF, &status);
1500 	if (err) {
1501 		dev_err(dev, "Can't get irq status (err %d)\n", err);
1502 		goto out;
1503 	}
1504 
1505 	if_id = (status & 0xFFFF0000) >> 16;
1506 	port_priv = ethsw->ports[if_id];
1507 
1508 	if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
1509 		dpaa2_switch_port_link_state_update(port_priv->netdev);
1510 		dpaa2_switch_port_set_mac_addr(port_priv);
1511 	}
1512 
1513 	if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
1514 		rtnl_lock();
1515 		if (dpaa2_switch_port_has_mac(port_priv))
1516 			dpaa2_switch_port_disconnect_mac(port_priv);
1517 		else
1518 			dpaa2_switch_port_connect_mac(port_priv);
1519 		rtnl_unlock();
1520 	}
1521 
1522 out:
1523 	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
1524 				    DPSW_IRQ_INDEX_IF, status);
1525 	if (err)
1526 		dev_err(dev, "Can't clear irq status (err %d)\n", err);
1527 
1528 	return IRQ_HANDLED;
1529 }
1530 
1531 static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
1532 {
1533 	struct device *dev = &sw_dev->dev;
1534 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1535 	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
1536 	struct fsl_mc_device_irq *irq;
1537 	int err;
1538 
1539 	err = fsl_mc_allocate_irqs(sw_dev);
1540 	if (err) {
1541 		dev_err(dev, "MC irqs allocation failed\n");
1542 		return err;
1543 	}
1544 
1545 	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
1546 		err = -EINVAL;
1547 		goto free_irq;
1548 	}
1549 
1550 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1551 				  DPSW_IRQ_INDEX_IF, 0);
1552 	if (err) {
1553 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
1554 		goto free_irq;
1555 	}
1556 
1557 	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
1558 
1559 	err = devm_request_threaded_irq(dev, irq->virq, NULL,
1560 					dpaa2_switch_irq0_handler_thread,
1561 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
1562 					dev_name(dev), dev);
1563 	if (err) {
1564 		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
1565 		goto free_irq;
1566 	}
1567 
1568 	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
1569 				DPSW_IRQ_INDEX_IF, mask);
1570 	if (err) {
1571 		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
1572 		goto free_devm_irq;
1573 	}
1574 
1575 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1576 				  DPSW_IRQ_INDEX_IF, 1);
1577 	if (err) {
1578 		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
1579 		goto free_devm_irq;
1580 	}
1581 
1582 	return 0;
1583 
1584 free_devm_irq:
1585 	devm_free_irq(dev, irq->virq, dev);
1586 free_irq:
1587 	fsl_mc_free_irqs(sw_dev);
1588 	return err;
1589 }
1590 
1591 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
1592 {
1593 	struct device *dev = &sw_dev->dev;
1594 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
1595 	int err;
1596 
1597 	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
1598 				  DPSW_IRQ_INDEX_IF, 0);
1599 	if (err)
1600 		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
1601 
1602 	fsl_mc_free_irqs(sw_dev);
1603 }
1604 
1605 static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
1606 {
1607 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1608 	enum dpsw_learning_mode learn_mode;
1609 	int err;
1610 
1611 	if (enable)
1612 		learn_mode = DPSW_LEARNING_MODE_HW;
1613 	else
1614 		learn_mode = DPSW_LEARNING_MODE_DIS;
1615 
1616 	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
1617 					port_priv->idx, learn_mode);
1618 	if (err)
1619 		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);
1620 
1621 	if (!enable)
1622 		dpaa2_switch_port_fast_age(port_priv);
1623 
1624 	return err;
1625 }
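
/* Note on the helper above: disabling learning also fast-ages the
 * dynamically learned FDB entries of the port, so forwarding does not keep
 * relying on stale entries. From user space this is typically reached
 * through the bridge port flag, e.g. (interface name is an example):
 *
 *	bridge link set dev swp0 learning off
 */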
1626 
1627 static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
1628 						u8 state)
1629 {
1630 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1631 	int err;
1632 
1633 	err = dpaa2_switch_port_set_stp_state(port_priv, state);
1634 	if (err)
1635 		return err;
1636 
1637 	switch (state) {
1638 	case BR_STATE_DISABLED:
1639 	case BR_STATE_BLOCKING:
1640 	case BR_STATE_LISTENING:
1641 		err = dpaa2_switch_port_set_learning(port_priv, false);
1642 		break;
1643 	case BR_STATE_LEARNING:
1644 	case BR_STATE_FORWARDING:
1645 		err = dpaa2_switch_port_set_learning(port_priv,
1646 						     port_priv->learn_ena);
1647 		break;
1648 	}
1649 
1650 	return err;
1651 }
1652 
1653 static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
1654 				   struct switchdev_brport_flags flags)
1655 {
1656 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1657 
1658 	if (flags.mask & BR_BCAST_FLOOD)
1659 		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);
1660 
1661 	if (flags.mask & BR_FLOOD)
1662 		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);
1663 
1664 	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
1665 }
1666 
1667 static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
1668 					      struct switchdev_brport_flags flags,
1669 					      struct netlink_ext_ack *extack)
1670 {
1671 	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
1672 			   BR_MCAST_FLOOD))
1673 		return -EINVAL;
1674 
1675 	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
1676 		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
1677 		bool unicast = !!(flags.val & BR_FLOOD);
1678 
1679 		if (unicast != multicast) {
1680 			NL_SET_ERR_MSG_MOD(extack,
1681 					   "Cannot configure multicast flooding independently of unicast");
1682 			return -EINVAL;
1683 		}
1684 	}
1685 
1686 	return 0;
1687 }
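
/* The check above means that unknown-unicast (BR_FLOOD) and
 * unknown-multicast (BR_MCAST_FLOOD) flooding cannot be programmed to
 * different values on this hardware: e.g. (interface name is an example)
 * 'bridge link set dev swp0 flood on mcast_flood on' is accepted, while a
 * request that would leave the two settings different returns -EINVAL.
 */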
1688 
1689 static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
1690 					  struct switchdev_brport_flags flags,
1691 					  struct netlink_ext_ack *extack)
1692 {
1693 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1694 	int err;
1695 
1696 	if (flags.mask & BR_LEARNING) {
1697 		bool learn_ena = !!(flags.val & BR_LEARNING);
1698 
1699 		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
1700 		if (err)
1701 			return err;
1702 		port_priv->learn_ena = learn_ena;
1703 	}
1704 
1705 	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
1706 		err = dpaa2_switch_port_flood(port_priv, flags);
1707 		if (err)
1708 			return err;
1709 	}
1710 
1711 	return 0;
1712 }
1713 
1714 static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
1715 				      const struct switchdev_attr *attr,
1716 				      struct netlink_ext_ack *extack)
1717 {
1718 	int err = 0;
1719 
1720 	switch (attr->id) {
1721 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
1722 		err = dpaa2_switch_port_attr_stp_state_set(netdev,
1723 							   attr->u.stp_state);
1724 		break;
1725 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
1726 		if (!attr->u.vlan_filtering) {
1727 			NL_SET_ERR_MSG_MOD(extack,
1728 					   "The DPAA2 switch does not support VLAN-unaware operation");
1729 			return -EOPNOTSUPP;
1730 		}
1731 		break;
1732 	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
1733 		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
1734 		break;
1735 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
1736 		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
1737 		break;
1738 	default:
1739 		err = -EOPNOTSUPP;
1740 		break;
1741 	}
1742 
1743 	return err;
1744 }
1745 
1746 int dpaa2_switch_port_vlans_add(struct net_device *netdev,
1747 				const struct switchdev_obj_port_vlan *vlan)
1748 {
1749 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1750 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1751 	struct dpsw_attr *attr = &ethsw->sw_attr;
1752 	int err = 0;
1753 
1754 	/* Make sure that the VLAN is not already configured
1755 	 * on the switch port
1756 	 */
1757 	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
1758 		return -EEXIST;
1759 
1760 	/* Check if there is space for a new VLAN */
1761 	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
1762 				  &ethsw->sw_attr);
1763 	if (err) {
1764 		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
1765 		return err;
1766 	}
1767 	if (attr->max_vlans - attr->num_vlans < 1)
1768 		return -ENOSPC;
1779 
1780 	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
1781 		/* this is a new VLAN */
1782 		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
1783 		if (err)
1784 			return err;
1785 
1786 		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
1787 	}
1788 
1789 	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
1790 }
1791 
1792 static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
1793 					    const unsigned char *addr)
1794 {
1795 	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
1796 	struct netdev_hw_addr *ha;
1797 
1798 	netif_addr_lock_bh(netdev);
1799 	list_for_each_entry(ha, &list->list, list) {
1800 		if (ether_addr_equal(ha->addr, addr)) {
1801 			netif_addr_unlock_bh(netdev);
1802 			return 1;
1803 		}
1804 	}
1805 	netif_addr_unlock_bh(netdev);
1806 	return 0;
1807 }
1808 
1809 static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
1810 				     const struct switchdev_obj_port_mdb *mdb)
1811 {
1812 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1813 	int err;
1814 
1815 	/* Check if address is already set on this port */
1816 	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1817 		return -EEXIST;
1818 
1819 	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
1820 	if (err)
1821 		return err;
1822 
1823 	err = dev_mc_add(netdev, mdb->addr);
1824 	if (err) {
1825 		netdev_err(netdev, "dev_mc_add err %d\n", err);
1826 		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1827 	}
1828 
1829 	return err;
1830 }
1831 
1832 static int dpaa2_switch_port_obj_add(struct net_device *netdev,
1833 				     const struct switchdev_obj *obj)
1834 {
1835 	int err;
1836 
1837 	switch (obj->id) {
1838 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1839 		err = dpaa2_switch_port_vlans_add(netdev,
1840 						  SWITCHDEV_OBJ_PORT_VLAN(obj));
1841 		break;
1842 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1843 		err = dpaa2_switch_port_mdb_add(netdev,
1844 						SWITCHDEV_OBJ_PORT_MDB(obj));
1845 		break;
1846 	default:
1847 		err = -EOPNOTSUPP;
1848 		break;
1849 	}
1850 
1851 	return err;
1852 }
1853 
1854 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
1855 {
1856 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1857 	struct net_device *netdev = port_priv->netdev;
1858 	struct dpsw_vlan_if_cfg vcfg;
1859 	int i, err;
1860 
1861 	if (!port_priv->vlans[vid])
1862 		return -ENOENT;
1863 
1864 	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
1865 		/* If we are deleting the PVID of a port, use VLAN 4095 instead
1866 		 * as we are sure that neither the bridge nor the 8021q module
1867 		 * will use it
1868 		 */
1869 		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
1870 		if (err)
1871 			return err;
1872 	}
1873 
1874 	vcfg.num_ifs = 1;
1875 	vcfg.if_id[0] = port_priv->idx;
1876 	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
1877 		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
1878 						   ethsw->dpsw_handle,
1879 						   vid, &vcfg);
1880 		if (err) {
1881 			netdev_err(netdev,
1882 				   "dpsw_vlan_remove_if_untagged err %d\n",
1883 				   err);
1884 		}
1885 		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
1886 	}
1887 
1888 	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
1889 		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
1890 					  vid, &vcfg);
1891 		if (err) {
1892 			netdev_err(netdev,
1893 				   "dpsw_vlan_remove_if err %d\n", err);
1894 			return err;
1895 		}
1896 		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
1897 
1898 		/* Delete VLAN from switch if it is no longer configured on
1899 		 * any port
1900 		 */
1901 		for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
1902 			if (ethsw->ports[i] &&
1903 			    ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
1904 				return 0; /* Found a port member in VID */
1905 		}
1906 
1907 		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
1908 
1909 		err = dpaa2_switch_dellink(ethsw, vid);
1910 		if (err)
1911 			return err;
1912 	}
1913 
1914 	return 0;
1915 }
1916 
1917 int dpaa2_switch_port_vlans_del(struct net_device *netdev,
1918 				const struct switchdev_obj_port_vlan *vlan)
1919 {
1920 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1921 
1922 	if (netif_is_bridge_master(vlan->obj.orig_dev))
1923 		return -EOPNOTSUPP;
1924 
1925 	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
1926 }
1927 
1928 static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
1929 				     const struct switchdev_obj_port_mdb *mdb)
1930 {
1931 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1932 	int err;
1933 
1934 	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
1935 		return -ENOENT;
1936 
1937 	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
1938 	if (err)
1939 		return err;
1940 
1941 	err = dev_mc_del(netdev, mdb->addr);
1942 	if (err) {
1943 		netdev_err(netdev, "dev_mc_del err %d\n", err);
1944 		return err;
1945 	}
1946 
1947 	return err;
1948 }
1949 
1950 static int dpaa2_switch_port_obj_del(struct net_device *netdev,
1951 				     const struct switchdev_obj *obj)
1952 {
1953 	int err;
1954 
1955 	switch (obj->id) {
1956 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1957 		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
1958 		break;
1959 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1960 		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
1961 		break;
1962 	default:
1963 		err = -EOPNOTSUPP;
1964 		break;
1965 	}
1966 	return err;
1967 }
1968 
1969 static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
1970 					    struct switchdev_notifier_port_attr_info *ptr)
1971 {
1972 	int err;
1973 
1974 	err = switchdev_handle_port_attr_set(netdev, ptr,
1975 					     dpaa2_switch_port_dev_check,
1976 					     dpaa2_switch_port_attr_set);
1977 	return notifier_from_errno(err);
1978 }
1979 
1980 static struct notifier_block dpaa2_switch_port_switchdev_nb;
1981 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
1982 
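/* Called on NETDEV_CHANGEUPPER when a switch port is enslaved to a bridge,
 * e.g. as a result of "ip link set swp0 master br0". Ports under the same
 * bridge must belong to the same DPSW instance since cross-DPSW bridging
 * cannot be offloaded. The join sequence: drop the standalone VLAN 1,
 * share the FDB already used by the other ports in the bridge, inherit the
 * bridge port learning state and rebuild the egress flood domain of the
 * new FDB.
 */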
1983 static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
1984 					 struct net_device *upper_dev,
1985 					 struct netlink_ext_ack *extack)
1986 {
1987 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
1988 	struct ethsw_core *ethsw = port_priv->ethsw_data;
1989 	struct ethsw_port_priv *other_port_priv;
1990 	struct net_device *other_dev;
1991 	struct list_head *iter;
1992 	bool learn_ena;
1993 	int err;
1994 
1995 	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
1996 		if (!dpaa2_switch_port_dev_check(other_dev))
1997 			continue;
1998 
1999 		other_port_priv = netdev_priv(other_dev);
2000 		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
2001 			NL_SET_ERR_MSG_MOD(extack,
2002 					   "Interface from a different DPSW is in the bridge already");
2003 			return -EINVAL;
2004 		}
2005 	}
2006 
2007 	/* Delete the previously manually installed VLAN 1 */
2008 	err = dpaa2_switch_port_del_vlan(port_priv, 1);
2009 	if (err)
2010 		return err;
2011 
2012 	dpaa2_switch_port_set_fdb(port_priv, upper_dev);
2013 
2014 	/* Inherit the initial bridge port learning state */
2015 	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
2016 	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	if (err)
		goto err_egress_flood;
2017 	port_priv->learn_ena = learn_ena;
2018 
2019 	/* Setup the egress flood policy (broadcast, unknown unicast) */
2020 	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
2021 	if (err)
2022 		goto err_egress_flood;
2023 
2024 	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
2025 					    &dpaa2_switch_port_switchdev_nb,
2026 					    &dpaa2_switch_port_switchdev_blocking_nb,
2027 					    false, extack);
2028 	if (err)
2029 		goto err_switchdev_offload;
2030 
2031 	return 0;
2032 
2033 err_switchdev_offload:
2034 err_egress_flood:
2035 	dpaa2_switch_port_set_fdb(port_priv, NULL);
2036 	return err;
2037 }
2038 
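/* vlan_for_each() callbacks used when a port migrates between FDB tables:
 * the RX VLANs installed through vlan_vid_add() (including those of 8021q
 * uppers) are first killed in the old table and then re-added to the new
 * one.
 */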
2039 static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
2040 {
2041 	__be16 vlan_proto = htons(ETH_P_8021Q);
2042 
2043 	if (vdev)
2044 		vlan_proto = vlan_dev_vlan_proto(vdev);
2045 
2046 	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
2047 }
2048 
2049 static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
2050 {
2051 	__be16 vlan_proto = htons(ETH_P_8021Q);
2052 
2053 	if (vdev)
2054 		vlan_proto = vlan_dev_vlan_proto(vdev);
2055 
2056 	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
2057 }
2058 
2059 static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
2060 {
2061 	switchdev_bridge_port_unoffload(netdev, NULL,
2062 					&dpaa2_switch_port_switchdev_nb,
2063 					&dpaa2_switch_port_switchdev_blocking_nb);
2064 }
2065 
2066 static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
2067 {
2068 	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
2069 	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
2070 	struct ethsw_core *ethsw = port_priv->ethsw_data;
2071 	int err;
2072 
2073 	/* First of all, fast age any learn FDB addresses on this switch port */
2074 	dpaa2_switch_port_fast_age(port_priv);
2075 
2076 	/* Clear all RX VLANs installed through vlan_vid_add(), whether by VLAN
2077 	 * upper devices or otherwise, from the FDB table that we are about to
2078 	 * leave
2079 	 */
2080 	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
2081 	if (err)
2082 		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);
2083 
2084 	dpaa2_switch_port_set_fdb(port_priv, NULL);
2085 
2086 	/* Restore all RX VLANs into the new FDB table that we just joined */
2087 	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
2088 	if (err)
2089 		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);
2090 
2091 	/* Reset the flooding state to denote that this port can send any
2092 	 * packet in standalone mode. With this, we are also ensuring that any
2093 	 * later bridge join will have the flooding flag on.
2094 	 */
2095 	port_priv->bcast_flood = true;
2096 	port_priv->ucast_flood = true;
2097 
2098 	/* Setup the egress flood policy (broadcast, unknown unicast).
2099 	 * When the port is not under a bridge, only the CTRL interface is part
2100 	 * of the flooding domain besides the actual port
2101 	 */
2102 	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
2103 	if (err)
2104 		return err;
2105 
2106 	/* Recreate the egress flood domain of the FDB that we just left */
2107 	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
2108 	if (err)
2109 		return err;
2110 
2111 	/* No HW learning when not under a bridge */
2112 	err = dpaa2_switch_port_set_learning(port_priv, false);
2113 	if (err)
2114 		return err;
2115 	port_priv->learn_ena = false;
2116 
2117 	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
2118 	 * the dpaa2 switch interfaces are not capable of being VLAN unaware
2119 	 */
2120 	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
2121 					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
2122 }
2123 
2124 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
2125 {
2126 	struct net_device *upper_dev;
2127 	struct list_head *iter;
2128 
2129 	/* RCU read lock not necessary because we have write-side protection
2130 	 * (rtnl_mutex); however, a non-RCU iterator does not exist.
2131 	 */
2132 	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
2133 		if (is_vlan_dev(upper_dev))
2134 			return -EOPNOTSUPP;
2135 
2136 	return 0;
2137 }
2138 
2139 static int
2140 dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
2141 					  struct net_device *upper_dev,
2142 					  struct netlink_ext_ack *extack)
2143 {
2144 	int err;
2145 
2146 	if (!br_vlan_enabled(upper_dev)) {
2147 		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
2148 		return -EOPNOTSUPP;
2149 	}
2150 
2151 	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
2152 	if (err) {
2153 		NL_SET_ERR_MSG_MOD(extack,
2154 				   "Cannot join a bridge while VLAN uppers are present");
2155 		return err;
2156 	}
2157 
2158 	return 0;
2159 }
2160 
2161 static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
2162 					     unsigned long event, void *ptr)
2163 {
2164 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
2165 	struct netdev_notifier_changeupper_info *info = ptr;
2166 	struct netlink_ext_ack *extack;
2167 	struct net_device *upper_dev;
2168 	int err = 0;
2169 
2170 	if (!dpaa2_switch_port_dev_check(netdev))
2171 		return NOTIFY_DONE;
2172 
2173 	extack = netdev_notifier_info_to_extack(&info->info);
2174 
2175 	switch (event) {
2176 	case NETDEV_PRECHANGEUPPER:
2177 		upper_dev = info->upper_dev;
2178 		if (!netif_is_bridge_master(upper_dev))
2179 			break;
2180 
2181 		err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
2182 								upper_dev,
2183 								extack);
2184 		if (err)
2185 			goto out;
2186 
2187 		if (!info->linking)
2188 			dpaa2_switch_port_pre_bridge_leave(netdev);
2189 
2190 		break;
2191 	case NETDEV_CHANGEUPPER:
2192 		upper_dev = info->upper_dev;
2193 		if (netif_is_bridge_master(upper_dev)) {
2194 			if (info->linking)
2195 				err = dpaa2_switch_port_bridge_join(netdev,
2196 								    upper_dev,
2197 								    extack);
2198 			else
2199 				err = dpaa2_switch_port_bridge_leave(netdev);
2200 		}
2201 		break;
2202 	}
2203 
2204 out:
2205 	return notifier_from_errno(err);
2206 }
2207 
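/* FDB add/del notifications are delivered in atomic context, while acting
 * on them requires taking the rtnl lock. Each event is therefore copied
 * into a work item, together with its own copy of the MAC address and a
 * reference on the net_device, and is handled from the ordered workqueue.
 */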
2208 struct ethsw_switchdev_event_work {
2209 	struct work_struct work;
2210 	struct switchdev_notifier_fdb_info fdb_info;
2211 	struct net_device *dev;
2212 	unsigned long event;
2213 };
2214 
2215 static void dpaa2_switch_event_work(struct work_struct *work)
2216 {
2217 	struct ethsw_switchdev_event_work *switchdev_work =
2218 		container_of(work, struct ethsw_switchdev_event_work, work);
2219 	struct net_device *dev = switchdev_work->dev;
2220 	struct switchdev_notifier_fdb_info *fdb_info;
2221 	int err;
2222 
2223 	rtnl_lock();
2224 	fdb_info = &switchdev_work->fdb_info;
2225 
2226 	switch (switchdev_work->event) {
2227 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2228 		if (!fdb_info->added_by_user || fdb_info->is_local)
2229 			break;
2230 		if (is_unicast_ether_addr(fdb_info->addr))
2231 			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
2232 							   fdb_info->addr);
2233 		else
2234 			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
2235 							   fdb_info->addr);
2236 		if (err)
2237 			break;
2238 		fdb_info->offloaded = true;
2239 		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
2240 					 &fdb_info->info, NULL);
2241 		break;
2242 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2243 		if (!fdb_info->added_by_user || fdb_info->is_local)
2244 			break;
2245 		if (is_unicast_ether_addr(fdb_info->addr))
2246 			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
2247 		else
2248 			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
2249 		break;
2250 	}
2251 
2252 	rtnl_unlock();
2253 	kfree(switchdev_work->fdb_info.addr);
2254 	kfree(switchdev_work);
2255 	dev_put(dev);
2256 }
2257 
2258 /* Called under rcu_read_lock() */
2259 static int dpaa2_switch_port_event(struct notifier_block *nb,
2260 				   unsigned long event, void *ptr)
2261 {
2262 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2263 	struct ethsw_port_priv *port_priv = netdev_priv(dev);
2264 	struct ethsw_switchdev_event_work *switchdev_work;
2265 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
2266 	struct ethsw_core *ethsw = port_priv->ethsw_data;
2267 
2268 	if (event == SWITCHDEV_PORT_ATTR_SET)
2269 		return dpaa2_switch_port_attr_set_event(dev, ptr);
2270 
2271 	if (!dpaa2_switch_port_dev_check(dev))
2272 		return NOTIFY_DONE;
2273 
2274 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2275 	if (!switchdev_work)
2276 		return NOTIFY_BAD;
2277 
2278 	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
2279 	switchdev_work->dev = dev;
2280 	switchdev_work->event = event;
2281 
2282 	switch (event) {
2283 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2284 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2285 		memcpy(&switchdev_work->fdb_info, ptr,
2286 		       sizeof(switchdev_work->fdb_info));
2287 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2288 		if (!switchdev_work->fdb_info.addr)
2289 			goto err_addr_alloc;
2290 
2291 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2292 				fdb_info->addr);
2293 
2294 		/* Take a reference on the device to avoid being freed. */
2295 		dev_hold(dev);
2296 		break;
2297 	default:
2298 		kfree(switchdev_work);
2299 		return NOTIFY_DONE;
2300 	}
2301 
2302 	queue_work(ethsw->workqueue, &switchdev_work->work);
2303 
2304 	return NOTIFY_DONE;
2305 
2306 err_addr_alloc:
2307 	kfree(switchdev_work);
2308 	return NOTIFY_BAD;
2309 }
2310 
2311 static int dpaa2_switch_port_obj_event(unsigned long event,
2312 				       struct net_device *netdev,
2313 				       struct switchdev_notifier_port_obj_info *port_obj_info)
2314 {
2315 	int err = -EOPNOTSUPP;
2316 
2317 	if (!dpaa2_switch_port_dev_check(netdev))
2318 		return NOTIFY_DONE;
2319 
2320 	switch (event) {
2321 	case SWITCHDEV_PORT_OBJ_ADD:
2322 		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
2323 		break;
2324 	case SWITCHDEV_PORT_OBJ_DEL:
2325 		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
2326 		break;
2327 	}
2328 
2329 	port_obj_info->handled = true;
2330 	return notifier_from_errno(err);
2331 }
2332 
2333 static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
2334 					    unsigned long event, void *ptr)
2335 {
2336 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2337 
2338 	switch (event) {
2339 	case SWITCHDEV_PORT_OBJ_ADD:
2340 	case SWITCHDEV_PORT_OBJ_DEL:
2341 		return dpaa2_switch_port_obj_event(event, dev, ptr);
2342 	case SWITCHDEV_PORT_ATTR_SET:
2343 		return dpaa2_switch_port_attr_set_event(dev, ptr);
2344 	}
2345 
2346 	return NOTIFY_DONE;
2347 }
2348 
2349 /* Build a linear skb based on a single-buffer frame descriptor */
2350 static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
2351 						     const struct dpaa2_fd *fd)
2352 {
2353 	u16 fd_offset = dpaa2_fd_get_offset(fd);
2354 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
2355 	u32 fd_length = dpaa2_fd_get_len(fd);
2356 	struct device *dev = ethsw->dev;
2357 	struct sk_buff *skb = NULL;
2358 	void *fd_vaddr;
2359 
2360 	fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
2361 	dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
2362 		       DMA_FROM_DEVICE);
2363 
2364 	skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
2365 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2366 	if (unlikely(!skb)) {
2367 		dev_err(dev, "build_skb() failed\n");
2368 		return NULL;
2369 	}
2370 
2371 	skb_reserve(skb, fd_offset);
2372 	skb_put(skb, fd_length);
2373 
2374 	ethsw->buf_count--;
2375 
2376 	return skb;
2377 }
2378 
2379 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
2380 				 const struct dpaa2_fd *fd)
2381 {
2382 	dpaa2_switch_free_fd(fq->ethsw, fd);
2383 }
2384 
2385 static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
2386 			    const struct dpaa2_fd *fd)
2387 {
2388 	struct ethsw_core *ethsw = fq->ethsw;
2389 	struct ethsw_port_priv *port_priv;
2390 	struct net_device *netdev;
2391 	struct vlan_ethhdr *hdr;
2392 	struct sk_buff *skb;
2393 	u16 vlan_tci, vid;
2394 	int if_id, err;
2395 
2396 	/* get switch ingress interface ID */
2397 	if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;
2398 
2399 	if (if_id >= ethsw->sw_attr.num_ifs) {
2400 		dev_err(ethsw->dev, "Frame received from unknown interface!\n");
2401 		goto err_free_fd;
2402 	}
2403 	port_priv = ethsw->ports[if_id];
2404 	netdev = port_priv->netdev;
2405 
2406 	/* build the SKB based on the FD received */
2407 	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
2408 		if (net_ratelimit())
2409 			netdev_err(netdev, "Received invalid frame format\n");
2410 		goto err_free_fd;
2411 	}
2413 
2414 	skb = dpaa2_switch_build_linear_skb(ethsw, fd);
2415 	if (unlikely(!skb))
2416 		goto err_free_fd;
2417 
2418 	skb_reset_mac_header(skb);
2419 
2420 	/* Remove the VLAN header if the packet that we just received has a vid
2421 	 * equal to the port PVID. Since the dpaa2-switch can operate only in
2422 	 * VLAN-aware mode and no alterations are made on the packet when it's
2423 	 * redirected/mirrored to the control interface, we are sure that there
2424 	 * will always be a VLAN header present.
2425 	 */
2426 	hdr = vlan_eth_hdr(skb);
2427 	vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
2428 	if (vid == port_priv->pvid) {
2429 		err = __skb_vlan_pop(skb, &vlan_tci);
2430 		if (err) {
2431 			dev_info(ethsw->dev, "__skb_vlan_pop() returned %d\n", err);
2432 			goto err_free_fd;
2433 		}
2434 	}
2435 
2436 	skb->dev = netdev;
2437 	skb->protocol = eth_type_trans(skb, skb->dev);
2438 
2439 	/* Setup the offload_fwd_mark only if the port is under a bridge */
2440 	skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);
2441 
2442 	netif_receive_skb(skb);
2443 
2444 	return;
2445 
2446 err_free_fd:
2447 	dpaa2_switch_free_fd(ethsw, fd);
2448 }
2449 
2450 static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
2451 {
2452 	ethsw->features = 0;
2453 
2454 	if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
2455 		ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
2456 }
2457 
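/* Retrieve the frame queue IDs of the control interface: one Rx queue on
 * which CPU-bound traffic is delivered and one Tx error/confirmation queue
 * used to reclaim transmitted buffers.
 */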
2458 static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
2459 {
2460 	struct dpsw_ctrl_if_attr ctrl_if_attr;
2461 	struct device *dev = ethsw->dev;
2462 	int i = 0;
2463 	int err;
2464 
2465 	err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2466 					  &ctrl_if_attr);
2467 	if (err) {
2468 		dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
2469 		return err;
2470 	}
2471 
2472 	ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
2473 	ethsw->fq[i].ethsw = ethsw;
2474 	ethsw->fq[i++].type = DPSW_QUEUE_RX;
2475 
2476 	ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
2477 	ethsw->fq[i].ethsw = ethsw;
2478 	ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;
2479 
2480 	return 0;
2481 }
2482 
2483 /* Free buffers acquired from the buffer pool or which were meant to
2484  * be released in the pool
2485  */
2486 static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
2487 {
2488 	struct device *dev = ethsw->dev;
2489 	void *vaddr;
2490 	int i;
2491 
2492 	for (i = 0; i < count; i++) {
2493 		vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
2494 		dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
2495 			       DMA_FROM_DEVICE);
2496 		free_pages((unsigned long)vaddr, 0);
2497 	}
2498 }
2499 
2500 /* Perform a single release command to add buffers
2501  * to the specified buffer pool
2502  */
2503 static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
2504 {
2505 	struct device *dev = ethsw->dev;
2506 	u64 buf_array[BUFS_PER_CMD];
2507 	struct page *page;
2508 	int retries = 0;
2509 	dma_addr_t addr;
2510 	int err;
2511 	int i;
2512 
2513 	for (i = 0; i < BUFS_PER_CMD; i++) {
2514 		/* Allocate one page for each Rx buffer. WRIOP sees
2515 		 * the entire page except for a tailroom reserved for
2516 		 * skb shared info
2517 		 */
2518 		page = dev_alloc_pages(0);
2519 		if (!page) {
2520 			dev_err(dev, "buffer allocation failed\n");
2521 			goto err_alloc;
2522 		}
2523 
2524 		addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
2525 				    DMA_FROM_DEVICE);
2526 		if (dma_mapping_error(dev, addr)) {
2527 			dev_err(dev, "dma_map_page() failed\n");
2528 			goto err_map;
2529 		}
2530 		buf_array[i] = addr;
2531 	}
2532 
2533 release_bufs:
2534 	/* In case the portal is busy, retry until successful or
2535 	 * max retries hit.
2536 	 */
2537 	while ((err = dpaa2_io_service_release(NULL, bpid,
2538 					       buf_array, i)) == -EBUSY) {
2539 		if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
2540 			break;
2541 
2542 		cpu_relax();
2543 	}
2544 
2545 	/* If release command failed, clean up and bail out. */
2546 	if (err) {
2547 		dpaa2_switch_free_bufs(ethsw, buf_array, i);
2548 		return 0;
2549 	}
2550 
2551 	return i;
2552 
2553 err_map:
2554 	__free_pages(page, 0);
2555 err_alloc:
2556 	/* If we managed to allocate at least some buffers,
2557 	 * release them to hardware
2558 	 */
2559 	if (i)
2560 		goto release_bufs;
2561 
2562 	return 0;
2563 }
2564 
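/* Called from the NAPI poll routine: once the number of buffers owned by
 * the hardware drops below DPAA2_ETHSW_REFILL_THRESH, release new buffers
 * into the pool until DPAA2_ETHSW_NUM_BUFS is reached again.
 */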
2565 static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
2566 {
2567 	int *count = &ethsw->buf_count;
2568 	int new_count;
2569 	int err = 0;
2570 
2571 	if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
2572 		do {
2573 			new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2574 			if (unlikely(!new_count)) {
2575 				/* Out of memory; abort for now, we'll
2576 				 * try later on
2577 				 */
2578 				break;
2579 			}
2580 			*count += new_count;
2581 		} while (*count < DPAA2_ETHSW_NUM_BUFS);
2582 
2583 		if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
2584 			err = -ENOMEM;
2585 	}
2586 
2587 	return err;
2588 }
2589 
2590 static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
2591 {
2592 	int *count, i;
2593 
2594 	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
2595 		count = &ethsw->buf_count;
2596 		*count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
2597 
2598 		if (unlikely(*count < BUFS_PER_CMD))
2599 			return -ENOMEM;
2600 	}
2601 
2602 	return 0;
2603 }
2604 
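/* Drain the buffer pool at teardown by acquiring buffers back from QBMan
 * and freeing them until the acquire command returns none.
 */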
2605 static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
2606 {
2607 	u64 buf_array[BUFS_PER_CMD];
2608 	int ret;
2609 
2610 	do {
2611 		ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
2612 					       buf_array, BUFS_PER_CMD);
2613 		if (ret < 0) {
2614 			dev_err(ethsw->dev,
2615 				"dpaa2_io_service_acquire() = %d\n", ret);
2616 			return;
2617 		}
2618 		dpaa2_switch_free_bufs(ethsw, buf_array, ret);
2619 
2620 	} while (ret);
2621 }
2622 
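/* Allocate and enable a DPBP (buffer pool) object and attach it to the
 * control interface so that the Rx path has buffers to draw from.
 */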
2623 static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
2624 {
2625 	struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
2626 	struct device *dev = ethsw->dev;
2627 	struct fsl_mc_device *dpbp_dev;
2628 	struct dpbp_attr dpbp_attrs;
2629 	int err;
2630 
2631 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2632 				     &dpbp_dev);
2633 	if (err) {
2634 		if (err == -ENXIO)
2635 			err = -EPROBE_DEFER;
2636 		else
2637 			dev_err(dev, "DPBP device allocation failed\n");
2638 		return err;
2639 	}
2640 	ethsw->dpbp_dev = dpbp_dev;
2641 
2642 	err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
2643 			&dpbp_dev->mc_handle);
2644 	if (err) {
2645 		dev_err(dev, "dpbp_open() failed\n");
2646 		goto err_open;
2647 	}
2648 
2649 	err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2650 	if (err) {
2651 		dev_err(dev, "dpbp_reset() failed\n");
2652 		goto err_reset;
2653 	}
2654 
2655 	err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2656 	if (err) {
2657 		dev_err(dev, "dpbp_enable() failed\n");
2658 		goto err_enable;
2659 	}
2660 
2661 	err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
2662 				  &dpbp_attrs);
2663 	if (err) {
2664 		dev_err(dev, "dpbp_get_attributes() failed\n");
2665 		goto err_get_attr;
2666 	}
2667 
2668 	dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
2669 	dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
2670 	dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
2671 	dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;
2672 
2673 	err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
2674 				     &dpsw_ctrl_if_pools_cfg);
2675 	if (err) {
2676 		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
2677 		goto err_get_attr;
2678 	}
2679 	ethsw->bpid = dpbp_attrs.id;
2680 
2681 	return 0;
2682 
2683 err_get_attr:
2684 	dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2685 err_enable:
2686 err_reset:
2687 	dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
2688 err_open:
2689 	fsl_mc_object_free(dpbp_dev);
2690 	return err;
2691 }
2692 
2693 static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
2694 {
2695 	dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2696 	dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
2697 	fsl_mc_object_free(ethsw->dpbp_dev);
2698 }
2699 
2700 static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
2701 {
2702 	int i;
2703 
2704 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2705 		ethsw->fq[i].store =
2706 			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
2707 					      ethsw->dev);
2708 		if (!ethsw->fq[i].store) {
2709 			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
2710 			while (--i >= 0)
2711 				dpaa2_io_store_destroy(ethsw->fq[i].store);
2712 			return -ENOMEM;
2713 		}
2714 	}
2715 
2716 	return 0;
2717 }
2718 
2719 static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
2720 {
2721 	int i;
2722 
2723 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2724 		dpaa2_io_store_destroy(ethsw->fq[i].store);
2725 }
2726 
2727 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
2728 {
2729 	int err, retries = 0;
2730 
2731 	/* Try to pull from the FQ while the portal is busy and we didn't hit
2732 	 * the maximum number fo retries
2733 	 */
2734 	do {
2735 		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
2736 		cpu_relax();
2737 	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2738 
2739 	if (unlikely(err))
2740 		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d\n", err);
2741 
2742 	return err;
2743 }
2744 
2745 /* Consume all frames pull-dequeued into the store */
2746 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
2747 {
2748 	struct ethsw_core *ethsw = fq->ethsw;
2749 	int cleaned = 0, is_last;
2750 	struct dpaa2_dq *dq;
2751 	int retries = 0;
2752 
2753 	do {
2754 		/* Get the next available FD from the store */
2755 		dq = dpaa2_io_store_next(fq->store, &is_last);
2756 		if (unlikely(!dq)) {
2757 			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
2758 				dev_err_once(ethsw->dev,
2759 					     "No valid dequeue response\n");
2760 				return -ETIMEDOUT;
2761 			}
2762 			continue;
2763 		}
2764 
2765 		if (fq->type == DPSW_QUEUE_RX)
2766 			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
2767 		else
2768 			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
2769 		cleaned++;
2770 
2771 	} while (!is_last);
2772 
2773 	return cleaned;
2774 }
2775 
2776 /* NAPI poll routine */
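/* Pull-dequeue frames into the store and consume them until either the
 * store comes back empty or the budget is hit. Only when the budget is not
 * exhausted do we complete NAPI and rearm the data availability
 * notifications.
 */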
2777 static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
2778 {
2779 	int err, cleaned = 0, store_cleaned, work_done;
2780 	struct dpaa2_switch_fq *fq;
2781 	int retries = 0;
2782 
2783 	fq = container_of(napi, struct dpaa2_switch_fq, napi);
2784 
2785 	do {
2786 		err = dpaa2_switch_pull_fq(fq);
2787 		if (unlikely(err))
2788 			break;
2789 
2790 		/* Refill pool if appropriate */
2791 		dpaa2_switch_refill_bp(fq->ethsw);
2792 
2793 		store_cleaned = dpaa2_switch_store_consume(fq);
2794 		cleaned += store_cleaned;
2795 
2796 		if (cleaned >= budget) {
2797 			work_done = budget;
2798 			goto out;
2799 		}
2800 
2801 	} while (store_cleaned);
2802 
2803 	/* We didn't consume the entire budget, so finish napi and re-enable
2804 	 * data availability notifications
2805 	 */
2806 	napi_complete_done(napi, cleaned);
2807 	do {
2808 		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
2809 		cpu_relax();
2810 	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);
2811 
2812 	work_done = max(cleaned, 1);
2813 out:
2814 
2815 	return work_done;
2816 }
2817 
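/* Frame queue data availability notification callback: packets are pending
 * on one of the control interface queues, so schedule the NAPI instance
 * associated with it.
 */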
2818 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
2819 {
2820 	struct dpaa2_switch_fq *fq;
2821 
2822 	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
2823 
2824 	napi_schedule(&fq->napi);
2825 }
2826 
2827 static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
2828 {
2829 	struct dpsw_ctrl_if_queue_cfg queue_cfg;
2830 	struct dpaa2_io_notification_ctx *nctx;
2831 	int err, i, j;
2832 
2833 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
2834 		nctx = &ethsw->fq[i].nctx;
2835 
2836 		/* Register a new software context for the FQID.
2837 		 * By using NULL as the first parameter, we specify that we do
2838 		 * not care on which CPU the interrupts for this queue are received
2839 		 */
2840 		nctx->is_cdan = 0;
2841 		nctx->id = ethsw->fq[i].fqid;
2842 		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
2843 		nctx->cb = dpaa2_switch_fqdan_cb;
2844 		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
2845 		if (err) {
2846 			err = -EPROBE_DEFER;
2847 			goto err_register;
2848 		}
2849 
2850 		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
2851 				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
2852 		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
2853 		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
2854 		queue_cfg.dest_cfg.priority = 0;
2855 		queue_cfg.user_ctx = nctx->qman64;
2856 
2857 		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
2858 					     ethsw->dpsw_handle,
2859 					     ethsw->fq[i].type,
2860 					     &queue_cfg);
2861 		if (err)
2862 			goto err_set_queue;
2863 	}
2864 
2865 	return 0;
2866 
2867 err_set_queue:
2868 	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
2869 err_register:
2870 	for (j = 0; j < i; j++)
2871 		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
2872 					    ethsw->dev);
2873 
2874 	return err;
2875 }
2876 
2877 static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
2878 {
2879 	int i;
2880 
2881 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
2882 		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
2883 					    ethsw->dev);
2884 }
2885 
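/* Bring up the control interface in order: FQs, buffer pool, software
 * rings, DPIO notification contexts and buffer seeding, followed by the
 * enable command. The error paths unwind in reverse order.
 */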
2886 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
2887 {
2888 	int err;
2889 
2890 	/* setup FQs for Rx and Tx Conf */
2891 	err = dpaa2_switch_setup_fqs(ethsw);
2892 	if (err)
2893 		return err;
2894 
2895 	/* setup the buffer pool needed on the Rx path */
2896 	err = dpaa2_switch_setup_dpbp(ethsw);
2897 	if (err)
2898 		return err;
2899 
2900 	err = dpaa2_switch_alloc_rings(ethsw);
2901 	if (err)
2902 		goto err_free_dpbp;
2903 
2904 	err = dpaa2_switch_setup_dpio(ethsw);
2905 	if (err)
2906 		goto err_destroy_rings;
2907 
2908 	err = dpaa2_switch_seed_bp(ethsw);
2909 	if (err)
2910 		goto err_deregister_dpio;
2911 
2912 	err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
2913 	if (err) {
2914 		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
2915 		goto err_drain_dpbp;
2916 	}
2917 
2918 	return 0;
2919 
2920 err_drain_dpbp:
2921 	dpaa2_switch_drain_bp(ethsw);
2922 err_deregister_dpio:
2923 	dpaa2_switch_free_dpio(ethsw);
2924 err_destroy_rings:
2925 	dpaa2_switch_destroy_rings(ethsw);
2926 err_free_dpbp:
2927 	dpaa2_switch_free_dpbp(ethsw);
2928 
2929 	return err;
2930 }
2931 
2932 static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
2933 				     u16 port_idx)
2934 {
2935 	struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
2936 
2937 	rtnl_lock();
2938 	dpaa2_switch_port_disconnect_mac(port_priv);
2939 	rtnl_unlock();
2940 	free_netdev(port_priv->netdev);
2941 	ethsw->ports[port_idx] = NULL;
2942 }
2943 
2944 static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
2945 {
2946 	struct device *dev = &sw_dev->dev;
2947 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
2948 	struct dpsw_vlan_if_cfg vcfg = {0};
2949 	struct dpsw_tci_cfg tci_cfg = {0};
2950 	struct dpsw_stp_cfg stp_cfg;
2951 	int err;
2952 	u16 i;
2953 
2954 	ethsw->dev_id = sw_dev->obj_desc.id;
2955 
2956 	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
2957 	if (err) {
2958 		dev_err(dev, "dpsw_open err %d\n", err);
2959 		return err;
2960 	}
2961 
2962 	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
2963 				  &ethsw->sw_attr);
2964 	if (err) {
2965 		dev_err(dev, "dpsw_get_attributes err %d\n", err);
2966 		goto err_close;
2967 	}
2968 
2969 	err = dpsw_get_api_version(ethsw->mc_io, 0,
2970 				   &ethsw->major,
2971 				   &ethsw->minor);
2972 	if (err) {
2973 		dev_err(dev, "dpsw_get_api_version err %d\n", err);
2974 		goto err_close;
2975 	}
2976 
2977 	/* Minimum supported DPSW version check */
2978 	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
2979 	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
2980 	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
2981 		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
2982 			ethsw->major, ethsw->minor);
2983 		err = -EOPNOTSUPP;
2984 		goto err_close;
2985 	}
2986 
2987 	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
2988 		err = -EOPNOTSUPP;
2989 		goto err_close;
2990 	}
2991 
2992 	dpaa2_switch_detect_features(ethsw);
2993 
2994 	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
2995 	if (err) {
2996 		dev_err(dev, "dpsw_reset err %d\n", err);
2997 		goto err_close;
2998 	}
2999 
3000 	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
3001 	stp_cfg.state = DPSW_STP_STATE_FORWARDING;
3002 
3003 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3004 		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
3005 		if (err) {
3006 			dev_err(dev, "dpsw_if_disable err %d\n", err);
3007 			goto err_close;
3008 		}
3009 
3010 		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
3011 				      &stp_cfg);
3012 		if (err) {
3013 			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
3014 				err, i);
3015 			goto err_close;
3016 		}
3017 
3018 		/* Switch starts with all ports configured to VLAN 1. Need to
3019 		 * remove this setting to allow configuration at bridge join
3020 		 */
3021 		vcfg.num_ifs = 1;
3022 		vcfg.if_id[0] = i;
3023 		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
3024 						   DEFAULT_VLAN_ID, &vcfg);
3025 		if (err) {
3026 			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
3027 				err);
3028 			goto err_close;
3029 		}
3030 
3031 		tci_cfg.vlan_id = 4095;
3032 		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
3033 		if (err) {
3034 			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
3035 			goto err_close;
3036 		}
3037 
3038 		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
3039 					  DEFAULT_VLAN_ID, &vcfg);
3040 		if (err) {
3041 			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
3042 			goto err_close;
3043 		}
3044 	}
3045 
3046 	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
3047 	if (err) {
3048 		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
3049 		goto err_close;
3050 	}
3051 
3052 	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
3053 						   WQ_MEM_RECLAIM, "ethsw",
3054 						   ethsw->sw_attr.id);
3055 	if (!ethsw->workqueue) {
3056 		err = -ENOMEM;
3057 		goto err_close;
3058 	}
3059 
3060 	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
3061 	if (err)
3062 		goto err_destroy_ordered_workqueue;
3063 
3064 	err = dpaa2_switch_ctrl_if_setup(ethsw);
3065 	if (err)
3066 		goto err_destroy_ordered_workqueue;
3067 
3068 	return 0;
3069 
3070 err_destroy_ordered_workqueue:
3071 	destroy_workqueue(ethsw->workqueue);
3072 
3073 err_close:
3074 	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3075 	return err;
3076 }
3077 
3078 /* Add an ACL to redirect frames with specific destination MAC address to
3079  * control interface
3080  */
3081 static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
3082 					   const char *mac)
3083 {
3084 	struct dpaa2_switch_acl_entry acl_entry = {0};
3085 
3086 	/* Match on the destination MAC address */
3087 	ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
3088 	eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);
3089 
3090 	/* Trap to CPU */
3091 	acl_entry.cfg.precedence = 0;
3092 	acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;
3093 
3094 	return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
3095 }
3096 
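/* Per-port initialization: cache the Tx queue id, create a private FDB
 * table, install VLAN 1 as the default PVID, set up the egress flood
 * domain and create an ACL table with a trap rule for the IEEE reserved
 * STP destination MAC (01:80:C2:00:00:00) so that STP frames reach the
 * CPU.
 */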
3097 static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
3098 {
3099 	const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
3100 	struct switchdev_obj_port_vlan vlan = {
3101 		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
3102 		.vid = DEFAULT_VLAN_ID,
3103 		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
3104 	};
3105 	struct net_device *netdev = port_priv->netdev;
3106 	struct ethsw_core *ethsw = port_priv->ethsw_data;
3107 	struct dpaa2_switch_filter_block *filter_block;
3108 	struct dpsw_fdb_cfg fdb_cfg = {0};
3109 	struct dpsw_if_attr dpsw_if_attr;
3110 	struct dpaa2_switch_fdb *fdb;
3111 	struct dpsw_acl_cfg acl_cfg;
3112 	u16 fdb_id, acl_tbl_id;
3113 	int err;
3114 
3115 	/* Get the Tx queue for this specific port */
3116 	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
3117 				     port_priv->idx, &dpsw_if_attr);
3118 	if (err) {
3119 		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
3120 		return err;
3121 	}
3122 	port_priv->tx_qdid = dpsw_if_attr.qdid;
3123 
3124 	/* Create a FDB table for this particular switch port */
3125 	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
3126 	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3127 			   &fdb_id, &fdb_cfg);
3128 	if (err) {
3129 		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
3130 		return err;
3131 	}
3132 
3133 	/* Find an unused dpaa2_switch_fdb structure and use it */
3134 	fdb = dpaa2_switch_fdb_get_unused(ethsw);
3135 	fdb->fdb_id = fdb_id;
3136 	fdb->in_use = true;
3137 	fdb->bridge_dev = NULL;
3138 	port_priv->fdb = fdb;
3139 
3140 	/* We need to add VLAN 1 as the PVID on this port until it is under a
3141 	 * bridge since the DPAA2 switch is not able to handle the traffic in a
3142 	 * VLAN unaware fashion
3143 	 */
3144 	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
3145 	if (err)
3146 		return err;
3147 
3148 	/* Setup the egress flooding domains (broadcast, unknown unicast) */
3149 	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
3150 	if (err)
3151 		return err;
3152 
3153 	/* Create an ACL table to be used by this switch port */
3154 	acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
3155 	err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
3156 			   &acl_tbl_id, &acl_cfg);
3157 	if (err) {
3158 		netdev_err(netdev, "dpsw_acl_add err %d\n", err);
3159 		return err;
3160 	}
3161 
3162 	filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
3163 	filter_block->ethsw = ethsw;
3164 	filter_block->acl_id = acl_tbl_id;
3165 	filter_block->in_use = true;
3166 	filter_block->num_acl_rules = 0;
3167 	INIT_LIST_HEAD(&filter_block->acl_entries);
3168 	INIT_LIST_HEAD(&filter_block->mirror_entries);
3169 
3170 	err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
3171 	if (err)
3172 		return err;
3173 
3174 	return dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
3179 }
3180 
3181 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
3182 {
3183 	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3184 	dpaa2_switch_free_dpio(ethsw);
3185 	dpaa2_switch_destroy_rings(ethsw);
3186 	dpaa2_switch_drain_bp(ethsw);
3187 	dpaa2_switch_free_dpbp(ethsw);
3188 }
3189 
3190 static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
3191 {
3192 	struct device *dev = &sw_dev->dev;
3193 	struct ethsw_core *ethsw = dev_get_drvdata(dev);
3194 	int err;
3195 
3196 	dpaa2_switch_ctrl_if_teardown(ethsw);
3197 
3198 	destroy_workqueue(ethsw->workqueue);
3199 
3200 	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
3201 	if (err)
3202 		dev_warn(dev, "dpsw_close err %d\n", err);
3203 }
3204 
3205 static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
3206 {
3207 	struct ethsw_port_priv *port_priv;
3208 	struct ethsw_core *ethsw;
3209 	struct device *dev;
3210 	int i;
3211 
3212 	dev = &sw_dev->dev;
3213 	ethsw = dev_get_drvdata(dev);
3214 
3215 	dpaa2_switch_teardown_irqs(sw_dev);
3216 
3217 	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
3218 
3219 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3220 		port_priv = ethsw->ports[i];
3221 		unregister_netdev(port_priv->netdev);
3222 		dpaa2_switch_remove_port(ethsw, i);
3223 	}
3224 
3225 	kfree(ethsw->fdbs);
3226 	kfree(ethsw->filter_blocks);
3227 	kfree(ethsw->ports);
3228 
3229 	dpaa2_switch_teardown(sw_dev);
3230 
3231 	fsl_mc_portal_free(ethsw->mc_io);
3232 
3233 	kfree(ethsw);
3234 
3235 	dev_set_drvdata(dev, NULL);
3236 
3237 	return 0;
3238 }
3239 
3240 static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
3241 				   u16 port_idx)
3242 {
3243 	struct ethsw_port_priv *port_priv;
3244 	struct device *dev = ethsw->dev;
3245 	struct net_device *port_netdev;
3246 	int err;
3247 
3248 	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
3249 	if (!port_netdev) {
3250 		dev_err(dev, "alloc_etherdev error\n");
3251 		return -ENOMEM;
3252 	}
3253 
3254 	port_priv = netdev_priv(port_netdev);
3255 	port_priv->netdev = port_netdev;
3256 	port_priv->ethsw_data = ethsw;
3257 
3258 	port_priv->idx = port_idx;
3259 	port_priv->stp_state = BR_STATE_FORWARDING;
3260 
3261 	SET_NETDEV_DEV(port_netdev, dev);
3262 	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
3263 	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;
3264 
3265 	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;
3266 
3267 	port_priv->bcast_flood = true;
3268 	port_priv->ucast_flood = true;
3269 
3270 	/* Set MTU limits */
3271 	port_netdev->min_mtu = ETH_MIN_MTU;
3272 	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
3273 
3274 	/* Populate the private port structure so that later calls to
3275 	 * dpaa2_switch_port_init() can use it.
3276 	 */
3277 	ethsw->ports[port_idx] = port_priv;
3278 
3279 	/* The DPAA2 switch's ingress path depends on the VLAN table,
3280 	 * thus we are not able to disable VLAN filtering.
3281 	 */
3282 	port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
3283 				NETIF_F_HW_VLAN_STAG_FILTER |
3284 				NETIF_F_HW_TC;
3285 
3286 	err = dpaa2_switch_port_init(port_priv, port_idx);
3287 	if (err)
3288 		goto err_port_probe;
3289 
3290 	err = dpaa2_switch_port_set_mac_addr(port_priv);
3291 	if (err)
3292 		goto err_port_probe;
3293 
3294 	err = dpaa2_switch_port_set_learning(port_priv, false);
3295 	if (err)
3296 		goto err_port_probe;
3297 	port_priv->learn_ena = false;
3298 
3299 	err = dpaa2_switch_port_connect_mac(port_priv);
3300 	if (err)
3301 		goto err_port_probe;
3302 
3303 	return 0;
3304 
3305 err_port_probe:
3306 	free_netdev(port_netdev);
3307 	ethsw->ports[port_idx] = NULL;
3308 
3309 	return err;
3310 }
3311 
3312 static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
3313 {
3314 	struct device *dev = &sw_dev->dev;
3315 	struct ethsw_core *ethsw;
3316 	int i, err;
3317 
3318 	/* Allocate the switch core */
3319 	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
3321 	if (!ethsw)
3322 		return -ENOMEM;
3323 
3324 	ethsw->dev = dev;
3325 	ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
3326 	dev_set_drvdata(dev, ethsw);
3327 
3328 	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3329 				     &ethsw->mc_io);
3330 	if (err) {
3331 		if (err == -ENXIO)
3332 			err = -EPROBE_DEFER;
3333 		else
3334 			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
3335 		goto err_free_drvdata;
3336 	}
3337 
3338 	err = dpaa2_switch_init(sw_dev);
3339 	if (err)
3340 		goto err_free_cmdport;
3341 
3342 	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
3343 			       GFP_KERNEL);
3344 	if (!ethsw->ports) {
3345 		err = -ENOMEM;
3346 		goto err_teardown;
3347 	}
3348 
3349 	ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
3350 			      GFP_KERNEL);
3351 	if (!ethsw->fdbs) {
3352 		err = -ENOMEM;
3353 		goto err_free_ports;
3354 	}
3355 
3356 	ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
3357 				       sizeof(*ethsw->filter_blocks),
3358 				       GFP_KERNEL);
3359 	if (!ethsw->filter_blocks) {
3360 		err = -ENOMEM;
3361 		goto err_free_fdbs;
3362 	}
3363 
3364 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3365 		err = dpaa2_switch_probe_port(ethsw, i);
3366 		if (err)
3367 			goto err_free_netdev;
3368 	}
3369 
3370 	/* Add a NAPI instance for each of the Rx queues. The first port's
3371 	 * net_device will be associated with the instances since we do not have
3372 	 * different queues for each switch port.
3373 	 */
3374 	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
3375 		netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
3376 			       dpaa2_switch_poll);
3377 
3378 	/* Setup IRQs */
3379 	err = dpaa2_switch_setup_irqs(sw_dev);
3380 	if (err)
3381 		goto err_stop;
3382 
3383 	/* By convention, if the mirror port is equal to the number of switch
3384 	 * interfaces, then mirroring of any kind is disabled.
3385 	 */
3386 	ethsw->mirror_port = ethsw->sw_attr.num_ifs;
3387 
3388 	/* Register the netdev only when the entire setup is done and the
3389 	 * switch port interfaces are ready to receive traffic
3390 	 */
3391 	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
3392 		err = register_netdev(ethsw->ports[i]->netdev);
3393 		if (err < 0) {
3394 			dev_err(dev, "register_netdev error %d\n", err);
3395 			goto err_unregister_ports;
3396 		}
3397 	}
3398 
3399 	return 0;
3400 
3401 err_unregister_ports:
3402 	for (i--; i >= 0; i--)
3403 		unregister_netdev(ethsw->ports[i]->netdev);
3404 	dpaa2_switch_teardown_irqs(sw_dev);
3405 err_stop:
3406 	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	/* On the paths above, every port was fully probed, so make the loop
	 * below remove all of them; otherwise the net_devices are leaked.
	 */
	i = ethsw->sw_attr.num_ifs;
3407 err_free_netdev:
3408 	for (i--; i >= 0; i--)
3409 		dpaa2_switch_remove_port(ethsw, i);
3410 	kfree(ethsw->filter_blocks);
3411 err_free_fdbs:
3412 	kfree(ethsw->fdbs);
3413 err_free_ports:
3414 	kfree(ethsw->ports);
3415 
3416 err_teardown:
3417 	dpaa2_switch_teardown(sw_dev);
3418 
3419 err_free_cmdport:
3420 	fsl_mc_portal_free(ethsw->mc_io);
3421 
3422 err_free_drvdata:
3423 	kfree(ethsw);
3424 	dev_set_drvdata(dev, NULL);
3425 
3426 	return err;
3427 }
3428 
3429 static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
3430 	{
3431 		.vendor = FSL_MC_VENDOR_FREESCALE,
3432 		.obj_type = "dpsw",
3433 	},
3434 	{ .vendor = 0x0 }
3435 };
3436 MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);
3437 
3438 static struct fsl_mc_driver dpaa2_switch_drv = {
3439 	.driver = {
3440 		.name = KBUILD_MODNAME,
3441 		.owner = THIS_MODULE,
3442 	},
3443 	.probe = dpaa2_switch_probe,
3444 	.remove = dpaa2_switch_remove,
3445 	.match_id_table = dpaa2_switch_match_id_table
3446 };
3447 
3448 static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
3449 	.notifier_call = dpaa2_switch_port_netdevice_event,
3450 };
3451 
3452 static struct notifier_block dpaa2_switch_port_switchdev_nb = {
3453 	.notifier_call = dpaa2_switch_port_event,
3454 };
3455 
3456 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
3457 	.notifier_call = dpaa2_switch_port_blocking_event,
3458 };
3459 
3460 static int dpaa2_switch_register_notifiers(void)
3461 {
3462 	int err;
3463 
3464 	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
3465 	if (err) {
3466 		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
3467 		return err;
3468 	}
3469 
3470 	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3471 	if (err) {
3472 		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
3473 		goto err_switchdev_nb;
3474 	}
3475 
3476 	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3477 	if (err) {
3478 		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
3479 		goto err_switchdev_blocking_nb;
3480 	}
3481 
3482 	return 0;
3483 
3484 err_switchdev_blocking_nb:
3485 	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3486 err_switchdev_nb:
3487 	unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3488 
3489 	return err;
3490 }
3491 
3492 static void dpaa2_switch_unregister_notifiers(void)
3493 {
3494 	int err;
3495 
3496 	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
3497 	if (err)
3498 		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
3499 		       err);
3500 
3501 	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
3502 	if (err)
3503 		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);
3504 
3505 	err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
3506 	if (err)
3507 		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
3508 }
3509 
3510 static int __init dpaa2_switch_driver_init(void)
3511 {
3512 	int err;
3513 
3514 	err = fsl_mc_driver_register(&dpaa2_switch_drv);
3515 	if (err)
3516 		return err;
3517 
3518 	err = dpaa2_switch_register_notifiers();
3519 	if (err) {
3520 		fsl_mc_driver_unregister(&dpaa2_switch_drv);
3521 		return err;
3522 	}
3523 
3524 	return 0;
3525 }
3526 
3527 static void __exit dpaa2_switch_driver_exit(void)
3528 {
3529 	dpaa2_switch_unregister_notifiers();
3530 	fsl_mc_driver_unregister(&dpaa2_switch_drv);
3531 }
3532 
3533 module_init(dpaa2_switch_driver_init);
3534 module_exit(dpaa2_switch_driver_exit);
3535 
3536 MODULE_LICENSE("GPL v2");
3537 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
3538