xref: /linux/drivers/net/ethernet/mscc/ocelot.c (revision 65aa371ea52a92dd10826a2ea74bd2c395ee90a8)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Microsemi Ocelot Switch driver
4  *
5  * Copyright (c) 2017 Microsemi Corporation
6  */
7 #include <linux/dsa/ocelot.h>
8 #include <linux/if_bridge.h>
9 #include <linux/ptp_classify.h>
10 #include <soc/mscc/ocelot_vcap.h>
11 #include "ocelot.h"
12 #include "ocelot_vcap.h"
13 
14 #define TABLE_UPDATE_SLEEP_US 10
15 #define TABLE_UPDATE_TIMEOUT_US 100000
16 
17 struct ocelot_mact_entry {
18 	u8 mac[ETH_ALEN];
19 	u16 vid;
20 	enum macaccess_entry_type type;
21 };
22 
23 static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
24 {
25 	return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
26 }
27 
28 static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
29 {
30 	u32 val;
31 
32 	return readx_poll_timeout(ocelot_mact_read_macaccess,
33 		ocelot, val,
34 		(val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
35 		MACACCESS_CMD_IDLE,
36 		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
37 }
38 
39 static void ocelot_mact_select(struct ocelot *ocelot,
40 			       const unsigned char mac[ETH_ALEN],
41 			       unsigned int vid)
42 {
43 	u32 macl = 0, mach = 0;
44 
45 	/* Set the MAC address to handle and the associated VLAN in a format
46 	 * understood by the hardware.
47 	 */
48 	mach |= vid    << 16;
49 	mach |= mac[0] << 8;
50 	mach |= mac[1] << 0;
51 	macl |= mac[2] << 24;
52 	macl |= mac[3] << 16;
53 	macl |= mac[4] << 8;
54 	macl |= mac[5] << 0;
55 
56 	ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
57 	ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
58 
59 }
60 
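/* For example, ocelot_mact_select() above packs VID 1 and the MAC address
 * 00:1c:2d:3e:4f:50 as MACHDATA = 0x0001001c (VID above bit 16, the two most
 * significant MAC bytes in the low half) and MACLDATA = 0x2d3e4f50 (the four
 * remaining MAC bytes).
 */
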
61 int ocelot_mact_learn(struct ocelot *ocelot, int port,
62 		      const unsigned char mac[ETH_ALEN],
63 		      unsigned int vid, enum macaccess_entry_type type)
64 {
65 	u32 cmd = ANA_TABLES_MACACCESS_VALID |
66 		ANA_TABLES_MACACCESS_DEST_IDX(port) |
67 		ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
68 		ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
69 	unsigned int mc_ports;
70 
71 	/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
72 	if (type == ENTRYTYPE_MACv4)
73 		mc_ports = (mac[1] << 8) | mac[2];
74 	else if (type == ENTRYTYPE_MACv6)
75 		mc_ports = (mac[0] << 8) | mac[1];
76 	else
77 		mc_ports = 0;
78 
79 	if (mc_ports & BIT(ocelot->num_phys_ports))
80 		cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
81 
82 	ocelot_mact_select(ocelot, mac, vid);
83 
84 	/* Issue a write command */
85 	ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
86 
87 	return ocelot_mact_wait_for_completion(ocelot);
88 }
89 EXPORT_SYMBOL(ocelot_mact_learn);
90 
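/* Note on the MAC_CPU_COPY check in ocelot_mact_learn(): for MACv4/MACv6
 * entries, the MAC address passed in has already had the destination port
 * set encoded into it (see ocelot_encode_ports_to_mdb() below). For example,
 * on a switch with 4 physical ports, the CPU port module is BIT(4); a MACv4
 * entry whose encoded bytes are mac[1] = 0x00 and mac[2] = 0x15 describes
 * ports 0, 2 and the CPU, so MAC_CPU_COPY gets set.
 */
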
91 int ocelot_mact_forget(struct ocelot *ocelot,
92 		       const unsigned char mac[ETH_ALEN], unsigned int vid)
93 {
94 	ocelot_mact_select(ocelot, mac, vid);
95 
96 	/* Issue a forget command */
97 	ocelot_write(ocelot,
98 		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
99 		     ANA_TABLES_MACACCESS);
100 
101 	return ocelot_mact_wait_for_completion(ocelot);
102 }
103 EXPORT_SYMBOL(ocelot_mact_forget);
104 
105 static void ocelot_mact_init(struct ocelot *ocelot)
106 {
107 	/* Configure the learning mode entry attributes:
108 	 * - Do not copy the frame to the CPU extraction queues.
109 	 * - Use the VLAN and MAC for the DMAC lookup.
110 	 */
111 	ocelot_rmw(ocelot, 0,
112 		   ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
113 		   | ANA_AGENCTRL_LEARN_FWD_KILL
114 		   | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
115 		   ANA_AGENCTRL);
116 
117 	/* Clear the MAC table */
118 	ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
119 }
120 
121 static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
122 {
123 	ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
124 			 ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
125 			 ANA_PORT_VCAP_S2_CFG, port);
126 
127 	ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
128 			 ANA_PORT_VCAP_CFG, port);
129 
130 	ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
131 		       REW_PORT_CFG_ES0_EN,
132 		       REW_PORT_CFG, port);
133 }
134 
135 static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
136 {
137 	return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
138 }
139 
140 static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
141 {
142 	u32 val;
143 
144 	return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
145 		ocelot,
146 		val,
147 		(val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
148 		ANA_TABLES_VLANACCESS_CMD_IDLE,
149 		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
150 }
151 
152 static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
153 {
154 	/* Select the VID to configure */
155 	ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
156 		     ANA_TABLES_VLANTIDX);
157 	/* Set the vlan port members mask and issue a write command */
158 	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
159 			     ANA_TABLES_VLANACCESS_CMD_WRITE,
160 		     ANA_TABLES_VLANACCESS);
161 
162 	return ocelot_vlant_wait_for_completion(ocelot);
163 }
164 
165 static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
166 {
167 	struct ocelot_bridge_vlan *vlan;
168 	int num_untagged = 0;
169 
170 	list_for_each_entry(vlan, &ocelot->vlans, list) {
171 		if (!(vlan->portmask & BIT(port)))
172 			continue;
173 
174 		if (vlan->untagged & BIT(port))
175 			num_untagged++;
176 	}
177 
178 	return num_untagged;
179 }
180 
181 static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
182 {
183 	struct ocelot_bridge_vlan *vlan;
184 	int num_tagged = 0;
185 
186 	list_for_each_entry(vlan, &ocelot->vlans, list) {
187 		if (!(vlan->portmask & BIT(port)))
188 			continue;
189 
190 		if (!(vlan->untagged & BIT(port)))
191 			num_tagged++;
192 	}
193 
194 	return num_tagged;
195 }
196 
197 /* We use native VLAN when we have to mix egress-tagged VLANs with exactly
198  * _one_ egress-untagged VLAN (_the_ native VLAN)
199  */
200 static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
201 {
202 	return ocelot_port_num_tagged_vlans(ocelot, port) &&
203 	       ocelot_port_num_untagged_vlans(ocelot, port) == 1;
204 }
205 
206 static struct ocelot_bridge_vlan *
207 ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
208 {
209 	struct ocelot_bridge_vlan *vlan;
210 
211 	list_for_each_entry(vlan, &ocelot->vlans, list)
212 		if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
213 			return vlan;
214 
215 	return NULL;
216 }
217 
218 /* Keep REW_TAG_CFG_TAG_CFG and, if applicable, REW_PORT_VLAN_CFG_PORT_VID,
219  * in sync with the bridge VLAN table and with the VLAN awareness state of
220  * the port.
221  */
222 static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
223 {
224 	struct ocelot_port *ocelot_port = ocelot->ports[port];
225 	enum ocelot_port_tag_config tag_cfg;
226 	bool uses_native_vlan = false;
227 
228 	if (ocelot_port->vlan_aware) {
229 		uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
230 
231 		if (uses_native_vlan)
232 			tag_cfg = OCELOT_PORT_TAG_NATIVE;
233 		else if (ocelot_port_num_untagged_vlans(ocelot, port))
234 			tag_cfg = OCELOT_PORT_TAG_DISABLED;
235 		else
236 			tag_cfg = OCELOT_PORT_TAG_TRUNK;
237 	} else {
238 		tag_cfg = OCELOT_PORT_TAG_DISABLED;
239 	}
240 
241 	ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
242 		       REW_TAG_CFG_TAG_CFG_M,
243 		       REW_TAG_CFG, port);
244 
245 	if (uses_native_vlan) {
246 		struct ocelot_bridge_vlan *native_vlan;
247 
248 		/* Not having a native VLAN is impossible, because
249 		 * ocelot_port_num_untagged_vlans has returned 1.
250 		 * So there is no use in checking for NULL here.
251 		 */
252 		native_vlan = ocelot_port_find_native_vlan(ocelot, port);
253 
254 		ocelot_rmw_gix(ocelot,
255 			       REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
256 			       REW_PORT_VLAN_CFG_PORT_VID_M,
257 			       REW_PORT_VLAN_CFG, port);
258 	}
259 }
260 
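/* Illustration of the tag config chosen by ocelot_port_manage_port_tag()
 * above: a VLAN-aware port that is a member of VID 10 egress-tagged and
 * VID 1 egress-untagged uses OCELOT_PORT_TAG_NATIVE with
 * REW_PORT_VLAN_CFG_PORT_VID = 1; a port whose VLANs are all egress-untagged
 * uses OCELOT_PORT_TAG_DISABLED (no tag is pushed); a port whose VLANs are
 * all egress-tagged uses OCELOT_PORT_TAG_TRUNK.
 */
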
261 /* Default VLAN to classify untagged frames to (may be zero) */
262 static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
263 				 const struct ocelot_bridge_vlan *pvid_vlan)
264 {
265 	struct ocelot_port *ocelot_port = ocelot->ports[port];
266 	u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
267 	u32 val = 0;
268 
269 	ocelot_port->pvid_vlan = pvid_vlan;
270 
271 	if (ocelot_port->vlan_aware && pvid_vlan)
272 		pvid = pvid_vlan->vid;
273 
274 	ocelot_rmw_gix(ocelot,
275 		       ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
276 		       ANA_PORT_VLAN_CFG_VLAN_VID_M,
277 		       ANA_PORT_VLAN_CFG, port);
278 
279 	/* If there's no pvid, we should drop not only untagged traffic (which
280 	 * happens automatically), but also 802.1p traffic, which gets
281 	 * classified to VLAN 0. VLAN 0 is always in our RX filter, so that
282 	 * traffic would be accepted were it not for this setting.
283 	 */
284 	if (!pvid_vlan && ocelot_port->vlan_aware)
285 		val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
286 		      ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
287 
288 	ocelot_rmw_gix(ocelot, val,
289 		       ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
290 		       ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
291 		       ANA_PORT_DROP_CFG, port);
292 }
293 
294 static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
295 							  u16 vid)
296 {
297 	struct ocelot_bridge_vlan *vlan;
298 
299 	list_for_each_entry(vlan, &ocelot->vlans, list)
300 		if (vlan->vid == vid)
301 			return vlan;
302 
303 	return NULL;
304 }
305 
306 static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
307 				  bool untagged)
308 {
309 	struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
310 	unsigned long portmask;
311 	int err;
312 
313 	if (vlan) {
314 		portmask = vlan->portmask | BIT(port);
315 
316 		err = ocelot_vlant_set_mask(ocelot, vid, portmask);
317 		if (err)
318 			return err;
319 
320 		vlan->portmask = portmask;
321 		/* Bridge VLANs can be overwritten with a different
322 		 * egress-tagging setting, so make sure to override an untagged
323 		 * with a tagged VID if that's going on.
324 		 */
325 		if (untagged)
326 			vlan->untagged |= BIT(port);
327 		else
328 			vlan->untagged &= ~BIT(port);
329 
330 		return 0;
331 	}
332 
333 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
334 	if (!vlan)
335 		return -ENOMEM;
336 
337 	portmask = BIT(port);
338 
339 	err = ocelot_vlant_set_mask(ocelot, vid, portmask);
340 	if (err) {
341 		kfree(vlan);
342 		return err;
343 	}
344 
345 	vlan->vid = vid;
346 	vlan->portmask = portmask;
347 	if (untagged)
348 		vlan->untagged = BIT(port);
349 	INIT_LIST_HEAD(&vlan->list);
350 	list_add_tail(&vlan->list, &ocelot->vlans);
351 
352 	return 0;
353 }
354 
355 static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
356 {
357 	struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
358 	unsigned long portmask;
359 	int err;
360 
361 	if (!vlan)
362 		return 0;
363 
364 	portmask = vlan->portmask & ~BIT(port);
365 
366 	err = ocelot_vlant_set_mask(ocelot, vid, portmask);
367 	if (err)
368 		return err;
369 
370 	vlan->portmask = portmask;
371 	if (vlan->portmask)
372 		return 0;
373 
374 	list_del(&vlan->list);
375 	kfree(vlan);
376 
377 	return 0;
378 }
379 
380 int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
381 			       bool vlan_aware, struct netlink_ext_ack *extack)
382 {
383 	struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
384 	struct ocelot_port *ocelot_port = ocelot->ports[port];
385 	struct ocelot_vcap_filter *filter;
386 	u32 val;
387 
388 	list_for_each_entry(filter, &block->rules, list) {
389 		if (filter->ingress_port_mask & BIT(port) &&
390 		    filter->action.vid_replace_ena) {
391 			NL_SET_ERR_MSG_MOD(extack,
392 					   "Cannot change VLAN state with vlan modify rules active");
393 			return -EBUSY;
394 		}
395 	}
396 
397 	ocelot_port->vlan_aware = vlan_aware;
398 
399 	if (vlan_aware)
400 		val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
401 		      ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
402 	else
403 		val = 0;
404 	ocelot_rmw_gix(ocelot, val,
405 		       ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
406 		       ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
407 		       ANA_PORT_VLAN_CFG, port);
408 
409 	ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
410 	ocelot_port_manage_port_tag(ocelot, port);
411 
412 	return 0;
413 }
414 EXPORT_SYMBOL(ocelot_port_vlan_filtering);
415 
416 int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
417 			bool untagged, struct netlink_ext_ack *extack)
418 {
419 	if (untagged) {
420 		/* We are adding an egress-untagged VLAN */
421 		if (ocelot_port_uses_native_vlan(ocelot, port)) {
422 			NL_SET_ERR_MSG_MOD(extack,
423 					   "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
424 			return -EBUSY;
425 		}
426 	} else {
427 		/* We are adding an egress-tagged VLAN */
428 		if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
429 			NL_SET_ERR_MSG_MOD(extack,
430 					   "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
431 			return -EBUSY;
432 		}
433 	}
434 
435 	return 0;
436 }
437 EXPORT_SYMBOL(ocelot_vlan_prepare);
438 
439 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
440 		    bool untagged)
441 {
442 	int err;
443 
444 	err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
445 	if (err)
446 		return err;
447 
448 	/* Default ingress vlan classification */
449 	if (pvid)
450 		ocelot_port_set_pvid(ocelot, port,
451 				     ocelot_bridge_vlan_find(ocelot, vid));
452 
453 	/* Untagged egress VLAN classification */
454 	ocelot_port_manage_port_tag(ocelot, port);
455 
456 	return 0;
457 }
458 EXPORT_SYMBOL(ocelot_vlan_add);
459 
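/* Example of the ocelot_vlan_add() call flow: adding VID 100 with pvid and
 * untagged set on a port updates the VLAN table port mask for VID 100, makes
 * VID 100 the classified VLAN for untagged ingress traffic (effective while
 * the port is VLAN-aware) and recomputes the egress tagging mode of the port
 * via ocelot_port_manage_port_tag().
 */
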
460 int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
461 {
462 	struct ocelot_port *ocelot_port = ocelot->ports[port];
463 	int err;
464 
465 	err = ocelot_vlan_member_del(ocelot, port, vid);
466 	if (err)
467 		return err;
468 
469 	/* Ingress */
470 	if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
471 		ocelot_port_set_pvid(ocelot, port, NULL);
472 
473 	/* Egress */
474 	ocelot_port_manage_port_tag(ocelot, port);
475 
476 	return 0;
477 }
478 EXPORT_SYMBOL(ocelot_vlan_del);
479 
480 static void ocelot_vlan_init(struct ocelot *ocelot)
481 {
482 	unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0);
483 	u16 port, vid;
484 
485 	/* Clear VLAN table, by default all ports are members of all VLANs */
486 	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
487 		     ANA_TABLES_VLANACCESS);
488 	ocelot_vlant_wait_for_completion(ocelot);
489 
490 	/* Configure the port VLAN memberships */
491 	for (vid = 1; vid < VLAN_N_VID; vid++)
492 		ocelot_vlant_set_mask(ocelot, vid, 0);
493 
494 	/* Because VLAN filtering is enabled, we need VID 0 to get untagged
495 	 * traffic. It is added automatically if the 8021q module is loaded,
496 	 * but we can't rely on that, since the module may not be loaded.
497 	 */
498 	ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
499 
500 	/* Set vlan ingress filter mask to all ports but the CPU port by
501 	 * default.
502 	 */
503 	ocelot_write(ocelot, all_ports, ANA_VLANMASK);
504 
505 	for (port = 0; port < ocelot->num_phys_ports; port++) {
506 		ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
507 		ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
508 	}
509 }
510 
511 static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
512 {
513 	return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
514 }
515 
516 static int ocelot_port_flush(struct ocelot *ocelot, int port)
517 {
518 	unsigned int pause_ena;
519 	int err, val;
520 
521 	/* Disable dequeuing from the egress queues */
522 	ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
523 		       QSYS_PORT_MODE_DEQUEUE_DIS,
524 		       QSYS_PORT_MODE, port);
525 
526 	/* Disable flow control */
527 	ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
528 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
529 
530 	/* Disable priority flow control */
531 	ocelot_fields_write(ocelot, port,
532 			    QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
533 
534 	/* Wait at least the time it takes to receive a frame of maximum length
535 	 * at the port.
536 	 * Worst-case delays for 10 kilobyte jumbo frames are:
537 	 * 8 ms on a 10M port
538 	 * 800 μs on a 100M port
539 	 * 80 μs on a 1G port
540 	 * 32 μs on a 2.5G port
541 	 */
542 	usleep_range(8000, 10000);
543 
544 	/* Disable half duplex backpressure. */
545 	ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
546 		       SYS_FRONT_PORT_MODE, port);
547 
548 	/* Flush the queues associated with the port. */
549 	ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
550 		       REW_PORT_CFG, port);
551 
552 	/* Enable dequeuing from the egress queues. */
553 	ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
554 		       port);
555 
556 	/* Wait until flushing is complete. */
557 	err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
558 				100, 2000000, false, ocelot, port);
559 
560 	/* Clear flushing again. */
561 	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
562 
563 	/* Re-enable flow control */
564 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
565 
566 	return err;
567 }
568 
569 void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
570 				  unsigned int link_an_mode,
571 				  phy_interface_t interface,
572 				  unsigned long quirks)
573 {
574 	struct ocelot_port *ocelot_port = ocelot->ports[port];
575 	int err;
576 
577 	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
578 			 DEV_MAC_ENA_CFG);
579 
580 	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
581 
582 	err = ocelot_port_flush(ocelot, port);
583 	if (err)
584 		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
585 			port, err);
586 
587 	/* Put the port in reset. */
588 	if (interface != PHY_INTERFACE_MODE_QSGMII ||
589 	    !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
590 		ocelot_port_rmwl(ocelot_port,
591 				 DEV_CLOCK_CFG_MAC_TX_RST |
592 				 DEV_CLOCK_CFG_MAC_RX_RST,
593 				 DEV_CLOCK_CFG_MAC_TX_RST |
594 				 DEV_CLOCK_CFG_MAC_RX_RST,
595 				 DEV_CLOCK_CFG);
596 }
597 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
598 
599 void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
600 				struct phy_device *phydev,
601 				unsigned int link_an_mode,
602 				phy_interface_t interface,
603 				int speed, int duplex,
604 				bool tx_pause, bool rx_pause,
605 				unsigned long quirks)
606 {
607 	struct ocelot_port *ocelot_port = ocelot->ports[port];
608 	int mac_speed, mode = 0;
609 	u32 mac_fc_cfg;
610 
611 	/* The MAC might be integrated in systems where the MAC speed is fixed
612 	 * and it's the PCS that performs the rate adaptation, so we have
613 	 * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
614 	 * (which is also its default value).
615 	 */
616 	if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) ||
617 	    speed == SPEED_1000) {
618 		mac_speed = OCELOT_SPEED_1000;
619 		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
620 	} else if (speed == SPEED_2500) {
621 		mac_speed = OCELOT_SPEED_2500;
622 		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
623 	} else if (speed == SPEED_100) {
624 		mac_speed = OCELOT_SPEED_100;
625 	} else {
626 		mac_speed = OCELOT_SPEED_10;
627 	}
628 
629 	if (duplex == DUPLEX_FULL)
630 		mode |= DEV_MAC_MODE_CFG_FDX_ENA;
631 
632 	ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG);
633 
634 	/* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
635 	 * PORT_RST bits in DEV_CLOCK_CFG.
636 	 */
637 	ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed),
638 			   DEV_CLOCK_CFG);
639 
640 	switch (speed) {
641 	case SPEED_10:
642 		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10);
643 		break;
644 	case SPEED_100:
645 		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100);
646 		break;
647 	case SPEED_1000:
648 	case SPEED_2500:
649 		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000);
650 		break;
651 	default:
652 		dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
653 			port, speed);
654 		return;
655 	}
656 
657 	/* Handle RX pause in all cases, with 2500base-X this is used for rate
658 	 * adaptation.
659 	 */
660 	mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
661 
662 	if (tx_pause)
663 		mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
664 			      SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
665 			      SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
666 			      SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
667 
668 	/* Flow control. Link speed is only used here to evaluate the time
669 	 * specification in incoming pause frames.
670 	 */
671 	ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
672 
673 	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
674 
675 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
676 
677 	/* Undo the effects of ocelot_phylink_mac_link_down:
678 	 * enable MAC module
679 	 */
680 	ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
681 			   DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
682 
683 	/* Core: Enable port for frame transfer */
684 	ocelot_fields_write(ocelot, port,
685 			    QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
686 }
687 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
688 
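/* ocelot_phylink_mac_link_up() is meant to be called from the phylink
 * mac_link_up handler of the embedding driver, passing the phylink arguments
 * through. A minimal sketch (the wrapper name is hypothetical):
 *
 *	static void vsc_mac_link_up(struct phylink_config *config, ...)
 *	{
 *		...
 *		ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
 *					   interface, speed, duplex,
 *					   tx_pause, rx_pause, quirks);
 *	}
 */
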
689 static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
690 					struct sk_buff *clone)
691 {
692 	struct ocelot_port *ocelot_port = ocelot->ports[port];
693 	unsigned long flags;
694 
695 	spin_lock_irqsave(&ocelot->ts_id_lock, flags);
696 
697 	if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
698 	    ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
699 		spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
700 		return -EBUSY;
701 	}
702 
703 	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
704 	/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
705 	OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
706 
707 	ocelot_port->ts_id++;
708 	if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
709 		ocelot_port->ts_id = 0;
710 
711 	ocelot_port->ptp_skbs_in_flight++;
712 	ocelot->ptp_skbs_in_flight++;
713 
714 	skb_queue_tail(&ocelot_port->tx_skbs, clone);
715 
716 	spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
717 
718 	return 0;
719 }
720 
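/* Each clone registered by ocelot_port_add_txtstamp_skb() above is assigned
 * a per-port timestamp ID (0 .. OCELOT_MAX_PTP_ID - 1, wrapping around) and
 * queued on tx_skbs until ocelot_get_txtstamp() matches it against the
 * hardware FIFO entry carrying the same ID. Requests are refused with -EBUSY
 * when the port already has OCELOT_MAX_PTP_ID clones in flight or the shared
 * FIFO is full (OCELOT_PTP_FIFO_SIZE entries).
 */
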
721 static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
722 				       unsigned int ptp_class)
723 {
724 	struct ptp_header *hdr;
725 	u8 msgtype, twostep;
726 
727 	hdr = ptp_parse_header(skb, ptp_class);
728 	if (!hdr)
729 		return false;
730 
731 	msgtype = ptp_get_msgtype(hdr, ptp_class);
732 	twostep = hdr->flag_field[0] & 0x2;
733 
734 	if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
735 		return true;
736 
737 	return false;
738 }
739 
740 int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
741 				 struct sk_buff *skb,
742 				 struct sk_buff **clone)
743 {
744 	struct ocelot_port *ocelot_port = ocelot->ports[port];
745 	u8 ptp_cmd = ocelot_port->ptp_cmd;
746 	unsigned int ptp_class;
747 	int err;
748 
749 	/* Don't do anything if PTP timestamping not enabled */
750 	if (!ptp_cmd)
751 		return 0;
752 
753 	ptp_class = ptp_classify_raw(skb);
754 	if (ptp_class == PTP_CLASS_NONE)
755 		return -EINVAL;
756 
757 	/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
758 	if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
759 		if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
760 			OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
761 			return 0;
762 		}
763 
764 		/* Fall back to two-step timestamping */
765 		ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
766 	}
767 
768 	if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
769 		*clone = skb_clone_sk(skb);
770 		if (!(*clone))
771 			return -ENOMEM;
772 
773 		err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
774 		if (err)
775 			return err;
776 
777 		OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
778 		OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
779 	}
780 
781 	return 0;
782 }
783 EXPORT_SYMBOL(ocelot_port_txtstamp_request);
784 
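/* Timestamping decision made by ocelot_port_txtstamp_request(): in one-step
 * mode (IFH_REW_OP_ORIGIN_PTP), a one-step Sync message is stamped in-line
 * by the rewriter and no clone is kept; any other PTP frame (a Delay_Req,
 * for example) falls back to two-step timestamping, where a clone is
 * registered and completed later from the timestamp FIFO.
 */
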
785 static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
786 				   struct timespec64 *ts)
787 {
788 	unsigned long flags;
789 	u32 val;
790 
791 	spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
792 
793 	/* Read current PTP time to get seconds */
794 	val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
795 
796 	val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
797 	val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
798 	ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
799 	ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
800 
801 	/* Read packet HW timestamp from FIFO */
802 	val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
803 	ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
804 
805 	/* The seconds counter has incremented since the ts was registered */
806 	if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
807 		ts->tv_sec--;
808 
809 	spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
810 }
811 
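/* The timestamp FIFO stores only nanoseconds plus one bit giving the parity
 * of the second in which the stamp was taken. For example, if the saved ToD
 * reads back an odd tv_sec but the FIFO reports an even second, the seconds
 * counter rolled over after the stamp was captured, so
 * ocelot_get_hwtimestamp() subtracts one second.
 */
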
812 static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
813 {
814 	struct ptp_header *hdr;
815 
816 	hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
817 	if (WARN_ON(!hdr))
818 		return false;
819 
820 	return seqid == ntohs(hdr->sequence_id);
821 }
822 
823 void ocelot_get_txtstamp(struct ocelot *ocelot)
824 {
825 	int budget = OCELOT_PTP_QUEUE_SZ;
826 
827 	while (budget--) {
828 		struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
829 		struct skb_shared_hwtstamps shhwtstamps;
830 		u32 val, id, seqid, txport;
831 		struct ocelot_port *port;
832 		struct timespec64 ts;
833 		unsigned long flags;
834 
835 		val = ocelot_read(ocelot, SYS_PTP_STATUS);
836 
837 		/* Check if a timestamp can be retrieved */
838 		if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
839 			break;
840 
841 		WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
842 
843 		/* Retrieve the ts ID and Tx port */
844 		id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
845 		txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
846 		seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
847 
848 		port = ocelot->ports[txport];
849 
850 		spin_lock(&ocelot->ts_id_lock);
851 		port->ptp_skbs_in_flight--;
852 		ocelot->ptp_skbs_in_flight--;
853 		spin_unlock(&ocelot->ts_id_lock);
854 
855 		/* Retrieve its associated skb */
856 try_again:
857 		spin_lock_irqsave(&port->tx_skbs.lock, flags);
858 
859 		skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
860 			if (OCELOT_SKB_CB(skb)->ts_id != id)
861 				continue;
862 			__skb_unlink(skb, &port->tx_skbs);
863 			skb_match = skb;
864 			break;
865 		}
866 
867 		spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
868 
869 		if (WARN_ON(!skb_match))
870 			continue;
871 
872 		if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
873 			dev_err_ratelimited(ocelot->dev,
874 					    "port %d received stale TX timestamp for seqid %d, discarding\n",
875 					    txport, seqid);
876 			dev_kfree_skb_any(skb);
877 			goto try_again;
878 		}
879 
880 		/* Get the h/w timestamp */
881 		ocelot_get_hwtimestamp(ocelot, &ts);
882 
883 		/* Set the timestamp into the skb */
884 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
885 		shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
886 		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
887 
888 		/* Next ts */
889 		ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
890 	}
891 }
892 EXPORT_SYMBOL(ocelot_get_txtstamp);
893 
894 static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
895 				u32 *rval)
896 {
897 	u32 bytes_valid, val;
898 
899 	val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
900 	if (val == XTR_NOT_READY) {
901 		if (ifh)
902 			return -EIO;
903 
904 		do {
905 			val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
906 		} while (val == XTR_NOT_READY);
907 	}
908 
909 	switch (val) {
910 	case XTR_ABORT:
911 		return -EIO;
912 	case XTR_EOF_0:
913 	case XTR_EOF_1:
914 	case XTR_EOF_2:
915 	case XTR_EOF_3:
916 	case XTR_PRUNED:
917 		bytes_valid = XTR_VALID_BYTES(val);
918 		val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
919 		if (val == XTR_ESCAPE)
920 			*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
921 		else
922 			*rval = val;
923 
924 		return bytes_valid;
925 	case XTR_ESCAPE:
926 		*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
927 
928 		return 4;
929 	default:
930 		*rval = val;
931 
932 		return 4;
933 	}
934 }
935 
936 static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
937 {
938 	int i, err = 0;
939 
940 	for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
941 		err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]);
942 		if (err != 4)
943 			return (err < 0) ? err : -EIO;
944 	}
945 
946 	return 0;
947 }
948 
949 int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
950 {
951 	struct skb_shared_hwtstamps *shhwtstamps;
952 	u64 tod_in_ns, full_ts_in_ns;
953 	u64 timestamp, src_port, len;
954 	u32 xfh[OCELOT_TAG_LEN / 4];
955 	struct net_device *dev;
956 	struct timespec64 ts;
957 	struct sk_buff *skb;
958 	int sz, buf_len;
959 	u32 val, *buf;
960 	int err;
961 
962 	err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
963 	if (err)
964 		return err;
965 
966 	ocelot_xfh_get_src_port(xfh, &src_port);
967 	ocelot_xfh_get_len(xfh, &len);
968 	ocelot_xfh_get_rew_val(xfh, &timestamp);
969 
970 	if (WARN_ON(src_port >= ocelot->num_phys_ports))
971 		return -EINVAL;
972 
973 	dev = ocelot->ops->port_to_netdev(ocelot, src_port);
974 	if (!dev)
975 		return -EINVAL;
976 
977 	skb = netdev_alloc_skb(dev, len);
978 	if (unlikely(!skb)) {
979 		netdev_err(dev, "Unable to allocate sk_buff\n");
980 		return -ENOMEM;
981 	}
982 
983 	buf_len = len - ETH_FCS_LEN;
984 	buf = (u32 *)skb_put(skb, buf_len);
985 
986 	len = 0;
987 	do {
988 		sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
989 		if (sz < 0) {
990 			err = sz;
991 			goto out_free_skb;
992 		}
993 		*buf++ = val;
994 		len += sz;
995 	} while (len < buf_len);
996 
997 	/* Read the FCS */
998 	sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
999 	if (sz < 0) {
1000 		err = sz;
1001 		goto out_free_skb;
1002 	}
1003 
1004 	/* Update the statistics if part of the FCS was read before */
1005 	len -= ETH_FCS_LEN - sz;
1006 
1007 	if (unlikely(dev->features & NETIF_F_RXFCS)) {
1008 		buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
1009 		*buf = val;
1010 	}
1011 
1012 	if (ocelot->ptp) {
1013 		ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
1014 
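		/* The extraction header carries only the low 32 bits of the
		 * PTP nanosecond counter at reception (rew_val). Rebuild the
		 * full timestamp from the current ToD: if the ToD's low 32
		 * bits are already smaller than the captured value, the
		 * counter wrapped since the frame was received, so borrow
		 * one from the upper 32 bits.
		 */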
1015 		tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
1016 		if ((tod_in_ns & 0xffffffff) < timestamp)
1017 			full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
1018 					timestamp;
1019 		else
1020 			full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
1021 					timestamp;
1022 
1023 		shhwtstamps = skb_hwtstamps(skb);
1024 		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
1025 		shhwtstamps->hwtstamp = full_ts_in_ns;
1026 	}
1027 
1028 	/* Everything we see on an interface that is in the HW bridge
1029 	 * has already been forwarded.
1030 	 */
1031 	if (ocelot->ports[src_port]->bridge)
1032 		skb->offload_fwd_mark = 1;
1033 
1034 	skb->protocol = eth_type_trans(skb, dev);
1035 
1036 	*nskb = skb;
1037 
1038 	return 0;
1039 
1040 out_free_skb:
1041 	kfree_skb(skb);
1042 	return err;
1043 }
1044 EXPORT_SYMBOL(ocelot_xtr_poll_frame);
1045 
1046 bool ocelot_can_inject(struct ocelot *ocelot, int grp)
1047 {
1048 	u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
1049 
1050 	if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
1051 		return false;
1052 	if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
1053 		return false;
1054 
1055 	return true;
1056 }
1057 EXPORT_SYMBOL(ocelot_can_inject);
1058 
1059 void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
1060 			      u32 rew_op, struct sk_buff *skb)
1061 {
1062 	u32 ifh[OCELOT_TAG_LEN / 4] = {0};
1063 	unsigned int i, count, last;
1064 
1065 	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
1066 			 QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
1067 
1068 	ocelot_ifh_set_bypass(ifh, 1);
1069 	ocelot_ifh_set_dest(ifh, BIT_ULL(port));
1070 	ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
1071 	ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
1072 	ocelot_ifh_set_rew_op(ifh, rew_op);
1073 
1074 	for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
1075 		ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
1076 
1077 	count = DIV_ROUND_UP(skb->len, 4);
1078 	last = skb->len % 4;
1079 	for (i = 0; i < count; i++)
1080 		ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
1081 
1082 	/* Add padding */
1083 	while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
1084 		ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
1085 		i++;
1086 	}
1087 
1088 	/* Indicate EOF and valid bytes in last word */
1089 	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
1090 			 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
1091 			 QS_INJ_CTRL_EOF,
1092 			 QS_INJ_CTRL, grp);
1093 
1094 	/* Add dummy CRC */
1095 	ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
1096 	skb_tx_timestamp(skb);
1097 
1098 	skb->dev->stats.tx_packets++;
1099 	skb->dev->stats.tx_bytes += skb->len;
1100 }
1101 EXPORT_SYMBOL(ocelot_port_inject_frame);
1102 
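/* Typical usage of the injection API above: check ocelot_can_inject() first
 * and back off (e.g. return NETDEV_TX_BUSY) while the group's injection FIFO
 * is not ready, then hand over the frame:
 *
 *	if (!ocelot_can_inject(ocelot, 0))
 *		return NETDEV_TX_BUSY;
 *	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
 *
 * Group 0 and rew_op are caller-chosen; rew_op carries the IFH rewriter
 * command (0 when no PTP rewriting is requested).
 */
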
1103 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
1104 {
1105 	while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
1106 		ocelot_read_rix(ocelot, QS_XTR_RD, grp);
1107 }
1108 EXPORT_SYMBOL(ocelot_drain_cpu_queue);
1109 
1110 int ocelot_fdb_add(struct ocelot *ocelot, int port,
1111 		   const unsigned char *addr, u16 vid)
1112 {
1113 	int pgid = port;
1114 
1115 	if (port == ocelot->npi)
1116 		pgid = PGID_CPU;
1117 
1118 	return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
1119 }
1120 EXPORT_SYMBOL(ocelot_fdb_add);
1121 
1122 int ocelot_fdb_del(struct ocelot *ocelot, int port,
1123 		   const unsigned char *addr, u16 vid)
1124 {
1125 	return ocelot_mact_forget(ocelot, addr, vid);
1126 }
1127 EXPORT_SYMBOL(ocelot_fdb_del);
1128 
1129 int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
1130 			    bool is_static, void *data)
1131 {
1132 	struct ocelot_dump_ctx *dump = data;
1133 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
1134 	u32 seq = dump->cb->nlh->nlmsg_seq;
1135 	struct nlmsghdr *nlh;
1136 	struct ndmsg *ndm;
1137 
1138 	if (dump->idx < dump->cb->args[2])
1139 		goto skip;
1140 
1141 	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
1142 			sizeof(*ndm), NLM_F_MULTI);
1143 	if (!nlh)
1144 		return -EMSGSIZE;
1145 
1146 	ndm = nlmsg_data(nlh);
1147 	ndm->ndm_family  = AF_BRIDGE;
1148 	ndm->ndm_pad1    = 0;
1149 	ndm->ndm_pad2    = 0;
1150 	ndm->ndm_flags   = NTF_SELF;
1151 	ndm->ndm_type    = 0;
1152 	ndm->ndm_ifindex = dump->dev->ifindex;
1153 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
1154 
1155 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
1156 		goto nla_put_failure;
1157 
1158 	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
1159 		goto nla_put_failure;
1160 
1161 	nlmsg_end(dump->skb, nlh);
1162 
1163 skip:
1164 	dump->idx++;
1165 	return 0;
1166 
1167 nla_put_failure:
1168 	nlmsg_cancel(dump->skb, nlh);
1169 	return -EMSGSIZE;
1170 }
1171 EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
1172 
1173 static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
1174 			    struct ocelot_mact_entry *entry)
1175 {
1176 	u32 val, dst, macl, mach;
1177 	char mac[ETH_ALEN];
1178 
1179 	/* Set row and column to read from */
1180 	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
1181 	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
1182 
1183 	/* Issue a read command */
1184 	ocelot_write(ocelot,
1185 		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
1186 		     ANA_TABLES_MACACCESS);
1187 
1188 	if (ocelot_mact_wait_for_completion(ocelot))
1189 		return -ETIMEDOUT;
1190 
1191 	/* Read the entry flags */
1192 	val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
1193 	if (!(val & ANA_TABLES_MACACCESS_VALID))
1194 		return -EINVAL;
1195 
1196 	/* If the entry read has another port configured as its destination,
1197 	 * do not report it.
1198 	 */
1199 	dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
1200 	if (dst != port)
1201 		return -EINVAL;
1202 
1203 	/* Get the entry's MAC address and VLAN id */
1204 	macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
1205 	mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
1206 
1207 	mac[0] = (mach >> 8)  & 0xff;
1208 	mac[1] = (mach >> 0)  & 0xff;
1209 	mac[2] = (macl >> 24) & 0xff;
1210 	mac[3] = (macl >> 16) & 0xff;
1211 	mac[4] = (macl >> 8)  & 0xff;
1212 	mac[5] = (macl >> 0)  & 0xff;
1213 
1214 	entry->vid = (mach >> 16) & 0xfff;
1215 	ether_addr_copy(entry->mac, mac);
1216 
1217 	return 0;
1218 }
1219 
1220 int ocelot_fdb_dump(struct ocelot *ocelot, int port,
1221 		    dsa_fdb_dump_cb_t *cb, void *data)
1222 {
1223 	int i, j;
1224 
1225 	/* Loop through all the mac tables entries. */
1226 	for (i = 0; i < ocelot->num_mact_rows; i++) {
1227 		for (j = 0; j < 4; j++) {
1228 			struct ocelot_mact_entry entry;
1229 			bool is_static;
1230 			int ret;
1231 
1232 			ret = ocelot_mact_read(ocelot, port, i, j, &entry);
1233 			/* If the entry is invalid (wrong port, invalid...),
1234 			 * skip it.
1235 			 */
1236 			if (ret == -EINVAL)
1237 				continue;
1238 			else if (ret)
1239 				return ret;
1240 
1241 			is_static = (entry.type == ENTRYTYPE_LOCKED);
1242 
1243 			ret = cb(entry.mac, entry.vid, is_static, data);
1244 			if (ret)
1245 				return ret;
1246 		}
1247 	}
1248 
1249 	return 0;
1250 }
1251 EXPORT_SYMBOL(ocelot_fdb_dump);
1252 
1253 int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
1254 {
1255 	return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
1256 			    sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
1257 }
1258 EXPORT_SYMBOL(ocelot_hwstamp_get);
1259 
1260 int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
1261 {
1262 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1263 	struct hwtstamp_config cfg;
1264 
1265 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1266 		return -EFAULT;
1267 
1268 	/* reserved for future extensions */
1269 	if (cfg.flags)
1270 		return -EINVAL;
1271 
1272 	/* Tx type sanity check */
1273 	switch (cfg.tx_type) {
1274 	case HWTSTAMP_TX_ON:
1275 		ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
1276 		break;
1277 	case HWTSTAMP_TX_ONESTEP_SYNC:
1278 		/* IFH_REW_OP_ONE_STEP_PTP only updates the correction field;
1279 		 * we need to update the origin timestamp, too.
1280 		 */
1281 		ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
1282 		break;
1283 	case HWTSTAMP_TX_OFF:
1284 		ocelot_port->ptp_cmd = 0;
1285 		break;
1286 	default:
1287 		return -ERANGE;
1288 	}
1289 
1290 	mutex_lock(&ocelot->ptp_lock);
1291 
1292 	switch (cfg.rx_filter) {
1293 	case HWTSTAMP_FILTER_NONE:
1294 		break;
1295 	case HWTSTAMP_FILTER_ALL:
1296 	case HWTSTAMP_FILTER_SOME:
1297 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1298 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1299 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1300 	case HWTSTAMP_FILTER_NTP_ALL:
1301 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1302 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1303 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1304 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1305 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1306 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1307 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
1308 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
1309 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1310 		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1311 		break;
1312 	default:
1313 		mutex_unlock(&ocelot->ptp_lock);
1314 		return -ERANGE;
1315 	}
1316 
1317 	/* Commit back the result & save it */
1318 	memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
1319 	mutex_unlock(&ocelot->ptp_lock);
1320 
1321 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1322 }
1323 EXPORT_SYMBOL(ocelot_hwstamp_set);
1324 
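/* For example, a SIOCSHWTSTAMP request handled by ocelot_hwstamp_set() above
 * with tx_type HWTSTAMP_TX_ONESTEP_SYNC selects IFH_REW_OP_ORIGIN_PTP
 * (one-step Sync is stamped by the rewriter, everything else falls back to
 * two-step), and any of the accepted PTP rx_filter values is reported back
 * widened to HWTSTAMP_FILTER_PTP_V2_EVENT.
 */
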
1325 void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
1326 {
1327 	int i;
1328 
1329 	if (sset != ETH_SS_STATS)
1330 		return;
1331 
1332 	for (i = 0; i < ocelot->num_stats; i++)
1333 		memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
1334 		       ETH_GSTRING_LEN);
1335 }
1336 EXPORT_SYMBOL(ocelot_get_strings);
1337 
1338 static void ocelot_update_stats(struct ocelot *ocelot)
1339 {
1340 	int i, j;
1341 
1342 	mutex_lock(&ocelot->stats_lock);
1343 
1344 	for (i = 0; i < ocelot->num_phys_ports; i++) {
1345 		/* Configure the port to read the stats from */
1346 		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
1347 
1348 		for (j = 0; j < ocelot->num_stats; j++) {
1349 			u32 val;
1350 			unsigned int idx = i * ocelot->num_stats + j;
1351 
1352 			val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
1353 					      ocelot->stats_layout[j].offset);
1354 
1355 			if (val < (ocelot->stats[idx] & U32_MAX))
1356 				ocelot->stats[idx] += (u64)1 << 32;
1357 
1358 			ocelot->stats[idx] = (ocelot->stats[idx] &
1359 					      ~(u64)U32_MAX) + val;
1360 		}
1361 	}
1362 
1363 	mutex_unlock(&ocelot->stats_lock);
1364 }
1365 
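/* ocelot_update_stats() above extends the 32-bit hardware counters to 64
 * bits in software. For example, if the stored value is 0x1fffffff0 and the
 * new reading is 0x5, the low word went backwards, so a wrap is assumed and
 * the counter becomes 0x200000005.
 */
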
1366 static void ocelot_check_stats_work(struct work_struct *work)
1367 {
1368 	struct delayed_work *del_work = to_delayed_work(work);
1369 	struct ocelot *ocelot = container_of(del_work, struct ocelot,
1370 					     stats_work);
1371 
1372 	ocelot_update_stats(ocelot);
1373 
1374 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
1375 			   OCELOT_STATS_CHECK_DELAY);
1376 }
1377 
1378 void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
1379 {
1380 	int i;
1381 
1382 	/* check and update now */
1383 	ocelot_update_stats(ocelot);
1384 
1385 	/* Copy all counters */
1386 	for (i = 0; i < ocelot->num_stats; i++)
1387 		*data++ = ocelot->stats[port * ocelot->num_stats + i];
1388 }
1389 EXPORT_SYMBOL(ocelot_get_ethtool_stats);
1390 
1391 int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
1392 {
1393 	if (sset != ETH_SS_STATS)
1394 		return -EOPNOTSUPP;
1395 
1396 	return ocelot->num_stats;
1397 }
1398 EXPORT_SYMBOL(ocelot_get_sset_count);
1399 
1400 int ocelot_get_ts_info(struct ocelot *ocelot, int port,
1401 		       struct ethtool_ts_info *info)
1402 {
1403 	info->phc_index = ocelot->ptp_clock ?
1404 			  ptp_clock_index(ocelot->ptp_clock) : -1;
1405 	if (info->phc_index == -1) {
1406 		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
1407 					 SOF_TIMESTAMPING_RX_SOFTWARE |
1408 					 SOF_TIMESTAMPING_SOFTWARE;
1409 		return 0;
1410 	}
1411 	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
1412 				 SOF_TIMESTAMPING_RX_SOFTWARE |
1413 				 SOF_TIMESTAMPING_SOFTWARE |
1414 				 SOF_TIMESTAMPING_TX_HARDWARE |
1415 				 SOF_TIMESTAMPING_RX_HARDWARE |
1416 				 SOF_TIMESTAMPING_RAW_HARDWARE;
1417 	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
1418 			 BIT(HWTSTAMP_TX_ONESTEP_SYNC);
1419 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
1420 
1421 	return 0;
1422 }
1423 EXPORT_SYMBOL(ocelot_get_ts_info);
1424 
1425 static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
1426 				bool only_active_ports)
1427 {
1428 	u32 mask = 0;
1429 	int port;
1430 
1431 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1432 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1433 
1434 		if (!ocelot_port)
1435 			continue;
1436 
1437 		if (ocelot_port->bond == bond) {
1438 			if (only_active_ports && !ocelot_port->lag_tx_active)
1439 				continue;
1440 
1441 			mask |= BIT(port);
1442 		}
1443 	}
1444 
1445 	return mask;
1446 }
1447 
1448 static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
1449 				      struct net_device *bridge)
1450 {
1451 	struct ocelot_port *ocelot_port = ocelot->ports[src_port];
1452 	u32 mask = 0;
1453 	int port;
1454 
1455 	if (!ocelot_port || ocelot_port->bridge != bridge ||
1456 	    ocelot_port->stp_state != BR_STATE_FORWARDING)
1457 		return 0;
1458 
1459 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1460 		ocelot_port = ocelot->ports[port];
1461 
1462 		if (!ocelot_port)
1463 			continue;
1464 
1465 		if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
1466 		    ocelot_port->bridge == bridge)
1467 			mask |= BIT(port);
1468 	}
1469 
1470 	return mask;
1471 }
1472 
1473 static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
1474 {
1475 	u32 mask = 0;
1476 	int port;
1477 
1478 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1479 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1480 
1481 		if (!ocelot_port)
1482 			continue;
1483 
1484 		if (ocelot_port->is_dsa_8021q_cpu)
1485 			mask |= BIT(port);
1486 	}
1487 
1488 	return mask;
1489 }
1490 
1491 void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
1492 {
1493 	unsigned long cpu_fwd_mask;
1494 	int port;
1495 
1496 	/* If a DSA tag_8021q CPU exists, it needs to be included in the
1497 	 * regular forwarding path of the front ports regardless of whether
1498 	 * those are bridged or standalone.
1499 	 * If DSA tag_8021q is not used, this returns 0, which is fine because
1500 	 * the hardware-based CPU port module can be a destination for packets
1501 	 * even if it isn't part of PGID_SRC.
1502 	 */
1503 	cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
1504 
1505 	/* Apply FWD mask. The loop is needed to add/remove the current port as
1506 	 * a source for the other ports.
1507 	 */
1508 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1509 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1510 		unsigned long mask;
1511 
1512 		if (!ocelot_port) {
1513 			/* Unused ports can't send anywhere */
1514 			mask = 0;
1515 		} else if (ocelot_port->is_dsa_8021q_cpu) {
1516 			/* The DSA tag_8021q CPU ports need to be able to
1517 			 * forward packets to all other ports except for
1518 			 * themselves
1519 			 */
1520 			mask = GENMASK(ocelot->num_phys_ports - 1, 0);
1521 			mask &= ~cpu_fwd_mask;
1522 		} else if (ocelot_port->bridge) {
1523 			struct net_device *bridge = ocelot_port->bridge;
1524 			struct net_device *bond = ocelot_port->bond;
1525 
1526 			mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
1527 			mask |= cpu_fwd_mask;
1528 			mask &= ~BIT(port);
1529 			if (bond) {
1530 				mask &= ~ocelot_get_bond_mask(ocelot, bond,
1531 							      false);
1532 			}
1533 		} else {
1534 			/* Standalone ports forward only to DSA tag_8021q CPU
1535 			 * ports (if those exist), or to the hardware CPU port
1536 			 * module otherwise.
1537 			 */
1538 			mask = cpu_fwd_mask;
1539 		}
1540 
1541 		ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
1542 	}
1543 }
1544 EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
1545 
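/* For example, ocelot_apply_bridge_fwd_mask() above, with ports 0 and 1
 * under the same bridge (both in the forwarding STP state), port 2
 * standalone and no tag_8021q CPU port, writes BIT(1) to PGID_SRC + 0,
 * BIT(0) to PGID_SRC + 1 and 0 to PGID_SRC + 2 (standalone traffic can still
 * reach the CPU port module, which is not subject to PGID_SRC).
 */
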
1546 void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
1547 {
1548 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1549 	u32 learn_ena = 0;
1550 
1551 	ocelot_port->stp_state = state;
1552 
1553 	if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
1554 	    ocelot_port->learn_ena)
1555 		learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA;
1556 
1557 	ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
1558 		       ANA_PORT_PORT_CFG, port);
1559 
1560 	ocelot_apply_bridge_fwd_mask(ocelot);
1561 }
1562 EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
1563 
1564 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
1565 {
1566 	unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
1567 
1568 	/* Setting AGE_PERIOD to zero effectively disables automatic aging,
1569 	 * which is clearly not what our intention is. So avoid that.
1570 	 */
1571 	if (!age_period)
1572 		age_period = 1;
1573 
1574 	ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
1575 }
1576 EXPORT_SYMBOL(ocelot_set_ageing_time);
1577 
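/* For example, the bridge default ageing time of 300 seconds yields
 * AGE_PERIOD = 150 in ocelot_set_ageing_time() above. The division by 2000
 * matches an ageing process that removes an entry only after it has been
 * untouched for roughly two consecutive scan periods.
 */
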
1578 static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
1579 						     const unsigned char *addr,
1580 						     u16 vid)
1581 {
1582 	struct ocelot_multicast *mc;
1583 
1584 	list_for_each_entry(mc, &ocelot->multicast, list) {
1585 		if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
1586 			return mc;
1587 	}
1588 
1589 	return NULL;
1590 }
1591 
1592 static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
1593 {
1594 	if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
1595 		return ENTRYTYPE_MACv4;
1596 	if (addr[0] == 0x33 && addr[1] == 0x33)
1597 		return ENTRYTYPE_MACv6;
1598 	return ENTRYTYPE_LOCKED;
1599 }
1600 
1601 static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
1602 					     unsigned long ports)
1603 {
1604 	struct ocelot_pgid *pgid;
1605 
1606 	pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
1607 	if (!pgid)
1608 		return ERR_PTR(-ENOMEM);
1609 
1610 	pgid->ports = ports;
1611 	pgid->index = index;
1612 	refcount_set(&pgid->refcount, 1);
1613 	list_add_tail(&pgid->list, &ocelot->pgids);
1614 
1615 	return pgid;
1616 }
1617 
1618 static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
1619 {
1620 	if (!refcount_dec_and_test(&pgid->refcount))
1621 		return;
1622 
1623 	list_del(&pgid->list);
1624 	kfree(pgid);
1625 }
1626 
1627 static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
1628 					       const struct ocelot_multicast *mc)
1629 {
1630 	struct ocelot_pgid *pgid;
1631 	int index;
1632 
1633 	/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
1634 	 * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
1635 	 * destination mask table (PGID), the destination set is programmed as
1636 	 * part of the entry MAC address.", and the DEST_IDX is set to 0.
1637 	 */
1638 	if (mc->entry_type == ENTRYTYPE_MACv4 ||
1639 	    mc->entry_type == ENTRYTYPE_MACv6)
1640 		return ocelot_pgid_alloc(ocelot, 0, mc->ports);
1641 
1642 	list_for_each_entry(pgid, &ocelot->pgids, list) {
1643 		/* When searching for a nonreserved multicast PGID, ignore the
1644 		 * dummy PGID of zero that we have for MACv4/MACv6 entries
1645 		 */
1646 		if (pgid->index && pgid->ports == mc->ports) {
1647 			refcount_inc(&pgid->refcount);
1648 			return pgid;
1649 		}
1650 	}
1651 
1652 	/* Search for a free index in the nonreserved multicast PGID area */
1653 	for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
1654 		bool used = false;
1655 
1656 		list_for_each_entry(pgid, &ocelot->pgids, list) {
1657 			if (pgid->index == index) {
1658 				used = true;
1659 				break;
1660 			}
1661 		}
1662 
1663 		if (!used)
1664 			return ocelot_pgid_alloc(ocelot, index, mc->ports);
1665 	}
1666 
1667 	return ERR_PTR(-ENOSPC);
1668 }
1669 
1670 static void ocelot_encode_ports_to_mdb(unsigned char *addr,
1671 				       struct ocelot_multicast *mc)
1672 {
1673 	ether_addr_copy(addr, mc->addr);
1674 
1675 	if (mc->entry_type == ENTRYTYPE_MACv4) {
1676 		addr[0] = 0;
1677 		addr[1] = mc->ports >> 8;
1678 		addr[2] = mc->ports & 0xff;
1679 	} else if (mc->entry_type == ENTRYTYPE_MACv6) {
1680 		addr[0] = mc->ports >> 8;
1681 		addr[1] = mc->ports & 0xff;
1682 	}
1683 }
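/* Worked example for ocelot_encode_ports_to_mdb() above: a MACv4 group
 * 01:00:5e:01:02:03 with a destination port set of 0x0013 is written to the
 * MAC table as 00:00:13:01:02:03, i.e. the fixed 01:00:5e prefix is replaced
 * by the port set and no PGID lookup is performed for it. For MACv6 groups
 * (33:33:xx:xx:xx:xx), only the first two bytes are replaced.
 */
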
1684 
1685 int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
1686 			const struct switchdev_obj_port_mdb *mdb)
1687 {
1688 	unsigned char addr[ETH_ALEN];
1689 	struct ocelot_multicast *mc;
1690 	struct ocelot_pgid *pgid;
1691 	u16 vid = mdb->vid;
1692 
1693 	if (port == ocelot->npi)
1694 		port = ocelot->num_phys_ports;
1695 
1696 	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
1697 	if (!mc) {
1698 		/* New entry */
1699 		mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
1700 		if (!mc)
1701 			return -ENOMEM;
1702 
1703 		mc->entry_type = ocelot_classify_mdb(mdb->addr);
1704 		ether_addr_copy(mc->addr, mdb->addr);
1705 		mc->vid = vid;
1706 
1707 		list_add_tail(&mc->list, &ocelot->multicast);
1708 	} else {
1709 		/* Existing entry. Clean up the current port mask from
1710 		 * hardware now, because we'll be modifying it.
1711 		 */
1712 		ocelot_pgid_free(ocelot, mc->pgid);
1713 		ocelot_encode_ports_to_mdb(addr, mc);
1714 		ocelot_mact_forget(ocelot, addr, vid);
1715 	}
1716 
1717 	mc->ports |= BIT(port);
1718 
1719 	pgid = ocelot_mdb_get_pgid(ocelot, mc);
1720 	if (IS_ERR(pgid)) {
1721 		dev_err(ocelot->dev,
1722 			"Cannot allocate PGID for mdb %pM vid %d\n",
1723 			mc->addr, mc->vid);
1724 		devm_kfree(ocelot->dev, mc);
1725 		return PTR_ERR(pgid);
1726 	}
1727 	mc->pgid = pgid;
1728 
1729 	ocelot_encode_ports_to_mdb(addr, mc);
1730 
1731 	if (mc->entry_type != ENTRYTYPE_MACv4 &&
1732 	    mc->entry_type != ENTRYTYPE_MACv6)
1733 		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
1734 				 pgid->index);
1735 
1736 	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
1737 				 mc->entry_type);
1738 }
1739 EXPORT_SYMBOL(ocelot_port_mdb_add);
1740 
1741 int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
1742 			const struct switchdev_obj_port_mdb *mdb)
1743 {
1744 	unsigned char addr[ETH_ALEN];
1745 	struct ocelot_multicast *mc;
1746 	struct ocelot_pgid *pgid;
1747 	u16 vid = mdb->vid;
1748 
1749 	if (port == ocelot->npi)
1750 		port = ocelot->num_phys_ports;
1751 
1752 	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
1753 	if (!mc)
1754 		return -ENOENT;
1755 
1756 	ocelot_encode_ports_to_mdb(addr, mc);
1757 	ocelot_mact_forget(ocelot, addr, vid);
1758 
1759 	ocelot_pgid_free(ocelot, mc->pgid);
1760 	mc->ports &= ~BIT(port);
1761 	if (!mc->ports) {
1762 		list_del(&mc->list);
1763 		devm_kfree(ocelot->dev, mc);
1764 		return 0;
1765 	}
1766 
1767 	/* We have a PGID with fewer ports now */
1768 	pgid = ocelot_mdb_get_pgid(ocelot, mc);
1769 	if (IS_ERR(pgid))
1770 		return PTR_ERR(pgid);
1771 	mc->pgid = pgid;
1772 
1773 	ocelot_encode_ports_to_mdb(addr, mc);
1774 
1775 	if (mc->entry_type != ENTRYTYPE_MACv4 &&
1776 	    mc->entry_type != ENTRYTYPE_MACv6)
1777 		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
1778 				 pgid->index);
1779 
1780 	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
1781 				 mc->entry_type);
1782 }
1783 EXPORT_SYMBOL(ocelot_port_mdb_del);
1784 
1785 void ocelot_port_bridge_join(struct ocelot *ocelot, int port,
1786 			     struct net_device *bridge)
1787 {
1788 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1789 
1790 	ocelot_port->bridge = bridge;
1791 
1792 	ocelot_apply_bridge_fwd_mask(ocelot);
1793 }
1794 EXPORT_SYMBOL(ocelot_port_bridge_join);
1795 
1796 void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
1797 			      struct net_device *bridge)
1798 {
1799 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1800 
1801 	ocelot_port->bridge = NULL;
1802 
1803 	ocelot_port_set_pvid(ocelot, port, NULL);
1804 	ocelot_port_manage_port_tag(ocelot, port);
1805 	ocelot_apply_bridge_fwd_mask(ocelot);
1806 }
1807 EXPORT_SYMBOL(ocelot_port_bridge_leave);
1808 
1809 static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
1810 {
1811 	unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
1812 	int i, port, lag;
1813 
1814 	/* Reset destination and aggregation PGIDS */
1815 	for_each_unicast_dest_pgid(ocelot, port)
1816 		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
1817 
1818 	for_each_aggr_pgid(ocelot, i)
1819 		ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
1820 				 ANA_PGID_PGID, i);
1821 
1822 	/* The visited ports bitmask holds the list of ports offloading any
1823 	 * bonding interface. Initially we mark all these ports as unvisited,
1824 	 * then every time we visit a port in this bitmask, we know that it is
1825 	 * the lowest numbered port, i.e. the one whose logical ID == physical
1826 	 * port ID == LAG ID. So we mark as visited all further ports in the
1827 	 * bitmask that are offloading the same bonding interface. This way,
1828 	 * we set up the aggregation PGIDs only once per bonding interface.
1829 	 */
1830 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1831 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1832 
1833 		if (!ocelot_port || !ocelot_port->bond)
1834 			continue;
1835 
1836 		visited &= ~BIT(port);
1837 	}
1838 
1839 	/* Now, set PGIDs for each active LAG */
1840 	for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
1841 		struct net_device *bond = ocelot->ports[lag]->bond;
1842 		int num_active_ports = 0;
1843 		unsigned long bond_mask;
1844 		u8 aggr_idx[16];
1845 
1846 		if (!bond || (visited & BIT(lag)))
1847 			continue;
1848 
1849 		bond_mask = ocelot_get_bond_mask(ocelot, bond, true);
1850 
1851 		for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
1852 			// Destination mask
1853 			ocelot_write_rix(ocelot, bond_mask,
1854 					 ANA_PGID_PGID, port);
1855 			aggr_idx[num_active_ports++] = port;
1856 		}
1857 
1858 		for_each_aggr_pgid(ocelot, i) {
1859 			u32 ac;
1860 
1861 			ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
1862 			ac &= ~bond_mask;
1863 			/* Don't do division by zero if there was no active
1864 			 * port. Just make all aggregation codes zero.
1865 			 */
1866 			if (num_active_ports)
1867 				ac |= BIT(aggr_idx[i % num_active_ports]);
1868 			ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
1869 		}
1870 
1871 		/* Mark all ports in the same LAG as visited to avoid applying
1872 		 * the same config again.
1873 		 */
1874 		for (port = lag; port < ocelot->num_phys_ports; port++) {
1875 			struct ocelot_port *ocelot_port = ocelot->ports[port];
1876 
1877 			if (!ocelot_port)
1878 				continue;
1879 
1880 			if (ocelot_port->bond == bond)
1881 				visited |= BIT(port);
1882 		}
1883 	}
1884 }
1885 
1886 /* When offloading a bonding interface, the switch ports configured under the
1887  * same bond must have the same logical port ID, equal to the physical port ID
1888  * of the lowest numbered physical port in that bond. Otherwise, in standalone/
1889  * bridged mode, each port has a logical port ID equal to its physical port ID.
1890  */
1891 static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
1892 {
1893 	int port;
1894 
1895 	for (port = 0; port < ocelot->num_phys_ports; port++) {
1896 		struct ocelot_port *ocelot_port = ocelot->ports[port];
1897 		struct net_device *bond;
1898 
1899 		if (!ocelot_port)
1900 			continue;
1901 
1902 		bond = ocelot_port->bond;
1903 		if (bond) {
1904 			int lag = __ffs(ocelot_get_bond_mask(ocelot, bond,
1905 							     false));
1906 
1907 			ocelot_rmw_gix(ocelot,
1908 				       ANA_PORT_PORT_CFG_PORTID_VAL(lag),
1909 				       ANA_PORT_PORT_CFG_PORTID_VAL_M,
1910 				       ANA_PORT_PORT_CFG, port);
1911 		} else {
1912 			ocelot_rmw_gix(ocelot,
1913 				       ANA_PORT_PORT_CFG_PORTID_VAL(port),
1914 				       ANA_PORT_PORT_CFG_PORTID_VAL_M,
1915 				       ANA_PORT_PORT_CFG, port);
1916 		}
1917 	}
1918 }
1919 
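/* Offload a bonding interface: only hash-based TX balancing can be offloaded,
 * since the hardware selects the egress port through aggregation codes.
 * Record the bonding upper, then recompute the logical port IDs, the
 * forwarding masks and the aggregation PGIDs to reflect the new LAG
 * membership.
 */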
1920 int ocelot_port_lag_join(struct ocelot *ocelot, int port,
1921 			 struct net_device *bond,
1922 			 struct netdev_lag_upper_info *info)
1923 {
1924 	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
1925 		return -EOPNOTSUPP;
1926 
1927 	ocelot->ports[port]->bond = bond;
1928 
1929 	ocelot_setup_logical_port_ids(ocelot);
1930 	ocelot_apply_bridge_fwd_mask(ocelot);
1931 	ocelot_set_aggr_pgids(ocelot);
1932 
1933 	return 0;
1934 }
1935 EXPORT_SYMBOL(ocelot_port_lag_join);
1936 
1937 void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
1938 			   struct net_device *bond)
1939 {
1940 	ocelot->ports[port]->bond = NULL;
1941 
1942 	ocelot_setup_logical_port_ids(ocelot);
1943 	ocelot_apply_bridge_fwd_mask(ocelot);
1944 	ocelot_set_aggr_pgids(ocelot);
1945 }
1946 EXPORT_SYMBOL(ocelot_port_lag_leave);
1947 
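/* Called when the TX state of a LAG member changes. Cache the new state and
 * redistribute the aggregation codes across the members which are still
 * active for TX.
 */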
1948 void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
1949 {
1950 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1951 
1952 	ocelot_port->lag_tx_active = lag_tx_active;
1953 
1954 	/* Rebalance the LAGs */
1955 	ocelot_set_aggr_pgids(ocelot);
1956 }
1957 EXPORT_SYMBOL(ocelot_port_lag_change);
1958 
1959 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
1960  * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
1961  * In the special case that it's the NPI port that we're configuring, the
1962  * length of the tag and optional prefix needs to be accounted for privately,
1963  * in order to be able to sustain communication at the requested @sdu.
1964  */
1965 void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
1966 {
1967 	struct ocelot_port *ocelot_port = ocelot->ports[port];
1968 	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
1969 	int pause_start, pause_stop;
1970 	int atop, atop_tot;
1971 
1972 	if (port == ocelot->npi) {
1973 		maxlen += OCELOT_TAG_LEN;
1974 
1975 		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
1976 			maxlen += OCELOT_SHORT_PREFIX_LEN;
1977 		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
1978 			maxlen += OCELOT_LONG_PREFIX_LEN;
1979 	}
1980 
1981 	ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
1982 
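	/* The flow control and tail dropping watermarks below are derived from
	 * the new maximum frame length and are programmed in units of
	 * OCELOT_BUFFER_CELL_SZ-byte buffer cells.
	 */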
1983 	/* Set Pause watermark hysteresis */
1984 	pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
1985 	pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
1986 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
1987 			    pause_start);
1988 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
1989 			    pause_stop);
1990 
1991 	/* Tail dropping watermarks */
1992 	atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
1993 		   OCELOT_BUFFER_CELL_SZ;
1994 	atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
1995 	ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
1996 	ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
1997 }
1998 EXPORT_SYMBOL(ocelot_port_set_maxlen);
1999 
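/* The hardware frame length is capped at 65535 octets; report the largest MTU
 * that fits once the L2 header and FCS are subtracted, and, on the NPI port,
 * once the tag and optional prefix are subtracted as well.
 */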
2000 int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
2001 {
2002 	int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
2003 
2004 	if (port == ocelot->npi) {
2005 		max_mtu -= OCELOT_TAG_LEN;
2006 
2007 		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
2008 			max_mtu -= OCELOT_SHORT_PREFIX_LEN;
2009 		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
2010 			max_mtu -= OCELOT_LONG_PREFIX_LEN;
2011 	}
2012 
2013 	return max_mtu;
2014 }
2015 EXPORT_SYMBOL(ocelot_get_max_mtu);
2016 
2017 static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
2018 				     bool enabled)
2019 {
2020 	struct ocelot_port *ocelot_port = ocelot->ports[port];
2021 	u32 val = 0;
2022 
2023 	if (enabled)
2024 		val = ANA_PORT_PORT_CFG_LEARN_ENA;
2025 
2026 	ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
2027 		       ANA_PORT_PORT_CFG, port);
2028 
2029 	ocelot_port->learn_ena = enabled;
2030 }
2031 
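/* The three helpers below include or exclude @port from the PGID_UC, PGID_MC
 * and PGID_BC replication masks, which control the set of ports that unknown
 * unicast, unknown multicast and broadcast frames are flooded to.
 */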
2032 static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
2033 					bool enabled)
2034 {
2035 	u32 val = 0;
2036 
2037 	if (enabled)
2038 		val = BIT(port);
2039 
2040 	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
2041 }
2042 
2043 static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
2044 					bool enabled)
2045 {
2046 	u32 val = 0;
2047 
2048 	if (enabled)
2049 		val = BIT(port);
2050 
2051 	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
2052 }
2053 
2054 static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
2055 					bool enabled)
2056 {
2057 	u32 val = 0;
2058 
2059 	if (enabled)
2060 		val = BIT(port);
2061 
2062 	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
2063 }
2064 
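/* Reject bridge port flags which cannot be offloaded; only learning and the
 * unicast/multicast/broadcast flood controls are supported.
 */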
2065 int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
2066 				 struct switchdev_brport_flags flags)
2067 {
2068 	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
2069 			   BR_BCAST_FLOOD))
2070 		return -EINVAL;
2071 
2072 	return 0;
2073 }
2074 EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
2075 
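/* Apply the bridge port flags selected in @flags.mask, which have already
 * been validated by ocelot_port_pre_bridge_flags().
 */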
2076 void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
2077 			      struct switchdev_brport_flags flags)
2078 {
2079 	if (flags.mask & BR_LEARNING)
2080 		ocelot_port_set_learning(ocelot, port,
2081 					 !!(flags.val & BR_LEARNING));
2082 
2083 	if (flags.mask & BR_FLOOD)
2084 		ocelot_port_set_ucast_flood(ocelot, port,
2085 					    !!(flags.val & BR_FLOOD));
2086 
2087 	if (flags.mask & BR_MCAST_FLOOD)
2088 		ocelot_port_set_mcast_flood(ocelot, port,
2089 					    !!(flags.val & BR_MCAST_FLOOD));
2090 
2091 	if (flags.mask & BR_BCAST_FLOOD)
2092 		ocelot_port_set_bcast_flood(ocelot, port,
2093 					    !!(flags.val & BR_BCAST_FLOOD));
2094 }
2095 EXPORT_SYMBOL(ocelot_port_bridge_flags);
2096 
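/* Basic one-time hardware setup for a front port: MAC inter-frame gaps and
 * frame length limits, pause frame parameters, VLAN tag handling, and a
 * standalone-mode port configuration with source address learning disabled
 * and VCAP lookups enabled.
 */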
2097 void ocelot_init_port(struct ocelot *ocelot, int port)
2098 {
2099 	struct ocelot_port *ocelot_port = ocelot->ports[port];
2100 
2101 	skb_queue_head_init(&ocelot_port->tx_skbs);
2102 
2103 	/* Basic L2 initialization */
2104 
2105 	/* Set MAC IFG Gaps
2106 	 * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
2107 	 * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
2108 	 */
2109 	ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
2110 			   DEV_MAC_IFG_CFG);
2111 
2112 	/* Load seed (0) and set MAC HDX late collision */
2113 	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
2114 			   DEV_MAC_HDX_CFG_SEED_LOAD,
2115 			   DEV_MAC_HDX_CFG);
2116 	mdelay(1);
2117 	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
2118 			   DEV_MAC_HDX_CFG);
2119 
2120 	/* Set Max Length and maximum tags allowed */
2121 	ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
2122 	ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
2123 			   DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
2124 			   DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
2125 			   DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
2126 			   DEV_MAC_TAGS_CFG);
2127 
2128 	/* Set SMAC of Pause frame (00:00:00:00:00:00) */
2129 	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
2130 	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
2131 
2132 	/* Enable transmission of pause frames */
2133 	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
2134 
2135 	/* Drop frames with multicast source address */
2136 	ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
2137 		       ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
2138 		       ANA_PORT_DROP_CFG, port);
2139 
2140 	/* Set default VLAN and tag type to 8021Q. */
2141 	ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
2142 		       REW_PORT_VLAN_CFG_PORT_TPID_M,
2143 		       REW_PORT_VLAN_CFG, port);
2144 
2145 	/* Disable source address learning for standalone mode */
2146 	ocelot_port_set_learning(ocelot, port, false);
2147 
2148 	/* Set the port's initial logical port ID value, enable receiving
2149 	 * frames on it, and configure the MAC address learning type to
2150 	 * automatic.
2151 	 */
2152 	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
2153 			 ANA_PORT_PORT_CFG_RECV_ENA |
2154 			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
2155 			 ANA_PORT_PORT_CFG, port);
2156 
2157 	/* Enable vcap lookups */
2158 	ocelot_vcap_enable(ocelot, port);
2159 }
2160 EXPORT_SYMBOL(ocelot_init_port);
2161 
2162 /* Configure and enable the CPU port module, which is a set of queues
2163  * accessible through register MMIO, frame DMA or Ethernet (in case
2164  * NPI mode is used).
2165  */
2166 static void ocelot_cpu_port_init(struct ocelot *ocelot)
2167 {
2168 	int cpu = ocelot->num_phys_ports;
2169 
2170 	/* The unicast destination PGID for the CPU port module is unused */
2171 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
2172 	/* Instead set up a multicast destination PGID for traffic copied to
2173 	 * the CPU. Whitelisted MAC addresses like the port netdevice MAC
2174 	 * addresses will be copied to the CPU via this PGID.
2175 	 */
2176 	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
2177 	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
2178 			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
2179 			 ANA_PORT_PORT_CFG, cpu);
2180 
2181 	/* Enable CPU port module */
2182 	ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
2183 	/* CPU port Injection/Extraction configuration */
2184 	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
2185 			    OCELOT_TAG_PREFIX_NONE);
2186 	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
2187 			    OCELOT_TAG_PREFIX_NONE);
2188 
2189 	/* Configure the CPU port to be VLAN aware */
2190 	ocelot_write_gix(ocelot,
2191 			 ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
2192 			 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
2193 			 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
2194 			 ANA_PORT_VLAN_CFG, cpu);
2195 }
2196 
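/* Read back properties which differ between switch instantiations: the size
 * of the packet buffer and the number of frame references.
 */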
2197 static void ocelot_detect_features(struct ocelot *ocelot)
2198 {
2199 	int mmgt, eq_ctrl;
2200 
2201 	/* For Ocelot, Felix, Seville, Serval etc., SYS:MMGT:MMGT:FREECNT holds
2202 	 * the number of 240-byte free memory words (aka 4-cell chunks), not
2203 	 * 192-byte words as the documentation incorrectly says.
2204 	 */
2205 	mmgt = ocelot_read(ocelot, SYS_MMGT);
2206 	ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
2207 
2208 	eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
2209 	ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
2210 }
2211 
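/* Bring up the switch core: reset it through the SoC-specific callback,
 * allocate the statistics area and workqueues, initialize the MAC, VLAN and
 * VCAP tables and the CPU port module, then program the global forwarding,
 * flooding, aggregation and CPU queue assignment registers.
 */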
2212 int ocelot_init(struct ocelot *ocelot)
2213 {
2214 	char queue_name[32];
2215 	int i, ret;
2216 	u32 port;
2217 
2218 	if (ocelot->ops->reset) {
2219 		ret = ocelot->ops->reset(ocelot);
2220 		if (ret) {
2221 			dev_err(ocelot->dev, "Switch reset failed\n");
2222 			return ret;
2223 		}
2224 	}
2225 
2226 	ocelot->stats = devm_kcalloc(ocelot->dev,
2227 				     ocelot->num_phys_ports * ocelot->num_stats,
2228 				     sizeof(u64), GFP_KERNEL);
2229 	if (!ocelot->stats)
2230 		return -ENOMEM;
2231 
2232 	mutex_init(&ocelot->stats_lock);
2233 	mutex_init(&ocelot->ptp_lock);
2234 	spin_lock_init(&ocelot->ptp_clock_lock);
2235 	spin_lock_init(&ocelot->ts_id_lock);
2236 	snprintf(queue_name, sizeof(queue_name), "%s-stats",
2237 		 dev_name(ocelot->dev));
2238 	ocelot->stats_queue = create_singlethread_workqueue(queue_name);
2239 	if (!ocelot->stats_queue)
2240 		return -ENOMEM;
2241 
2242 	ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
2243 	if (!ocelot->owq) {
2244 		destroy_workqueue(ocelot->stats_queue);
2245 		return -ENOMEM;
2246 	}
2247 
2248 	INIT_LIST_HEAD(&ocelot->multicast);
2249 	INIT_LIST_HEAD(&ocelot->pgids);
2250 	INIT_LIST_HEAD(&ocelot->vlans);
2251 	ocelot_detect_features(ocelot);
2252 	ocelot_mact_init(ocelot);
2253 	ocelot_vlan_init(ocelot);
2254 	ocelot_vcap_init(ocelot);
2255 	ocelot_cpu_port_init(ocelot);
2256 
2257 	for (port = 0; port < ocelot->num_phys_ports; port++) {
2258 		/* Clear all counters (5 groups) */
2259 		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
2260 				     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
2261 			     SYS_STAT_CFG);
2262 	}
2263 
2264 	/* Only use S-Tag */
2265 	ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
2266 
2267 	/* Aggregation mode */
2268 	ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
2269 			     ANA_AGGR_CFG_AC_DMAC_ENA |
2270 			     ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
2271 			     ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
2272 			     ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
2273 			     ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
2274 			     ANA_AGGR_CFG);
2275 
2276 	/* Set MAC age time to default value. The entry is aged after
2277 	 * 2*AGE_PERIOD
2278 	 */
2279 	ocelot_write(ocelot,
2280 		     ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
2281 		     ANA_AUTOAGE);
2282 
2283 	/* Disable learning for frames discarded by VLAN ingress filtering */
2284 	regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
2285 
2286 	/* Set up frame ageing - fixed value "2 sec" - in 6.5 us units */
2287 	ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
2288 		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
2289 
2290 	/* Set up flooding PGIDs */
2291 	for (i = 0; i < ocelot->num_flooding_pgids; i++)
2292 		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
2293 				 ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
2294 				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
2295 				 ANA_FLOODING, i);
2296 	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
2297 		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
2298 		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
2299 		     ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
2300 		     ANA_FLOODING_IPMC);
2301 
2302 	for (port = 0; port < ocelot->num_phys_ports; port++) {
2303 		/* Transmit the frame to the local port. */
2304 		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
2305 		/* Do not forward BPDU frames to the front ports. */
2306 		ocelot_write_gix(ocelot,
2307 				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
2308 				 ANA_PORT_CPU_FWD_BPDU_CFG,
2309 				 port);
2310 		/* Ensure bridging is disabled */
2311 		ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
2312 	}
2313 
2314 	for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
2315 		u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
2316 
2317 		ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
2318 	}
2319 
2320 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE);
2321 
2322 	/* Allow broadcast and unknown L2 multicast to the CPU. */
2323 	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
2324 		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
2325 		       ANA_PGID_PGID, PGID_MC);
2326 	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
2327 		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
2328 		       ANA_PGID_PGID, PGID_BC);
2329 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
2330 	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
2331 
2332 	/* Allow manual injection via the DEVCPU_QS registers, and byte-swap
2333 	 * the endianness of these registers.
2334 	 */
2335 	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
2336 			 QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
2337 	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
2338 			 QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
2339 	ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
2340 		     ANA_CPUQ_CFG_CPUQ_LRN(2) |
2341 		     ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
2342 		     ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
2343 		     ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
2344 		     ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
2345 		     ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
2346 		     ANA_CPUQ_CFG_CPUQ_IGMP(6) |
2347 		     ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
2348 	for (i = 0; i < 16; i++)
2349 		ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
2350 				 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
2351 				 ANA_CPUQ_8021_CFG, i);
2352 
2353 	INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
2354 	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
2355 			   OCELOT_STATS_CHECK_DELAY);
2356 
2357 	return 0;
2358 }
2359 EXPORT_SYMBOL(ocelot_init);
2360 
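/* Reverse of ocelot_init(): stop the statistics polling and tear down the
 * workqueues and the stats lock.
 */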
2361 void ocelot_deinit(struct ocelot *ocelot)
2362 {
2363 	cancel_delayed_work(&ocelot->stats_work);
2364 	destroy_workqueue(ocelot->stats_queue);
2365 	destroy_workqueue(ocelot->owq);
2366 	mutex_destroy(&ocelot->stats_lock);
2367 }
2368 EXPORT_SYMBOL(ocelot_deinit);
2369 
2370 void ocelot_deinit_port(struct ocelot *ocelot, int port)
2371 {
2372 	struct ocelot_port *ocelot_port = ocelot->ports[port];
2373 
2374 	skb_queue_purge(&ocelot_port->tx_skbs);
2375 }
2376 EXPORT_SYMBOL(ocelot_deinit_port);
2377 
2378 MODULE_LICENSE("Dual MIT/GPL");
2379