xref: /linux/drivers/net/dsa/sja1105/sja1105_main.c (revision 4201c9260a8d3c4ef238e51692a7e9b4e1e29efe)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
24 #include "sja1105.h"
25 
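/* Pulse the hardware reset GPIO for pulse_len milliseconds, then wait
 * startup_delay milliseconds for the switch to come out of reset.
 */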
26 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
27 			     unsigned int startup_delay)
28 {
29 	gpiod_set_value_cansleep(gpio, 1);
30 	/* Wait for minimum reset pulse length */
31 	msleep(pulse_len);
32 	gpiod_set_value_cansleep(gpio, 0);
33 	/* Wait until chip is ready after reset */
34 	msleep(startup_delay);
35 }
36 
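/* Permit (or deny) traffic forwarding from port @from towards port @to by
 * updating the broadcast domain, reachable-ports mask and flood domain of
 * @from's L2 Forwarding entry.
 */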
37 static void
38 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
39 			   int from, int to, bool allow)
40 {
41 	if (allow) {
42 		l2_fwd[from].bc_domain  |= BIT(to);
43 		l2_fwd[from].reach_port |= BIT(to);
44 		l2_fwd[from].fl_domain  |= BIT(to);
45 	} else {
46 		l2_fwd[from].bc_domain  &= ~BIT(to);
47 		l2_fwd[from].reach_port &= ~BIT(to);
48 		l2_fwd[from].fl_domain  &= ~BIT(to);
49 	}
50 }
51 
52 /* Structure used to temporarily transport device tree
53  * settings into sja1105_setup
54  */
55 struct sja1105_dt_port {
56 	phy_interface_t phy_mode;
57 	sja1105_mii_role_t role;
58 };
59 
60 static int sja1105_init_mac_settings(struct sja1105_private *priv)
61 {
62 	struct sja1105_mac_config_entry default_mac = {
63 		/* Enable all 8 priority queues on egress.
64 		 * Every queue i holds top[i] - base[i] frames.
65 		 * Sum of top[i] - base[i] is 511 (max hardware limit).
66 		 */
67 		.top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
68 		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
69 		.enabled = {true, true, true, true, true, true, true, true},
70 		/* Keep standard IFG of 12 bytes on egress. */
71 		.ifg = 0,
72 		/* Always put the MAC speed in automatic mode, where it can be
73 		 * adjusted at runtime by PHYLINK.
74 		 */
75 		.speed = SJA1105_SPEED_AUTO,
76 		/* No static correction for 1-step 1588 events */
77 		.tp_delin = 0,
78 		.tp_delout = 0,
79 		/* Disable aging for critical TTEthernet traffic */
80 		.maxage = 0xFF,
81 		/* Internal VLAN (pvid) to apply to untagged ingress */
82 		.vlanprio = 0,
83 		.vlanid = 0,
84 		.ing_mirr = false,
85 		.egr_mirr = false,
86 		/* Don't drop traffic with an EtherType other than ETH_P_IP */
87 		.drpnona664 = false,
88 		/* Don't drop double-tagged traffic */
89 		.drpdtag = false,
90 		/* Don't drop untagged traffic */
91 		.drpuntag = false,
92 		/* Don't retag 802.1p (VID 0) traffic with the pvid */
93 		.retag = false,
94 		/* Disable learning and I/O on user ports by default -
95 		 * STP will enable it.
96 		 */
97 		.dyn_learn = false,
98 		.egress = false,
99 		.ingress = false,
100 	};
101 	struct sja1105_mac_config_entry *mac;
102 	struct sja1105_table *table;
103 	int i;
104 
105 	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
106 
107 	/* Discard previous MAC Configuration Table */
108 	if (table->entry_count) {
109 		kfree(table->entries);
110 		table->entry_count = 0;
111 	}
112 
113 	table->entries = kcalloc(SJA1105_NUM_PORTS,
114 				 table->ops->unpacked_entry_size, GFP_KERNEL);
115 	if (!table->entries)
116 		return -ENOMEM;
117 
118 	table->entry_count = SJA1105_NUM_PORTS;
119 
120 	mac = table->entries;
121 
122 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
123 		mac[i] = default_mac;
124 		if (i == dsa_upstream_port(priv->ds, i)) {
125 			/* STP doesn't get called for CPU port, so we need to
126 			 * set the I/O parameters statically.
127 			 */
128 			mac[i].dyn_learn = true;
129 			mac[i].ingress = true;
130 			mac[i].egress = true;
131 		}
132 	}
133 
134 	return 0;
135 }
136 
137 static int sja1105_init_mii_settings(struct sja1105_private *priv,
138 				     struct sja1105_dt_port *ports)
139 {
140 	struct device *dev = &priv->spidev->dev;
141 	struct sja1105_xmii_params_entry *mii;
142 	struct sja1105_table *table;
143 	int i;
144 
145 	table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
146 
147 	/* Discard previous xMII Mode Parameters Table */
148 	if (table->entry_count) {
149 		kfree(table->entries);
150 		table->entry_count = 0;
151 	}
152 
153 	table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
154 				 table->ops->unpacked_entry_size, GFP_KERNEL);
155 	if (!table->entries)
156 		return -ENOMEM;
157 
158 	/* Override table based on PHYLINK DT bindings */
159 	table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
160 
161 	mii = table->entries;
162 
163 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
164 		switch (ports[i].phy_mode) {
165 		case PHY_INTERFACE_MODE_MII:
166 			mii->xmii_mode[i] = XMII_MODE_MII;
167 			break;
168 		case PHY_INTERFACE_MODE_RMII:
169 			mii->xmii_mode[i] = XMII_MODE_RMII;
170 			break;
171 		case PHY_INTERFACE_MODE_RGMII:
172 		case PHY_INTERFACE_MODE_RGMII_ID:
173 		case PHY_INTERFACE_MODE_RGMII_RXID:
174 		case PHY_INTERFACE_MODE_RGMII_TXID:
175 			mii->xmii_mode[i] = XMII_MODE_RGMII;
176 			break;
177 		default:
178 			dev_err(dev, "Unsupported PHY mode %s!\n",
179 				phy_modes(ports[i].phy_mode));
			return -EINVAL;
180 		}
181 
182 		mii->phy_mac[i] = ports[i].role;
183 	}
184 	return 0;
185 }
186 
187 static int sja1105_init_static_fdb(struct sja1105_private *priv)
188 {
189 	struct sja1105_table *table;
190 
191 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
192 
193 	/* We only populate the FDB table through dynamic
194 	 * L2 Address Lookup entries
195 	 */
196 	if (table->entry_count) {
197 		kfree(table->entries);
198 		table->entry_count = 0;
199 	}
200 	return 0;
201 }
202 
203 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
204 {
205 	struct sja1105_table *table;
206 	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
207 		/* Learned FDB entries are forgotten after 300 seconds */
208 		.maxage = SJA1105_AGEING_TIME_MS(300000),
209 		/* All entries within a FDB bin are available for learning */
210 		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
211 		/* And the P/Q/R/S equivalent setting: */
212 		.start_dynspc = 0,
213 		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
214 		.poly = 0x97,
215 		/* This selects between Independent VLAN Learning (IVL) and
216 		 * Shared VLAN Learning (SVL)
217 		 */
218 		.shared_learn = false,
219 		/* Don't discard management traffic based on ENFPORT -
220 		 * we don't perform SMAC port enforcement anyway, so
221 		 * what we are setting here doesn't matter.
222 		 */
223 		.no_enf_hostprt = false,
224 		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
225 		 * Maybe correlate with no_linklocal_learn from bridge driver?
226 		 */
227 		.no_mgmt_learn = true,
228 		/* P/Q/R/S only */
229 		.use_static = true,
230 		/* Dynamically learned FDB entries can overwrite other (older)
231 		 * dynamic FDB entries
232 		 */
233 		.owr_dyn = true,
234 		.drpnolearn = true,
235 	};
236 
237 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
238 
239 	if (table->entry_count) {
240 		kfree(table->entries);
241 		table->entry_count = 0;
242 	}
243 
244 	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
245 				 table->ops->unpacked_entry_size, GFP_KERNEL);
246 	if (!table->entries)
247 		return -ENOMEM;
248 
249 	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
250 
251 	/* This table only has a single entry */
252 	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
253 				default_l2_lookup_params;
254 
255 	return 0;
256 }
257 
258 static int sja1105_init_static_vlan(struct sja1105_private *priv)
259 {
260 	struct sja1105_table *table;
261 	struct sja1105_vlan_lookup_entry pvid = {
262 		.ving_mirr = 0,
263 		.vegr_mirr = 0,
264 		.vmemb_port = 0,
265 		.vlan_bc = 0,
266 		.tag_port = 0,
267 		.vlanid = 0,
268 	};
269 	int i;
270 
271 	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
272 
273 	/* The static VLAN table will only contain the initial pvid of 0.
274 	 * All other VLANs are to be configured through dynamic entries,
275 	 * and kept in the static configuration table as backing memory.
276 	 * The pvid of 0 is sufficient to pass traffic while the ports are
277 	 * standalone and when vlan_filtering is disabled. When filtering
278 	 * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
279 	 * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
280 	 * vlan' even when vlan_filtering is off, but it has no effect.
281 	 */
282 	if (table->entry_count) {
283 		kfree(table->entries);
284 		table->entry_count = 0;
285 	}
286 
287 	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
288 				 GFP_KERNEL);
289 	if (!table->entries)
290 		return -ENOMEM;
291 
292 	table->entry_count = 1;
293 
294 	/* VLAN ID 0: all DT-defined ports are members; no restrictions on
295 	 * forwarding; always transmit priority-tagged frames as untagged.
296 	 */
297 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
298 		pvid.vmemb_port |= BIT(i);
299 		pvid.vlan_bc |= BIT(i);
300 		pvid.tag_port &= ~BIT(i);
301 	}
302 
303 	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
304 	return 0;
305 }
306 
307 static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
308 {
309 	struct sja1105_l2_forwarding_entry *l2fwd;
310 	struct sja1105_table *table;
311 	int i, j;
312 
313 	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
314 
315 	if (table->entry_count) {
316 		kfree(table->entries);
317 		table->entry_count = 0;
318 	}
319 
320 	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
321 				 table->ops->unpacked_entry_size, GFP_KERNEL);
322 	if (!table->entries)
323 		return -ENOMEM;
324 
325 	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
326 
327 	l2fwd = table->entries;
328 
329 	/* First 5 entries define the forwarding rules */
330 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
331 		unsigned int upstream = dsa_upstream_port(priv->ds, i);
332 
333 		for (j = 0; j < SJA1105_NUM_TC; j++)
334 			l2fwd[i].vlan_pmap[j] = j;
335 
336 		if (i == upstream)
337 			continue;
338 
339 		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
340 		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
341 	}
342 	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
343 	 * Create a one-to-one mapping.
344 	 */
345 	for (i = 0; i < SJA1105_NUM_TC; i++)
346 		for (j = 0; j < SJA1105_NUM_PORTS; j++)
347 			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
348 
349 	return 0;
350 }
351 
352 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
353 {
354 	struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
355 		/* Disallow dynamic reconfiguration of vlan_pmap */
356 		.max_dynp = 0,
357 		/* Use a single memory partition for all ingress queues */
358 		.part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
359 	};
360 	struct sja1105_table *table;
361 
362 	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
363 
364 	if (table->entry_count) {
365 		kfree(table->entries);
366 		table->entry_count = 0;
367 	}
368 
369 	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
370 				 table->ops->unpacked_entry_size, GFP_KERNEL);
371 	if (!table->entries)
372 		return -ENOMEM;
373 
374 	table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
375 
376 	/* This table only has a single entry */
377 	((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
378 				default_l2fwd_params;
379 
380 	return 0;
381 }
382 
383 static int sja1105_init_general_params(struct sja1105_private *priv)
384 {
385 	struct sja1105_general_params_entry default_general_params = {
386 		/* Disallow dynamic changing of the mirror port */
387 		.mirr_ptacu = 0,
388 		.switchid = priv->ds->index,
389 		/* Priority queue for link-local frames trapped to CPU */
390 		.hostprio = 7,
391 		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
392 		.mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
393 		.incl_srcpt1 = false,
394 		.send_meta1  = false,
395 		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
396 		.mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
397 		.incl_srcpt0 = false,
398 		.send_meta0  = false,
399 		/* The destination for traffic matching mac_fltres1 and
400 		 * mac_fltres0 on all ports except host_port. Such traffic
401 		 * received on host_port itself would be dropped, except
402 		 * by installing a temporary 'management route'
403 		 */
404 		.host_port = dsa_upstream_port(priv->ds, 0),
405 		/* Same as host port */
406 		.mirr_port = dsa_upstream_port(priv->ds, 0),
407 		/* Link-local traffic received on casc_port will be forwarded
408 		 * to host_port without embedding the source port and device ID
409 		 * info in the destination MAC address (presumably because it
410 		 * is a cascaded port and a downstream SJA switch already did
411 		 * that). Default to an invalid port (to disable the feature)
412 		 * and overwrite this if we find any DSA (cascaded) ports.
413 		 */
414 		.casc_port = SJA1105_NUM_PORTS,
415 		/* No TTEthernet */
416 		.vllupformat = 0,
417 		.vlmarker = 0,
418 		.vlmask = 0,
419 		/* Only update correctionField for 1-step PTP (L2 transport) */
420 		.ignore2stf = 0,
421 		/* Forcefully disable VLAN filtering by telling
422 		 * the switch that VLAN has a different EtherType.
423 		 */
424 		.tpid = ETH_P_SJA1105,
425 		.tpid2 = ETH_P_SJA1105,
426 	};
427 	struct sja1105_table *table;
428 	int i, k = 0;
429 
430 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
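	/* Cascaded (DSA) ports become the casc_port; each user port gets its
	 * own management route slot, assigned in port order.
	 */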
431 		if (dsa_is_dsa_port(priv->ds, i))
432 			default_general_params.casc_port = i;
433 		else if (dsa_is_user_port(priv->ds, i))
434 			priv->ports[i].mgmt_slot = k++;
435 	}
436 
437 	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
438 
439 	if (table->entry_count) {
440 		kfree(table->entries);
441 		table->entry_count = 0;
442 	}
443 
444 	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
445 				 table->ops->unpacked_entry_size, GFP_KERNEL);
446 	if (!table->entries)
447 		return -ENOMEM;
448 
449 	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
450 
451 	/* This table only has a single entry */
452 	((struct sja1105_general_params_entry *)table->entries)[0] =
453 				default_general_params;
454 
455 	return 0;
456 }
457 
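/* Convert a rate in Mbps into the value programmed into the policer RATE
 * field: 1000 Mbps maps to 64000, so one hardware unit is 15.625 kbps.
 */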
458 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
459 
460 static inline void
461 sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
462 		      int index)
463 {
464 	policing[index].sharindx = index;
465 	policing[index].smax = 65535; /* Burst size in bytes */
466 	policing[index].rate = SJA1105_RATE_MBPS(1000);
467 	policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
468 	policing[index].partition = 0;
469 }
470 
471 static int sja1105_init_l2_policing(struct sja1105_private *priv)
472 {
473 	struct sja1105_l2_policing_entry *policing;
474 	struct sja1105_table *table;
475 	int i, j, k;
476 
477 	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
478 
479 	/* Discard previous L2 Policing Table */
480 	if (table->entry_count) {
481 		kfree(table->entries);
482 		table->entry_count = 0;
483 	}
484 
485 	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
486 				 table->ops->unpacked_entry_size, GFP_KERNEL);
487 	if (!table->entries)
488 		return -ENOMEM;
489 
490 	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
491 
492 	policing = table->entries;
493 
494 	/* k sweeps through all unicast policers (0-39).
495 	 * bcast sweeps through policers 40-44.
496 	 */
497 	for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
498 		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
499 
500 		for (j = 0; j < SJA1105_NUM_TC; j++, k++)
501 			sja1105_setup_policer(policing, k);
502 
503 		/* Set up this port's policer for broadcast traffic */
504 		sja1105_setup_policer(policing, bcast);
505 	}
506 	return 0;
507 }
508 
509 static int sja1105_init_avb_params(struct sja1105_private *priv,
510 				   bool on)
511 {
512 	struct sja1105_avb_params_entry *avb;
513 	struct sja1105_table *table;
514 
515 	table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
516 
517 	/* Discard previous AVB Parameters Table */
518 	if (table->entry_count) {
519 		kfree(table->entries);
520 		table->entry_count = 0;
521 	}
522 
523 	/* Configure the reception of meta frames only if requested */
524 	if (!on)
525 		return 0;
526 
527 	table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
528 				 table->ops->unpacked_entry_size, GFP_KERNEL);
529 	if (!table->entries)
530 		return -ENOMEM;
531 
532 	table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
533 
534 	avb = table->entries;
535 
536 	avb->destmeta = SJA1105_META_DMAC;
537 	avb->srcmeta  = SJA1105_META_SMAC;
538 
539 	return 0;
540 }
541 
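/* Build all static configuration tables from scratch (MAC, xMII, FDB, VLAN,
 * L2 forwarding, policing, general and AVB parameters) and upload the result
 * to the switch over SPI.
 */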
542 static int sja1105_static_config_load(struct sja1105_private *priv,
543 				      struct sja1105_dt_port *ports)
544 {
545 	int rc;
546 
547 	sja1105_static_config_free(&priv->static_config);
548 	rc = sja1105_static_config_init(&priv->static_config,
549 					priv->info->static_ops,
550 					priv->info->device_id);
551 	if (rc)
552 		return rc;
553 
554 	/* Build static configuration */
555 	rc = sja1105_init_mac_settings(priv);
556 	if (rc < 0)
557 		return rc;
558 	rc = sja1105_init_mii_settings(priv, ports);
559 	if (rc < 0)
560 		return rc;
561 	rc = sja1105_init_static_fdb(priv);
562 	if (rc < 0)
563 		return rc;
564 	rc = sja1105_init_static_vlan(priv);
565 	if (rc < 0)
566 		return rc;
567 	rc = sja1105_init_l2_lookup_params(priv);
568 	if (rc < 0)
569 		return rc;
570 	rc = sja1105_init_l2_forwarding(priv);
571 	if (rc < 0)
572 		return rc;
573 	rc = sja1105_init_l2_forwarding_params(priv);
574 	if (rc < 0)
575 		return rc;
576 	rc = sja1105_init_l2_policing(priv);
577 	if (rc < 0)
578 		return rc;
579 	rc = sja1105_init_general_params(priv);
580 	if (rc < 0)
581 		return rc;
582 	rc = sja1105_init_avb_params(priv, false);
583 	if (rc < 0)
584 		return rc;
585 
586 	/* Send initial configuration to hardware via SPI */
587 	return sja1105_static_config_upload(priv);
588 }
589 
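/* Record, per port, whether the DT requested internal RGMII RX/TX delays
 * (rgmii-id, rgmii-rxid, rgmii-txid), and fail if the chip provides no
 * setup_rgmii_delay hook to actually apply them.
 */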
590 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
591 				      const struct sja1105_dt_port *ports)
592 {
593 	int i;
594 
595 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
596 		if (ports[i].role == XMII_MAC)
597 			continue;
598 
599 		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
600 		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
601 			priv->rgmii_rx_delay[i] = true;
602 
603 		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
604 		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
605 			priv->rgmii_tx_delay[i] = true;
606 
607 		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
608 		     !priv->info->setup_rgmii_delay)
609 			return -EINVAL;
610 	}
611 	return 0;
612 }
613 
614 static int sja1105_parse_ports_node(struct sja1105_private *priv,
615 				    struct sja1105_dt_port *ports,
616 				    struct device_node *ports_node)
617 {
618 	struct device *dev = &priv->spidev->dev;
619 	struct device_node *child;
620 
621 	for_each_child_of_node(ports_node, child) {
622 		struct device_node *phy_node;
623 		int phy_mode;
624 		u32 index;
625 
626 		/* Get switch port number from DT */
627 		if (of_property_read_u32(child, "reg", &index) < 0) {
628 			dev_err(dev, "Port number not defined in device tree "
629 				"(property \"reg\")\n");
630 			return -ENODEV;
631 		}
632 
633 		/* Get PHY mode from DT */
634 		phy_mode = of_get_phy_mode(child);
635 		if (phy_mode < 0) {
636 			dev_err(dev, "Failed to read phy-mode or "
637 				"phy-interface-type property for port %d\n",
638 				index);
639 			return -ENODEV;
640 		}
641 		ports[index].phy_mode = phy_mode;
642 
643 		phy_node = of_parse_phandle(child, "phy-handle", 0);
644 		if (!phy_node) {
645 			if (!of_phy_is_fixed_link(child)) {
646 				dev_err(dev, "phy-handle or fixed-link "
647 					"properties missing!\n");
648 				return -ENODEV;
649 			}
650 			/* phy-handle is missing, but fixed-link isn't.
651 			 * So it's a fixed link. Default to PHY role.
652 			 */
653 			ports[index].role = XMII_PHY;
654 		} else {
655 			/* phy-handle present => put port in MAC role */
656 			ports[index].role = XMII_MAC;
657 			of_node_put(phy_node);
658 		}
659 
660 		/* The MAC/PHY role can be overridden with explicit bindings */
661 		if (of_property_read_bool(child, "sja1105,role-mac"))
662 			ports[index].role = XMII_MAC;
663 		else if (of_property_read_bool(child, "sja1105,role-phy"))
664 			ports[index].role = XMII_PHY;
665 	}
666 
667 	return 0;
668 }
669 
670 static int sja1105_parse_dt(struct sja1105_private *priv,
671 			    struct sja1105_dt_port *ports)
672 {
673 	struct device *dev = &priv->spidev->dev;
674 	struct device_node *switch_node = dev->of_node;
675 	struct device_node *ports_node;
676 	int rc;
677 
678 	ports_node = of_get_child_by_name(switch_node, "ports");
679 	if (!ports_node) {
680 		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
681 		return -ENODEV;
682 	}
683 
684 	rc = sja1105_parse_ports_node(priv, ports, ports_node);
685 	of_node_put(ports_node);
686 
687 	return rc;
688 }
689 
690 /* Convert link speed from SJA1105 to ethtool encoding */
691 static int sja1105_speed[] = {
692 	[SJA1105_SPEED_AUTO]		= SPEED_UNKNOWN,
693 	[SJA1105_SPEED_10MBPS]		= SPEED_10,
694 	[SJA1105_SPEED_100MBPS]		= SPEED_100,
695 	[SJA1105_SPEED_1000MBPS]	= SPEED_1000,
696 };
697 
698 /* Set link speed in the MAC configuration for a specific port. */
699 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
700 				      int speed_mbps)
701 {
702 	struct sja1105_xmii_params_entry *mii;
703 	struct sja1105_mac_config_entry *mac;
704 	struct device *dev = priv->ds->dev;
705 	sja1105_phy_interface_t phy_mode;
706 	sja1105_speed_t speed;
707 	int rc;
708 
709 	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
710 	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
711 	 * We have to *know* what the MAC looks like.  For the sake of keeping
712 	 * the code common, we'll use the static configuration tables as a
713 	 * reasonable approximation for both E/T and P/Q/R/S.
714 	 */
715 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
716 	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
717 
718 	switch (speed_mbps) {
719 	case SPEED_UNKNOWN:
720 		/* No speed update requested */
721 		speed = SJA1105_SPEED_AUTO;
722 		break;
723 	case SPEED_10:
724 		speed = SJA1105_SPEED_10MBPS;
725 		break;
726 	case SPEED_100:
727 		speed = SJA1105_SPEED_100MBPS;
728 		break;
729 	case SPEED_1000:
730 		speed = SJA1105_SPEED_1000MBPS;
731 		break;
732 	default:
733 		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
734 		return -EINVAL;
735 	}
736 
737 	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
738 	 * table, since this will be used for the clocking setup, and we no
739 	 * longer need to store it in the static config (already told hardware
740 	 * we want auto during upload phase).
741 	 */
742 	mac[port].speed = speed;
743 
744 	/* Write to the dynamic reconfiguration tables */
745 	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
746 					  &mac[port], true);
747 	if (rc < 0) {
748 		dev_err(dev, "Failed to write MAC config: %d\n", rc);
749 		return rc;
750 	}
751 
752 	/* Reconfigure the PLLs for the RGMII interfaces (125 MHz is required
753 	 * at gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
754 	 * RMII no change of the clock setup is required. Actually, changing
755 	 * the clock setup does interrupt the clock signal for a certain time
756 	 * which causes trouble for all PHYs relying on this signal.
757 	 */
758 	phy_mode = mii->xmii_mode[port];
759 	if (phy_mode != XMII_MODE_RGMII)
760 		return 0;
761 
762 	return sja1105_clocking_setup_port(priv, port);
763 }
764 
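/* PHYLINK mac_config callback: once the link is up, propagate the resolved
 * speed into the port's MAC configuration and clocking setup.
 */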
765 static void sja1105_mac_config(struct dsa_switch *ds, int port,
766 			       unsigned int link_an_mode,
767 			       const struct phylink_link_state *state)
768 {
769 	struct sja1105_private *priv = ds->priv;
770 
771 	if (!state->link)
772 		return;
773 
774 	sja1105_adjust_port_config(priv, port, state->speed);
775 }
776 
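/* Inhibit transmission on the port while its link is down, and release the
 * inhibit again when the link comes back up.
 */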
777 static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
778 				  unsigned int mode,
779 				  phy_interface_t interface)
780 {
781 	sja1105_inhibit_tx(ds->priv, BIT(port), true);
782 }
783 
784 static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
785 				unsigned int mode,
786 				phy_interface_t interface,
787 				struct phy_device *phydev)
788 {
789 	sja1105_inhibit_tx(ds->priv, BIT(port), false);
790 }
791 
792 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
793 				     unsigned long *supported,
794 				     struct phylink_link_state *state)
795 {
796 	/* Construct a new mask which exhaustively contains all link features
797 	 * supported by the MAC, and then apply that (logical AND) to what will
798 	 * be sent to the PHY for "marketing".
799 	 */
800 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
801 	struct sja1105_private *priv = ds->priv;
802 	struct sja1105_xmii_params_entry *mii;
803 
804 	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
805 
806 	/* The MAC does not support pause frames, and also doesn't
807 	 * support half-duplex traffic modes.
808 	 */
809 	phylink_set(mask, Autoneg);
810 	phylink_set(mask, MII);
811 	phylink_set(mask, 10baseT_Full);
812 	phylink_set(mask, 100baseT_Full);
813 	if (mii->xmii_mode[port] == XMII_MODE_RGMII)
814 		phylink_set(mask, 1000baseT_Full);
815 
816 	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
817 	bitmap_and(state->advertising, state->advertising, mask,
818 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
819 }
820 
821 /* First-generation switches have a 4-way set associative TCAM that
822  * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
823  * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
824  * For the placement of a newly learnt FDB entry, the switch selects the bin
825  * based on a hash function, and the way within that bin incrementally.
826  */
827 static inline int sja1105et_fdb_index(int bin, int way)
828 {
829 	return bin * SJA1105ET_FDB_BIN_SIZE + way;
830 }
831 
832 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
833 					 const u8 *addr, u16 vid,
834 					 struct sja1105_l2_lookup_entry *match,
835 					 int *last_unused)
836 {
837 	int way;
838 
839 	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
840 		struct sja1105_l2_lookup_entry l2_lookup = {0};
841 		int index = sja1105et_fdb_index(bin, way);
842 
843 		/* Skip unused entries, optionally marking them
844 		 * into the return value
845 		 */
846 		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
847 						index, &l2_lookup)) {
848 			if (last_unused)
849 				*last_unused = way;
850 			continue;
851 		}
852 
853 		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
854 		    l2_lookup.vlanid == vid) {
855 			if (match)
856 				*match = l2_lookup;
857 			return way;
858 		}
859 	}
860 	/* Return an invalid entry index if not found */
861 	return -1;
862 }
863 
864 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
865 		      const unsigned char *addr, u16 vid)
866 {
867 	struct sja1105_l2_lookup_entry l2_lookup = {0};
868 	struct sja1105_private *priv = ds->priv;
869 	struct device *dev = ds->dev;
870 	int last_unused = -1;
871 	int bin, way;
872 
873 	bin = sja1105et_fdb_hash(priv, addr, vid);
874 
875 	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
876 					    &l2_lookup, &last_unused);
877 	if (way >= 0) {
878 		/* We have an FDB entry. Is our port in the destination
879 		 * mask? If yes, we need to do nothing. If not, we need
880 		 * to rewrite the entry by adding this port to it.
881 		 */
882 		if (l2_lookup.destports & BIT(port))
883 			return 0;
884 		l2_lookup.destports |= BIT(port);
885 	} else {
886 		int index;
887 
888 		/* We don't have an FDB entry. We construct a new one and
889 		 * try to find a place for it within the FDB table.
890 		 */
891 		l2_lookup.macaddr = ether_addr_to_u64(addr);
892 		l2_lookup.destports = BIT(port);
893 		l2_lookup.vlanid = vid;
894 
895 		if (last_unused >= 0) {
896 			way = last_unused;
897 		} else {
898 			/* Bin is full, need to evict somebody.
899 			 * Choose victim at random. If you get these messages
900 			 * often, you may need to consider changing the
901 			 * distribution function:
902 			 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
903 			 */
904 			way = get_random_u32() % SJA1105ET_FDB_BIN_SIZE;
905 			index = sja1105et_fdb_index(bin, way);
906 			dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
907 				 bin, addr, way);
908 			/* Evict entry */
909 			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
910 						     index, NULL, false);
911 		}
912 	}
913 	l2_lookup.index = sja1105et_fdb_index(bin, way);
914 
915 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
916 					    l2_lookup.index, &l2_lookup,
917 					    true);
918 }
919 
920 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
921 		      const unsigned char *addr, u16 vid)
922 {
923 	struct sja1105_l2_lookup_entry l2_lookup = {0};
924 	struct sja1105_private *priv = ds->priv;
925 	int index, bin, way;
926 	bool keep;
927 
928 	bin = sja1105et_fdb_hash(priv, addr, vid);
929 	way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
930 					    &l2_lookup, NULL);
931 	if (way < 0)
932 		return 0;
933 	index = sja1105et_fdb_index(bin, way);
934 
935 	/* We have an FDB entry. Is our port in the destination mask? If yes,
936 	 * we need to remove it. If the resulting port mask becomes empty, we
937 	 * need to completely evict the FDB entry.
938 	 * Otherwise we just write it back.
939 	 */
940 	l2_lookup.destports &= ~BIT(port);
941 
942 	if (l2_lookup.destports)
943 		keep = true;
944 	else
945 		keep = false;
946 
947 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
948 					    index, &l2_lookup, keep);
949 }
950 
951 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
952 			const unsigned char *addr, u16 vid)
953 {
954 	struct sja1105_l2_lookup_entry l2_lookup = {0};
955 	struct sja1105_private *priv = ds->priv;
956 	int rc, i;
957 
958 	/* Search for an existing entry in the FDB table */
959 	l2_lookup.macaddr = ether_addr_to_u64(addr);
960 	l2_lookup.vlanid = vid;
961 	l2_lookup.iotag = SJA1105_S_TAG;
962 	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
963 	l2_lookup.mask_vlanid = VLAN_VID_MASK;
964 	l2_lookup.mask_iotag = BIT(0);
965 	l2_lookup.destports = BIT(port);
966 
967 	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
968 					 SJA1105_SEARCH, &l2_lookup);
969 	if (rc == 0) {
970 		/* Found and this port is already in the entry's
971 		 * port mask => job done
972 		 */
973 		if (l2_lookup.destports & BIT(port))
974 			return 0;
975 		/* l2_lookup.index is populated by the switch in case it
976 		 * found something.
977 		 */
978 		l2_lookup.destports |= BIT(port);
979 		goto skip_finding_an_index;
980 	}
981 
982 	/* Not found, so try to find an unused spot in the FDB.
983 	 * This is slightly inefficient because the strategy is knock-knock at
984 	 * every possible position from 0 to 1023.
985 	 */
986 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
987 		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
988 						 i, NULL);
989 		if (rc < 0)
990 			break;
991 	}
992 	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
993 		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
994 		return -EINVAL;
995 	}
996 	l2_lookup.index = i;
997 
998 skip_finding_an_index:
999 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1000 					    l2_lookup.index, &l2_lookup,
1001 					    true);
1002 }
1003 
1004 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1005 			const unsigned char *addr, u16 vid)
1006 {
1007 	struct sja1105_l2_lookup_entry l2_lookup = {0};
1008 	struct sja1105_private *priv = ds->priv;
1009 	bool keep;
1010 	int rc;
1011 
1012 	l2_lookup.macaddr = ether_addr_to_u64(addr);
1013 	l2_lookup.vlanid = vid;
1014 	l2_lookup.iotag = SJA1105_S_TAG;
1015 	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1016 	l2_lookup.mask_vlanid = VLAN_VID_MASK;
1017 	l2_lookup.mask_iotag = BIT(0);
1018 	l2_lookup.destports = BIT(port);
1019 
1020 	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1021 					 SJA1105_SEARCH, &l2_lookup);
1022 	if (rc < 0)
1023 		return 0;
1024 
1025 	l2_lookup.destports &= ~BIT(port);
1026 
1027 	/* Decide whether we remove just this port from the FDB entry,
1028 	 * or if we remove it completely.
1029 	 */
1030 	if (l2_lookup.destports)
1031 		keep = true;
1032 	else
1033 		keep = false;
1034 
1035 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1036 					    l2_lookup.index, &l2_lookup, keep);
1037 }
1038 
1039 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1040 			   const unsigned char *addr, u16 vid)
1041 {
1042 	struct sja1105_private *priv = ds->priv;
1043 	int rc;
1044 
1045 	/* Since we make use of VLANs even when the bridge core doesn't tell us
1046 	 * to, translate these FDB entries into the correct dsa_8021q ones.
1047 	 */
1048 	if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
1049 		unsigned int upstream = dsa_upstream_port(priv->ds, port);
1050 		u16 tx_vid = dsa_8021q_tx_vid(ds, port);
1051 		u16 rx_vid = dsa_8021q_rx_vid(ds, port);
1052 
1053 		rc = priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
1054 		if (rc < 0)
1055 			return rc;
1056 		return priv->info->fdb_add_cmd(ds, upstream, addr, rx_vid);
1057 	}
1058 	return priv->info->fdb_add_cmd(ds, port, addr, vid);
1059 }
1060 
1061 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1062 			   const unsigned char *addr, u16 vid)
1063 {
1064 	struct sja1105_private *priv = ds->priv;
1065 	int rc;
1066 
1067 	/* Since we make use of VLANs even when the bridge core doesn't tell us
1068 	 * to, translate these FDB entries into the correct dsa_8021q ones.
1069 	 */
1070 	if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
1071 		unsigned int upstream = dsa_upstream_port(priv->ds, port);
1072 		u16 tx_vid = dsa_8021q_tx_vid(ds, port);
1073 		u16 rx_vid = dsa_8021q_rx_vid(ds, port);
1074 
1075 		rc = priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
1076 		if (rc < 0)
1077 			return rc;
1078 		return priv->info->fdb_del_cmd(ds, upstream, addr, rx_vid);
1079 	}
1080 	return priv->info->fdb_del_cmd(ds, port, addr, vid);
1081 }
1082 
1083 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1084 			    dsa_fdb_dump_cb_t *cb, void *data)
1085 {
1086 	struct sja1105_private *priv = ds->priv;
1087 	struct device *dev = ds->dev;
1088 	int i;
1089 
1090 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1091 		struct sja1105_l2_lookup_entry l2_lookup = {0};
1092 		u8 macaddr[ETH_ALEN];
1093 		int rc;
1094 
1095 		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1096 						 i, &l2_lookup);
1097 		/* No fdb entry at i, not an issue */
1098 		if (rc == -ENOENT)
1099 			continue;
1100 		if (rc) {
1101 			dev_err(dev, "Failed to dump FDB: %d\n", rc);
1102 			return rc;
1103 		}
1104 
1105 		/* FDB dump callback is per port. This means we have to
1106 		 * disregard a valid entry if it's not for this port, even if
1107 		 * only to revisit it later. This is inefficient because the
1108 		 * 1024-sized FDB table needs to be traversed 4 times through
1109 		 * SPI during a 'bridge fdb show' command.
1110 		 */
1111 		if (!(l2_lookup.destports & BIT(port)))
1112 			continue;
1113 		u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1114 
1115 		/* We need to hide the dsa_8021q VLAN from the user.
1116 		 * Convert the TX VID into the pvid that is active in
1117 		 * standalone and non-vlan_filtering modes, aka 1.
1118 		 * The RX VID is applied on the CPU port, which is not seen by
1119 		 * the bridge core anyway, so there's nothing to hide.
1120 		 */
1121 		if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1122 			l2_lookup.vlanid = 1;
1123 		cb(macaddr, l2_lookup.vlanid, false, data);
1124 	}
1125 	return 0;
1126 }
1127 
1128 /* This callback needs to be present */
1129 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
1130 			       const struct switchdev_obj_port_mdb *mdb)
1131 {
1132 	return 0;
1133 }
1134 
1135 static void sja1105_mdb_add(struct dsa_switch *ds, int port,
1136 			    const struct switchdev_obj_port_mdb *mdb)
1137 {
1138 	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
1139 }
1140 
1141 static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1142 			   const struct switchdev_obj_port_mdb *mdb)
1143 {
1144 	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
1145 }
1146 
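/* Add @port to (or remove it from) the forwarding domain of every other user
 * port under the same bridge, and commit the updated L2 Forwarding entries
 * to hardware.
 */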
1147 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1148 				 struct net_device *br, bool member)
1149 {
1150 	struct sja1105_l2_forwarding_entry *l2_fwd;
1151 	struct sja1105_private *priv = ds->priv;
1152 	int i, rc;
1153 
1154 	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1155 
1156 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1157 		/* Add this port to the forwarding matrix of the
1158 		 * other ports in the same bridge, and vice versa.
1159 		 */
1160 		if (!dsa_is_user_port(ds, i))
1161 			continue;
1162 		/* For the ports already under the bridge, only one thing needs
1163 		 * to be done, and that is to add this port to their
1164 		 * reachability domain. So we can perform the SPI write for
1165 		 * them immediately. However, for this port itself (the one
1166 		 * that is new to the bridge), we need to add all other ports
1167 		 * to its reachability domain. So we do that incrementally in
1168 		 * this loop, and perform the SPI write only at the end, once
1169 		 * the domain contains all other bridge ports.
1170 		 */
1171 		if (i == port)
1172 			continue;
1173 		if (dsa_to_port(ds, i)->bridge_dev != br)
1174 			continue;
1175 		sja1105_port_allow_traffic(l2_fwd, i, port, member);
1176 		sja1105_port_allow_traffic(l2_fwd, port, i, member);
1177 
1178 		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1179 						  i, &l2_fwd[i], true);
1180 		if (rc < 0)
1181 			return rc;
1182 	}
1183 
1184 	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1185 					    port, &l2_fwd[port], true);
1186 }
1187 
1188 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1189 					 u8 state)
1190 {
1191 	struct sja1105_private *priv = ds->priv;
1192 	struct sja1105_mac_config_entry *mac;
1193 
1194 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1195 
1196 	switch (state) {
1197 	case BR_STATE_DISABLED:
1198 	case BR_STATE_BLOCKING:
1199 		/* From UM10944 description of DRPDTAG (why put this there?):
1200 		 * "Management traffic flows to the port regardless of the state
1201 		 * of the INGRESS flag". So BPDUs are still allowed to pass.
1202 		 * At the moment no difference between DISABLED and BLOCKING.
1203 		 */
1204 		mac[port].ingress   = false;
1205 		mac[port].egress    = false;
1206 		mac[port].dyn_learn = false;
1207 		break;
1208 	case BR_STATE_LISTENING:
1209 		mac[port].ingress   = true;
1210 		mac[port].egress    = false;
1211 		mac[port].dyn_learn = false;
1212 		break;
1213 	case BR_STATE_LEARNING:
1214 		mac[port].ingress   = true;
1215 		mac[port].egress    = false;
1216 		mac[port].dyn_learn = true;
1217 		break;
1218 	case BR_STATE_FORWARDING:
1219 		mac[port].ingress   = true;
1220 		mac[port].egress    = true;
1221 		mac[port].dyn_learn = true;
1222 		break;
1223 	default:
1224 		dev_err(ds->dev, "invalid STP state: %d\n", state);
1225 		return;
1226 	}
1227 
1228 	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1229 				     &mac[port], true);
1230 }
1231 
1232 static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1233 			       struct net_device *br)
1234 {
1235 	return sja1105_bridge_member(ds, port, br, true);
1236 }
1237 
1238 static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1239 				 struct net_device *br)
1240 {
1241 	sja1105_bridge_member(ds, port, br, false);
1242 }
1243 
1244 /* For situations where we need to change a setting at runtime that is only
1245  * available through the static configuration, resetting the switch in order
1246  * to upload the new static config is unavoidable. Back up the settings we
1247  * modify at runtime (currently only MAC) and restore them after uploading,
1248  * such that this operation is relatively seamless.
1249  */
1250 static int sja1105_static_config_reload(struct sja1105_private *priv)
1251 {
1252 	struct sja1105_mac_config_entry *mac;
1253 	int speed_mbps[SJA1105_NUM_PORTS];
1254 	int rc, i;
1255 
1256 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1257 
1258 	/* Back up the dynamic link speed changed by sja1105_adjust_port_config
1259 	 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
1260 	 * switch wants to see in the static config in order to allow us to
1261 	 * change it through the dynamic interface later.
1262 	 */
1263 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1264 		speed_mbps[i] = sja1105_speed[mac[i].speed];
1265 		mac[i].speed = SJA1105_SPEED_AUTO;
1266 	}
1267 
1268 	/* Reset switch and send updated static configuration */
1269 	rc = sja1105_static_config_upload(priv);
1270 	if (rc < 0)
1271 		goto out;
1272 
1273 	/* Configure the CGU (PLLs) for MII and RMII PHYs.
1274 	 * For these interfaces there is no dynamic configuration
1275 	 * needed, since PLLs have same settings at all speeds.
1276 	 */
1277 	rc = sja1105_clocking_setup(priv);
1278 	if (rc < 0)
1279 		goto out;
1280 
1281 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1282 		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
1283 		if (rc < 0)
1284 			goto out;
1285 	}
1286 out:
1287 	return rc;
1288 }
1289 
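/* Program @pvid as the VLAN ID applied to untagged traffic received on
 * @port, via that port's MAC Configuration entry.
 */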
1290 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1291 {
1292 	struct sja1105_mac_config_entry *mac;
1293 
1294 	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1295 
1296 	mac[port].vlanid = pvid;
1297 
1298 	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1299 					   &mac[port], true);
1300 }
1301 
1302 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
1303 {
1304 	struct sja1105_vlan_lookup_entry *vlan;
1305 	int count, i;
1306 
1307 	vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
1308 	count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
1309 
1310 	for (i = 0; i < count; i++)
1311 		if (vlan[i].vlanid == vid)
1312 			return i;
1313 
1314 	/* Return an invalid entry index if not found */
1315 	return -1;
1316 }
1317 
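/* Update the VLAN Lookup entry for @vid: add or remove @port from its
 * membership and broadcast domain, track whether the port egresses this VLAN
 * tagged or untagged, and delete the entry once it has no member ports left.
 */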
1318 static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
1319 			      bool enabled, bool untagged)
1320 {
1321 	struct sja1105_vlan_lookup_entry *vlan;
1322 	struct sja1105_table *table;
1323 	bool keep = true;
1324 	int match, rc;
1325 
1326 	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
1327 
1328 	match = sja1105_is_vlan_configured(priv, vid);
1329 	if (match < 0) {
1330 		/* Can't delete a missing entry. */
1331 		if (!enabled)
1332 			return 0;
1333 		rc = sja1105_table_resize(table, table->entry_count + 1);
1334 		if (rc)
1335 			return rc;
1336 		match = table->entry_count - 1;
1337 	}
1338 	/* Assign pointer after the resize (it's new memory) */
1339 	vlan = table->entries;
1340 	vlan[match].vlanid = vid;
1341 	if (enabled) {
1342 		vlan[match].vlan_bc |= BIT(port);
1343 		vlan[match].vmemb_port |= BIT(port);
1344 	} else {
1345 		vlan[match].vlan_bc &= ~BIT(port);
1346 		vlan[match].vmemb_port &= ~BIT(port);
1347 	}
1348 	/* Also unset tag_port if removing this VLAN was requested,
1349 	 * just so we don't have a confusing bitmap (no practical purpose).
1350 	 */
1351 	if (untagged || !enabled)
1352 		vlan[match].tag_port &= ~BIT(port);
1353 	else
1354 		vlan[match].tag_port |= BIT(port);
1355 	/* If there's no port left as member of this VLAN,
1356 	 * it's time for it to go.
1357 	 */
1358 	if (!vlan[match].vmemb_port)
1359 		keep = false;
1360 
1361 	dev_dbg(priv->ds->dev,
1362 		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
1363 		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
1364 		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
1365 		vlan[match].vmemb_port, vlan[match].tag_port, keep);
1366 
1367 	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
1368 					  &vlan[match], keep);
1369 	if (rc < 0)
1370 		return rc;
1371 
1372 	if (!keep)
1373 		return sja1105_table_delete_entry(table, match);
1374 
1375 	return 0;
1376 }
1377 
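/* Enable or disable, on all ports, the dsa_8021q VLANs that the tagger uses
 * for identifying the switch port of each frame.
 */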
1378 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1379 {
1380 	int rc, i;
1381 
1382 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1383 		rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
1384 		if (rc < 0) {
1385 			dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
1386 				i, rc);
1387 			return rc;
1388 		}
1389 	}
1390 	dev_info(ds->dev, "%s switch tagging\n",
1391 		 enabled ? "Enabled" : "Disabled");
1392 	return 0;
1393 }
1394 
1395 static enum dsa_tag_protocol
1396 sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
1397 {
1398 	return DSA_TAG_PROTO_SJA1105;
1399 }
1400 
1401 /* This callback needs to be present */
1402 static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
1403 				const struct switchdev_obj_port_vlan *vlan)
1404 {
1405 	return 0;
1406 }
1407 
1408 /* The TPID setting belongs to the General Parameters table,
1409  * which can only be partially reconfigured at runtime (and not the TPID).
1410  * So a switch reset is required.
1411  */
1412 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
1413 {
1414 	struct sja1105_general_params_entry *general_params;
1415 	struct sja1105_private *priv = ds->priv;
1416 	struct sja1105_table *table;
1417 	u16 tpid, tpid2;
1418 	int rc;
1419 
1420 	if (enabled) {
1421 		/* Enable VLAN filtering. */
1422 		tpid  = ETH_P_8021AD;
1423 		tpid2 = ETH_P_8021Q;
1424 	} else {
1425 		/* Disable VLAN filtering. */
1426 		tpid  = ETH_P_SJA1105;
1427 		tpid2 = ETH_P_SJA1105;
1428 	}
1429 
1430 	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1431 	general_params = table->entries;
1432 	/* EtherType used to identify outer tagged (S-tag) VLAN traffic */
1433 	general_params->tpid = tpid;
1434 	/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
1435 	general_params->tpid2 = tpid2;
1436 	/* When VLAN filtering is on, we need to at least be able to
1437 	 * decode management traffic through the "backup plan".
1438 	 */
1439 	general_params->incl_srcpt1 = enabled;
1440 	general_params->incl_srcpt0 = enabled;
1441 
1442 	rc = sja1105_static_config_reload(priv);
1443 	if (rc)
1444 		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
1445 
1446 	/* Switch port identification based on 802.1Q is only possible
1447 	 * if we are not under a vlan_filtering bridge. So make sure
1448 	 * the two configurations are mutually exclusive.
1449 	 */
1450 	return sja1105_setup_8021q_tagging(ds, !enabled);
1451 }
1452 
1453 static void sja1105_vlan_add(struct dsa_switch *ds, int port,
1454 			     const struct switchdev_obj_port_vlan *vlan)
1455 {
1456 	struct sja1105_private *priv = ds->priv;
1457 	u16 vid;
1458 	int rc;
1459 
1460 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1461 		rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
1462 					BRIDGE_VLAN_INFO_UNTAGGED);
1463 		if (rc < 0) {
1464 			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
1465 				vid, port, rc);
1466 			return;
1467 		}
1468 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1469 			rc = sja1105_pvid_apply(ds->priv, port, vid);
1470 			if (rc < 0) {
1471 				dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
1472 					vid, port, rc);
1473 				return;
1474 			}
1475 		}
1476 	}
1477 }
1478 
1479 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
1480 			    const struct switchdev_obj_port_vlan *vlan)
1481 {
1482 	struct sja1105_private *priv = ds->priv;
1483 	u16 vid;
1484 	int rc;
1485 
1486 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1487 		rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
1488 					BRIDGE_VLAN_INFO_UNTAGGED);
1489 		if (rc < 0) {
1490 			dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
1491 				vid, port, rc);
1492 			return rc;
1493 		}
1494 	}
1495 	return 0;
1496 }
1497 
1498 /* The programming model for the SJA1105 switch is "all-at-once" via static
1499  * configuration tables. Some of these can be dynamically modified at runtime,
1500  * but not the xMII mode parameters table.
1501  * Furthermore, some PHYs may not have crystals for generating their clocks
1502  * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
1503  * ref_clk pin. So port clocking needs to be initialized early, before
1504  * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
1505  * Setting the correct PHY link speed does not matter now.
1506  * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
1507  * bindings are not yet parsed by DSA core. We need to parse early so that we
1508  * can populate the xMII mode parameters table.
1509  */
1510 static int sja1105_setup(struct dsa_switch *ds)
1511 {
1512 	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
1513 	struct sja1105_private *priv = ds->priv;
1514 	int rc;
1515 
1516 	rc = sja1105_parse_dt(priv, ports);
1517 	if (rc < 0) {
1518 		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
1519 		return rc;
1520 	}
1521 
1522 	/* Error out early if internal delays are required through DT
1523 	 * and we can't apply them.
1524 	 */
1525 	rc = sja1105_parse_rgmii_delays(priv, ports);
1526 	if (rc < 0) {
1527 		dev_err(ds->dev, "RGMII delay not supported\n");
1528 		return rc;
1529 	}
1530 
1531 	rc = sja1105_ptp_clock_register(priv);
1532 	if (rc < 0) {
1533 		dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
1534 		return rc;
1535 	}
1536 	/* Create and send configuration down to device */
1537 	rc = sja1105_static_config_load(priv, ports);
1538 	if (rc < 0) {
1539 		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
1540 		return rc;
1541 	}
1542 	/* Configure the CGU (PHY link modes and speeds) */
1543 	rc = sja1105_clocking_setup(priv);
1544 	if (rc < 0) {
1545 		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
1546 		return rc;
1547 	}
1548 	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
1549 	 * The only thing we can do to disable it is lie about what the 802.1Q
1550 	 * EtherType is.
1551 	 * So it will still try to apply VLAN filtering, but all ingress
1552 	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
1553 	 * will be internally tagged with a distorted VLAN header where the
1554 	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
1555 	 */
1556 	ds->vlan_filtering_is_global = true;
1557 
1558 	/* The DSA/switchdev model brings up switch ports in standalone mode by
1559 	 * default, and that means vlan_filtering is 0 since they're not under
1560 	 * a bridge, so it's safe to set up switch tagging at this time.
1561 	 */
1562 	return sja1105_setup_8021q_tagging(ds, true);
1563 }
1564 
1565 static void sja1105_teardown(struct dsa_switch *ds)
1566 {
1567 	struct sja1105_private *priv = ds->priv;
1568 
1569 	cancel_work_sync(&priv->tagger_data.rxtstamp_work);
1570 	skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
1571 }
1572 
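/* Install a one-shot management route steering the frame's destination MAC
 * towards @port, hand the frame to the CPU port for transmission, then poll
 * until the switch consumes the route (ENFPORT is cleared on a match).
 */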
1573 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
1574 			     struct sk_buff *skb, bool takets)
1575 {
1576 	struct sja1105_mgmt_entry mgmt_route = {0};
1577 	struct sja1105_private *priv = ds->priv;
1578 	struct ethhdr *hdr;
1579 	int timeout = 10;
1580 	int rc;
1581 
1582 	hdr = eth_hdr(skb);
1583 
1584 	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
1585 	mgmt_route.destports = BIT(port);
1586 	mgmt_route.enfport = 1;
1587 	mgmt_route.tsreg = 0;
1588 	mgmt_route.takets = takets;
1589 
1590 	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
1591 					  slot, &mgmt_route, true);
1592 	if (rc < 0) {
1593 		kfree_skb(skb);
1594 		return rc;
1595 	}
1596 
1597 	/* Transfer skb to the host port. */
1598 	dsa_enqueue_skb(skb, ds->ports[port].slave);
1599 
1600 	/* Wait until the switch has processed the frame */
1601 	do {
1602 		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
1603 						 slot, &mgmt_route);
1604 		if (rc < 0) {
1605 			dev_err_ratelimited(priv->ds->dev,
1606 					    "failed to poll for mgmt route\n");
1607 			continue;
1608 		}
1609 
1610 		/* UM10944: The ENFPORT flag of the respective entry is
1611 		 * cleared when a match is found. The host can use this
1612 		 * flag as an acknowledgment.
1613 		 */
1614 		cpu_relax();
1615 	} while (mgmt_route.enfport && --timeout);
1616 
1617 	if (!timeout) {
1618 		/* Clean up the management route so that a follow-up
1619 		 * frame may not match on it by mistake.
1620 		 * This is only supported in hardware on P/Q/R/S - on E/T it
1621 		 * is a no-op and we silently discard the -EOPNOTSUPP.
1622 		 */
1623 		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
1624 					     slot, &mgmt_route, false);
1625 		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
1626 	}
1627 
1628 	return NETDEV_TX_OK;
1629 }
1630 
1631 /* Deferred work is unfortunately necessary because setting up the management
1632  * route cannot be done from atomic context (SPI transfer takes a sleepable
1633  * lock on the bus)
1634  */
1635 static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
1636 					      struct sk_buff *skb)
1637 {
1638 	struct sja1105_private *priv = ds->priv;
1639 	struct sja1105_port *sp = &priv->ports[port];
1640 	struct skb_shared_hwtstamps shwt = {0};
1641 	int slot = sp->mgmt_slot;
1642 	struct sk_buff *clone;
1643 	u64 now, ts;
1644 	int rc;
1645 
1646 	/* The tragic fact about the switch having 4x2 slots for installing
1647 	 * management routes is that all of them except one are actually
1648 	 * useless.
1649 	 * If 2 slots are simultaneously configured for two BPDUs sent to the
1650 	 * same (multicast) DMAC but on different egress ports, the switch
1651 	 * would confuse them and redirect the first frame it receives on the
1652 	 * CPU port towards the port configured on the numerically first slot
1653 	 * (therefore the wrong port), then the second received frame on the
1654 	 * second slot (also the wrong port).
1655 	 * So for all practical purposes, there needs to be a lock that
1656 	 * prevents that from happening. The slot used here is utterly useless
1657 	 * (could have simply been 0 just as fine), but we are doing it
1658 	 * nonetheless, in case a smarter idea ever comes up in the future.
1659 	 */
1660 	mutex_lock(&priv->mgmt_lock);
1661 
1662 	/* The clone, if there, was made by dsa_skb_tx_timestamp */
1663 	clone = DSA_SKB_CB(skb)->clone;
1664 
1665 	sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
1666 
1667 	if (!clone)
1668 		goto out;
1669 
1670 	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
1671 
1672 	mutex_lock(&priv->ptp_lock);
1673 
1674 	now = priv->tstamp_cc.read(&priv->tstamp_cc);
1675 
1676 	rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
1677 	if (rc < 0) {
1678 		dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
1679 		kfree_skb(clone);
1680 		goto out_unlock_ptp;
1681 	}
1682 
1683 	ts = sja1105_tstamp_reconstruct(priv, now, ts);
1684 	ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
1685 
1686 	shwt.hwtstamp = ns_to_ktime(ts);
1687 	skb_complete_tx_timestamp(clone, &shwt);
1688 
1689 out_unlock_ptp:
1690 	mutex_unlock(&priv->ptp_lock);
1691 out:
1692 	mutex_unlock(&priv->mgmt_lock);
1693 	return NETDEV_TX_OK;
1694 }
1695 
1696 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
1697  * which cannot be reconfigured at runtime. So a switch reset is required.
1698  */
1699 static int sja1105_set_ageing_time(struct dsa_switch *ds,
1700 				   unsigned int ageing_time)
1701 {
1702 	struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1703 	struct sja1105_private *priv = ds->priv;
1704 	struct sja1105_table *table;
1705 	unsigned int maxage;
1706 
1707 	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1708 	l2_lookup_params = table->entries;
1709 
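	/* Convert from milliseconds into the hardware MAXAGE unit */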
1710 	maxage = SJA1105_AGEING_TIME_MS(ageing_time);
1711 
1712 	if (l2_lookup_params->maxage == maxage)
1713 		return 0;
1714 
1715 	l2_lookup_params->maxage = maxage;
1716 
1717 	return sja1105_static_config_reload(priv);
1718 }
1719 
1720 /* Caller must hold priv->tagger_data.meta_lock */
1721 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
1722 				      bool on)
1723 {
1724 	struct sja1105_general_params_entry *general_params;
1725 	struct sja1105_table *table;
1726 	int rc;
1727 
1728 	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1729 	general_params = table->entries;
1730 	general_params->send_meta1 = on;
1731 	general_params->send_meta0 = on;
1732 
1733 	rc = sja1105_init_avb_params(priv, on);
1734 	if (rc < 0)
1735 		return rc;
1736 
1737 	/* Initialize the meta state machine to a known state */
1738 	if (priv->tagger_data.stampable_skb) {
1739 		kfree_skb(priv->tagger_data.stampable_skb);
1740 		priv->tagger_data.stampable_skb = NULL;
1741 	}
1742 
1743 	return sja1105_static_config_reload(priv);
1744 }
1745 
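/* Callback for the SIOCSHWTSTAMP ioctl (DSA .port_hwtstamp_set).
 * TX timestamping is a per-port setting, but RX timestamping is global to
 * the switch, and changing it requires a switch reset through
 * sja1105_change_rxtstamping() above.
 */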
1746 static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
1747 				struct ifreq *ifr)
1748 {
1749 	struct sja1105_private *priv = ds->priv;
1750 	struct hwtstamp_config config;
1751 	bool rx_on;
1752 	int rc;
1753 
1754 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1755 		return -EFAULT;
1756 
1757 	switch (config.tx_type) {
1758 	case HWTSTAMP_TX_OFF:
1759 		priv->ports[port].hwts_tx_en = false;
1760 		break;
1761 	case HWTSTAMP_TX_ON:
1762 		priv->ports[port].hwts_tx_en = true;
1763 		break;
1764 	default:
1765 		return -ERANGE;
1766 	}
1767 
1768 	switch (config.rx_filter) {
1769 	case HWTSTAMP_FILTER_NONE:
1770 		rx_on = false;
1771 		break;
1772 	default:
1773 		rx_on = true;
1774 		break;
1775 	}
1776 
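	/* Only pay the cost of a switch reset (static config reload) when
	 * the RX timestamping state actually changes.
	 */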
1777 	if (rx_on != priv->tagger_data.hwts_rx_en) {
1778 		spin_lock(&priv->tagger_data.meta_lock);
1779 		rc = sja1105_change_rxtstamping(priv, rx_on);
1780 		spin_unlock(&priv->tagger_data.meta_lock);
1781 		if (rc < 0) {
1782 			dev_err(ds->dev,
1783 				"Failed to change RX timestamping: %d\n", rc);
1784 			return rc;
1785 		}
1786 		priv->tagger_data.hwts_rx_en = rx_on;
1787 	}
1788 
1789 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1790 		return -EFAULT;
1791 	return 0;
1792 }
1793 
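/* Callback for the SIOCGHWTSTAMP ioctl (DSA .port_hwtstamp_get): report the
 * current per-port TX and switch-wide RX timestamping state to user space.
 */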
1794 static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
1795 				struct ifreq *ifr)
1796 {
1797 	struct sja1105_private *priv = ds->priv;
1798 	struct hwtstamp_config config;
1799 
1800 	config.flags = 0;
1801 	if (priv->ports[port].hwts_tx_en)
1802 		config.tx_type = HWTSTAMP_TX_ON;
1803 	else
1804 		config.tx_type = HWTSTAMP_TX_OFF;
1805 	if (priv->tagger_data.hwts_rx_en)
1806 		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1807 	else
1808 		config.rx_filter = HWTSTAMP_FILTER_NONE;
1809 
1810 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1811 		-EFAULT : 0;
1812 }
1813 
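/* Helpers to go from the deferred work item back to the tagger data, and
 * from there to the driver private structure embedding it.
 */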
1814 #define to_tagger(d) \
1815 	container_of((d), struct sja1105_tagger_data, rxtstamp_work)
1816 #define to_sja1105(d) \
1817 	container_of((d), struct sja1105_private, tagger_data)
1818 
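/* Deferred work (process context) for delivering RX timestamps. Reading the
 * full PTP clock goes over SPI, which may sleep, so it cannot be done from
 * the tagger's receive path.
 */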
1819 static void sja1105_rxtstamp_work(struct work_struct *work)
1820 {
1821 	struct sja1105_tagger_data *data = to_tagger(work);
1822 	struct sja1105_private *priv = to_sja1105(data);
1823 	struct sk_buff *skb;
1824 	u64 now;
1825 
1826 	mutex_lock(&priv->ptp_lock);
1827 
1828 	now = priv->tstamp_cc.read(&priv->tstamp_cc);
1829 
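	/* Each queued skb carries only the partial timestamp taken from its
	 * meta follow-up frame; extend it to a full cycle counter value and
	 * convert that to nanoseconds through the timecounter.
	 */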
1830 	while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
1831 		struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
1832 		u64 ts;
1833 
1834 		*shwt = (struct skb_shared_hwtstamps) {0};
1835 
1836 		ts = SJA1105_SKB_CB(skb)->meta_tstamp;
1837 		ts = sja1105_tstamp_reconstruct(priv, now, ts);
1838 		ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
1839 
1840 		shwt->hwtstamp = ns_to_ktime(ts);
1841 		netif_rx_ni(skb);
1842 	}
1843 
1844 	mutex_unlock(&priv->ptp_lock);
1845 }
1846 
1847 /* Called from dsa_skb_defer_rx_timestamp */
1848 static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
1849 				  struct sk_buff *skb, unsigned int type)
1850 {
1851 	struct sja1105_private *priv = ds->priv;
1852 	struct sja1105_tagger_data *data = &priv->tagger_data;
1853 
1854 	if (!data->hwts_rx_en)
1855 		return false;
1856 
1857 	/* We need to read the full PTP clock to reconstruct the Rx
1858 	 * timestamp. For that we need a sleepable context.
1859 	 */
1860 	skb_queue_tail(&data->skb_rxtstamp_queue, skb);
1861 	schedule_work(&data->rxtstamp_work);
1862 	return true;
1863 }
1864 
1865 /* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
1866  * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
1867  * callback, where we will timestamp it synchronously.
1868  */
1869 static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
1870 				  struct sk_buff *skb, unsigned int type)
1871 {
1872 	struct sja1105_private *priv = ds->priv;
1873 	struct sja1105_port *sp = &priv->ports[port];
1874 
1875 	if (!sp->hwts_tx_en)
1876 		return false;
1877 
1878 	return true;
1879 }
1880 
1881 static const struct dsa_switch_ops sja1105_switch_ops = {
1882 	.get_tag_protocol	= sja1105_get_tag_protocol,
1883 	.setup			= sja1105_setup,
1884 	.teardown		= sja1105_teardown,
1885 	.set_ageing_time	= sja1105_set_ageing_time,
1886 	.phylink_validate	= sja1105_phylink_validate,
1887 	.phylink_mac_config	= sja1105_mac_config,
1888 	.phylink_mac_link_up	= sja1105_mac_link_up,
1889 	.phylink_mac_link_down	= sja1105_mac_link_down,
1890 	.get_strings		= sja1105_get_strings,
1891 	.get_ethtool_stats	= sja1105_get_ethtool_stats,
1892 	.get_sset_count		= sja1105_get_sset_count,
1893 	.get_ts_info		= sja1105_get_ts_info,
1894 	.port_fdb_dump		= sja1105_fdb_dump,
1895 	.port_fdb_add		= sja1105_fdb_add,
1896 	.port_fdb_del		= sja1105_fdb_del,
1897 	.port_bridge_join	= sja1105_bridge_join,
1898 	.port_bridge_leave	= sja1105_bridge_leave,
1899 	.port_stp_state_set	= sja1105_bridge_stp_state_set,
1900 	.port_vlan_prepare	= sja1105_vlan_prepare,
1901 	.port_vlan_filtering	= sja1105_vlan_filtering,
1902 	.port_vlan_add		= sja1105_vlan_add,
1903 	.port_vlan_del		= sja1105_vlan_del,
1904 	.port_mdb_prepare	= sja1105_mdb_prepare,
1905 	.port_mdb_add		= sja1105_mdb_add,
1906 	.port_mdb_del		= sja1105_mdb_del,
1907 	.port_deferred_xmit	= sja1105_port_deferred_xmit,
1908 	.port_hwtstamp_get	= sja1105_hwtstamp_get,
1909 	.port_hwtstamp_set	= sja1105_hwtstamp_set,
1910 	.port_rxtstamp		= sja1105_port_rxtstamp,
1911 	.port_txtstamp		= sja1105_port_txtstamp,
1912 };
1913 
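/* Read the DEVICE_ID and PROD_ID registers over SPI and check that they
 * match the switch variant selected through the device tree compatible.
 */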
1914 static int sja1105_check_device_id(struct sja1105_private *priv)
1915 {
1916 	const struct sja1105_regs *regs = priv->info->regs;
1917 	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
1918 	struct device *dev = &priv->spidev->dev;
1919 	u64 device_id;
1920 	u64 part_no;
1921 	int rc;
1922 
1923 	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
1924 				  &device_id, SJA1105_SIZE_DEVICE_ID);
1925 	if (rc < 0)
1926 		return rc;
1927 
1928 	if (device_id != priv->info->device_id) {
1929 		dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
1930 			priv->info->device_id, device_id);
1931 		return -ENODEV;
1932 	}
1933 
1934 	rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
1935 					 prod_id, SJA1105_SIZE_DEVICE_ID);
1936 	if (rc < 0)
1937 		return rc;
1938 
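	/* The part number sits in bits 19:4 of the PROD_ID register */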
1939 	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
1940 
1941 	if (part_no != priv->info->part_no) {
1942 		dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
1943 			priv->info->part_no, part_no);
1944 		return -ENODEV;
1945 	}
1946 
1947 	return 0;
1948 }
1949 
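/* SPI probe: bring the switch out of reset (if a reset GPIO is provided),
 * configure the SPI bus, verify the device ID, then allocate and register
 * the DSA switch.
 */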
1950 static int sja1105_probe(struct spi_device *spi)
1951 {
1952 	struct sja1105_tagger_data *tagger_data;
1953 	struct device *dev = &spi->dev;
1954 	struct sja1105_private *priv;
1955 	struct dsa_switch *ds;
1956 	int rc, i;
1957 
1958 	if (!dev->of_node) {
1959 		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
1960 		return -EINVAL;
1961 	}
1962 
1963 	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
1964 	if (!priv)
1965 		return -ENOMEM;
1966 
1967 	/* Configure the optional reset pin and bring up switch */
1968 	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
1969 	if (IS_ERR(priv->reset_gpio))
1970 		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
1971 	else
1972 		sja1105_hw_reset(priv->reset_gpio, 1, 1);
1973 
1974 	/* Populate our driver private structure (priv) based on
1975 	 * the device tree node that was probed (spi)
1976 	 */
1977 	priv->spidev = spi;
1978 	spi_set_drvdata(spi, priv);
1979 
1980 	/* Configure the SPI bus */
1981 	spi->bits_per_word = 8;
1982 	rc = spi_setup(spi);
1983 	if (rc < 0) {
1984 		dev_err(dev, "Could not init SPI\n");
1985 		return rc;
1986 	}
1987 
1988 	priv->info = of_device_get_match_data(dev);
1989 
1990 	/* Detect hardware device */
1991 	rc = sja1105_check_device_id(priv);
1992 	if (rc < 0) {
1993 		dev_err(dev, "Device ID check failed: %d\n", rc);
1994 		return rc;
1995 	}
1996 
1997 	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
1998 
1999 	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
2000 	if (!ds)
2001 		return -ENOMEM;
2002 
2003 	ds->ops = &sja1105_switch_ops;
2004 	ds->priv = priv;
2005 	priv->ds = ds;
2006 
2007 	tagger_data = &priv->tagger_data;
2008 	skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
2009 	INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
2010 
2011 	/* Connections between dsa_port and sja1105_port */
2012 	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
2013 		struct sja1105_port *sp = &priv->ports[i];
2014 
2015 		ds->ports[i].priv = sp;
2016 		sp->dp = &ds->ports[i];
2017 		sp->data = tagger_data;
2018 	}
2019 	mutex_init(&priv->mgmt_lock);
2020 
2021 	return dsa_register_switch(priv->ds);
2022 }
2023 
2024 static int sja1105_remove(struct spi_device *spi)
2025 {
2026 	struct sja1105_private *priv = spi_get_drvdata(spi);
2027 
2028 	sja1105_ptp_clock_unregister(priv);
2029 	dsa_unregister_switch(priv->ds);
2030 	sja1105_static_config_free(&priv->static_config);
2031 	return 0;
2032 }
2033 
2034 static const struct of_device_id sja1105_dt_ids[] = {
2035 	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
2036 	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
2037 	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
2038 	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
2039 	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
2040 	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
2041 	{ /* sentinel */ },
2042 };
2043 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
2044 
2045 static struct spi_driver sja1105_driver = {
2046 	.driver = {
2047 		.name  = "sja1105",
2048 		.owner = THIS_MODULE,
2049 		.of_match_table = of_match_ptr(sja1105_dt_ids),
2050 	},
2051 	.probe  = sja1105_probe,
2052 	.remove = sja1105_remove,
2053 };
2054 
2055 module_spi_driver(sja1105_driver);
2056 
2057 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
2058 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
2059 MODULE_DESCRIPTION("SJA1105 Driver");
2060 MODULE_LICENSE("GPL v2");
2061