// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* Commands for Mac Table Command register */
#define MAC_CMD_LEARN         0 /* Insert (Learn) 1 entry */
#define MAC_CMD_UNLEARN       1 /* Unlearn (Forget) 1 entry */
#define MAC_CMD_LOOKUP        2 /* Look up 1 entry */
#define MAC_CMD_READ          3 /* Read entry at Mac Table Index */
#define MAC_CMD_WRITE         4 /* Write entry at Mac Table Index */
#define MAC_CMD_SCAN          5 /* Scan (Age or find next) */
#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
#define MAC_CMD_CLEAR_ALL     7 /* Delete all entries in table */

/* Values for MAC_ENTRY_ADDR_TYPE, selecting how MAC_ENTRY_ADDR is
 * interpreted: a front port (UPSID + port number), the CPU or an
 * internal port, a global aggregation group (GLAG), or a multicast
 * index (MC_IDX).
 */
#define  MAC_ENTRY_ADDR_TYPE_UPSID_PN         0
#define  MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
#define  MAC_ENTRY_ADDR_TYPE_GLAG             2
#define  MAC_ENTRY_ADDR_TYPE_MC_IDX           3

#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000

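/* Software shadow of a hardware MAC table entry, kept on the
 * sparx5->mact_entries list.  MAC_ENT_ALIVE marks entries seen in
 * hardware during the last scan, MAC_ENT_MOVED marks entries whose
 * port changed, and MAC_ENT_LOCK marks static entries that are never
 * aged out of the software list.
 */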
struct sparx5_mact_entry {
	struct list_head list;
	unsigned char mac[ETH_ALEN];
	u32 flags;
#define MAC_ENT_ALIVE	BIT(0)
#define MAC_ENT_MOVED	BIT(1)
#define MAC_ENT_LOCK	BIT(2)
	u16 vid;
	u16 port;
};

static int sparx5_mact_get_status(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
}

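/* Poll the MAC table access status until the MAC_TABLE_ACCESS_SHOT bit
 * is cleared by hardware, checking every TABLE_UPDATE_SLEEP_US
 * microseconds.  Returns 0 on completion or -ETIMEDOUT after
 * TABLE_UPDATE_TIMEOUT_US.
 */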
static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
{
	u32 val;

	return readx_poll_timeout(sparx5_mact_get_status,
		sparx5, val,
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}

static void sparx5_mact_select(struct sparx5 *sparx5,
			       const unsigned char mac[ETH_ALEN],
			       u16 vid)
{
	u32 macl = 0, mach = 0;

	/* Set the MAC address to handle and the associated VLAN in a
	 * format understood by the hardware.
	 */
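	/* Example: mac = 00:11:22:33:44:55 and vid = 1 gives
	 * mach = 0x00010011 and macl = 0x22334455.
	 */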
	mach |= vid    << 16;
	mach |= mac[0] << 8;
	mach |= mac[1] << 0;
	macl |= mac[2] << 24;
	macl |= mac[3] << 16;
	macl |= mac[4] << 8;
	macl |= mac[5] << 0;

	spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
	spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
}

int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
		      const unsigned char mac[ETH_ALEN], u16 vid)
{
	const struct sparx5_consts *consts = sparx5->data->consts;
	int addr, type, ret;

	if (pgid < consts->n_ports) {
		type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
		addr = pgid % 32;
		addr += (pgid / 32) << 5; /* Add upsid */
	} else {
		type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
		addr = pgid - consts->n_ports;
	}
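	/* Example: pgid 40 selects UPSID_PN addressing with port number 8
	 * in the low five address bits and UPSID 1 above them, i.e.
	 * addr = 0x28.
	 */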

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* MAC entry properties */
	spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
		sparx5, LRN_MAC_ACCESS_CFG_2);
	spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);

	/* Insert/learn new entry */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);

	mutex_unlock(&sparx5->lock);

	return ret;
}

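/* sparx5_mc_sync() and sparx5_mc_unsync() match the sync/unsync callback
 * signature used by dev_mc_sync()/dev_mc_unsync() (an assumption about the
 * caller; registration happens elsewhere in the driver).  Multicast
 * addresses from the stack are learned towards the CPU port group
 * (PGID_CPU) on the port's PVID, and forgotten again on unsync.
 */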
int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;

	return sparx5_mact_forget(sparx5, addr, port->pvid);
}

int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;

	return sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
				 addr, port->pvid);
}

static int sparx5_mact_get(struct sparx5 *sparx5,
			   unsigned char mac[ETH_ALEN],
			   u16 *vid, u32 *pcfg2)
{
	u32 mach, macl, cfg2;
	int ret = -ENOENT;

	cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
	if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
		mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
		macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
		mac[0] = ((mach >> 8)  & 0xff);
		mac[1] = ((mach >> 0)  & 0xff);
		mac[2] = ((macl >> 24) & 0xff);
		mac[3] = ((macl >> 16) & 0xff);
		mac[4] = ((macl >> 8)  & 0xff);
		mac[5] = ((macl >> 0)  & 0xff);
		*vid = mach >> 16;
		*pcfg2 = cfg2;
		ret = 0;
	}

	return ret;
}

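/* Find the next valid entry after the given (vid, mac) key, in (vid, mac)
 * order, using the FIND_SMALLEST command.  On success the key and *pcfg2
 * are updated in place, so callers can walk the whole table by starting
 * from a zeroed MAC and VID 0 and calling repeatedly.
 */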
bool sparx5_mact_getnext(struct sparx5 *sparx5,
			 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
{
	u32 cfg2;
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, *vid);

	spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
		LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
		sparx5, LRN_SCAN_NEXT_CFG);
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
		(MAC_CMD_FIND_SMALLEST) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);
	if (ret == 0) {
		ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
		if (ret == 0)
			*pcfg2 = cfg2;
	}

	mutex_unlock(&sparx5->lock);

	return ret == 0;
}

int sparx5_mact_find(struct sparx5 *sparx5,
		     const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
	int ret;
	u32 cfg2;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* Issue a lookup command */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);
	if (ret == 0) {
		cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
		if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
			*pcfg2 = cfg2;
		else
			ret = -ENOENT;
	}

	mutex_unlock(&sparx5->lock);

	return ret;
}

int sparx5_mact_forget(struct sparx5 *sparx5,
		       const unsigned char mac[ETH_ALEN], u16 vid)
{
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* Issue an unlearn command */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);

	mutex_unlock(&sparx5->lock);

	return ret;
}

static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
						  const unsigned char *mac,
						  u16 vid, u16 port_index)
{
	struct sparx5_mact_entry *mact_entry;

	mact_entry = devm_kzalloc(sparx5->dev,
				  sizeof(*mact_entry), GFP_ATOMIC);
	if (!mact_entry)
		return NULL;

	memcpy(mact_entry->mac, mac, ETH_ALEN);
	mact_entry->vid = vid;
	mact_entry->port = port_index;
	return mact_entry;
}

static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
						 const unsigned char *mac,
						 u16 vid, u16 port_index)
{
	struct sparx5_mact_entry *mact_entry;
	struct sparx5_mact_entry *res = NULL;

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
		if (mact_entry->vid == vid &&
		    ether_addr_equal(mac, mact_entry->mac) &&
		    mact_entry->port == port_index) {
			res = mact_entry;
			break;
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	return res;
}

static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
				      const char *mac, u16 vid,
				      struct net_device *dev, bool offloaded)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = mac;
	info.vid = vid;
	info.offloaded = offloaded;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}

int sparx5_add_mact_entry(struct sparx5 *sparx5,
			  struct net_device *dev,
			  u16 portno,
			  const unsigned char *addr, u16 vid)
{
	struct sparx5_mact_entry *mact_entry;
	int ret;
	u32 cfg2;

	ret = sparx5_mact_find(sparx5, addr, vid, &cfg2);
	if (!ret)
		return 0;

	/* In case the entry already exists, don't add it again to SW,
	 * just update HW.  We need to look in the actual HW because it is
	 * possible for an entry to be learned by HW before the mact work
	 * starts: the frame reaches the CPU, and the CPU adds the entry,
	 * but without the extern_learn flag.
	 */
	mact_entry = find_mact_entry(sparx5, addr, vid, portno);
	if (mact_entry)
		goto update_hw;

	/* Add the entry to the SW MAC table so that no notification is
	 * generated when SW pulls the table again.
	 */
	mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
	if (!mact_entry)
		return -ENOMEM;

	mutex_lock(&sparx5->mact_lock);
	list_add_tail(&mact_entry->list, &sparx5->mact_entries);
	mutex_unlock(&sparx5->mact_lock);

update_hw:
	ret = sparx5_mact_learn(sparx5, portno, addr, vid);

	/* New entry? */
	if (mact_entry->flags == 0) {
		mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
		sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
					  dev, true);
	}

	return ret;
}

int sparx5_del_mact_entry(struct sparx5 *sparx5,
			  const unsigned char *addr,
			  u16 vid)
{
	struct sparx5_mact_entry *mact_entry, *tmp;

	/* Delete the entry from the SW MAC table so that no notification is
	 * generated when SW pulls the table again.
	 */
	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
				 list) {
		if ((vid == 0 || mact_entry->vid == vid) &&
		    ether_addr_equal(addr, mact_entry->mac)) {
			sparx5_mact_forget(sparx5, addr, mact_entry->vid);

			list_del(&mact_entry->list);
			devm_kfree(sparx5->dev, mact_entry);
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	return 0;
}

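/* Decide what to do with an entry returned by the hardware scan: ignore
 * entries that do not point at a bridged front port, mark known entries
 * as alive (detecting port moves), and add unknown entries to the SW
 * list, notifying the bridge about new or moved entries.
 */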
static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
				     unsigned char mac[ETH_ALEN],
				     u16 vid, u32 cfg2)
{
	struct sparx5_mact_entry *mact_entry;
	bool found = false;
	u16 port;

	if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
	    MAC_ENTRY_ADDR_TYPE_UPSID_PN)
		return;

	port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
	if (port >= sparx5->data->consts->n_ports)
		return;

	if (!test_bit(port, sparx5->bridge_mask))
		return;

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
		if (mact_entry->vid == vid &&
		    ether_addr_equal(mac, mact_entry->mac)) {
			found = true;
			mact_entry->flags |= MAC_ENT_ALIVE;
			if (mact_entry->port != port) {
				dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
					 mact_entry->port, port);
				mact_entry->port = port;
				mact_entry->flags |= MAC_ENT_MOVED;
			}
			/* Entry handled */
			break;
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	if (found && !(mact_entry->flags & MAC_ENT_MOVED))
		/* Present, not moved */
		return;

	if (!found) {
		/* Entry not found - now add */
		mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
		if (!mact_entry)
			return;

		mact_entry->flags |= MAC_ENT_ALIVE;
		mutex_lock(&sparx5->mact_lock);
		list_add_tail(&mact_entry->list, &sparx5->mact_entries);
		mutex_unlock(&sparx5->mact_lock);
	}

	/* New or moved entry - notify bridge */
	sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
				  mac, vid, sparx5->ports[port]->ndev,
				  true);
}

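/* Periodic work that keeps the SW view in sync with the hardware MAC
 * table: clear the ALIVE flag on all SW entries, walk the hardware table
 * marking entries that are still present, then notify the bridge about
 * entries that have disappeared and drop them from the SW list.  The
 * work re-queues itself with a SPX5_MACT_PULL_DELAY interval.
 */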
void sparx5_mact_pull_work(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
					     mact_work);
	struct sparx5_mact_entry *mact_entry, *tmp;
	unsigned char mac[ETH_ALEN];
	u32 cfg2;
	u16 vid;
	int ret;

	/* Reset MAC entry flags, keeping only the LOCK bit */
	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
		mact_entry->flags &= MAC_ENT_LOCK;
	mutex_unlock(&sparx5->mact_lock);

	/* Main MAC address processing loop: walk the hardware table with
	 * FIND_SMALLEST, continuing from the last entry found.
	 */
	vid = 0;
	memset(mac, 0, sizeof(mac));
	do {
		mutex_lock(&sparx5->lock);
		sparx5_mact_select(sparx5, mac, vid);
		spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
			sparx5, LRN_SCAN_NEXT_CFG);
		spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
			(MAC_CMD_FIND_SMALLEST) |
			LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
			sparx5, LRN_COMMON_ACCESS_CTRL);
		ret = sparx5_mact_wait_for_completion(sparx5);
		if (ret == 0)
			ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
		mutex_unlock(&sparx5->lock);
		if (ret == 0)
			sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
	} while (ret == 0);

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
				 list) {
		/* If the entry is in HW or permanent, then skip */
		if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
			continue;

		sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					  mact_entry->mac, mact_entry->vid,
					  sparx5->ports[mact_entry->port]->ndev,
					  true);

		list_del(&mact_entry->list);
		devm_kfree(sparx5->dev, mact_entry);
	}
	mutex_unlock(&sparx5->mact_lock);

	queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
			   SPX5_MACT_PULL_DELAY);
}

void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
{
	int value = max(1, msecs / 10); /* unit 10 ms */

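	/* The hardware ages entries in two passes (one-bit ageing), so
	 * program half the requested period per scan.  Worked example
	 * (illustration only): msecs = 300000 gives value = 30000, so
	 * PERIOD_VAL = 15000 units of 10 ms, i.e. a scan every 150 s and
	 * an unused entry removed after roughly two scans (~300 s).
	 */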
	spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
		 LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
		 LRN_AUTOAGE_CFG_UNIT_SIZE |
		 LRN_AUTOAGE_CFG_PERIOD_VAL,
		 sparx5,
		 LRN_AUTOAGE_CFG(0));
}

void sparx5_mact_init(struct sparx5 *sparx5)
{
	mutex_init(&sparx5->lock);

	/* Flush MAC table */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	if (sparx5_mact_wait_for_completion(sparx5) != 0)
		dev_warn(sparx5->dev, "MAC flush error\n");

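	/* BR_DEFAULT_AGEING_TIME is 300 * HZ, so this programs the bridge
	 * default ageing time of 300 seconds (300000 ms).
	 */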
	sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
}