xref: /linux/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c (revision b92dd11725a7c57f55e148c7d3ce58a86f480575)
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

/* Commands for Mac Table Command register */
#define MAC_CMD_LEARN         0 /* Insert (Learn) 1 entry */
#define MAC_CMD_UNLEARN       1 /* Unlearn (Forget) 1 entry */
#define MAC_CMD_LOOKUP        2 /* Look up 1 entry */
#define MAC_CMD_READ          3 /* Read entry at Mac Table Index */
#define MAC_CMD_WRITE         4 /* Write entry at Mac Table Index */
#define MAC_CMD_SCAN          5 /* Scan (Age or find next) */
#define MAC_CMD_FIND_SMALLEST 6 /* Get next entry */
#define MAC_CMD_CLEAR_ALL     7 /* Delete all entries in table */

/* Address types for MAC_ENTRY_ADDR_TYPE */
#define  MAC_ENTRY_ADDR_TYPE_UPSID_PN         0
#define  MAC_ENTRY_ADDR_TYPE_UPSID_CPU_OR_INT 1
#define  MAC_ENTRY_ADDR_TYPE_GLAG             2
#define  MAC_ENTRY_ADDR_TYPE_MC_IDX           3

#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000

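/* SW shadow of the HW MAC table, kept in sparx5->mact_entries and protected
 * by sparx5->mact_lock. Flags:
 *  MAC_ENT_ALIVE - the entry was seen in HW during the last table scan
 *  MAC_ENT_MOVED - the entry was seen on a different port than last time
 *  MAC_ENT_LOCK  - static entry added by SW; never aged out of the SW list
 */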
struct sparx5_mact_entry {
	struct list_head list;
	unsigned char mac[ETH_ALEN];
	u32 flags;
#define MAC_ENT_ALIVE	BIT(0)
#define MAC_ENT_MOVED	BIT(1)
#define MAC_ENT_LOCK	BIT(2)
	u16 vid;
	u16 port;
};

static int sparx5_mact_get_status(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, LRN_COMMON_ACCESS_CTRL);
}

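/* A command is started by writing it together with MAC_TABLE_ACCESS_SHOT;
 * the hardware clears the shot bit again when the command has completed,
 * so completion is detected by polling the bit until it reads as zero.
 */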
static int sparx5_mact_wait_for_completion(struct sparx5 *sparx5)
{
	u32 val;

	return readx_poll_timeout(sparx5_mact_get_status,
		sparx5, val,
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_GET(val) == 0,
		TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}

static void sparx5_mact_select(struct sparx5 *sparx5,
			       const unsigned char mac[ETH_ALEN],
			       u16 vid)
{
	u32 macl = 0, mach = 0;

	/* Set the MAC address to handle and the associated VLAN in the
	 * format understood by the hardware.
	 */
	mach |= vid    << 16;
	mach |= mac[0] << 8;
	mach |= mac[1] << 0;
	macl |= mac[2] << 24;
	macl |= mac[3] << 16;
	macl |= mac[4] << 8;
	macl |= mac[5] << 0;

	spx5_wr(mach, sparx5, LRN_MAC_ACCESS_CFG_0);
	spx5_wr(macl, sparx5, LRN_MAC_ACCESS_CFG_1);
}

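/* Add a locked (static) entry to the HW MAC table. For front ports
 * (pgid < SPX5_PORTS) the destination is encoded as UPSID and port number
 * (32 ports per UPSID); higher pgid values are encoded as a multicast
 * index relative to SPX5_PORTS.
 */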
int sparx5_mact_learn(struct sparx5 *sparx5, int pgid,
		      const unsigned char mac[ETH_ALEN], u16 vid)
{
	int addr, type, ret;

	if (pgid < SPX5_PORTS) {
		type = MAC_ENTRY_ADDR_TYPE_UPSID_PN;
		addr = pgid % 32;
		addr += (pgid / 32) << 5; /* Add upsid */
	} else {
		type = MAC_ENTRY_ADDR_TYPE_MC_IDX;
		addr = pgid - SPX5_PORTS;
	}

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* MAC entry properties */
	spx5_wr(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_SET(addr) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_SET(type) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_SET(1) |
		LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_LOCKED_SET(1),
		sparx5, LRN_MAC_ACCESS_CFG_2);
	spx5_wr(0, sparx5, LRN_MAC_ACCESS_CFG_3);

	/* Insert/learn new entry */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LEARN) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);

	mutex_unlock(&sparx5->lock);

	return ret;
}

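/* Sync/unsync callbacks for a port netdev's multicast address list:
 * addresses are learned towards the CPU port group on the port's PVID and
 * forgotten again when they are no longer requested.
 */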
int sparx5_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;

	return sparx5_mact_forget(sparx5, addr, port->pvid);
}

int sparx5_mc_sync(struct net_device *dev, const unsigned char *addr)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;

	return sparx5_mact_learn(sparx5, PGID_CPU, addr, port->pvid);
}

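/* Decode the result of a completed MAC table command from the
 * LRN_MAC_ACCESS_CFG_* registers. Returns -ENOENT if the entry read back
 * is not marked valid. The caller must hold sparx5->lock.
 */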
static int sparx5_mact_get(struct sparx5 *sparx5,
			   unsigned char mac[ETH_ALEN],
			   u16 *vid, u32 *pcfg2)
{
	u32 mach, macl, cfg2;
	int ret = -ENOENT;

	cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
	if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2)) {
		mach = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_0);
		macl = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_1);
		mac[0] = ((mach >> 8)  & 0xff);
		mac[1] = ((mach >> 0)  & 0xff);
		mac[2] = ((macl >> 24) & 0xff);
		mac[3] = ((macl >> 16) & 0xff);
		mac[4] = ((macl >> 8)  & 0xff);
		mac[5] = ((macl >> 0)  & 0xff);
		*vid = mach >> 16;
		*pcfg2 = cfg2;
		ret = 0;
	}

	return ret;
}

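/* Find the next valid entry following the given (mac, vid) starting point
 * using the FIND_SMALLEST command. mac and vid are both input (starting
 * point) and output (the entry found); returns true when an entry is found.
 */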
bool sparx5_mact_getnext(struct sparx5 *sparx5,
			 unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2)
{
	u32 cfg2;
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, *vid);

	spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_IGNORE_LOCKED_ENA_SET(1) |
		LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
		sparx5, LRN_SCAN_NEXT_CFG);
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
		(MAC_CMD_FIND_SMALLEST) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);
	if (ret == 0) {
		ret = sparx5_mact_get(sparx5, mac, vid, &cfg2);
		if (ret == 0)
			*pcfg2 = cfg2;
	}

	mutex_unlock(&sparx5->lock);

	return ret == 0;
}

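/* Look up a single (mac, vid) entry in the HW MAC table and return its
 * LRN_MAC_ACCESS_CFG_2 value through pcfg2, or -ENOENT if it does not exist.
 */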
int sparx5_mact_find(struct sparx5 *sparx5,
		     const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
	int ret;
	u32 cfg2;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* Issue a lookup command */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_LOOKUP) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);
	if (ret == 0) {
		cfg2 = spx5_rd(sparx5, LRN_MAC_ACCESS_CFG_2);
		if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_VLD_GET(cfg2))
			*pcfg2 = cfg2;
		else
			ret = -ENOENT;
	}

	mutex_unlock(&sparx5->lock);

	return ret;
}

int sparx5_mact_forget(struct sparx5 *sparx5,
		       const unsigned char mac[ETH_ALEN], u16 vid)
{
	int ret;

	mutex_lock(&sparx5->lock);

	sparx5_mact_select(sparx5, mac, vid);

	/* Issue an unlearn command */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_UNLEARN) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	ret = sparx5_mact_wait_for_completion(sparx5);

	mutex_unlock(&sparx5->lock);

	return ret;
}

static struct sparx5_mact_entry *alloc_mact_entry(struct sparx5 *sparx5,
						  const unsigned char *mac,
						  u16 vid, u16 port_index)
{
	struct sparx5_mact_entry *mact_entry;

	mact_entry = devm_kzalloc(sparx5->dev,
				  sizeof(*mact_entry), GFP_ATOMIC);
	if (!mact_entry)
		return NULL;

	memcpy(mact_entry->mac, mac, ETH_ALEN);
	mact_entry->vid = vid;
	mact_entry->port = port_index;
	return mact_entry;
}

static struct sparx5_mact_entry *find_mact_entry(struct sparx5 *sparx5,
						 const unsigned char *mac,
						 u16 vid, u16 port_index)
{
	struct sparx5_mact_entry *mact_entry;
	struct sparx5_mact_entry *res = NULL;

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
		if (mact_entry->vid == vid &&
		    ether_addr_equal(mac, mact_entry->mac) &&
		    mact_entry->port == port_index) {
			res = mact_entry;
			break;
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	return res;
}

static void sparx5_fdb_call_notifiers(enum switchdev_notifier_type type,
				      const char *mac, u16 vid,
				      struct net_device *dev, bool offloaded)
{
	struct switchdev_notifier_fdb_info info = {};

	info.addr = mac;
	info.vid = vid;
	info.offloaded = offloaded;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}

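/* Add a static entry to both the HW MAC table and the SW shadow list, and
 * notify the bridge that the address is offloaded.
 */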
int sparx5_add_mact_entry(struct sparx5 *sparx5,
			  struct net_device *dev,
			  u16 portno,
			  const unsigned char *addr, u16 vid)
{
	struct sparx5_mact_entry *mact_entry;
	int ret;
	u32 cfg2;

	ret = sparx5_mact_find(sparx5, addr, vid, &cfg2);
	if (!ret)
		return 0;

	/* In case the entry already exists, don't add it again to SW,
	 * just update HW. We still need to check the actual HW state,
	 * because an entry may be learned by HW and, before the mact
	 * worker runs, the frame can reach the CPU, which then adds the
	 * entry but without the extern_learn flag.
	 */
	mact_entry = find_mact_entry(sparx5, addr, vid, portno);
	if (mact_entry)
		goto update_hw;

	/* Add the entry to the SW MAC table so that no notification is
	 * sent when SW pulls the HW table again.
	 */
	mact_entry = alloc_mact_entry(sparx5, addr, vid, portno);
	if (!mact_entry)
		return -ENOMEM;

	mutex_lock(&sparx5->mact_lock);
	list_add_tail(&mact_entry->list, &sparx5->mact_entries);
	mutex_unlock(&sparx5->mact_lock);

update_hw:
	ret = sparx5_mact_learn(sparx5, portno, addr, vid);

	/* New entry? */
	if (mact_entry->flags == 0) {
		mact_entry->flags |= MAC_ENT_LOCK; /* Don't age this */
		sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, addr, vid,
					  dev, true);
	}

	return ret;
}

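/* Remove a static entry from both the SW shadow list and the HW MAC table.
 * A vid of 0 matches the address on any VLAN.
 */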
int sparx5_del_mact_entry(struct sparx5 *sparx5,
			  const unsigned char *addr,
			  u16 vid)
{
	struct sparx5_mact_entry *mact_entry, *tmp;

	/* Delete the entry from the SW MAC table so that no notification
	 * is sent when SW pulls the HW table again.
	 */
	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
				 list) {
		if ((vid == 0 || mact_entry->vid == vid) &&
		    ether_addr_equal(addr, mact_entry->mac)) {
			/* Remove from HW before freeing the SW entry; the
			 * entry's vid is still needed for the unlearn command.
			 */
			sparx5_mact_forget(sparx5, addr, mact_entry->vid);

			list_del(&mact_entry->list);
			devm_kfree(sparx5->dev, mact_entry);
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	return 0;
}

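/* Handle one entry returned by the HW table scan. Only single-port
 * (UPSID_PN) entries on ports that are part of the bridge are tracked:
 * known entries are marked alive (and moved if the port changed), unknown
 * entries are added to the SW list, and new or moved entries are reported
 * to the bridge.
 */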
static void sparx5_mact_handle_entry(struct sparx5 *sparx5,
				     unsigned char mac[ETH_ALEN],
				     u16 vid, u32 cfg2)
{
	struct sparx5_mact_entry *mact_entry;
	bool found = false;
	u16 port;

	if (LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_TYPE_GET(cfg2) !=
	    MAC_ENTRY_ADDR_TYPE_UPSID_PN)
		return;

	port = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(cfg2);
	if (port >= SPX5_PORTS)
		return;

	if (!test_bit(port, sparx5->bridge_mask))
		return;

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list) {
		if (mact_entry->vid == vid &&
		    ether_addr_equal(mac, mact_entry->mac)) {
			found = true;
			mact_entry->flags |= MAC_ENT_ALIVE;
			if (mact_entry->port != port) {
				dev_warn(sparx5->dev, "Entry move: %d -> %d\n",
					 mact_entry->port, port);
				mact_entry->port = port;
				mact_entry->flags |= MAC_ENT_MOVED;
			}
			/* Entry handled */
			break;
		}
	}
	mutex_unlock(&sparx5->mact_lock);

	if (found && !(mact_entry->flags & MAC_ENT_MOVED))
		/* Present, not moved */
		return;

	if (!found) {
		/* Entry not found - now add */
		mact_entry = alloc_mact_entry(sparx5, mac, vid, port);
		if (!mact_entry)
			return;

		mact_entry->flags |= MAC_ENT_ALIVE;
		mutex_lock(&sparx5->mact_lock);
		list_add_tail(&mact_entry->list, &sparx5->mact_entries);
		mutex_unlock(&sparx5->mact_lock);
	}

	/* New or moved entry - notify bridge */
	sparx5_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
				  mac, vid, sparx5->ports[port]->ndev,
				  true);
}

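/* Periodic work keeping the SW shadow list in sync with the HW MAC table:
 * clear the ALIVE/MOVED flags, walk the complete HW table with
 * FIND_SMALLEST, and finally age out (and report to the bridge) SW entries
 * that are neither alive in HW nor locked.
 */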
void sparx5_mact_pull_work(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct sparx5 *sparx5 = container_of(del_work, struct sparx5,
					     mact_work);
	struct sparx5_mact_entry *mact_entry, *tmp;
	unsigned char mac[ETH_ALEN];
	u32 cfg2;
	u16 vid;
	int ret;

	/* Reset MAC entry flags */
	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry(mact_entry, &sparx5->mact_entries, list)
		mact_entry->flags &= MAC_ENT_LOCK;
	mutex_unlock(&sparx5->mact_lock);

	/* Main MAC address processing loop */
	vid = 0;
	memset(mac, 0, sizeof(mac));
	do {
		mutex_lock(&sparx5->lock);
		sparx5_mact_select(sparx5, mac, vid);
		spx5_wr(LRN_SCAN_NEXT_CFG_SCAN_NEXT_UNTIL_FOUND_ENA_SET(1),
			sparx5, LRN_SCAN_NEXT_CFG);
		spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET
			(MAC_CMD_FIND_SMALLEST) |
			LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
			sparx5, LRN_COMMON_ACCESS_CTRL);
		ret = sparx5_mact_wait_for_completion(sparx5);
		if (ret == 0)
			ret = sparx5_mact_get(sparx5, mac, &vid, &cfg2);
		mutex_unlock(&sparx5->lock);
		if (ret == 0)
			sparx5_mact_handle_entry(sparx5, mac, vid, cfg2);
	} while (ret == 0);

	mutex_lock(&sparx5->mact_lock);
	list_for_each_entry_safe(mact_entry, tmp, &sparx5->mact_entries,
				 list) {
		/* If the entry is in HW or permanent, then skip */
		if (mact_entry->flags & (MAC_ENT_ALIVE | MAC_ENT_LOCK))
			continue;

		sparx5_fdb_call_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
					  mact_entry->mac, mact_entry->vid,
					  sparx5->ports[mact_entry->port]->ndev,
					  true);

		list_del(&mact_entry->list);
		devm_kfree(sparx5->dev, mact_entry);
	}
	mutex_unlock(&sparx5->mact_lock);

	queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
			   SPX5_MACT_PULL_DELAY);
}

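/* Configure the HW auto-ageing period. The requested time is converted to
 * 10 ms units and the period is halved; with one-bit ageing an entry is
 * only removed after two consecutive ageing scans without activity, so this
 * should make the total ageing time match the requested value.
 */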
void sparx5_set_ageing(struct sparx5 *sparx5, int msecs)
{
	int value = max(1, msecs / 10); /* unit 10 ms */

	spx5_rmw(LRN_AUTOAGE_CFG_UNIT_SIZE_SET(2) | /* 10 ms */
		 LRN_AUTOAGE_CFG_PERIOD_VAL_SET(value / 2), /* one bit ageing */
		 LRN_AUTOAGE_CFG_UNIT_SIZE |
		 LRN_AUTOAGE_CFG_PERIOD_VAL,
		 sparx5,
		 LRN_AUTOAGE_CFG(0));
}

void sparx5_mact_init(struct sparx5 *sparx5)
{
	mutex_init(&sparx5->lock);

	/* Flush MAC table */
	spx5_wr(LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_CMD_SET(MAC_CMD_CLEAR_ALL) |
		LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT_SET(1),
		sparx5, LRN_COMMON_ACCESS_CTRL);

	if (sparx5_mact_wait_for_completion(sparx5) != 0)
		dev_warn(sparx5->dev, "MAC flush error\n");

	sparx5_set_ageing(sparx5, BR_DEFAULT_AGEING_TIME / HZ * 1000);
}
504