xref: /linux/drivers/net/ethernet/ti/icssg/icssg_classifier.c (revision 955abe0a1b41de5ba61fe4cd614ebc123084d499)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Texas Instruments ICSSG Ethernet Driver
3  *
4  * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
5  *
6  */
7 
8 #include <linux/etherdevice.h>
9 #include <linux/types.h>
10 #include <linux/regmap.h>
11 
12 #include "icssg_prueth.h"
13 
14 #define ICSSG_NUM_CLASSIFIERS	16
15 #define ICSSG_NUM_FT1_SLOTS	8
16 #define ICSSG_NUM_FT3_SLOTS	16
17 
18 #define ICSSG_NUM_CLASSIFIERS_IN_USE	5
19 
20 /* Filter 1 - FT1 */
21 #define FT1_NUM_SLOTS	8
22 #define FT1_SLOT_SIZE	0x10	/* bytes */
23 
24 /* offsets from FT1 slot base i.e. slot 1 start */
25 #define FT1_DA0		0x0
26 #define FT1_DA1		0x4
27 #define FT1_DA0_MASK	0x8
28 #define FT1_DA1_MASK	0xc
29 
/* Address of register 'reg' within FT1 slot 'n' of the given slice.
 * Note: the parameter was previously misspelled "slize"; the body then
 * captured the caller's local variable named 'slice' instead of the
 * macro argument — it worked only by accident. Fixed to use the argument.
 */
#define FT1_N_REG(slice, n, reg)	\
	(offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg))
32 
33 #define FT1_LEN_MASK		GENMASK(19, 16)
34 #define FT1_LEN_SHIFT		16
35 #define FT1_LEN(len)		(((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK)
36 #define FT1_START_MASK		GENMASK(14, 0)
37 #define FT1_START(start)	((start) & FT1_START_MASK)
38 #define FT1_MATCH_SLOT(n)	(GENMASK(23, 16) & (BIT(n) << 16))
39 
40 /* FT1 config type */
41 enum ft1_cfg_type {
42 	FT1_CFG_TYPE_DISABLED = 0,
43 	FT1_CFG_TYPE_EQ,
44 	FT1_CFG_TYPE_GT,
45 	FT1_CFG_TYPE_LT,
46 };
47 
48 #define FT1_CFG_SHIFT(n)	(2 * (n))
49 #define FT1_CFG_MASK(n)		(0x3 << FT1_CFG_SHIFT((n)))
50 
51 /* Filter 3 -  FT3 */
52 #define FT3_NUM_SLOTS	16
53 #define FT3_SLOT_SIZE	0x20	/* bytes */
54 
55 /* offsets from FT3 slot n's base */
56 #define FT3_START		0
57 #define FT3_START_AUTO		0x4
58 #define FT3_START_OFFSET	0x8
59 #define FT3_JUMP_OFFSET		0xc
60 #define FT3_LEN			0x10
61 #define FT3_CFG			0x14
62 #define FT3_T			0x18
63 #define FT3_T_MASK		0x1c
64 
/* Address of register 'reg' within FT3 slot 'n' of the given slice.
 * Note: the parameter was previously misspelled "slize"; the body then
 * captured the caller's local variable named 'slice' instead of the
 * macro argument — fixed to use the argument.
 */
#define FT3_N_REG(slice, n, reg)	\
	(offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg))
67 
68 /* offsets from rx_class n's base */
69 #define RX_CLASS_AND_EN		0
70 #define RX_CLASS_OR_EN		0x4
71 #define RX_CLASS_NUM_SLOTS	16
72 #define RX_CLASS_EN_SIZE	0x8	/* bytes */
73 
74 #define RX_CLASS_N_REG(slice, n, reg)	\
75 	(offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg))
76 
77 /* RX Class Gates */
78 #define RX_CLASS_GATES_SIZE	0x4	/* bytes */
79 
80 #define RX_CLASS_GATES_N_REG(slice, n)	\
81 	(offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n))
82 
83 #define RX_CLASS_GATES_ALLOW_MASK	BIT(6)
84 #define RX_CLASS_GATES_RAW_MASK		BIT(5)
85 #define RX_CLASS_GATES_PHASE_MASK	BIT(4)
86 
87 /* RX Class traffic data matching bits */
88 #define RX_CLASS_FT_UC				BIT(31)
89 #define RX_CLASS_FT_MC			BIT(30)
90 #define RX_CLASS_FT_BC			BIT(29)
91 #define RX_CLASS_FT_FW			BIT(28)
92 #define RX_CLASS_FT_RCV			BIT(27)
93 #define RX_CLASS_FT_VLAN		BIT(26)
94 #define RX_CLASS_FT_DA_P		BIT(25)
95 #define RX_CLASS_FT_DA_I		BIT(24)
96 #define RX_CLASS_FT_FT1_MATCH_MASK	GENMASK(23, 16)
97 #define RX_CLASS_FT_FT1_MATCH_SHIFT	16
98 #define RX_CLASS_FT_FT3_MATCH_MASK	GENMASK(15, 0)
99 #define RX_CLASS_FT_FT3_MATCH_SHIFT	0
100 
101 #define RX_CLASS_FT_FT1_MATCH(slot)	\
102 	((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & \
103 	RX_CLASS_FT_FT1_MATCH_MASK)
104 
105 /* RX class type */
106 enum rx_class_sel_type {
107 	RX_CLASS_SEL_TYPE_OR = 0,
108 	RX_CLASS_SEL_TYPE_AND = 1,
109 	RX_CLASS_SEL_TYPE_OR_AND_AND = 2,
110 	RX_CLASS_SEL_TYPE_OR_OR_AND = 3,
111 };
112 
/* NOTE: duplicate FT1_CFG_SHIFT/FT1_CFG_MASK definitions removed here;
 * the canonical definitions live above, next to enum ft1_cfg_type.
 */

#define RX_CLASS_SEL_SHIFT(n)	(2 * (n))
#define RX_CLASS_SEL_MASK(n)	(0x3 << RX_CLASS_SEL_SHIFT((n)))
118 
119 #define ICSSG_CFG_OFFSET	0
120 #define MAC_INTERFACE_0		0x18
121 #define MAC_INTERFACE_1		0x1c
122 
123 #define ICSSG_CFG_RX_L2_G_EN	BIT(2)
124 
125 /* These are register offsets per PRU */
126 struct miig_rt_offsets {
127 	u32 mac0;
128 	u32 mac1;
129 	u32 ft1_start_len;
130 	u32 ft1_cfg;
131 	u32 ft1_slot_base;
132 	u32 ft3_slot_base;
133 	u32 ft3_p_base;
134 	u32 ft_rx_ptr;
135 	u32 rx_class_base;
136 	u32 rx_class_cfg1;
137 	u32 rx_class_cfg2;
138 	u32 rx_class_gates_base;
139 	u32 rx_green;
140 	u32 rx_rate_cfg_base;
141 	u32 rx_rate_src_sel0;
142 	u32 rx_rate_src_sel1;
143 	u32 tx_rate_cfg_base;
144 	u32 stat_base;
145 	u32 tx_hsr_tag;
146 	u32 tx_hsr_seq;
147 	u32 tx_vlan_type;
148 	u32 tx_vlan_ins;
149 };
150 
151 /* These are the offset values for miig_rt_offsets registers */
152 static const struct miig_rt_offsets offs[] = {
153 	/* PRU0 */
154 	{
155 		0x8,
156 		0xc,
157 		0x80,
158 		0x84,
159 		0x88,
160 		0x108,
161 		0x308,
162 		0x408,
163 		0x40c,
164 		0x48c,
165 		0x490,
166 		0x494,
167 		0x4d4,
168 		0x4e4,
169 		0x504,
170 		0x508,
171 		0x50c,
172 		0x54c,
173 		0x63c,
174 		0x640,
175 		0x644,
176 		0x648,
177 	},
178 	/* PRU1 */
179 	{
180 		0x10,
181 		0x14,
182 		0x64c,
183 		0x650,
184 		0x654,
185 		0x6d4,
186 		0x8d4,
187 		0x9d4,
188 		0x9d8,
189 		0xa58,
190 		0xa5c,
191 		0xa60,
192 		0xaa0,
193 		0xab0,
194 		0xad0,
195 		0xad4,
196 		0xad8,
197 		0xb18,
198 		0xc08,
199 		0xc0c,
200 		0xc10,
201 		0xc14,
202 	},
203 };
204 
205 static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice,
206 				       u16 start, u8 len)
207 {
208 	u32 offset, val;
209 
210 	offset = offs[slice].ft1_start_len;
211 	val = FT1_LEN(len) | FT1_START(start);
212 	regmap_write(miig_rt, offset, val);
213 }
214 
215 static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice,
216 				int n, const u8 *addr)
217 {
218 	u32 offset;
219 
220 	offset = FT1_N_REG(slice, n, FT1_DA0);
221 	regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
222 		     addr[2] << 16 | addr[3] << 24));
223 	offset = FT1_N_REG(slice, n, FT1_DA1);
224 	regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
225 }
226 
227 static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice,
228 				     int n, const u8 *addr)
229 {
230 	u32 offset;
231 
232 	offset = FT1_N_REG(slice, n, FT1_DA0_MASK);
233 	regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
234 		     addr[2] << 16 | addr[3] << 24));
235 	offset = FT1_N_REG(slice, n, FT1_DA1_MASK);
236 	regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
237 }
238 
239 static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n,
240 				      enum ft1_cfg_type type)
241 {
242 	u32 offset;
243 
244 	offset = offs[slice].ft1_cfg;
245 	regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n),
246 			   type << FT1_CFG_SHIFT(n));
247 }
248 
249 static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n,
250 				  enum rx_class_sel_type type)
251 {
252 	u32 offset;
253 
254 	offset = offs[slice].rx_class_cfg1;
255 	regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n),
256 			   type << RX_CLASS_SEL_SHIFT(n));
257 }
258 
259 static void rx_class_set_and(struct regmap *miig_rt, int slice, int n,
260 			     u32 data)
261 {
262 	u32 offset;
263 
264 	offset = RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN);
265 	regmap_write(miig_rt, offset, data);
266 }
267 
268 static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
269 			    u32 data)
270 {
271 	u32 offset;
272 
273 	offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
274 	regmap_write(miig_rt, offset, data);
275 }
276 
277 static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
278 {
279 	u32 offset, val;
280 
281 	offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
282 	regmap_read(miig_rt, offset, &val);
283 
284 	return val;
285 }
286 
287 void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
288 {
289 	regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
290 		     mac[2] << 16 | mac[3] << 24));
291 	regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
292 }
293 
294 void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
295 {
296 	regmap_write(miig_rt, offs[slice].mac0, (u32)(mac[0] | mac[1] << 8 |
297 		     mac[2] << 16 | mac[3] << 24));
298 	regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
299 }
300 EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr);
301 
302 static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
303 				      int slot, const u8 *addr, const u8 *mask)
304 {
305 	u32 val;
306 	int i;
307 
308 	WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
309 
310 	rx_class_ft1_set_da(miig_rt, slice, slot, addr);
311 	rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
312 	rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
313 
314 	/* Enable the FT1 slot in OR enable for all classifiers */
315 	for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
316 		val = rx_class_get_or(miig_rt, slice, i);
317 		val |= RX_CLASS_FT_FT1_MATCH(slot);
318 		rx_class_set_or(miig_rt, slice, i, val);
319 	}
320 }
321 
322 /* disable all RX traffic */
/* disable all RX traffic
 *
 * Resets every classifier and FT1 filter of a slice to a known state in
 * which nothing matches: AND/OR enables cleared, select type OR (so an
 * empty OR bitmap means "no match"), gates set to allow-with-phase but
 * not raw, all FT1 slots disabled with zeroed DA/mask, and CFG2 cleared.
 * NOTE(review): exact register-write ordering presumably matters to the
 * ICSSG firmware — kept verbatim.
 */
void icssg_class_disable(struct regmap *miig_rt, int slice)
{
	u32 data, offset;
	int n;

	/* Enable RX_L2_G */
	regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN,
			   ICSSG_CFG_RX_L2_G_EN);

	for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) {
		/* AND_EN = 0 */
		rx_class_set_and(miig_rt, slice, n, 0);
		/* OR_EN = 0 */
		rx_class_set_or(miig_rt, slice, n, 0);

		/* set CFG1 to OR */
		rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR);

		/* configure gate */
		offset = RX_CLASS_GATES_N_REG(slice, n);
		regmap_read(miig_rt, offset, &data);
		/* clear class_raw so we go through filters */
		data &= ~RX_CLASS_GATES_RAW_MASK;
		/* set allow and phase mask */
		data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK;
		regmap_write(miig_rt, offset, data);
	}

	/* FT1 Disabled: compare type off and pattern/mask zeroed per slot */
	for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) {
		const u8 addr[] = { 0, 0, 0, 0, 0, 0, };

		rx_class_ft1_cfg_set_type(miig_rt, slice, n,
					  FT1_CFG_TYPE_DISABLED);
		rx_class_ft1_set_da(miig_rt, slice, n, addr);
		rx_class_ft1_set_da_mask(miig_rt, slice, n, addr);
	}

	/* clear CFG2 */
	regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
EXPORT_SYMBOL_GPL(icssg_class_disable);
365 
366 void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
367 			 bool is_sr1)
368 {
369 	int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1;
370 	u32 data;
371 	int n;
372 
373 	/* defaults */
374 	icssg_class_disable(miig_rt, slice);
375 
376 	/* Setup Classifier */
377 	for (n = 0; n < num_classifiers; n++) {
378 		/* match on Broadcast or MAC_PRU address */
379 		data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
380 
381 		/* multicast */
382 		if (allmulti)
383 			data |= RX_CLASS_FT_MC;
384 
385 		rx_class_set_or(miig_rt, slice, n, data);
386 
387 		/* set CFG1 for OR_OR_AND for classifier */
388 		rx_class_sel_set_type(miig_rt, slice, n,
389 				      RX_CLASS_SEL_TYPE_OR_OR_AND);
390 	}
391 
392 	/* clear CFG2 */
393 	regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
394 }
395 EXPORT_SYMBOL_GPL(icssg_class_default);
396 
397 void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
398 {
399 	u32 data, offset;
400 	int n;
401 
402 	/* defaults */
403 	icssg_class_disable(miig_rt, slice);
404 
405 	/* Setup Classifier */
406 	for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
407 		/* set RAW_MASK to bypass filters */
408 		offset = RX_CLASS_GATES_N_REG(slice, n);
409 		regmap_read(miig_rt, offset, &data);
410 		data |= RX_CLASS_GATES_RAW_MASK;
411 		regmap_write(miig_rt, offset, data);
412 	}
413 }
414 EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1);
415 
/* Program the netdev's multicast list into the FT1 slots of a slice
 * (SR1 firmware). Slots 0 and 1 are reserved for two well-known ranges
 * (matched on the first 5 octets, last octet wildcarded via the 0xff
 * mask byte); remaining addresses get exact-match slots 2..7. If the
 * list exceeds the 8 slots, fall back to allmulti classification.
 */
void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
			       struct net_device *ndev)
{
	/* mask byte 0xff = "don't care" for the last address octet */
	u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff };
	struct netdev_hw_addr *ha;
	int slot = 2;	/* first non-reserved slot */

	/* compare 6 bytes starting at frame offset 0, i.e. the DA */
	rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
	/* reserve first 2 slots for
	 *	1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
	 *	2) 01-00-5e-00-00-XX Local Network Control Block
	 *			      (224.0.0.0 - 224.0.0.255  (224.0.0/24))
	 */
	icssg_class_ft1_add_mcast(miig_rt, slice, 0,
				  eth_reserved_addr_base, mask_addr);
	icssg_class_ft1_add_mcast(miig_rt, slice, 1,
				  eth_ipv4_mcast_addr_base, mask_addr);
	/* remaining slots use exact 6-byte matches */
	mask_addr[5] = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		/* skip addresses matching reserved slots
		 * (5-byte prefix compare mirrors the wildcarded last octet)
		 */
		if (!memcmp(eth_reserved_addr_base, ha->addr, 5) ||
		    !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) {
			netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
			continue;
		}

		if (slot >= FT1_NUM_SLOTS) {
			netdev_dbg(ndev,
				   "can't add more than %d MC addresses, enabling allmulti\n",
				   FT1_NUM_SLOTS);
			icssg_class_default(miig_rt, slice, 1, true);
			break;
		}

		netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
		icssg_class_ft1_add_mcast(miig_rt, slice, slot,
					  ha->addr, mask_addr);
		slot++;
	}
}
EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1);
457 
458 /* required for SAV check */
459 void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
460 {
461 	const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
462 
463 	rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
464 	rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
465 	rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
466 	rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
467 }
468 EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr);
469