// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *
 */
7
8 #include <linux/etherdevice.h>
9 #include <linux/types.h>
10 #include <linux/regmap.h>
11
12 #include "icssg_prueth.h"
13
#define ICSSG_NUM_CLASSIFIERS	16
#define ICSSG_NUM_FT1_SLOTS	8
#define ICSSG_NUM_FT3_SLOTS	16

#define ICSSG_NUM_CLASSIFIERS_IN_USE	5

/* Filter 1 - FT1 */
#define FT1_NUM_SLOTS	8
#define FT1_SLOT_SIZE	0x10	/* bytes */

/* offsets from FT1 slot base i.e. slot 1 start */
#define FT1_DA0		0x0
#define FT1_DA1		0x4
#define FT1_DA0_MASK	0x8
#define FT1_DA1_MASK	0xc

/* Register offset of FT1 field @reg in slot @n of PRU @slice.
 * The parameter was previously misspelled "slize"; the expansion always
 * used "slice", which only compiled because every caller happened to pass
 * a variable of that exact name.
 */
#define FT1_N_REG(slice, n, reg) \
	(offs[slice].ft1_slot_base + FT1_SLOT_SIZE * (n) + (reg))

#define FT1_LEN_MASK		GENMASK(19, 16)
#define FT1_LEN_SHIFT		16
#define FT1_LEN(len)		(((len) << FT1_LEN_SHIFT) & FT1_LEN_MASK)
#define FT1_START_MASK		GENMASK(14, 0)
#define FT1_START(start)	((start) & FT1_START_MASK)
#define FT1_MATCH_SLOT(n)	(GENMASK(23, 16) & (BIT(n) << 16))

/* FT1 config type */
enum ft1_cfg_type {
	FT1_CFG_TYPE_DISABLED = 0,
	FT1_CFG_TYPE_EQ,
	FT1_CFG_TYPE_GT,
	FT1_CFG_TYPE_LT,
};

/* 2 config bits per FT1 slot in the ft1_cfg register */
#define FT1_CFG_SHIFT(n)	(2 * (n))
#define FT1_CFG_MASK(n)		(0x3 << FT1_CFG_SHIFT((n)))
50
/* Filter 3 - FT3 */
#define FT3_NUM_SLOTS	16
#define FT3_SLOT_SIZE	0x20	/* bytes */

/* offsets from FT3 slot n's base */
#define FT3_START		0
#define FT3_START_AUTO		0x4
#define FT3_START_OFFSET	0x8
#define FT3_JUMP_OFFSET		0xc
#define FT3_LEN			0x10
#define FT3_CFG			0x14
#define FT3_T			0x18
#define FT3_T_MASK		0x1c

/* Register offset of FT3 field @reg in slot @n of PRU @slice.
 * Parameter renamed from the misspelled "slize" to "slice" to match the
 * name actually used in the expansion.
 */
#define FT3_N_REG(slice, n, reg) \
	(offs[slice].ft3_slot_base + FT3_SLOT_SIZE * (n) + (reg))
67
/* offsets from rx_class n's base */
#define RX_CLASS_AND_EN		0
#define RX_CLASS_OR_EN		0x4
#define RX_CLASS_NUM_SLOTS	16
#define RX_CLASS_EN_SIZE	0x8	/* bytes */

#define RX_CLASS_N_REG(slice, n, reg) \
	(offs[slice].rx_class_base + RX_CLASS_EN_SIZE * (n) + (reg))

/* RX Class Gates */
#define RX_CLASS_GATES_SIZE	0x4	/* bytes */

#define RX_CLASS_GATES_N_REG(slice, n) \
	(offs[slice].rx_class_gates_base + RX_CLASS_GATES_SIZE * (n))

#define RX_CLASS_GATES_ALLOW_MASK	BIT(6)
#define RX_CLASS_GATES_RAW_MASK		BIT(5)
#define RX_CLASS_GATES_PHASE_MASK	BIT(4)

/* RX Class traffic data matching bits */
#define RX_CLASS_FT_UC			BIT(31)
#define RX_CLASS_FT_MC			BIT(30)
#define RX_CLASS_FT_BC			BIT(29)
#define RX_CLASS_FT_FW			BIT(28)
#define RX_CLASS_FT_RCV			BIT(27)
#define RX_CLASS_FT_VLAN		BIT(26)
#define RX_CLASS_FT_DA_P		BIT(25)
#define RX_CLASS_FT_DA_I		BIT(24)
#define RX_CLASS_FT_FT1_MATCH_MASK	GENMASK(23, 16)
#define RX_CLASS_FT_FT1_MATCH_SHIFT	16
#define RX_CLASS_FT_FT3_MATCH_MASK	GENMASK(15, 0)
#define RX_CLASS_FT_FT3_MATCH_SHIFT	0

#define RX_CLASS_FT_FT1_MATCH(slot) \
	((BIT(slot) << RX_CLASS_FT_FT1_MATCH_SHIFT) & \
	RX_CLASS_FT_FT1_MATCH_MASK)

/* RX class type */
enum rx_class_sel_type {
	RX_CLASS_SEL_TYPE_OR = 0,
	RX_CLASS_SEL_TYPE_AND = 1,
	RX_CLASS_SEL_TYPE_OR_AND_AND = 2,
	RX_CLASS_SEL_TYPE_OR_OR_AND = 3,
};

/* Duplicate FT1_CFG_SHIFT()/FT1_CFG_MASK() definitions removed here; they
 * are defined once earlier in this file, next to enum ft1_cfg_type.
 */

/* 2 select-type bits per classifier in rx_class_cfg1 */
#define RX_CLASS_SEL_SHIFT(n)	(2 * (n))
#define RX_CLASS_SEL_MASK(n)	(0x3 << RX_CLASS_SEL_SHIFT((n)))

#define ICSSG_CFG_OFFSET	0
#define MAC_INTERFACE_0		0x18
#define MAC_INTERFACE_1		0x1c

#define ICSSG_CFG_RX_L2_G_EN	BIT(2)
124
/* Per-PRU-slice register offsets into the MII_G_RT block.
 *
 * Field order is load-bearing: the offs[] table below initializes these
 * positionally. Fields marked "not referenced in this file" are presumably
 * used by other parts of the driver — confirm before removing.
 */
struct miig_rt_offsets {
	u32 mac0;		/* port MAC address, bytes 0-3 */
	u32 mac1;		/* port MAC address, bytes 4-5 */
	u32 ft1_start_len;	/* FT1 compare window: start offset + length */
	u32 ft1_cfg;		/* FT1 per-slot match type, 2 bits per slot */
	u32 ft1_slot_base;	/* base of FT1 DA/mask slot registers */
	u32 ft3_slot_base;	/* base of FT3 slot registers */
	u32 ft3_p_base;		/* not referenced in this file */
	u32 ft_rx_ptr;		/* not referenced in this file */
	u32 rx_class_base;	/* base of per-classifier AND/OR enable regs */
	u32 rx_class_cfg1;	/* classifier select type, 2 bits per class */
	u32 rx_class_cfg2;	/* cleared by class disable/default paths */
	u32 rx_class_gates_base; /* base of per-classifier gate registers */
	u32 rx_green;		/* not referenced in this file */
	u32 rx_rate_cfg_base;	/* not referenced in this file */
	u32 rx_rate_src_sel0;	/* not referenced in this file */
	u32 rx_rate_src_sel1;	/* not referenced in this file */
	u32 tx_rate_cfg_base;	/* not referenced in this file */
	u32 stat_base;		/* not referenced in this file */
	u32 tx_hsr_tag;		/* not referenced in this file */
	u32 tx_hsr_seq;		/* not referenced in this file */
	u32 tx_vlan_type;	/* not referenced in this file */
	u32 tx_vlan_ins;	/* not referenced in this file */
};
150
151 /* These are the offset values for miig_rt_offsets registers */
152 static const struct miig_rt_offsets offs[] = {
153 /* PRU0 */
154 {
155 0x8,
156 0xc,
157 0x80,
158 0x84,
159 0x88,
160 0x108,
161 0x308,
162 0x408,
163 0x40c,
164 0x48c,
165 0x490,
166 0x494,
167 0x4d4,
168 0x4e4,
169 0x504,
170 0x508,
171 0x50c,
172 0x54c,
173 0x63c,
174 0x640,
175 0x644,
176 0x648,
177 },
178 /* PRU1 */
179 {
180 0x10,
181 0x14,
182 0x64c,
183 0x650,
184 0x654,
185 0x6d4,
186 0x8d4,
187 0x9d4,
188 0x9d8,
189 0xa58,
190 0xa5c,
191 0xa60,
192 0xaa0,
193 0xab0,
194 0xad0,
195 0xad4,
196 0xad8,
197 0xb18,
198 0xc08,
199 0xc0c,
200 0xc10,
201 0xc14,
202 },
203 };
204
/* Program the FT1 compare window for @slice: frame byte offset @start,
 * match length @len bytes.
 */
static void rx_class_ft1_set_start_len(struct regmap *miig_rt, int slice,
				       u16 start, u8 len)
{
	regmap_write(miig_rt, offs[slice].ft1_start_len,
		     FT1_LEN(len) | FT1_START(start));
}
214
rx_class_ft1_set_da(struct regmap * miig_rt,int slice,int n,const u8 * addr)215 static void rx_class_ft1_set_da(struct regmap *miig_rt, int slice,
216 int n, const u8 *addr)
217 {
218 u32 offset;
219
220 offset = FT1_N_REG(slice, n, FT1_DA0);
221 regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
222 addr[2] << 16 | addr[3] << 24));
223 offset = FT1_N_REG(slice, n, FT1_DA1);
224 regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
225 }
226
rx_class_ft1_set_da_mask(struct regmap * miig_rt,int slice,int n,const u8 * addr)227 static void rx_class_ft1_set_da_mask(struct regmap *miig_rt, int slice,
228 int n, const u8 *addr)
229 {
230 u32 offset;
231
232 offset = FT1_N_REG(slice, n, FT1_DA0_MASK);
233 regmap_write(miig_rt, offset, (u32)(addr[0] | addr[1] << 8 |
234 addr[2] << 16 | addr[3] << 24));
235 offset = FT1_N_REG(slice, n, FT1_DA1_MASK);
236 regmap_write(miig_rt, offset, (u32)(addr[4] | addr[5] << 8));
237 }
238
rx_class_ft1_cfg_set_type(struct regmap * miig_rt,int slice,int n,enum ft1_cfg_type type)239 static void rx_class_ft1_cfg_set_type(struct regmap *miig_rt, int slice, int n,
240 enum ft1_cfg_type type)
241 {
242 u32 offset;
243
244 offset = offs[slice].ft1_cfg;
245 regmap_update_bits(miig_rt, offset, FT1_CFG_MASK(n),
246 type << FT1_CFG_SHIFT(n));
247 }
248
rx_class_sel_set_type(struct regmap * miig_rt,int slice,int n,enum rx_class_sel_type type)249 static void rx_class_sel_set_type(struct regmap *miig_rt, int slice, int n,
250 enum rx_class_sel_type type)
251 {
252 u32 offset;
253
254 offset = offs[slice].rx_class_cfg1;
255 regmap_update_bits(miig_rt, offset, RX_CLASS_SEL_MASK(n),
256 type << RX_CLASS_SEL_SHIFT(n));
257 }
258
/* Write classifier @n's AND-enable register. */
static void rx_class_set_and(struct regmap *miig_rt, int slice, int n,
			     u32 data)
{
	regmap_write(miig_rt, RX_CLASS_N_REG(slice, n, RX_CLASS_AND_EN), data);
}
267
/* Write classifier @n's OR-enable register. */
static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
			    u32 data)
{
	regmap_write(miig_rt, RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN), data);
}
276
rx_class_get_or(struct regmap * miig_rt,int slice,int n)277 static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
278 {
279 u32 offset, val;
280
281 offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
282 regmap_read(miig_rt, offset, &val);
283
284 return val;
285 }
286
icssg_class_set_host_mac_addr(struct regmap * miig_rt,const u8 * mac)287 void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
288 {
289 regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
290 mac[2] << 16 | mac[3] << 24));
291 regmap_write(miig_rt, MAC_INTERFACE_1, (u32)(mac[4] | mac[5] << 8));
292 }
293 EXPORT_SYMBOL_GPL(icssg_class_set_host_mac_addr);
294
/* Program slice @slice's MAC address registers with the 6-byte address
 * @mac, packed little-endian.
 */
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
{
	u32 lo = mac[0] | mac[1] << 8 | mac[2] << 16 | mac[3] << 24;
	u32 hi = mac[4] | mac[5] << 8;

	regmap_write(miig_rt, offs[slice].mac0, lo);
	regmap_write(miig_rt, offs[slice].mac1, hi);
}
EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr);
302
icssg_class_ft1_add_mcast(struct regmap * miig_rt,int slice,int slot,const u8 * addr,const u8 * mask)303 static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
304 int slot, const u8 *addr, const u8 *mask)
305 {
306 u32 val;
307 int i;
308
309 WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
310
311 rx_class_ft1_set_da(miig_rt, slice, slot, addr);
312 rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
313 rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
314
315 /* Enable the FT1 slot in OR enable for all classifiers */
316 for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
317 val = rx_class_get_or(miig_rt, slice, i);
318 val |= RX_CLASS_FT_FT1_MATCH(slot);
319 rx_class_set_or(miig_rt, slice, i, val);
320 }
321 }
322
323 /* disable all RX traffic */
icssg_class_disable(struct regmap * miig_rt,int slice)324 void icssg_class_disable(struct regmap *miig_rt, int slice)
325 {
326 u32 data, offset;
327 int n;
328
329 /* Enable RX_L2_G */
330 regmap_update_bits(miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_RX_L2_G_EN,
331 ICSSG_CFG_RX_L2_G_EN);
332
333 for (n = 0; n < ICSSG_NUM_CLASSIFIERS; n++) {
334 /* AND_EN = 0 */
335 rx_class_set_and(miig_rt, slice, n, 0);
336 /* OR_EN = 0 */
337 rx_class_set_or(miig_rt, slice, n, 0);
338
339 /* set CFG1 to OR */
340 rx_class_sel_set_type(miig_rt, slice, n, RX_CLASS_SEL_TYPE_OR);
341
342 /* configure gate */
343 offset = RX_CLASS_GATES_N_REG(slice, n);
344 regmap_read(miig_rt, offset, &data);
345 /* clear class_raw so we go through filters */
346 data &= ~RX_CLASS_GATES_RAW_MASK;
347 /* set allow and phase mask */
348 data |= RX_CLASS_GATES_ALLOW_MASK | RX_CLASS_GATES_PHASE_MASK;
349 regmap_write(miig_rt, offset, data);
350 }
351
352 /* FT1 Disabled */
353 for (n = 0; n < ICSSG_NUM_FT1_SLOTS; n++) {
354 const u8 addr[] = { 0, 0, 0, 0, 0, 0, };
355
356 rx_class_ft1_cfg_set_type(miig_rt, slice, n,
357 FT1_CFG_TYPE_DISABLED);
358 rx_class_ft1_set_da(miig_rt, slice, n, addr);
359 rx_class_ft1_set_da_mask(miig_rt, slice, n, addr);
360 }
361
362 /* clear CFG2 */
363 regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
364 }
365 EXPORT_SYMBOL_GPL(icssg_class_disable);
366
/* Restore a slice's default classification: accept broadcast and frames
 * matching the port MAC (MAC_PRU), optionally all multicast. SR1.0 hardware
 * programs ICSSG_NUM_CLASSIFIERS_IN_USE classifiers, otherwise only the
 * first classifier is used.
 */
void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
			 bool is_sr1)
{
	int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1;
	u32 match;
	int i;

	/* defaults */
	icssg_class_disable(miig_rt, slice);

	/* match on Broadcast or MAC_PRU address; add multicast if requested */
	match = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
	if (allmulti)
		match |= RX_CLASS_FT_MC;

	/* Setup Classifier */
	for (i = 0; i < num_classifiers; i++) {
		rx_class_set_or(miig_rt, slice, i, match);

		/* set CFG1 for OR_OR_AND for classifier */
		rx_class_sel_set_type(miig_rt, slice, i,
				      RX_CLASS_SEL_TYPE_OR_OR_AND);
	}

	/* clear CFG2 */
	regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
EXPORT_SYMBOL_GPL(icssg_class_default);
397
icssg_class_promiscuous_sr1(struct regmap * miig_rt,int slice)398 void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
399 {
400 u32 data, offset;
401 int n;
402
403 /* defaults */
404 icssg_class_disable(miig_rt, slice);
405
406 /* Setup Classifier */
407 for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
408 /* set RAW_MASK to bypass filters */
409 offset = RX_CLASS_GATES_N_REG(slice, n);
410 regmap_read(miig_rt, offset, &data);
411 data |= RX_CLASS_GATES_RAW_MASK;
412 regmap_write(miig_rt, offset, data);
413 }
414 }
415 EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1);
416
/* Program FT1 slots for the multicast addresses subscribed on @ndev
 * (SR1.0 silicon). Slots 0 and 1 hold well-known multicast ranges; the
 * device MC list fills slots 2..FT1_NUM_SLOTS-1. If the list does not
 * fit, the classifier falls back to allmulti via icssg_class_default().
 */
void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
			       struct net_device *ndev)
{
	/* 0xff in the last byte wildcards the final octet (the "XX" in the
	 * reserved ranges below) — presumably set mask bits mean "ignore";
	 * confirm against the MII_G_RT FT1 register description.
	 */
	u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff };
	struct netdev_hw_addr *ha;
	int slot = 2;

	/* FT1 window: compare 6 bytes from frame offset 0, i.e. the DA */
	rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
	/* reserve first 2 slots for
	 * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
	 * 2) 01-00-5e-00-00-XX Local Network Control Block
	 * (224.0.0.0 - 224.0.0.255 (224.0.0/24))
	 */
	icssg_class_ft1_add_mcast(miig_rt, slice, 0,
				  eth_reserved_addr_base, mask_addr);
	icssg_class_ft1_add_mcast(miig_rt, slice, 1,
				  eth_ipv4_mcast_addr_base, mask_addr);
	/* exact 6-byte match for individually subscribed addresses */
	mask_addr[5] = 0;
	netdev_for_each_mc_addr(ha, ndev) {
		/* skip addresses matching reserved slots */
		if (!memcmp(eth_reserved_addr_base, ha->addr, 5) ||
		    !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) {
			netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
			continue;
		}

		if (slot >= FT1_NUM_SLOTS) {
			netdev_dbg(ndev,
				   "can't add more than %d MC addresses, enabling allmulti\n",
				   FT1_NUM_SLOTS);
			icssg_class_default(miig_rt, slice, 1, true);
			break;
		}

		netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
		icssg_class_ft1_add_mcast(miig_rt, slice, slot,
					  ha->addr, mask_addr);
		slot++;
	}
}
EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1);
458
459 /* required for SAV check */
icssg_ft1_set_mac_addr(struct regmap * miig_rt,int slice,u8 * mac_addr)460 void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
461 {
462 const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
463
464 rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
465 rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
466 rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
467 rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
468 }
469 EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr);
470