// SPDX-License-Identifier: GPL-2.0
/* ICSSG Ethernet driver
 *
 * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
 */

#include <linux/iopoll.h>
#include <linux/regmap.h>
#include <uapi/linux/if_ether.h>
#include "icssg_config.h"
#include "icssg_prueth.h"
#include "icssg_switch_map.h"
#include "icssg_mii_rt.h"

/* TX IPG values to be set for 100M link speed. These values are
 * in ocp_clk cycles, so they need to change if ocp_clk is changed for a
 * specific h/w design.
 */

/* IPG is in core_clk cycles */
#define MII_RT_TX_IPG_100M	0x17
#define MII_RT_TX_IPG_1G	0xb
#define MII_RT_TX_IPG_100M_SR1	0x166
#define MII_RT_TX_IPG_1G_SR1	0x1a

#define ICSSG_QUEUES_MAX		64
#define ICSSG_QUEUE_OFFSET		0xd00
#define ICSSG_QUEUE_PEEK_OFFSET		0xe00
#define ICSSG_QUEUE_CNT_OFFSET		0xe40
#define ICSSG_QUEUE_RESET_OFFSET	0xf40

#define ICSSG_NUM_TX_QUEUES	8

#define RECYCLE_Q_SLICE0	16
#define RECYCLE_Q_SLICE1	17

#define ICSSG_NUM_OTHER_QUEUES	5	/* port, host and special queues */

#define PORT_HI_Q_SLICE0	32
#define PORT_LO_Q_SLICE0	33
#define HOST_HI_Q_SLICE0	34
#define HOST_LO_Q_SLICE0	35
#define HOST_SPL_Q_SLICE0	40	/* Special Queue */

#define PORT_HI_Q_SLICE1	36
#define PORT_LO_Q_SLICE1	37
#define HOST_HI_Q_SLICE1	38
#define HOST_LO_Q_SLICE1	39
#define HOST_SPL_Q_SLICE1	41	/* Special Queue */

#define MII_RXCFG_DEFAULT	(PRUSS_MII_RT_RXCFG_RX_ENABLE | \
				 PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \
				 PRUSS_MII_RT_RXCFG_RX_L2_EN | \
				 PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS)

#define MII_TXCFG_DEFAULT	(PRUSS_MII_RT_TXCFG_TX_ENABLE | \
				 PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \
				 PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \
				 PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN)

#define ICSSG_CFG_DEFAULT	(ICSSG_CFG_TX_L1_EN | \
				 ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \
				 ICSSG_CFG_TX_PRU_EN | \
				 ICSSG_CFG_SGMII_MODE)

#define FDB_GEN_CFG1		0x60
#define SMEM_VLAN_OFFSET	8
#define SMEM_VLAN_OFFSET_MASK	GENMASK(25, 8)

#define FDB_GEN_CFG2		0x64
#define FDB_VLAN_EN		BIT(6)
#define FDB_HOST_EN		BIT(2)
#define FDB_PRU1_EN		BIT(1)
#define FDB_PRU0_EN		BIT(0)
#define FDB_EN_ALL		(FDB_PRU0_EN | FDB_PRU1_EN | \
				 FDB_HOST_EN | FDB_VLAN_EN)

/**
 * struct map - ICSSG Queue Map
 * @queue: Queue number
 * @pd_addr_start: Packet descriptor queue reserved memory
 * @flags: Flags
 * @special: Indicates whether this queue is a special queue or not
 */
struct map {
	int queue;
	u32 pd_addr_start;
	u32 flags;
	bool special;
};

/* Hardware queue map for ICSSG */
static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
	{
		{ PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 },
		{ PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 },
		{ HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 },
		{ HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 },
		{ HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 },
	},
	{
		{ PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 },
		{ PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 },
		{ HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 },
		{ HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 },
		{ HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 },
	},
};

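/* MII_RT TX configuration used in switch/HSR firmware offload mode.
 * Note that TX_MUX_SEL is chosen for the opposite slice/PHY-mode
 * combination compared with icssg_config_mii_init() below, so that each
 * MII port can transmit the other PRU's TX data in the offload path.
 */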
static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int mii = prueth_emac_slice(emac);
	u32 txcfg_reg, pcnt_reg, txcfg;
	struct regmap *mii_rt;

	mii_rt = prueth->mii_rt;

	txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
					 PRUSS_MII_RT_TXCFG1;
	pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
					PRUSS_MII_RT_RX_PCNT1;

	txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
		PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
		PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;

	if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;

	regmap_write(mii_rt, txcfg_reg, txcfg);
	regmap_write(mii_rt, pcnt_reg, 0x1);
}

static void icssg_config_mii_init(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 txcfg, txcfg_reg, pcnt_reg;
	struct regmap *mii_rt;

	mii_rt = prueth->mii_rt;

	txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
					   PRUSS_MII_RT_TXCFG1;
	pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
					  PRUSS_MII_RT_RX_PCNT1;

	txcfg = MII_TXCFG_DEFAULT;

	/* In MII mode the TX lines are swapped inside the ICSSG, so the
	 * TX_MUX_SEL configuration also needs to be swapped compared to
	 * RGMII mode.
	 */
	if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;

	regmap_write(mii_rt, txcfg_reg, txcfg);
	regmap_write(mii_rt, pcnt_reg, 0x1);
}

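/* Reset the slice's hardware queues and rebuild the packet descriptor
 * rings: descriptors are written to shared memory (SMEM) and their
 * addresses are pushed onto the corresponding hardware queues listed in
 * hwq_map[].
 */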
static void icssg_miig_queues_init(struct prueth *prueth, int slice)
{
	struct regmap *miig_rt = prueth->miig_rt;
	void __iomem *smem = prueth->shram.va;
	u8 pd[ICSSG_SPECIAL_PD_SIZE];
	int queue = 0, i, j;
	u32 *pdword;

	/* reset hwqueues */
	if (slice)
		queue = ICSSG_NUM_TX_QUEUES;

	for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) {
		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
		queue++;
	}

	queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0;
	regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);

	for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) {
		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET,
			     hwq_map[slice][i].queue);
	}

	/* initialize packet descriptors in SMEM and push them to hwqueues */

	pdword = (u32 *)pd;
	for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) {
		const struct map *mp;
		int pd_size, num_pds;
		u32 pdaddr;

		mp = &hwq_map[slice][j];
		if (mp->special) {
			pd_size = ICSSG_SPECIAL_PD_SIZE;
			num_pds = ICSSG_NUM_SPECIAL_PDS;
		} else {
			pd_size = ICSSG_NORMAL_PD_SIZE;
			num_pds = ICSSG_NUM_NORMAL_PDS;
		}

		for (i = 0; i < num_pds; i++) {
			memset(pd, 0, pd_size);

			pdword[0] &= ICSSG_FLAG_MASK;
			pdword[0] |= mp->flags;
			pdaddr = mp->pd_addr_start + i * pd_size;

			memcpy_toio(smem + pdaddr, pd, pd_size);
			queue = mp->queue;
			regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue,
				     pdaddr);
		}
	}
}

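/* Program the TX Inter-Packet Gap for the current link speed. The IPG
 * values are in clock cycles and differ between SR1.0 and later silicon;
 * for SR1.0 at 10M the firmware hardcodes the IPG, so nothing is written.
 */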
void icssg_config_ipg(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 ipg;

	switch (emac->speed) {
	case SPEED_1000:
		ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
		break;
	case SPEED_100:
		ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
		break;
	case SPEED_10:
		/* Firmware hardcodes IPG for SR1.0 */
		if (emac->is_sr1)
			return;
		/* IPG for 10M is the same as for 100M */
		ipg = MII_RT_TX_IPG_100M;
		break;
	default:
		/* Other link speeds are not supported */
		netdev_err(emac->ndev, "Unsupported link speed\n");
		return;
	}

	icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
EXPORT_SYMBOL_GPL(icssg_config_ipg);

static void emac_r30_cmd_init(struct prueth_emac *emac)
{
	struct icssg_r30_cmd __iomem *p;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	for (i = 0; i < 4; i++)
		writel(EMAC_NONE, &p->cmd[i]);
}

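/* Check whether the previous R30 command has been consumed: completion
 * is detected when all four command words read back as EMAC_NONE.
 * Returns 1 when done, 0 otherwise; used as the poll condition in
 * icssg_set_port_state().
 */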
static int emac_r30_is_done(struct prueth_emac *emac)
{
	const struct icssg_r30_cmd __iomem *p;
	u32 cmd;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	for (i = 0; i < 4; i++) {
		cmd = readl(&p->cmd[i]);
		if (cmd != EMAC_NONE)
			return 0;
	}

	return 1;
}

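/* Carve up this slice's share of MSMC/SRAM for switch/HSR offload mode:
 * forwarding buffer pools, local-injection (host TX) pools, the express
 * and pre-emptible host RX queues, and finally a scratch area used for
 * packets that will be dropped. The buffer base address must be 64KB
 * aligned.
 */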
static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
{
	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
	struct icssg_rxq_ctx __iomem *rxq_ctx;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 addr;
	int i;

	addr = lower_32_bits(prueth->msmcram.pa);
	if (slice) {
		if (prueth->pdata.banked_ms_ram)
			addr += MSMC_RAM_BANK_SIZE;
		else
			addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
	}

	if (addr % SZ_64K) {
		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
		return -EINVAL;
	}

	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;

	/* Configure buffer pools for forwarding buffers
	 * - used by firmware to store packets to be forwarded to the other port
	 * - 8 total pools per slice
	 */
	for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
		writel(addr, &bpool_cfg[i].addr);
		writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
		addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
	}

	/* Configure buffer pools for Local Injection buffers
	 * - used by firmware to store packets received from the host core
	 * - 16 total pools per slice
	 */
	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;

		/* The driver only uses the first 4 queues per PRU, so only
		 * initialize buffers for them
		 */
		if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
		    < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
			writel(addr, &bpool_cfg[cfg_idx].addr);
			writel(PRUETH_SW_LI_BUF_POOL_SIZE,
			       &bpool_cfg[cfg_idx].len);
			addr += PRUETH_SW_LI_BUF_POOL_SIZE;
		} else {
			writel(0, &bpool_cfg[cfg_idx].addr);
			writel(0, &bpool_cfg[cfg_idx].len);
		}
	}

	/* Express RX buffer queue
	 * - used by firmware to store express packets to be transmitted
	 *   to the host core
	 */
	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
	writel(addr, &rxq_ctx->end);

	/* Pre-emptible RX buffer queue
	 * - used by firmware to store preemptible packets to be transmitted
	 *   to the host core
	 */
	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
	writel(addr, &rxq_ctx->end);

	/* Set pointer for default dropped packet write
	 * - used by firmware to temporarily store packet to be dropped
	 */
	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
	writel(addr, &rxq_ctx->start[0]);

	return 0;
}

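/* MSMC/SRAM buffer layout for plain EMAC (dual-MAC) mode. The forwarding
 * pools are zeroed since there is no port-to-port forwarding; only the
 * local-injection pools actually used by this port are sized, followed
 * by the express and pre-emptible host RX queues and the drop scratch
 * area.
 */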
static int prueth_emac_buffer_setup(struct prueth_emac *emac)
{
	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
	struct icssg_rxq_ctx __iomem *rxq_ctx;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 addr;
	int i;

	addr = lower_32_bits(prueth->msmcram.pa);
	if (slice) {
		if (prueth->pdata.banked_ms_ram)
			addr += MSMC_RAM_BANK_SIZE;
		else
			addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
	}

	if (addr % SZ_64K) {
		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
		return -EINVAL;
	}

	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;

	/* Configure buffer pools for forwarding buffers
	 * - in MAC mode there is no forwarding, so initialize all pools to 0
	 * - 8 total pools per slice
	 */
	for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
		writel(0, &bpool_cfg[i].addr);
		writel(0, &bpool_cfg[i].len);
	}

	/* Configure buffer pools for Local Injection buffers
	 * - used by firmware to store packets received from the host core
	 * - 16 total pools per slice
	 */
	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;

		/* In EMAC mode only the first 4 buffers are used, as one
		 * slice needs to handle only one port
		 */
		if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
			writel(addr, &bpool_cfg[cfg_idx].addr);
			writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
			       &bpool_cfg[cfg_idx].len);
			addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
		} else {
			writel(0, &bpool_cfg[cfg_idx].addr);
			writel(0, &bpool_cfg[cfg_idx].len);
		}
	}

	/* Express RX buffer queue
	 * - used by firmware to store express packets to be transmitted
	 *   to the host core
	 */
	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
	writel(addr, &rxq_ctx->end);

	/* Pre-emptible RX buffer queue
	 * - used by firmware to store preemptible packets to be transmitted
	 *   to the host core
	 */
	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
	writel(addr, &rxq_ctx->end);

	/* Set pointer for default dropped packet write
	 * - used by firmware to temporarily store packet to be dropped
	 */
	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
	writel(addr, &rxq_ctx->start[0]);

	return 0;
}

void icssg_init_emac_mode(struct prueth *prueth)
{
	/* When the device is configured as a bridge and it is being brought
	 * back to EMAC mode, the host MAC address has to be set to 0.
	 */
	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
	int i;
	u8 mac[ETH_ALEN] = { 0 };

	/* Set VLAN TABLE address base */
	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
			   addr << SMEM_VLAN_OFFSET);
	/* Set enable VLAN aware mode, and FDBs for all PRUs */
	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
	for (i = 0; i < SZ_4K - 1; i++) {
		prueth->vlan_tbl[i].fid = i;
		prueth->vlan_tbl[i].fid_c1 = 0;
	}
	/* Clear host MAC address */
	icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
EXPORT_SYMBOL_GPL(icssg_init_emac_mode);

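/* Prepare the FDB/VLAN hardware for switch or HSR firmware offload mode:
 * point the VLAN table at shared memory, enable VLAN-aware FDB lookups
 * for both PRUs and the host port, reset the per-FID entries, program
 * the bridge MAC as the host address and set the default PVID on the
 * host port.
 */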
void icssg_init_fw_offload_mode(struct prueth *prueth)
{
	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
	int i;

	/* Set VLAN TABLE address base */
	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
			   addr << SMEM_VLAN_OFFSET);
	/* Set enable VLAN aware mode, and FDBs for all PRUs */
	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
	for (i = 0; i < SZ_4K - 1; i++) {
		prueth->vlan_tbl[i].fid = i;
		prueth->vlan_tbl[i].fid_c1 = 0;
	}

	if (prueth->hw_bridge_dev)
		icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
	icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);

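/* Per-slice ICSSG configuration: clear the firmware config area, reset
 * the hardware queues, program the MII_RT/ICSSG registers for the PHY
 * interface mode, set GPI mode and XFR shift for the PRUs, publish the
 * RX flow IDs to the firmware and set up the MSMC buffer pools for the
 * selected mode (EMAC vs. switch/HSR offload).
 */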
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
	void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
	struct icssg_flow_cfg __iomem *flow_cfg;
	int ret;

	memset_io(config, 0, TAS_GATE_MASK_LIST0);
	icssg_miig_queues_init(prueth, slice);

	emac->speed = SPEED_1000;
	emac->duplex = DUPLEX_FULL;
	if (!phy_interface_mode_is_rgmii(emac->phy_if)) {
		emac->speed = SPEED_100;
		emac->duplex = DUPLEX_FULL;
	}
	regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
			   ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
	icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_config_mii_init_fw_offload(emac);
	else
		icssg_config_mii_init(emac);
	icssg_config_ipg(emac);
	icssg_update_rgmii_cfg(prueth->miig_rt, emac);

	/* set GPI mode */
	pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice],
			  PRUSS_GPI_MODE_MII);

	/* enable XFR shift for PRU and RTU */
	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_PRU, true);
	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_RTU, true);

	/* set C28 to 0x100 */
	pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8);
	pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8);
	pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8);

	flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	writew(0, &flow_cfg->mgm_base_flow);
	writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
	writeb(0, config + QUEUE_NUM_UNTAGGED);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		ret = prueth_fw_offload_buffer_setup(emac);
	else
		ret = prueth_emac_buffer_setup(emac);
	if (ret)
		return ret;

	emac_r30_cmd_init(emac);

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_config);

/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
	{{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}},	/* EMAC_PORT_DISABLE */
	{{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}},	/* EMAC_PORT_BLOCK */
	{{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}},	/* EMAC_PORT_FORWARD */
	{{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}},	/* EMAC_PORT_FORWARD_WO_LEARNING */
	{{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* ACCEPT ALL */
	{{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* ACCEPT TAGGED */
	{{0xfffc0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* ACCEPT UNTAGGED and PRIO */
	{{EMAC_NONE, 0xffff0020, EMAC_NONE, EMAC_NONE}},	/* TAS Trigger List change */
	{{EMAC_NONE, 0xdfff1000, EMAC_NONE, EMAC_NONE}},	/* TAS set state ENABLE */
	{{EMAC_NONE, 0xefff2000, EMAC_NONE, EMAC_NONE}},	/* TAS set state RESET */
	{{EMAC_NONE, 0xcfff0000, EMAC_NONE, EMAC_NONE}},	/* TAS set state DISABLE */
	{{EMAC_NONE, EMAC_NONE, 0xffff0400, EMAC_NONE}},	/* UC flooding ENABLE */
	{{EMAC_NONE, EMAC_NONE, 0xfbff0000, EMAC_NONE}},	/* UC flooding DISABLE */
	{{EMAC_NONE, EMAC_NONE, 0xffff0800, EMAC_NONE}},	/* MC flooding ENABLE */
	{{EMAC_NONE, EMAC_NONE, 0xf7ff0000, EMAC_NONE}},	/* MC flooding DISABLE */
	{{EMAC_NONE, 0xffff4000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx ENABLE */
	{{EMAC_NONE, 0xbfff0000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx DISABLE */
	{{0xffff0010, EMAC_NONE, 0xffff0010, EMAC_NONE}},	/* VLAN AWARE */
	{{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}},	/* VLAN UNAWARE */
	{{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* HSR_RX_OFFLOAD_ENABLE */
	{{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}		/* HSR_RX_OFFLOAD_DISABLE */
};

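/* Issue one of the r30 commands above to the firmware and wait for it
 * to be acknowledged. The four command words are written to the slice's
 * DMEM command area and completion is detected when all of them read
 * back as EMAC_NONE; cmd_lock serialises callers so only one command is
 * in flight at a time.
 */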
int icssg_set_port_state(struct prueth_emac *emac,
			 enum icssg_port_state_cmd cmd)
{
	struct icssg_r30_cmd __iomem *p;
	int ret = -ETIMEDOUT;
	int done = 0;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) {
		netdev_err(emac->ndev, "invalid port command\n");
		return -EINVAL;
	}

	/* only one command at a time is allowed to the firmware */
	mutex_lock(&emac->cmd_lock);

	for (i = 0; i < 4; i++)
		writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]);

	/* wait for done */
	ret = read_poll_timeout(emac_r30_is_done, done, done == 1,
				1000, 10000, false, emac);

	if (ret == -ETIMEDOUT)
		netdev_err(emac->ndev, "timeout waiting for command done\n");

	mutex_unlock(&emac->cmd_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icssg_set_port_state);

void icssg_config_half_duplex(struct prueth_emac *emac)
{
	u32 val;

	if (!emac->half_duplex)
		return;

	val = get_random_u32();
	writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_config_half_duplex);

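/* Report the negotiated link speed and duplex to the firmware by
 * writing a FW_LINK_SPEED_* code (with the half-duplex bit OR'ed in if
 * needed) into the port's DMEM.
 */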
void icssg_config_set_speed(struct prueth_emac *emac)
{
	u8 fw_speed;

	switch (emac->speed) {
	case SPEED_1000:
		fw_speed = FW_LINK_SPEED_1G;
		break;
	case SPEED_100:
		fw_speed = FW_LINK_SPEED_100M;
		break;
	case SPEED_10:
		fw_speed = FW_LINK_SPEED_10M;
		break;
	default:
		/* Other link speeds are not supported */
		netdev_err(emac->ndev, "Unsupported link speed\n");
		return;
	}

	if (emac->duplex == DUPLEX_HALF)
		fw_speed |= FW_LINK_SPEED_HD;

	writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_config_set_speed);

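/* Send a management/FDB command to the firmware over the hardware
 * queues: pop a free buffer from the command pool, copy the command into
 * shared memory (skipping the first 4 firmware-owned bytes), push it to
 * the command queue and then poll the response queue for the reply
 * before returning the buffer to the pool.
 */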
int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
		       struct mgmt_cmd_rsp *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr, ret;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
	if (addr < 0)
		return addr;

	/* First 4 bytes have FW owned buffer linking info which should
	 * not be touched
	 */
	memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
	ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
				2000, 20000000, false, prueth, slice == 0 ?
				ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
	if (ret) {
		netdev_err(emac->ndev, "Timed out sending HWQ message\n");
		return ret;
	}

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* Return the buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);

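/* Fill in the common fields of an FDB management command: the FDB slot
 * is derived from a CRC32 over the MAC address and FID (VID), and the
 * slice number is OR'ed into bit 4 of the command parameter.
 */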
static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
			    const unsigned char *addr, u8 fid, int cmd)
{
	int slice = prueth_emac_slice(emac);
	u8 mac_fid[ETH_ALEN + 2];
	u16 fdb_slot;

	ether_addr_copy(mac_fid, addr);

	/* 1-1 VID-FID mapping is already set up */
	mac_fid[ETH_ALEN] = fid;
	mac_fid[ETH_ALEN + 1] = 0;

	fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;

	fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
	fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
	fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
	fdb_cmd->param = cmd;
	fdb_cmd->param |= (slice << 4);

	memcpy(&fdb_cmd->cmd_args[0], addr, 4);
	memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
	fdb_cmd->cmd_args[2] = fdb_slot;

	netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
}

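/* Add or delete an FDB entry for addr/vid through the firmware
 * management interface. The firmware responds with status 1 on success,
 * which is mapped to 0 here; anything else becomes -EINVAL.
 */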
int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
		      u8 vid, u8 fid_c2, bool add)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	struct mgmt_cmd fdb_cmd = { 0 };
	u8 fid = vid;
	int ret;

	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);

	fid_c2 |= ICSSG_FDB_ENTRY_VALID;
	fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
	if (fdb_cmd_rsp.status == 1)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(icssg_fdb_add_del);

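/* Look up addr/vid in the firmware FDB. Up to four candidate slots are
 * read back from the FDB command buffer in DMEM; on a match the FID_C2
 * flags of the entry are returned (with the VALID bit masked off),
 * otherwise 0.
 */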
int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
		     u8 vid)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	struct mgmt_cmd fdb_cmd = { 0 };
	struct prueth_fdb_slot *slot;
	u8 fid = vid;
	int ret, i;

	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);

	fdb_cmd.cmd_args[1] |= fid << 16;

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);

	slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
	for (i = 0; i < 4; i++) {
		if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
			return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
		slot++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_fdb_lookup);

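/* Update the FID_C1 byte of a VLAN table entry in shared memory: set or
 * clear the port membership and tagging bits for port_mask, and clear
 * the tagging bits for ports in untag_mask when adding. Protected by
 * vtbl_lock.
 */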
void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
		       u8 untag_mask, bool add)
{
	struct prueth *prueth = emac->prueth;
	struct prueth_vlan_tbl *tbl;
	u8 fid_c1;

	tbl = prueth->vlan_tbl;
	spin_lock(&prueth->vtbl_lock);
	fid_c1 = tbl[vid].fid_c1;

	/* FID_C1: bit0..2 port membership mask,
	 * bit3..5 tagging mask for each port
	 * bit6 Stream VID (not handled currently)
	 * bit7 MC flood (not handled currently)
	 */
	if (add) {
		fid_c1 |= (port_mask | port_mask << 3);
		fid_c1 &= ~(untag_mask << 3);
	} else {
		fid_c1 &= ~(port_mask | port_mask << 3);
	}

	tbl[vid].fid_c1 = fid_c1;
	spin_unlock(&prueth->vtbl_lock);
}
EXPORT_SYMBOL_GPL(icssg_vtbl_modify);

u16 icssg_get_pvid(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	u32 pvid;

	if (emac->port_id == PRUETH_PORT_MII0)
		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
	else
		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);

	pvid = pvid >> 24;

	return pvid;
}
EXPORT_SYMBOL_GPL(icssg_get_pvid);

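/* Program the default VLAN (PVID) for a port. The 32-bit value combines
 * the 802.1Q TPID with the VID (only 256 VLANs are supported, hence the
 * & 0xff mask) and is stored big-endian in shared memory.
 */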
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
{
	u32 pvid;

	/* only 256 VLANs are supported */
	pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));

	if (port == PRUETH_PORT_MII0)
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
	else if (port == PRUETH_PORT_MII1)
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
	else
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);

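/* Notify the firmware that the RX flow ID base has been updated by
 * issuing an ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW management command for
 * this slice. Returns 0 when the firmware acknowledges with status 1.
 */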
int emac_fdb_flow_id_updated(struct prueth_emac *emac)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	int slice = prueth_emac_slice(emac);
	struct mgmt_cmd fdb_cmd = { 0 };
	int ret;

	fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
	fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
	fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
	fdb_cmd.param = 0;

	fdb_cmd.param |= (slice << 4);
	fdb_cmd.cmd_args[0] = 0;

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
	return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);