xref: /linux/drivers/net/ethernet/ti/icssg/icssg_config.c (revision c2c2ccfd4ba72718266a56f3ecc34c989cb5b7a0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* ICSSG Ethernet driver
3  *
4  * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
5  */
6 
7 #include <linux/iopoll.h>
8 #include <linux/regmap.h>
9 #include <uapi/linux/if_ether.h>
10 #include "icssg_config.h"
11 #include "icssg_prueth.h"
12 #include "icssg_switch_map.h"
13 #include "icssg_mii_rt.h"
14 
15 /* TX IPG values to be set for 100M and 1G link speeds. These values are
16  * in ocp_clk cycles, so they need to change if ocp_clk is changed for a
17  * specific h/w design.
18  */
19 
20 /* IPG is in core_clk cycles */
21 #define MII_RT_TX_IPG_100M	0x17
22 #define MII_RT_TX_IPG_1G	0xb
23 #define MII_RT_TX_IPG_100M_SR1	0x166
24 #define MII_RT_TX_IPG_1G_SR1	0x1a
25 
26 #define	ICSSG_QUEUES_MAX		64
27 #define	ICSSG_QUEUE_OFFSET		0xd00
28 #define	ICSSG_QUEUE_PEEK_OFFSET		0xe00
29 #define	ICSSG_QUEUE_CNT_OFFSET		0xe40
30 #define	ICSSG_QUEUE_RESET_OFFSET	0xf40
31 
32 #define	ICSSG_NUM_TX_QUEUES	8
33 
34 #define	RECYCLE_Q_SLICE0	16
35 #define	RECYCLE_Q_SLICE1	17
36 
37 #define	ICSSG_NUM_OTHER_QUEUES	5	/* port, host and special queues */
38 
39 #define	PORT_HI_Q_SLICE0	32
40 #define	PORT_LO_Q_SLICE0	33
41 #define	HOST_HI_Q_SLICE0	34
42 #define	HOST_LO_Q_SLICE0	35
43 #define	HOST_SPL_Q_SLICE0	40	/* Special Queue */
44 
45 #define	PORT_HI_Q_SLICE1	36
46 #define	PORT_LO_Q_SLICE1	37
47 #define	HOST_HI_Q_SLICE1	38
48 #define	HOST_LO_Q_SLICE1	39
49 #define	HOST_SPL_Q_SLICE1	41	/* Special Queue */
50 
51 #define MII_RXCFG_DEFAULT	(PRUSS_MII_RT_RXCFG_RX_ENABLE | \
52 				 PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \
53 				 PRUSS_MII_RT_RXCFG_RX_L2_EN | \
54 				 PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS)
55 
56 #define MII_TXCFG_DEFAULT	(PRUSS_MII_RT_TXCFG_TX_ENABLE | \
57 				 PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \
58 				 PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \
59 				 PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN)
60 
61 #define ICSSG_CFG_DEFAULT	(ICSSG_CFG_TX_L1_EN | \
62 				 ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \
63 				 ICSSG_CFG_TX_PRU_EN | \
64 				 ICSSG_CFG_SGMII_MODE)
65 
66 #define FDB_GEN_CFG1		0x60
67 #define SMEM_VLAN_OFFSET	8
68 #define SMEM_VLAN_OFFSET_MASK	GENMASK(25, 8)
69 #define FDB_HASH_SIZE_MASK	GENMASK(6, 3)
70 #define FDB_HASH_SIZE_SHIFT	3
71 #define FDB_HASH_SIZE		3
72 
73 #define FDB_GEN_CFG2		0x64
74 #define FDB_VLAN_EN		BIT(6)
75 #define FDB_HOST_EN		BIT(2)
76 #define FDB_PRU1_EN		BIT(1)
77 #define FDB_PRU0_EN		BIT(0)
78 #define FDB_EN_ALL		(FDB_PRU0_EN | FDB_PRU1_EN | \
79 				 FDB_HOST_EN | FDB_VLAN_EN)
80 
81 /**
82  * struct map - ICSSG Queue Map
83  * @queue: Queue number
84  * @pd_addr_start: Packet descriptor queue reserved memory
85  * @flags: Flags
86  * @special: Indicates whether this queue is a special queue or not
87  */
88 struct map {
89 	int queue;
90 	u32 pd_addr_start;
91 	u32 flags;
92 	bool special;
93 };
94 
95 /* Hardware queue map for ICSSG */
96 static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
97 	{
98 		{ PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 },
99 		{ PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 },
100 		{ HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 },
101 		{ HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 },
102 		{ HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 },
103 	},
104 	{
105 		{ PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 },
106 		{ PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 },
107 		{ HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 },
108 		{ HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 },
109 		{ HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 },
110 	},
111 };
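/* Each per-slice row above describes the port high/low and host high/low
 * descriptor queues plus one special queue.  @pd_addr_start is the SMEM base
 * where that queue's packet descriptors live, and @flags is OR'd into the
 * first word of every descriptor pushed to that queue (see
 * icssg_miig_queues_init() below).
 */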
112 
113 static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
114 {
115 	struct prueth *prueth = emac->prueth;
116 	int mii = prueth_emac_slice(emac);
117 	u32 txcfg_reg, pcnt_reg, txcfg;
118 	struct regmap *mii_rt;
119 
120 	mii_rt = prueth->mii_rt;
121 
122 	txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
123 				       PRUSS_MII_RT_TXCFG1;
124 	pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
125 				       PRUSS_MII_RT_RX_PCNT1;
126 
127 	txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
128 		PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
129 		PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;
130 
131 	if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
132 		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
133 	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
134 		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
135 
136 	regmap_write(mii_rt, txcfg_reg, txcfg);
137 	regmap_write(mii_rt, pcnt_reg, 0x1);
138 }
139 
140 static void icssg_config_mii_init(struct prueth_emac *emac)
141 {
142 	struct prueth *prueth = emac->prueth;
143 	int slice = prueth_emac_slice(emac);
144 	u32 txcfg, txcfg_reg, pcnt_reg;
145 	struct regmap *mii_rt;
146 
147 	mii_rt = prueth->mii_rt;
148 
149 	txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
150 				       PRUSS_MII_RT_TXCFG1;
151 	pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
152 				       PRUSS_MII_RT_RX_PCNT1;
153 
154 	txcfg = MII_TXCFG_DEFAULT;
155 
156 	/* In MII mode the TX lines are swapped inside the ICSSG, so the
157 	 * TX_MUX_SEL config also needs to be swapped compared to RGMII mode.
158 	 */
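	/* Resulting selection per the checks below: with an MII PHY the mux
	 * bit is set for slice 0 (ICSS_MII0); with RGMII (or any non-MII
	 * mode) it is set for slice 1 (ICSS_MII1) instead.
	 */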
159 	if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0)
160 		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
161 	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
162 		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
163 
164 	regmap_write(mii_rt, txcfg_reg, txcfg);
165 	regmap_write(mii_rt, pcnt_reg, 0x1);
166 }
167 
168 static void icssg_miig_queues_init(struct prueth *prueth, int slice)
169 {
170 	struct regmap *miig_rt = prueth->miig_rt;
171 	void __iomem *smem = prueth->shram.va;
172 	u8 pd[ICSSG_SPECIAL_PD_SIZE];
173 	int queue = 0, i, j;
174 	u32 *pdword;
175 
176 	/* reset hwqueues */
177 	if (slice)
178 		queue = ICSSG_NUM_TX_QUEUES;
179 
180 	for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) {
181 		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
182 		queue++;
183 	}
184 
185 	queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0;
186 	regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
187 
188 	for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) {
189 		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET,
190 			     hwq_map[slice][i].queue);
191 	}
192 
193 	/* initialize packet descriptors in SMEM */
194 	/* push packet descriptors to hwqueues */
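	/* For every host/port queue of this slice: zero a descriptor
	 * template, OR in the queue's flag bits, copy the descriptor into
	 * SMEM at pd_addr_start + i * pd_size, and push its SMEM address
	 * into the hardware queue through the ICSSG_QUEUE_OFFSET registers.
	 */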
195 
196 	pdword = (u32 *)pd;
197 	for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) {
198 		const struct map *mp;
199 		int pd_size, num_pds;
200 		u32 pdaddr;
201 
202 		mp = &hwq_map[slice][j];
203 		if (mp->special) {
204 			pd_size = ICSSG_SPECIAL_PD_SIZE;
205 			num_pds = ICSSG_NUM_SPECIAL_PDS;
206 		} else	{
207 			pd_size = ICSSG_NORMAL_PD_SIZE;
208 			num_pds = ICSSG_NUM_NORMAL_PDS;
209 		}
210 
211 		for (i = 0; i < num_pds; i++) {
212 			memset(pd, 0, pd_size);
213 
214 			pdword[0] &= ICSSG_FLAG_MASK;
215 			pdword[0] |= mp->flags;
216 			pdaddr = mp->pd_addr_start + i * pd_size;
217 
218 			memcpy_toio(smem + pdaddr, pd, pd_size);
219 			queue = mp->queue;
220 			regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue,
221 				     pdaddr);
222 		}
223 	}
224 }
225 
226 void icssg_config_ipg(struct prueth_emac *emac)
227 {
228 	struct prueth *prueth = emac->prueth;
229 	int slice = prueth_emac_slice(emac);
230 	u32 ipg;
231 
232 	switch (emac->speed) {
233 	case SPEED_1000:
234 		ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
235 		break;
236 	case SPEED_100:
237 		ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
238 		break;
239 	case SPEED_10:
240 		/* Firmware hardcodes IPG for SR1.0 */
241 		if (emac->is_sr1)
242 			return;
243 		/* IPG for 10M is the same as for 100M */
244 		ipg = MII_RT_TX_IPG_100M;
245 		break;
246 	default:
247 		/* Other link speeds are not supported */
248 		netdev_err(emac->ndev, "Unsupported link speed\n");
249 		return;
250 	}
251 
252 	icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
253 }
254 EXPORT_SYMBOL_GPL(icssg_config_ipg);
255 
256 static void emac_r30_cmd_init(struct prueth_emac *emac)
257 {
258 	struct icssg_r30_cmd __iomem *p;
259 	int i;
260 
261 	p = emac->dram.va + MGR_R30_CMD_OFFSET;
262 
263 	for (i = 0; i < 4; i++)
264 		writel(EMAC_NONE, &p->cmd[i]);
265 }
266 
267 static int emac_r30_is_done(struct prueth_emac *emac)
268 {
269 	const struct icssg_r30_cmd __iomem *p;
270 	u32 cmd;
271 	int i;
272 
273 	p = emac->dram.va + MGR_R30_CMD_OFFSET;
274 
275 	for (i = 0; i < 4; i++) {
276 		cmd = readl(&p->cmd[i]);
277 		if (cmd != EMAC_NONE)
278 			return 0;
279 	}
280 
281 	return 1;
282 }
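/* R30 command handshake: the host writes up to four command words into the
 * slice's DRAM at MGR_R30_CMD_OFFSET and the firmware is expected to
 * overwrite each word it has processed with EMAC_NONE.  emac_r30_is_done()
 * therefore reports completion once all four words read back as EMAC_NONE.
 */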
283 
284 static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
285 {
286 	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
287 	struct icssg_rxq_ctx __iomem *rxq_ctx;
288 	struct prueth *prueth = emac->prueth;
289 	int slice = prueth_emac_slice(emac);
290 	u32 addr;
291 	int i;
292 
293 	addr = lower_32_bits(prueth->msmcram.pa);
294 	if (slice) {
295 		if (prueth->pdata.banked_ms_ram)
296 			addr += MSMC_RAM_BANK_SIZE;
297 		else
298 			addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
299 	}
300 
301 	if (addr % SZ_64K) {
302 		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
303 		return -EINVAL;
304 	}
305 
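	/* Carve this slice's MSMC region up sequentially: forwarding pools,
	 * local-injection pools (only the pools actually used per port get a
	 * size, the rest are zeroed), then the express RX, pre-emptible RX
	 * and default-drop areas.  'addr' advances as each area is assigned.
	 */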
306 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
307 
308 	/* Configure buffer pools for forwarding buffers
309 	 * - used by firmware to store packets to be forwarded to the other port
310 	 * - 8 total pools per slice
311 	 */
312 	for (i = 0; i <  PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
313 		writel(addr, &bpool_cfg[i].addr);
314 		writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
315 		addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
316 	}
317 
318 	/* Configure buffer pools for Local Injection buffers
319 	 *  - used by firmware to store packets received from host core
320 	 *  - 16 total pools per slice
321 	 */
322 	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
323 		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
324 
325 		/* The driver only uses the first 4 queues per PRU,
326 		 * so only initialize buffers for them
327 		 */
328 		if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
329 			 < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
330 			writel(addr, &bpool_cfg[cfg_idx].addr);
331 			writel(PRUETH_SW_LI_BUF_POOL_SIZE,
332 			       &bpool_cfg[cfg_idx].len);
333 			addr += PRUETH_SW_LI_BUF_POOL_SIZE;
334 		} else {
335 			writel(0, &bpool_cfg[cfg_idx].addr);
336 			writel(0, &bpool_cfg[cfg_idx].len);
337 		}
338 	}
339 
340 	/* Express RX buffer queue
341 	 *  - used by firmware to store express packets to be transmitted
342 	 *    to the host core
343 	 */
344 	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
345 	for (i = 0; i < 3; i++)
346 		writel(addr, &rxq_ctx->start[i]);
347 
348 	addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
349 	writel(addr, &rxq_ctx->end);
350 
351 	/* Pre-emptible RX buffer queue
352 	 *  - used by firmware to store preemptible packets to be transmitted
353 	 *    to the host core
354 	 */
355 	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
356 	for (i = 0; i < 3; i++)
357 		writel(addr, &rxq_ctx->start[i]);
358 
359 	addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
360 	writel(addr, &rxq_ctx->end);
361 
362 	/* Set pointer for default dropped packet write
363 	 *  - used by firmware to temporarily store packet to be dropped
364 	 */
365 	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
366 	writel(addr, &rxq_ctx->start[0]);
367 
368 	return 0;
369 }
370 
371 static int prueth_emac_buffer_setup(struct prueth_emac *emac)
372 {
373 	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
374 	struct icssg_rxq_ctx __iomem *rxq_ctx;
375 	struct prueth *prueth = emac->prueth;
376 	int slice = prueth_emac_slice(emac);
377 	u32 addr;
378 	int i;
379 
380 	addr = lower_32_bits(prueth->msmcram.pa);
381 	if (slice) {
382 		if (prueth->pdata.banked_ms_ram)
383 			addr += MSMC_RAM_BANK_SIZE;
384 		else
385 			addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
386 	}
387 
388 	if (addr % SZ_64K) {
389 		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
390 		return -EINVAL;
391 	}
392 
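	/* Same sequential carve-up as the fw-offload variant above, except
	 * the forwarding pools are zeroed (no port-to-port forwarding in
	 * EMAC mode) and the EMAC pool/queue sizes are used instead.
	 */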
393 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
394 
395 	/* Configure buffer pools for forwarding buffers
396 	 *  - in MAC mode there is no forwarding, so initialize all pools to 0
397 	 *  - 8 total pools per slice
398 	 */
399 	for (i = 0; i <  PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
400 		writel(0, &bpool_cfg[i].addr);
401 		writel(0, &bpool_cfg[i].len);
402 	}
403 
404 	/* Configure buffer pools for Local Injection buffers
405 	 *  - used by firmware to store packets received from host core
406 	 *  - 16 total pools per slice
407 	 */
408 	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
409 	for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
410 		int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
411 
412 		/* In EMAC mode, only the first 4 buffer pools are used,
413 		 * as one slice needs to handle only one port
414 		 */
415 		if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
416 			writel(addr, &bpool_cfg[cfg_idx].addr);
417 			writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
418 			       &bpool_cfg[cfg_idx].len);
419 			addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
420 		} else {
421 			writel(0, &bpool_cfg[cfg_idx].addr);
422 			writel(0, &bpool_cfg[cfg_idx].len);
423 		}
424 	}
425 
426 	/* Express RX buffer queue
427 	 *  - used by firmware to store express packets to be transmitted
428 	 *    to host core
429 	 */
430 	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
431 	for (i = 0; i < 3; i++)
432 		writel(addr, &rxq_ctx->start[i]);
433 
434 	addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
435 	writel(addr, &rxq_ctx->end);
436 
437 	/* Pre-emptible RX buffer queue
438 	 *  - used by firmware to store preemptible packets to be transmitted
439 	 *    to host core
440 	 */
441 	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
442 	for (i = 0; i < 3; i++)
443 		writel(addr, &rxq_ctx->start[i]);
444 
445 	addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
446 	writel(addr, &rxq_ctx->end);
447 
448 	/* Set pointer for default dropped packet write
449 	 *  - used by firmware to temporarily store packet to be dropped
450 	 */
451 	rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
452 	writel(addr, &rxq_ctx->start[0]);
453 
454 	return 0;
455 }
456 
457 void icssg_init_emac_mode(struct prueth *prueth)
458 {
459 	/* When the device was configured as a bridge and is being brought
460 	 * back to EMAC mode, the host MAC address has to be cleared to 0.
461 	 */
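	/* The VLAN table below is still seeded with a 1:1 VID-to-FID mapping
	 * and the FDB is enabled for the host port and both PRUs; only
	 * VLAN-aware mode (FDB_VLAN_EN) is left disabled here, unlike in
	 * icssg_init_fw_offload_mode().
	 */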
462 	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
463 	int i;
464 	u8 mac[ETH_ALEN] = { 0 };
465 
466 	/* Set VLAN TABLE address base */
467 	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
468 			   addr <<  SMEM_VLAN_OFFSET);
469 	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
470 			   FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
471 	/* Enable FDBs for the host port and both PRUs (VLAN-aware mode stays off) */
472 	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
473 	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
474 			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
475 	for (i = 0; i < SZ_4K - 1; i++) {
476 		prueth->vlan_tbl[i].fid = i;
477 		prueth->vlan_tbl[i].fid_c1 = 0;
478 	}
479 	/* Clear host MAC address */
480 	icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
481 }
482 EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
483 
484 void icssg_init_fw_offload_mode(struct prueth *prueth)
485 {
486 	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
487 	int i;
488 
489 	/* Set VLAN TABLE address base */
490 	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
491 			   addr <<  SMEM_VLAN_OFFSET);
492 	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK,
493 			   FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT);
494 	/* Enable VLAN-aware mode and FDBs for the host port and both PRUs */
495 	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
496 	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
497 			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
498 	for (i = 0; i < SZ_4K - 1; i++) {
499 		prueth->vlan_tbl[i].fid = i;
500 		prueth->vlan_tbl[i].fid_c1 = 0;
501 	}
502 
503 	if (prueth->hw_bridge_dev)
504 		icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
505 	icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
506 }
507 EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
508 
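/* Per-slice one-time configuration: zeroes the config area in DRAM up to
 * TAS_GATE_MASK_LIST0, resets the MIIG hardware queues, programs the
 * ICSSG/MII-RT/RGMII settings, switches the PRU GPI mode and XFR shift,
 * sets the C28 constant table entries, publishes the PSI-L RX flow ID base,
 * sets up the buffer pools and finally initializes the R30 command area.
 */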
509 int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
510 {
511 	void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
512 	struct icssg_flow_cfg __iomem *flow_cfg;
513 	int ret;
514 
515 	memset_io(config, 0, TAS_GATE_MASK_LIST0);
516 	icssg_miig_queues_init(prueth, slice);
517 
518 	emac->speed = SPEED_1000;
519 	emac->duplex = DUPLEX_FULL;
520 	if (!phy_interface_mode_is_rgmii(emac->phy_if)) {
521 		emac->speed = SPEED_100;
522 		emac->duplex = DUPLEX_FULL;
523 	}
524 	regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
525 			   ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
526 	icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
527 	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
528 		icssg_config_mii_init_fw_offload(emac);
529 	else
530 		icssg_config_mii_init(emac);
531 	icssg_config_ipg(emac);
532 	icssg_update_rgmii_cfg(prueth->miig_rt, emac);
533 
534 	/* set GPI mode */
535 	pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice],
536 			  PRUSS_GPI_MODE_MII);
537 
538 	/* enable XFR shift for PRU and RTU */
539 	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_PRU, true);
540 	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_RTU, true);
541 
542 	/* set C28 to 0x100 */
543 	pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8);
544 	pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8);
545 	pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8);
546 
547 	flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
548 	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
549 	writew(0, &flow_cfg->mgm_base_flow);
550 	writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
551 	writeb(0, config + QUEUE_NUM_UNTAGGED);
552 
553 	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
554 		ret = prueth_fw_offload_buffer_setup(emac);
555 	else
556 		ret = prueth_emac_buffer_setup(emac);
557 	if (ret)
558 		return ret;
559 
560 	emac_r30_cmd_init(emac);
561 
562 	return 0;
563 }
564 EXPORT_SYMBOL_GPL(icssg_config);
565 
566 /* Bitmask for ICSSG r30 commands */
567 static const struct icssg_r30_cmd emac_r32_bitmask[] = {
568 	{{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}},	/* EMAC_PORT_DISABLE */
569 	{{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}},	/* EMAC_PORT_BLOCK */
570 	{{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}},	/* EMAC_PORT_FORWARD */
571 	{{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}},	/* EMAC_PORT_FORWARD_WO_LEARNING */
572 	{{0xffff0001, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},	/* ACCEPT ALL */
573 	{{0xfffe0002, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},	/* ACCEPT TAGGED */
574 	{{0xfffc0000, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},	/* ACCEPT UNTAGGED and PRIO */
575 	{{EMAC_NONE,  0xffff0020, EMAC_NONE, EMAC_NONE}},	/* TAS Trigger List change */
576 	{{EMAC_NONE,  0xdfff1000, EMAC_NONE, EMAC_NONE}},	/* TAS set state ENABLE*/
577 	{{EMAC_NONE,  0xefff2000, EMAC_NONE, EMAC_NONE}},	/* TAS set state RESET*/
578 	{{EMAC_NONE,  0xcfff0000, EMAC_NONE, EMAC_NONE}},	/* TAS set state DISABLE*/
579 	{{EMAC_NONE,  EMAC_NONE,  0xffff0400, EMAC_NONE}},	/* UC flooding ENABLE*/
580 	{{EMAC_NONE,  EMAC_NONE,  0xfbff0000, EMAC_NONE}},	/* UC flooding DISABLE*/
581 	{{EMAC_NONE,  EMAC_NONE,  0xffff0800, EMAC_NONE}},	/* MC flooding ENABLE*/
582 	{{EMAC_NONE,  EMAC_NONE,  0xf7ff0000, EMAC_NONE}},	/* MC flooding DISABLE*/
583 	{{EMAC_NONE,  0xffff4000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx ENABLE*/
584 	{{EMAC_NONE,  0xbfff0000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx DISABLE*/
585 	{{0xffff0010,  EMAC_NONE, 0xffff0010, EMAC_NONE}},	/* VLAN AWARE */
586 	{{0xffef0000,  EMAC_NONE, 0xffef0000, EMAC_NONE}},	/* VLAN UNAWARE */
587 	{{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* HSR_RX_OFFLOAD_ENABLE */
588 	{{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}		/* HSR_RX_OFFLOAD_DISABLE */
589 };
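/* Each word above appears to encode a 16-bit AND mask in the upper half and
 * a 16-bit OR value in the lower half, which the firmware applies to its R30
 * state; EMAC_NONE leaves that word untouched.  This reading is inferred
 * from the value patterns only and is not confirmed by firmware docs here.
 */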
590 
591 int icssg_set_port_state(struct prueth_emac *emac,
592 			 enum icssg_port_state_cmd cmd)
593 {
594 	struct icssg_r30_cmd __iomem *p;
595 	int ret = -ETIMEDOUT;
596 	int done = 0;
597 	int i;
598 
599 	p = emac->dram.va + MGR_R30_CMD_OFFSET;
600 
601 	if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) {
602 		netdev_err(emac->ndev, "invalid port command\n");
603 		return -EINVAL;
604 	}
605 
606 	/* only one command at a time is allowed to the firmware */
607 	mutex_lock(&emac->cmd_lock);
608 
609 	for (i = 0; i < 4; i++)
610 		writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]);
611 
612 	/* wait for done */
613 	ret = read_poll_timeout(emac_r30_is_done, done, done == 1,
614 				1000, 10000, false, emac);
615 
616 	if (ret == -ETIMEDOUT)
617 		netdev_err(emac->ndev, "timeout waiting for command done\n");
618 
619 	mutex_unlock(&emac->cmd_lock);
620 
621 	return ret;
622 }
623 EXPORT_SYMBOL_GPL(icssg_set_port_state);
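/* Illustrative usage at a hypothetical call site, e.g. when a port comes up
 * (ICSSG_EMAC_PORT_FORWARD is assumed from the EMAC_PORT_FORWARD entry in
 * the command table above):
 *
 *	ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
 *	if (ret)
 *		netdev_err(emac->ndev, "failed to enable forwarding\n");
 */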
624 
625 void icssg_config_half_duplex(struct prueth_emac *emac)
626 {
627 	u32 val;
628 
629 	if (!emac->half_duplex)
630 		return;
631 
632 	val = get_random_u32();
633 	writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
634 }
635 EXPORT_SYMBOL_GPL(icssg_config_half_duplex);
636 
637 void icssg_config_set_speed(struct prueth_emac *emac)
638 {
639 	u8 fw_speed;
640 
641 	switch (emac->speed) {
642 	case SPEED_1000:
643 		fw_speed = FW_LINK_SPEED_1G;
644 		break;
645 	case SPEED_100:
646 		fw_speed = FW_LINK_SPEED_100M;
647 		break;
648 	case SPEED_10:
649 		fw_speed = FW_LINK_SPEED_10M;
650 		break;
651 	default:
652 		/* Other link speeds are not supported */
653 		netdev_err(emac->ndev, "Unsupported link speed\n");
654 		return;
655 	}
656 
657 	if (emac->duplex == DUPLEX_HALF)
658 		fw_speed |= FW_LINK_SPEED_HD;
659 
660 	writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
661 }
662 EXPORT_SYMBOL_GPL(icssg_config_set_speed);
663 
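/* FDB management messages travel over the ICSSG hardware queues: pop a free
 * command buffer from the slice's CMD pop queue, write the command into
 * shared RAM (past the 4 firmware-owned link bytes), push it to the CMD push
 * queue, then poll the RSP pop queue for the firmware's reply and finally
 * return the buffer via the RSP push queue.
 */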
664 int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
665 		       struct mgmt_cmd_rsp *rsp)
666 {
667 	struct prueth *prueth = emac->prueth;
668 	int slice = prueth_emac_slice(emac);
669 	int addr, ret;
670 
671 	addr = icssg_queue_pop(prueth, slice == 0 ?
672 			       ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
673 	if (addr < 0)
674 		return addr;
675 
676 	/* The first 4 bytes hold FW-owned buffer linking info which should
677 	 * not be touched
678 	 */
679 	memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
680 	icssg_queue_push(prueth, slice == 0 ?
681 			 ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
682 	ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
683 				2000, 20000000, false, prueth, slice == 0 ?
684 				ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
685 	if (ret) {
686 		netdev_err(emac->ndev, "Timed out sending HWQ message\n");
687 		return ret;
688 	}
689 
690 	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
691 	/* Return the buffer back to the pool */
692 	icssg_queue_push(prueth, slice == 0 ?
693 			 ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);
694 
695 	return 0;
696 }
697 EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);
698 
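/* Build a common FDB command: cmd_args[0] carries the first four MAC bytes,
 * the low half of cmd_args[1] the last two, and cmd_args[2] the FDB slot,
 * derived from a bit-reversed CRC32 over the 8-byte {MAC, FID, 0} tuple
 * masked by PRUETH_SWITCH_FDB_MASK.  Callers OR the FID / FID_C2 fields into
 * the upper bytes of cmd_args[1] as needed.
 */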
699 static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
700 			    const unsigned char *addr, u8 fid, int cmd)
701 {
702 	int slice = prueth_emac_slice(emac);
703 	u8 mac_fid[ETH_ALEN + 2];
704 	u16 fdb_slot;
705 
706 	ether_addr_copy(mac_fid, addr);
707 
708 	/* 1-1 VID-FID mapping is already set up */
709 	mac_fid[ETH_ALEN] = fid;
710 	mac_fid[ETH_ALEN + 1] = 0;
711 
712 	fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;
713 
714 	fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
715 	fdb_cmd->type   = ICSSG_FW_MGMT_FDB_CMD_TYPE;
716 	fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
717 	fdb_cmd->param  = cmd;
718 	fdb_cmd->param |= (slice << 4);
719 
720 	memcpy(&fdb_cmd->cmd_args[0], addr, 4);
721 	memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
722 	fdb_cmd->cmd_args[2] = fdb_slot;
723 
724 	netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
725 }
726 
727 int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
728 		      u8 vid, u8 fid_c2, bool add)
729 {
730 	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
731 	struct mgmt_cmd fdb_cmd = { 0 };
732 	u8 fid = vid;
733 	int ret;
734 
735 	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);
736 
737 	fid_c2 |= ICSSG_FDB_ENTRY_VALID;
738 	fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));
739 
740 	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
741 	if (ret)
742 		return ret;
743 
744 	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
745 	if (fdb_cmd_rsp.status == 1)
746 		return 0;
747 
748 	return -EINVAL;
749 }
750 EXPORT_SYMBOL_GPL(icssg_fdb_add_del);
751 
752 int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
753 		     u8 vid)
754 {
755 	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
756 	struct mgmt_cmd fdb_cmd = { 0 };
757 	struct prueth_fdb_slot *slot;
758 	u8 fid = vid;
759 	int ret, i;
760 
761 	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);
762 
763 	fdb_cmd.cmd_args[1] |= fid << 16;
764 
765 	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
766 	if (ret)
767 		return ret;
768 
769 	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
770 
771 	slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
772 	for (i = 0; i < 4; i++) {
773 		if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
774 			return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
775 		slot++;
776 	}
777 
778 	return 0;
779 }
780 EXPORT_SYMBOL_GPL(icssg_fdb_lookup);
781 
782 void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
783 		       u8 untag_mask, bool add)
784 {
785 	struct prueth *prueth = emac->prueth;
786 	struct prueth_vlan_tbl *tbl;
787 	u8 fid_c1;
788 
789 	tbl = prueth->vlan_tbl;
790 	spin_lock(&prueth->vtbl_lock);
791 	fid_c1 = tbl[vid].fid_c1;
792 
793 	/* FID_C1: bit0..2 port membership mask,
794 	 * bit3..5 tagging mask for each port
795 	 * bit6 Stream VID (not handled currently)
796 	 * bit7 MC flood (not handled currently)
797 	 */
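	/* Example: add VID membership for port 0 only with that port
	 * untagged (port_mask = BIT(0), untag_mask = BIT(0)): membership
	 * bit 0 and tagging bit 3 are first set, then the tagging bit is
	 * cleared again by the untag mask, so the net effect is fid_c1 |= 0x01.
	 */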
798 	if (add) {
799 		fid_c1 |= (port_mask | port_mask << 3);
800 		fid_c1 &= ~(untag_mask << 3);
801 	} else {
802 		fid_c1 &= ~(port_mask | port_mask << 3);
803 	}
804 
805 	tbl[vid].fid_c1 = fid_c1;
806 	spin_unlock(&prueth->vtbl_lock);
807 }
808 EXPORT_SYMBOL_GPL(icssg_vtbl_modify);
809 
810 u16 icssg_get_pvid(struct prueth_emac *emac)
811 {
812 	struct prueth *prueth = emac->prueth;
813 	u32 pvid;
814 
815 	if (emac->port_id == PRUETH_PORT_MII0)
816 		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
817 	else
818 		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
819 
820 	pvid = pvid >> 24;
821 
822 	return pvid;
823 }
824 EXPORT_SYMBOL_GPL(icssg_get_pvid);
825 
826 void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
827 {
828 	u32 pvid;
829 
830 	/* only 256 VLANs are supported */
831 	pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));
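	/* The word is stored big-endian (bytes 0x81 0x00 0x00 VID in memory),
	 * so on a little-endian host the VID lands in the top byte of the
	 * value readl() returns, which is why icssg_get_pvid() shifts the
	 * read-back value right by 24.
	 */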
832 
833 	if (port == PRUETH_PORT_MII0)
834 		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
835 	else if (port == PRUETH_PORT_MII1)
836 		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
837 	else
838 		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
839 }
840 EXPORT_SYMBOL_GPL(icssg_set_pvid);
841 
842 int emac_fdb_flow_id_updated(struct prueth_emac *emac)
843 {
844 	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
845 	int slice = prueth_emac_slice(emac);
846 	struct mgmt_cmd fdb_cmd = { 0 };
847 	int ret;
848 
849 	fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
850 	fdb_cmd.type   = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
851 	fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
852 	fdb_cmd.param  = 0;
853 
854 	fdb_cmd.param |= (slice << 4);
855 	fdb_cmd.cmd_args[0] = 0;
856 
857 	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
858 	if (ret)
859 		return ret;
860 
861 	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
862 	return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
863 }
864 EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
865