xref: /linux/drivers/net/ethernet/ti/icssg/icssg_config.c (revision aba74e639f8d76d29b94991615e33319d7371b63)
// SPDX-License-Identifier: GPL-2.0
/* ICSSG Ethernet driver
 *
 * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
 */

#include <linux/iopoll.h>
#include <linux/regmap.h>
#include <uapi/linux/if_ether.h>
#include "icssg_config.h"
#include "icssg_prueth.h"
#include "icssg_switch_map.h"
#include "icssg_mii_rt.h"

/* TX IPG values to be set for 100M and 1G link speeds. These values are
 * in ocp_clk cycles, so they need to change if ocp_clk is changed for a
 * specific h/w design.
 */

/* IPG is in core_clk cycles */
#define MII_RT_TX_IPG_100M	0x17
#define MII_RT_TX_IPG_1G	0xb
#define MII_RT_TX_IPG_100M_SR1	0x166
#define MII_RT_TX_IPG_1G_SR1	0x1a

#define	ICSSG_QUEUES_MAX		64
#define	ICSSG_QUEUE_OFFSET		0xd00
#define	ICSSG_QUEUE_PEEK_OFFSET		0xe00
#define	ICSSG_QUEUE_CNT_OFFSET		0xe40
#define	ICSSG_QUEUE_RESET_OFFSET	0xf40

#define	ICSSG_NUM_TX_QUEUES	8

#define	RECYCLE_Q_SLICE0	16
#define	RECYCLE_Q_SLICE1	17

#define	ICSSG_NUM_OTHER_QUEUES	5	/* port, host and special queues */

#define	PORT_HI_Q_SLICE0	32
#define	PORT_LO_Q_SLICE0	33
#define	HOST_HI_Q_SLICE0	34
#define	HOST_LO_Q_SLICE0	35
#define	HOST_SPL_Q_SLICE0	40	/* Special Queue */

#define	PORT_HI_Q_SLICE1	36
#define	PORT_LO_Q_SLICE1	37
#define	HOST_HI_Q_SLICE1	38
#define	HOST_LO_Q_SLICE1	39
#define	HOST_SPL_Q_SLICE1	41	/* Special Queue */

#define MII_RXCFG_DEFAULT	(PRUSS_MII_RT_RXCFG_RX_ENABLE | \
				 PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS | \
				 PRUSS_MII_RT_RXCFG_RX_L2_EN | \
				 PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS)

#define MII_TXCFG_DEFAULT	(PRUSS_MII_RT_TXCFG_TX_ENABLE | \
				 PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | \
				 PRUSS_MII_RT_TXCFG_TX_32_MODE_EN | \
				 PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN)

#define ICSSG_CFG_DEFAULT	(ICSSG_CFG_TX_L1_EN | \
				 ICSSG_CFG_TX_L2_EN | ICSSG_CFG_RX_L2_G_EN | \
				 ICSSG_CFG_TX_PRU_EN | \
				 ICSSG_CFG_SGMII_MODE)

#define FDB_GEN_CFG1		0x60
#define SMEM_VLAN_OFFSET	8
#define SMEM_VLAN_OFFSET_MASK	GENMASK(25, 8)

#define FDB_GEN_CFG2		0x64
#define FDB_VLAN_EN		BIT(6)
#define FDB_HOST_EN		BIT(2)
#define FDB_PRU1_EN		BIT(1)
#define FDB_PRU0_EN		BIT(0)
#define FDB_EN_ALL		(FDB_PRU0_EN | FDB_PRU1_EN | \
				 FDB_HOST_EN | FDB_VLAN_EN)

/**
 * struct map - ICSSG Queue Map
 * @queue: Queue number
 * @pd_addr_start: Packet descriptor queue reserved memory
 * @flags: Flags
 * @special: Indicates whether this queue is a special queue or not
 */
struct map {
	int queue;
	u32 pd_addr_start;
	u32 flags;
	bool special;
};

/* Hardware queue map for ICSSG */
static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
	{
		{ PORT_HI_Q_SLICE0, PORT_DESC0_HI, 0x200000, 0 },
		{ PORT_LO_Q_SLICE0, PORT_DESC0_LO, 0, 0 },
		{ HOST_HI_Q_SLICE0, HOST_DESC0_HI, 0x200000, 0 },
		{ HOST_LO_Q_SLICE0, HOST_DESC0_LO, 0, 0 },
		{ HOST_SPL_Q_SLICE0, HOST_SPPD0, 0x400000, 1 },
	},
	{
		{ PORT_HI_Q_SLICE1, PORT_DESC1_HI, 0xa00000, 0 },
		{ PORT_LO_Q_SLICE1, PORT_DESC1_LO, 0x800000, 0 },
		{ HOST_HI_Q_SLICE1, HOST_DESC1_HI, 0xa00000, 0 },
		{ HOST_LO_Q_SLICE1, HOST_DESC1_LO, 0x800000, 0 },
		{ HOST_SPL_Q_SLICE1, HOST_SPPD1, 0xc00000, 1 },
	},
};

static void icssg_config_mii_init_fw_offload(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int mii = prueth_emac_slice(emac);
	u32 txcfg_reg, pcnt_reg, txcfg;
	struct regmap *mii_rt;

	mii_rt = prueth->mii_rt;

	txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
				       PRUSS_MII_RT_TXCFG1;
	pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
				       PRUSS_MII_RT_RX_PCNT1;

	txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
		PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
		PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;

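	/* Note: the TX mux selection below is the mirror image of the
	 * non-offload path in icssg_config_mii_init(): in MII mode the mux
	 * is selected for ICSS_MII1, otherwise for ICSS_MII0.
	 */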
	if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;

	regmap_write(mii_rt, txcfg_reg, txcfg);
	regmap_write(mii_rt, pcnt_reg, 0x1);
}

static void icssg_config_mii_init(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 txcfg, txcfg_reg, pcnt_reg;
	struct regmap *mii_rt;

	mii_rt = prueth->mii_rt;

	txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
				       PRUSS_MII_RT_TXCFG1;
	pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
				       PRUSS_MII_RT_RX_PCNT1;

	txcfg = MII_TXCFG_DEFAULT;

	/* In MII mode the TX lines are swapped inside the ICSSG, so the
	 * TX_MUX_SEL configuration also needs to be swapped compared to
	 * RGMII mode.
	 */
	if (emac->phy_if == PHY_INTERFACE_MODE_MII && slice == ICSS_MII0)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
	else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
		txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;

	regmap_write(mii_rt, txcfg_reg, txcfg);
	regmap_write(mii_rt, pcnt_reg, 0x1);
}

static void icssg_miig_queues_init(struct prueth *prueth, int slice)
{
	struct regmap *miig_rt = prueth->miig_rt;
	void __iomem *smem = prueth->shram.va;
	u8 pd[ICSSG_SPECIAL_PD_SIZE];
	int queue = 0, i, j;
	u32 *pdword;

	/* reset hwqueues */
	if (slice)
		queue = ICSSG_NUM_TX_QUEUES;

	for (i = 0; i < ICSSG_NUM_TX_QUEUES; i++) {
		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);
		queue++;
	}

	queue = slice ? RECYCLE_Q_SLICE1 : RECYCLE_Q_SLICE0;
	regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET, queue);

	for (i = 0; i < ICSSG_NUM_OTHER_QUEUES; i++) {
		regmap_write(miig_rt, ICSSG_QUEUE_RESET_OFFSET,
			     hwq_map[slice][i].queue);
	}

	/* initialize packet descriptors in SMEM */
	/* push packet descriptors to hwqueues */

	pdword = (u32 *)pd;
	for (j = 0; j < ICSSG_NUM_OTHER_QUEUES; j++) {
		const struct map *mp;
		int pd_size, num_pds;
		u32 pdaddr;

		mp = &hwq_map[slice][j];
		if (mp->special) {
			pd_size = ICSSG_SPECIAL_PD_SIZE;
			num_pds = ICSSG_NUM_SPECIAL_PDS;
		} else {
			pd_size = ICSSG_NORMAL_PD_SIZE;
			num_pds = ICSSG_NUM_NORMAL_PDS;
		}

		for (i = 0; i < num_pds; i++) {
			memset(pd, 0, pd_size);

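			/* Descriptor starts out all-zero; only the queue
			 * flags from the hardware queue map are set below.
			 */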
			pdword[0] &= ICSSG_FLAG_MASK;
			pdword[0] |= mp->flags;
			pdaddr = mp->pd_addr_start + i * pd_size;

			memcpy_toio(smem + pdaddr, pd, pd_size);
			queue = mp->queue;
			regmap_write(miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue,
				     pdaddr);
		}
	}
}

void icssg_config_ipg(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 ipg;

	switch (emac->speed) {
	case SPEED_1000:
		ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
		break;
	case SPEED_100:
		ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
		break;
	case SPEED_10:
		/* Firmware hardcodes IPG for SR1.0 */
		if (emac->is_sr1)
			return;
		/* IPG for 10M is the same as for 100M */
		ipg = MII_RT_TX_IPG_100M;
		break;
	default:
		/* Other link speeds are not supported */
		netdev_err(emac->ndev, "Unsupported link speed\n");
		return;
	}

	icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
EXPORT_SYMBOL_GPL(icssg_config_ipg);

static void emac_r30_cmd_init(struct prueth_emac *emac)
{
	struct icssg_r30_cmd __iomem *p;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	for (i = 0; i < 4; i++)
		writel(EMAC_NONE, &p->cmd[i]);
}

static int emac_r30_is_done(struct prueth_emac *emac)
{
	const struct icssg_r30_cmd __iomem *p;
	u32 cmd;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	for (i = 0; i < 4; i++) {
		cmd = readl(&p->cmd[i]);
		if (cmd != EMAC_NONE)
			return 0;
	}

	return 1;
}

static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
{
	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
	struct icssg_rxq_ctx __iomem *rxq_ctx;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 addr;
	int i;

	addr = lower_32_bits(prueth->msmcram.pa);
	if (slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;

	if (addr % SZ_64K) {
		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
		return -EINVAL;
	}

	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
	/* workaround for f/w bug. bpool 0 needs to be initialized */
	for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
		writel(addr, &bpool_cfg[i].addr);
		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
		addr += PRUETH_EMAC_BUF_POOL_SIZE;
	}

	if (!slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
	else
		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;

	for (i = PRUETH_NUM_BUF_POOLS;
	     i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
	     i++) {
		/* The driver only uses the first 4 queues per PRU, so only initialize them */
		if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
			writel(addr, &bpool_cfg[i].addr);
			writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
			addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
		} else {
			writel(0, &bpool_cfg[i].addr);
			writel(0, &bpool_cfg[i].len);
		}
	}

	if (!slice)
		addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
	else
		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;

	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
	writel(addr - SZ_2K, &rxq_ctx->end);

	return 0;
}

static int prueth_emac_buffer_setup(struct prueth_emac *emac)
{
	struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
	struct icssg_rxq_ctx __iomem *rxq_ctx;
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	u32 addr;
	int i;

	/* Layout to have 64KB aligned buffer pool
	 * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
	 */

	addr = lower_32_bits(prueth->msmcram.pa);
	if (slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;

	if (addr % SZ_64K) {
		dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
		return -EINVAL;
	}

	bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
	/* workaround for f/w bug: bpool 0 needs to be initialized */
	writel(addr, &bpool_cfg[0].addr);
	writel(0, &bpool_cfg[0].len);

	for (i = PRUETH_EMAC_BUF_POOL_START;
	     i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
	     i++) {
		writel(addr, &bpool_cfg[i].addr);
		writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
		addr += PRUETH_EMAC_BUF_POOL_SIZE;
	}

	if (!slice)
		addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
	else
		addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;

	/* Pre-emptible RX buffer queue */
	rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
	writel(addr, &rxq_ctx->end);

	/* Express RX buffer queue */
	rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
	for (i = 0; i < 3; i++)
		writel(addr, &rxq_ctx->start[i]);

	addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
	writel(addr, &rxq_ctx->end);

	return 0;
}

void icssg_init_emac_mode(struct prueth *prueth)
{
	/* When the device is configured as a bridge and it is being brought
	 * back to EMAC mode, the host MAC address has to be set to 0.
	 */
	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
	int i;
	u8 mac[ETH_ALEN] = { 0 };

	/* Set VLAN TABLE address base */
	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
			   addr << SMEM_VLAN_OFFSET);
	/* Enable FDBs for the host and both PRUs */
	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
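	/* Set up a 1:1 VID-to-FID mapping for all VLAN IDs */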
	for (i = 0; i < SZ_4K - 1; i++) {
		prueth->vlan_tbl[i].fid = i;
		prueth->vlan_tbl[i].fid_c1 = 0;
	}
	/* Clear host MAC address */
	icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
EXPORT_SYMBOL_GPL(icssg_init_emac_mode);

void icssg_init_fw_offload_mode(struct prueth *prueth)
{
	u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
	int i;

	/* Set VLAN TABLE address base */
	regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
			   addr << SMEM_VLAN_OFFSET);
	/* Enable VLAN-aware mode and FDBs for the host and all PRUs */
	regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
	prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
			    EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
	for (i = 0; i < SZ_4K - 1; i++) {
		prueth->vlan_tbl[i].fid = i;
		prueth->vlan_tbl[i].fid_c1 = 0;
	}

	if (prueth->hw_bridge_dev)
		icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
	icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);

int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
	void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
	struct icssg_flow_cfg __iomem *flow_cfg;
	int ret;

	memset_io(config, 0, TAS_GATE_MASK_LIST0);
	icssg_miig_queues_init(prueth, slice);

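	/* Default to the fastest speed the interface mode supports;
	 * non-RGMII (MII) interfaces are limited to 100M.
	 */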
	emac->speed = SPEED_1000;
	emac->duplex = DUPLEX_FULL;
	if (!phy_interface_mode_is_rgmii(emac->phy_if)) {
		emac->speed = SPEED_100;
		emac->duplex = DUPLEX_FULL;
	}
	regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
			   ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
	icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		icssg_config_mii_init_fw_offload(emac);
	else
		icssg_config_mii_init(emac);
	icssg_config_ipg(emac);
	icssg_update_rgmii_cfg(prueth->miig_rt, emac);

	/* set GPI mode */
	pruss_cfg_gpimode(prueth->pruss, prueth->pru_id[slice],
			  PRUSS_GPI_MODE_MII);

	/* enable XFR shift for PRU and RTU */
	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_PRU, true);
	pruss_cfg_xfr_enable(prueth->pruss, PRU_TYPE_RTU, true);

	/* set C28 to 0x100 */
	pru_rproc_set_ctable(prueth->pru[slice], PRU_C28, 0x100 << 8);
	pru_rproc_set_ctable(prueth->rtu[slice], PRU_C28, 0x100 << 8);
	pru_rproc_set_ctable(prueth->txpru[slice], PRU_C28, 0x100 << 8);

	flow_cfg = config + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
	writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
	writew(0, &flow_cfg->mgm_base_flow);
	writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
	writeb(0, config + QUEUE_NUM_UNTAGGED);

	if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
		ret = prueth_fw_offload_buffer_setup(emac);
	else
		ret = prueth_emac_buffer_setup(emac);
	if (ret)
		return ret;

	emac_r30_cmd_init(emac);

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_config);

/* Bitmask for ICSSG r30 commands */
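/* Each command is four 32-bit words, one per firmware command slot; a slot
 * set to EMAC_NONE carries no command. The firmware writes EMAC_NONE back to
 * a slot once it has processed it (see emac_r30_is_done()).
 */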
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
	{{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}},	/* EMAC_PORT_DISABLE */
	{{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}},	/* EMAC_PORT_BLOCK */
	{{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}},	/* EMAC_PORT_FORWARD */
	{{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}},	/* EMAC_PORT_FORWARD_WO_LEARNING */
	{{0xffff0001, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},	/* ACCEPT ALL */
	{{0xfffe0002, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},	/* ACCEPT TAGGED */
	{{0xfffc0000, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},	/* ACCEPT UNTAGGED and PRIO */
	{{EMAC_NONE,  0xffff0020, EMAC_NONE, EMAC_NONE}},	/* TAS Trigger List change */
	{{EMAC_NONE,  0xdfff1000, EMAC_NONE, EMAC_NONE}},	/* TAS set state ENABLE */
	{{EMAC_NONE,  0xefff2000, EMAC_NONE, EMAC_NONE}},	/* TAS set state RESET */
	{{EMAC_NONE,  0xcfff0000, EMAC_NONE, EMAC_NONE}},	/* TAS set state DISABLE */
	{{EMAC_NONE,  EMAC_NONE,  0xffff0400, EMAC_NONE}},	/* UC flooding ENABLE */
	{{EMAC_NONE,  EMAC_NONE,  0xfbff0000, EMAC_NONE}},	/* UC flooding DISABLE */
	{{EMAC_NONE,  EMAC_NONE,  0xffff0800, EMAC_NONE}},	/* MC flooding ENABLE */
	{{EMAC_NONE,  EMAC_NONE,  0xf7ff0000, EMAC_NONE}},	/* MC flooding DISABLE */
	{{EMAC_NONE,  0xffff4000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx ENABLE */
	{{EMAC_NONE,  0xbfff0000, EMAC_NONE, EMAC_NONE}},	/* Preemption on Tx DISABLE */
	{{0xffff0010,  EMAC_NONE, 0xffff0010, EMAC_NONE}},	/* VLAN AWARE */
	{{0xffef0000,  EMAC_NONE, 0xffef0000, EMAC_NONE}},	/* VLAN UNAWARE */
	{{0xffff2000, EMAC_NONE, EMAC_NONE, EMAC_NONE}},	/* HSR_RX_OFFLOAD_ENABLE */
	{{0xdfff0000, EMAC_NONE, EMAC_NONE, EMAC_NONE}}		/* HSR_RX_OFFLOAD_DISABLE */
};

int icssg_set_port_state(struct prueth_emac *emac,
			 enum icssg_port_state_cmd cmd)
{
	struct icssg_r30_cmd __iomem *p;
	int ret = -ETIMEDOUT;
	int done = 0;
	int i;

	p = emac->dram.va + MGR_R30_CMD_OFFSET;

	if (cmd >= ICSSG_EMAC_PORT_MAX_COMMANDS) {
		netdev_err(emac->ndev, "invalid port command\n");
		return -EINVAL;
	}

	/* only one command at a time is allowed to the firmware */
	mutex_lock(&emac->cmd_lock);

	for (i = 0; i < 4; i++)
		writel(emac_r32_bitmask[cmd].cmd[i], &p->cmd[i]);

	/* wait for done */
	ret = read_poll_timeout(emac_r30_is_done, done, done == 1,
				1000, 10000, false, emac);

	if (ret == -ETIMEDOUT)
		netdev_err(emac->ndev, "timeout waiting for command done\n");

	mutex_unlock(&emac->cmd_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icssg_set_port_state);

void icssg_config_half_duplex(struct prueth_emac *emac)
{
	u32 val;

	if (!emac->half_duplex)
		return;

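	/* Provide a random seed to the firmware for its half-duplex
	 * operation (written to HD_RAND_SEED_OFFSET below).
	 */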
	val = get_random_u32();
	writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_config_half_duplex);

void icssg_config_set_speed(struct prueth_emac *emac)
{
	u8 fw_speed;

	switch (emac->speed) {
	case SPEED_1000:
		fw_speed = FW_LINK_SPEED_1G;
		break;
	case SPEED_100:
		fw_speed = FW_LINK_SPEED_100M;
		break;
	case SPEED_10:
		fw_speed = FW_LINK_SPEED_10M;
		break;
	default:
		/* Other link speeds are not supported */
		netdev_err(emac->ndev, "Unsupported link speed\n");
		return;
	}

	if (emac->duplex == DUPLEX_HALF)
		fw_speed |= FW_LINK_SPEED_HD;

	writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_config_set_speed);

int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
		       struct mgmt_cmd_rsp *rsp)
{
	struct prueth *prueth = emac->prueth;
	int slice = prueth_emac_slice(emac);
	int addr, ret;

	addr = icssg_queue_pop(prueth, slice == 0 ?
			       ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
	if (addr < 0)
		return addr;

	/* The first 4 bytes hold FW-owned buffer linking info and must
	 * not be touched.
	 */
	memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
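	/* Poll the response queue until the firmware pushes back a reply buffer */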
	ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
				2000, 20000000, false, prueth, slice == 0 ?
				ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
	if (ret) {
		netdev_err(emac->ndev, "Timed out sending HWQ message\n");
		return ret;
	}

	memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
	/* Return the buffer back to the pool */
	icssg_queue_push(prueth, slice == 0 ?
			 ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);

static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
			    const unsigned char *addr, u8 fid, int cmd)
{
	int slice = prueth_emac_slice(emac);
	u8 mac_fid[ETH_ALEN + 2];
	u16 fdb_slot;

	ether_addr_copy(mac_fid, addr);

	/* 1-1 VID-FID mapping is already set up */
	mac_fid[ETH_ALEN] = fid;
	mac_fid[ETH_ALEN + 1] = 0;

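	/* FDB slot index: bit-reversed CRC32 of MAC + FID, masked to the
	 * FDB table size.
	 */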
	fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;

	fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
	fdb_cmd->type   = ICSSG_FW_MGMT_FDB_CMD_TYPE;
	fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
	fdb_cmd->param  = cmd;
	fdb_cmd->param |= (slice << 4);

	memcpy(&fdb_cmd->cmd_args[0], addr, 4);
	memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
	fdb_cmd->cmd_args[2] = fdb_slot;

	netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
}

int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
		      u8 vid, u8 fid_c2, bool add)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	struct mgmt_cmd fdb_cmd = { 0 };
	u8 fid = vid;
	int ret;

	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);

	fid_c2 |= ICSSG_FDB_ENTRY_VALID;
	fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
	if (fdb_cmd_rsp.status == 1)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(icssg_fdb_add_del);

int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
		     u8 vid)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	struct mgmt_cmd fdb_cmd = { 0 };
	struct prueth_fdb_slot *slot;
	u8 fid = vid;
	int ret, i;

	icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);

	fdb_cmd.cmd_args[1] |= fid << 16;

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);

	slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
	for (i = 0; i < 4; i++) {
		if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
			return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
		slot++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_fdb_lookup);

void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
		       u8 untag_mask, bool add)
{
	struct prueth *prueth = emac->prueth;
	struct prueth_vlan_tbl *tbl;
	u8 fid_c1;

	tbl = prueth->vlan_tbl;
	spin_lock(&prueth->vtbl_lock);
	fid_c1 = tbl[vid].fid_c1;

	/* FID_C1: bit0..2 port membership mask,
	 * bit3..5 tagging mask for each port
	 * bit6 Stream VID (not handled currently)
	 * bit7 MC flood (not handled currently)
	 */
	if (add) {
		fid_c1 |= (port_mask | port_mask << 3);
		fid_c1 &= ~(untag_mask << 3);
	} else {
		fid_c1 &= ~(port_mask | port_mask << 3);
	}

	tbl[vid].fid_c1 = fid_c1;
	spin_unlock(&prueth->vtbl_lock);
}
EXPORT_SYMBOL_GPL(icssg_vtbl_modify);

u16 icssg_get_pvid(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	u32 pvid;

	if (emac->port_id == PRUETH_PORT_MII0)
		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
	else
		pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);

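	/* The VID is stored in the most significant byte of the default
	 * VLAN word (see icssg_set_pvid()).
	 */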
	pvid = pvid >> 24;

	return pvid;
}
EXPORT_SYMBOL_GPL(icssg_get_pvid);

void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
{
	u32 pvid;

	/* only 256 VLANs are supported */
	pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));

	if (port == PRUETH_PORT_MII0)
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
	else if (port == PRUETH_PORT_MII1)
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
	else
		writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);

int emac_fdb_flow_id_updated(struct prueth_emac *emac)
{
	struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
	int slice = prueth_emac_slice(emac);
	struct mgmt_cmd fdb_cmd = { 0 };
	int ret;

	fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
	fdb_cmd.type   = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
	fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
	fdb_cmd.param  = 0;

	fdb_cmd.param |= (slice << 4);
	fdb_cmd.cmd_args[0] = 0;

	ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
	if (ret)
		return ret;

	WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
	return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
804