xref: /linux/drivers/net/ethernet/microchip/sparx5/sparx5_port.c (revision 150b567e0d572342ef08bace7ee7aff80fd75327)
1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
3  *
4  * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/phy/phy.h>
9 #include <net/dcbnl.h>
10 
11 #include "sparx5_main_regs.h"
12 #include "sparx5_main.h"
13 #include "sparx5_port.h"
14 
15 #define SPX5_ETYPE_TAG_C     0x8100
16 #define SPX5_ETYPE_TAG_S     0x88a8
17 
18 #define SPX5_WAIT_US         1000
19 #define SPX5_WAIT_MAX_US     2000
20 
21 enum port_error {
22 	SPX5_PERR_SPEED,
23 	SPX5_PERR_IFTYPE,
24 };
25 
26 #define PAUSE_DISCARD        0xC
27 #define ETH_MAXLEN           (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
28 
/* Decode the SGMII auto-negotiation word received from the link partner
 * and update link, speed and duplex in @status accordingly.
 */
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
{
	u16 spd = lp_abil & LPA_SGMII_SPD_MASK;

	status->an_complete = true;

	if (!(lp_abil & LPA_SGMII_LINK)) {
		status->link = false;
		return;
	}

	if (spd == LPA_SGMII_10) {
		status->speed = SPEED_10;
	} else if (spd == LPA_SGMII_100) {
		status->speed = SPEED_100;
	} else if (spd == LPA_SGMII_1000) {
		status->speed = SPEED_1000;
	} else {
		/* Unknown speed encoding: treat the link as down */
		status->link = false;
		return;
	}

	status->duplex = (lp_abil & LPA_SGMII_FULL_DUPLEX) ?
			 DUPLEX_FULL : DUPLEX_HALF;
}
56 
/* Decode the clause-37 base page exchanged during 1000BASE-X
 * auto-negotiation and resolve link, duplex and pause mode.
 * @lp_abil: ability word advertised by the link partner
 * @ld_abil: ability word advertised by the local device
 * @status:  port status to update
 *
 * Fix: use the kernel 'u16' type consistently instead of mixing in
 * 'uint16_t', and use kernel comment style.
 */
static void decode_cl37_word(u16 lp_abil, u16 ld_abil,
			     struct sparx5_port_status *status)
{
	/* A remote fault from the link partner overrides the link state */
	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
	status->an_complete = true;
	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
		DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */

	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
	    (lp_abil & ADVERTISE_1000XPAUSE)) {
		/* Both sides advertise symmetric pause */
		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
		/* Asymmetric pause: direction follows whichever side
		 * also advertises symmetric pause.
		 */
		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_TX : 0;
		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_RX : 0;
	} else {
		status->pause = MLO_PAUSE_NONE;
	}
}
77 
/* Read the link status of a port attached to the low speed device
 * (DEV2G5: SGMII/QSGMII/1000BASE-X/2500BASE-X modes).
 * Always returns 0; the result is delivered through @status.
 */
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	/* 1000BASE-X/2500BASE-X run at a fixed speed */
	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information  */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			decode_sgmii_word(lp_adv, status);
		} else {
			/* Clause-37 resolution also needs our own
			 * advertised ability word.
			 */
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}
	return 0;
}
120 
/* Read the link status of a port attached to a high speed (5G/10G/25G
 * BaseR/SFI) device. Link state is derived from the MAC TX monitor
 * idle sticky bit: anything other than "idle" means the link is or was
 * down. Returns 0, or -EINVAL if the port is not in a BaseR mode.
 */
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	/* Resolve the per-port high speed device instance */
	dev = sparx5_to_high_dev(sparx5, portno);
	tinst = sparx5_port_dev_index(sparx5, portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		/* Re-read to get the current state after the clear */
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	/* Speed follows the configured BaseR flavor (5G/10G, else 25G) */
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}
157 
158 /* Get link status of 1000Base-X/in-band and SFI ports.
159  */
sparx5_get_port_status(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_status * status)160 int sparx5_get_port_status(struct sparx5 *sparx5,
161 			   struct sparx5_port *port,
162 			   struct sparx5_port_status *status)
163 {
164 	memset(status, 0, sizeof(*status));
165 	status->speed = port->conf.speed;
166 	if (port->conf.power_down) {
167 		status->link = false;
168 		return 0;
169 	}
170 	switch (port->conf.portmode) {
171 	case PHY_INTERFACE_MODE_SGMII:
172 	case PHY_INTERFACE_MODE_QSGMII:
173 	case PHY_INTERFACE_MODE_1000BASEX:
174 	case PHY_INTERFACE_MODE_2500BASEX:
175 		return sparx5_get_dev2g5_status(sparx5, port, status);
176 	case PHY_INTERFACE_MODE_5GBASER:
177 	case PHY_INTERFACE_MODE_10GBASER:
178 	case PHY_INTERFACE_MODE_25GBASER:
179 		return sparx5_get_sfi_status(sparx5, port, status);
180 	case PHY_INTERFACE_MODE_NA:
181 		return 0;
182 	default:
183 		netdev_err(port->ndev, "Status not supported");
184 		return -ENODEV;
185 	}
186 	return 0;
187 }
188 
sparx5_port_error(struct sparx5_port * port,struct sparx5_port_config * conf,enum port_error errtype)189 static int sparx5_port_error(struct sparx5_port *port,
190 			     struct sparx5_port_config *conf,
191 			     enum port_error errtype)
192 {
193 	switch (errtype) {
194 	case SPX5_PERR_SPEED:
195 		netdev_err(port->ndev,
196 			   "Interface does not support speed: %u: for %s\n",
197 			   conf->speed, phy_modes(conf->portmode));
198 		break;
199 	case SPX5_PERR_IFTYPE:
200 		netdev_err(port->ndev,
201 			   "Switch port does not support interface type: %s\n",
202 			   phy_modes(conf->portmode));
203 		break;
204 	default:
205 		netdev_err(port->ndev,
206 			   "Interface configuration error\n");
207 	}
208 
209 	return -EINVAL;
210 }
211 
sparx5_port_verify_speed(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)212 static int sparx5_port_verify_speed(struct sparx5 *sparx5,
213 				    struct sparx5_port *port,
214 				    struct sparx5_port_config *conf)
215 {
216 	const struct sparx5_ops *ops = sparx5->data->ops;
217 
218 	if ((ops->is_port_2g5(port->portno) &&
219 	     conf->speed > SPEED_2500) ||
220 	    (ops->is_port_5g(port->portno)  &&
221 	     conf->speed > SPEED_5000) ||
222 	    (ops->is_port_10g(port->portno) &&
223 	     conf->speed > SPEED_10000))
224 		return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
225 
226 	switch (conf->portmode) {
227 	case PHY_INTERFACE_MODE_NA:
228 		return -EINVAL;
229 	case PHY_INTERFACE_MODE_1000BASEX:
230 		if (conf->speed != SPEED_1000 ||
231 		    ops->is_port_2g5(port->portno))
232 			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
233 		if (ops->is_port_2g5(port->portno))
234 			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
235 		break;
236 	case PHY_INTERFACE_MODE_2500BASEX:
237 		if (conf->speed != SPEED_2500 ||
238 		    ops->is_port_2g5(port->portno))
239 			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
240 		break;
241 	case PHY_INTERFACE_MODE_QSGMII:
242 		if (port->portno > 47)
243 			return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
244 		fallthrough;
245 	case PHY_INTERFACE_MODE_SGMII:
246 		if (conf->speed != SPEED_1000 &&
247 		    conf->speed != SPEED_100 &&
248 		    conf->speed != SPEED_10 &&
249 		    conf->speed != SPEED_2500)
250 			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
251 		break;
252 	case PHY_INTERFACE_MODE_5GBASER:
253 	case PHY_INTERFACE_MODE_10GBASER:
254 	case PHY_INTERFACE_MODE_25GBASER:
255 		if ((conf->speed != SPEED_5000 &&
256 		     conf->speed != SPEED_10000 &&
257 		     conf->speed != SPEED_25000))
258 			return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
259 		break;
260 	default:
261 		return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
262 	}
263 	return 0;
264 }
265 
sparx5_dev_change(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)266 static bool sparx5_dev_change(struct sparx5 *sparx5,
267 			      struct sparx5_port *port,
268 			      struct sparx5_port_config *conf)
269 {
270 	return sparx5_is_baser(port->conf.portmode) ^
271 		sparx5_is_baser(conf->portmode);
272 }
273 
/* Wait until the queue system has drained all frames belonging to the
 * port: poll the QRES resource counters for all priorities until they
 * all read zero. Sleeps SPX5_WAIT_US..SPX5_WAIT_MAX_US between polls
 * and gives up with -EINVAL after 2000 iterations.
 */
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32  value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

			/* NOTE(review): the "DST-MEM" label below for
			 * resource 0 appears inverted with respect to the
			 * table above — confirm against the QRES register
			 * layout before relying on the log message.
			 */
			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}
317 
/* Disable and flush a port following the hardware's prescribed 12-step
 * shutdown sequence: stop reception, stop forwarding, flush the queue
 * system, reset the MAC/PCS clock domains and finally disable the PCS.
 * @high_spd_dev selects whether the high speed (5G/10G/25G) or the
 * DEV2G5 device instance is operated on.
 * Returns 0 on success or the error from the queue flush poll.
 * The step order below is significant; do not reorder.
 */
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(sparx5, port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain  */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain  */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues  */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Scale the settle delay with the link speed: slower links need
	 * longer for an in-flight frame to drain.
	 */
	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues associated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the  MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25 BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (ops->is_port_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled  */
	return 0;
}
459 
/* Calculate the FIFO size setting for a port at the given speed,
 * compensating for the port's taxi bus distance and the core clock
 * period. Only relevant on Sparx5 itself (returns 0 on other targets
 * and for 25G, where no compensation is needed; returns 1 for 10/100M).
 */
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
{
	/* sys_clk is a clock period value derived from the core clock;
	 * presumably in picoseconds — confirm against sparx5_clk_period().
	 */
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	/* Per-port taxi bus distance table, indexed by port number */
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		4, 4, 4, 4,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	};
	u32 mac_per    = 6400, tmp1, tmp2, tmp3;
	u32 fifo_width = 16;
	u32 mac_width  = 8;
	u32 addition   = 0;

	if (!is_sparx5(sparx5))
		return 0;

	/* MAC clock period and data width depend on the link speed */
	switch (speed) {
	case SPEED_25000:
		return 0;
	case SPEED_10000:
		mac_per = 6400;
		mac_width = 8;
		addition = 1;
		break;
	case SPEED_5000:
		mac_per = 12800;
		mac_width = 8;
		addition = 0;
		break;
	case SPEED_2500:
		mac_per = 3200;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_1000:
		mac_per =  8000;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_100:
	case SPEED_10:
		return 1;
	default:
		break;
	}

	/* Fixed point arithmetic scaled by 1000; the final expression
	 * rounds up (+999) before dividing back down.
	 */
	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return  (tmp3 + 2000 + 999) / 1000 + addition;
}
518 
519 /* Configure port muxing:
520  * QSGMII:     4x2G5 devices
521  */
sparx5_port_mux_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)522 int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
523 			struct sparx5_port_config *conf)
524 {
525 	u32 portno = port->portno;
526 	u32 inst;
527 
528 	if (port->conf.portmode == conf->portmode)
529 		return 0; /* Nothing to do */
530 
531 	switch (conf->portmode) {
532 	case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q'  */
533 		inst = (portno - portno % 4) / 4;
534 		spx5_rmw(BIT(inst),
535 			 BIT(inst),
536 			 sparx5,
537 			 PORT_CONF_QSGMII_ENA);
538 
539 		if ((portno / 4 % 2) == 0) {
540 			/* Affects d0-d3,d8-d11..d40-d43 */
541 			spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
542 				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
543 				 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
544 				 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
545 				 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
546 				 PORT_CONF_USGMII_CFG_QUAD_MODE,
547 				 sparx5,
548 				 PORT_CONF_USGMII_CFG((portno / 8)));
549 		}
550 		break;
551 	default:
552 		break;
553 	}
554 	return 0;
555 }
556 
/* Program the maximum number of VLAN tags and the tag ethertype into
 * the MAC of both the DEV2G5 device and, for larger ports, the high
 * speed device. Always returns 0.
 */
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
	int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
			      max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
	bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev             = sparx5_to_high_dev(sparx5, port->portno);
	u32 tinst           = sparx5_port_dev_index(sparx5, port->portno);
	void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 etype;

	/* Select the tag ethertype: the custom value for S-custom ports,
	 * otherwise the standard C-tag (0x8100) or S-tag (0x88a8) TPID.
	 */
	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

	/* 2G5-only ports have no high speed device to program */
	if (ops->is_port_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}
605 
/* Calculate the forwarding urgency value for a port speed, expressed
 * in clock periods minus one. Unlisted speeds use the 1G value.
 */
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
{
	u32 clk_period_ps = 1600; /* 625Mhz for now */
	u32 urg;

	if (speed == SPEED_2500)
		urg = 270000;
	else if (speed == SPEED_5000)
		urg = 135000;
	else if (speed == SPEED_10000)
		urg = 67200;
	else if (speed == SPEED_25000)
		urg = 27000;
	else
		urg = 672000; /* 10/100/1000 and anything else */

	return urg / clk_period_ps - 1;
}
632 
/* Encode a watermark value: 2048 and above are stored in units of 16
 * with an offset of 2048; smaller values are stored as-is.
 */
static u16 sparx5_wm_enc(u16 value)
{
	return (value < 2048) ? value : 2048 + value / 16;
}
640 
/* Configure 802.3x flow control for a port: half-duplex backpressure,
 * whether received pause frames are obeyed, forward pressure, and the
 * watermark at which pause frames are generated. Always returns 0.
 */
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
{
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	/* When TX pause is enabled, generate pause at 4 max-size frames
	 * worth of buffer cells.
	 */
	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4  * (ETH_MAXLEN /
						 SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 sparx5,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol  */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 sparx5,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 sparx5,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	return 0;
}
678 
sparx5_get_aneg_word(struct sparx5_port_config * conf)679 static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
680 {
681 	if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
682 		return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
683 	else
684 		return 1; /* Enable SGMII Aneg */
685 }
686 
sparx5_serdes_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)687 int sparx5_serdes_set(struct sparx5 *sparx5,
688 		      struct sparx5_port *port,
689 		      struct sparx5_port_config *conf)
690 {
691 	int portmode, err, speed = conf->speed;
692 
693 	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
694 	    ((port->portno % 4) != 0)) {
695 		return 0;
696 	}
697 	if (sparx5_is_baser(conf->portmode)) {
698 		if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
699 			speed = SPEED_25000;
700 		else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
701 			speed = SPEED_10000;
702 		else
703 			speed = SPEED_5000;
704 	}
705 
706 	err = phy_set_media(port->serdes, conf->media);
707 	if (err)
708 		return err;
709 	if (speed > 0) {
710 		err = phy_set_speed(port->serdes, speed);
711 		if (err)
712 			return err;
713 	}
714 	if (conf->serdes_reset) {
715 		err = phy_reset(port->serdes);
716 		if (err)
717 			return err;
718 	}
719 
720 	/* Configure SerDes with port parameters
721 	 * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G
722 	 */
723 	portmode = conf->portmode;
724 	if (sparx5_is_baser(conf->portmode))
725 		portmode = PHY_INTERFACE_MODE_10GBASER;
726 	err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
727 	if (err)
728 		return err;
729 	conf->serdes_reset = false;
730 	return err;
731 }
732 
sparx5_port_pcs_low_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)733 static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
734 				   struct sparx5_port *port,
735 				   struct sparx5_port_config *conf)
736 {
737 	bool sgmii = false, inband_aneg = false;
738 	int err;
739 
740 	if (conf->inband) {
741 		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
742 		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
743 			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
744 		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
745 			 conf->autoneg)
746 			inband_aneg = true; /* Clause-37 in-band-aneg */
747 
748 		err = sparx5_serdes_set(sparx5, port, conf);
749 		if (err)
750 			return -EINVAL;
751 	} else {
752 		sgmii = true; /* Phy is connected to the MAC */
753 	}
754 
755 	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
756 	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
757 		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
758 		 sparx5,
759 		 DEV2G5_PCS1G_MODE_CFG(port->portno));
760 
761 	/* Enable PCS */
762 	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
763 		sparx5,
764 		DEV2G5_PCS1G_CFG(port->portno));
765 
766 	if (inband_aneg) {
767 		u16 abil = sparx5_get_aneg_word(conf);
768 
769 		/* Enable in-band aneg */
770 		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
771 			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
772 			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
773 			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
774 			sparx5,
775 			DEV2G5_PCS1G_ANEG_CFG(port->portno));
776 	} else {
777 		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
778 	}
779 
780 	/* Take PCS out of reset */
781 	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
782 		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
783 		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
784 		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
785 		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
786 		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
787 		 sparx5,
788 		 DEV2G5_DEV_RST_CTRL(port->portno));
789 
790 	return 0;
791 }
792 
sparx5_port_pcs_high_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)793 static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
794 				    struct sparx5_port *port,
795 				    struct sparx5_port_config *conf)
796 {
797 	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
798 	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
799 	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
800 	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
801 	void __iomem *devinst;
802 	void __iomem *pcsinst;
803 	int err;
804 
805 	devinst = spx5_inst_get(sparx5, dev, pix);
806 	pcsinst = spx5_inst_get(sparx5, pcs, pix);
807 
808 	/*  SFI : No in-band-aneg. Speeds 5G/10G/25G */
809 	err = sparx5_serdes_set(sparx5, port, conf);
810 	if (err)
811 		return -EINVAL;
812 	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
813 		/* Enable PCS for 25G device, speed 25G */
814 		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
815 			 DEV25G_PCS25G_CFG_PCS25G_ENA,
816 			 sparx5,
817 			 DEV25G_PCS25G_CFG(pix));
818 	} else {
819 		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
820 		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
821 			      PCS10G_BR_PCS_CFG_PCS_ENA,
822 			      pcsinst,
823 			      PCS10G_BR_PCS_CFG(0));
824 	}
825 
826 	/* Enable 5G/10G/25G MAC module */
827 	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
828 		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
829 		     devinst,
830 		     DEV10G_MAC_ENA_CFG(0));
831 
832 	/* Take the device out of reset */
833 	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
834 		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
835 		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
836 		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
837 		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
838 		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
839 		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
840 		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
841 		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
842 		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
843 		      devinst,
844 		      DEV10G_DEV_RST_CTRL(0));
845 
846 	return 0;
847 }
848 
849 /* Switch between 1G/2500 and 5G/10G/25G devices */
sparx5_dev_switch(struct sparx5 * sparx5,int port,bool hsd)850 static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
851 {
852 	const struct sparx5_ops *ops = sparx5->data->ops;
853 	int bt_indx;
854 
855 	bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));
856 
857 	if (ops->is_port_5g(port)) {
858 		spx5_rmw(hsd ? 0 : bt_indx,
859 			 bt_indx,
860 			 sparx5,
861 			 PORT_CONF_DEV5G_MODES);
862 	} else if (ops->is_port_10g(port)) {
863 		spx5_rmw(hsd ? 0 : bt_indx,
864 			 bt_indx,
865 			 sparx5,
866 			 PORT_CONF_DEV10G_MODES);
867 	} else if (ops->is_port_25g(port)) {
868 		spx5_rmw(hsd ? 0 : bt_indx,
869 			 bt_indx,
870 			 sparx5,
871 			 PORT_CONF_DEV25G_MODES);
872 	}
873 }
874 
/* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	/* Encode speed-dependent register values:
	 * clk_spd: 0 = 10M, 1 = 100M, 2 = 1G/2.5G
	 * tx/hdx gaps: inter-frame gap settings per speed/duplex
	 */
	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	/* Enable PHAD_CTRL for better timestamping */
	if (!is_sparx5(sparx5)) {
		for (int i = 0; i < 2; ++i) {
			/* Divide the port clock by three for the two
			 * phase detection registers.
			 */
			spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
				 DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
				 DEV2G5_PHAD_CTRL_DIV_CFG |
				 DEV2G5_PHAD_CTRL_PHAD_ENA,
				 sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
		}
	}

	return 0;
}
943 
sparx5_port_pcs_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)944 int sparx5_port_pcs_set(struct sparx5 *sparx5,
945 			struct sparx5_port *port,
946 			struct sparx5_port_config *conf)
947 
948 {
949 	bool high_speed_dev = sparx5_is_baser(conf->portmode);
950 	int err;
951 
952 	if (sparx5_dev_change(sparx5, port, conf)) {
953 		/* switch device */
954 		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);
955 
956 		/* Disable the not-in-use device */
957 		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
958 		if (err)
959 			return err;
960 	}
961 	/* Disable the port before re-configuring */
962 	err = sparx5_port_disable(sparx5, port, high_speed_dev);
963 	if (err)
964 		return -EINVAL;
965 
966 	if (high_speed_dev)
967 		err = sparx5_port_pcs_high_set(sparx5, port, conf);
968 	else
969 		err = sparx5_port_pcs_low_set(sparx5, port, conf);
970 
971 	if (err)
972 		return -EINVAL;
973 
974 	if (conf->inband) {
975 		/* Enable/disable 1G counters in ASM */
976 		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
977 			 ASM_PORT_CFG_CSC_STAT_DIS,
978 			 sparx5,
979 			 ASM_PORT_CFG(port->portno));
980 
981 		/* Enable/disable 1G counters in DSM */
982 		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
983 			 DSM_BUF_CFG_CSC_STAT_DIS,
984 			 sparx5,
985 			 DSM_BUF_CFG(port->portno));
986 	}
987 
988 	port->conf = *conf;
989 
990 	return 0;
991 }
992 
/* Apply a runtime port configuration: speed verification, low speed
 * device setup, flow control, DSM tx-stop watermark and enabling the
 * port in the queue system.
 *
 * @sparx5: switch instance
 * @port:   port to configure
 * @conf:   requested configuration (saved in @port on success)
 *
 * Returns 0 on success or a negative error code from the speed check
 * or flow control setup.
 */
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	const struct sparx5_ops *ops = sparx5->data->ops;
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	/* high speed device is already configured */
	if (!high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	/* On non-sparx5 targets, a 10G-capable port running below 10G
	 * uses the shadow (low speed) device in the DSM.
	 */
	if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
	    conf->speed < SPEED_10000)
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Set the DSM stop watermark (derived from port FIFO size/speed) */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system, with speed-dependent fwd urgency */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}
1042 
/* Initialize port config to default
 *
 * One-time hardware setup for @port: mux mode, VLAN tag awareness,
 * max frame length, signal detect, pause/tail-drop watermarks, and —
 * for ports backed by a high speed device — the 10G/25G PCS.
 *
 * Returns 0 on success or a negative error code.
 */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	/* Pause asserted at 6 max-size frames of buffer use, tail drop at 20 */
	u32 pause_start = sparx5_wm_enc(6  * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	/* Instance bases for this port's high speed device and PCS */
	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode  */
	err = ops->set_port_mux(sparx5, port, conf);
	if (err)
		return err;

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!ops->is_port_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		/* SGMII/QSGMII runs on the low speed device */
		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		// All ports must be PCS enabled in QSGMII mode
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_MAXLEN_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (ops->is_port_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	if (!is_sparx5(sparx5)) {
		void __iomem *inst;
		u32 dev, tinst;

		/* NOTE(review): the stamper cfg value 5 and the use of
		 * port->portno (rather than 0) as the per-instance register
		 * index are taken as-is; meaning is not visible here.
		 */
		if (ops->is_port_10g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV10G_PTP_STAMPER_CFG(port->portno));
		} else if (ops->is_port_5g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV5G_PTP_STAMPER_CFG(port->portno));
		}
	}

	return 0;
}
1195 
/* Enable or disable frame forwarding for a port in the queue system. */
void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
	struct sparx5 *s5 = port->sparx5;

	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA, s5,
		 QFWD_SWITCH_PORT_MODE(port->portno));
}
1206 
sparx5_port_qos_set(struct sparx5_port * port,struct sparx5_port_qos * qos)1207 int sparx5_port_qos_set(struct sparx5_port *port,
1208 			struct sparx5_port_qos *qos)
1209 {
1210 	sparx5_port_qos_dscp_set(port, &qos->dscp);
1211 	sparx5_port_qos_pcp_set(port, &qos->pcp);
1212 	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
1213 	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
1214 	sparx5_port_qos_default_set(port, qos);
1215 
1216 	return 0;
1217 }
1218 
sparx5_port_qos_pcp_rewr_set(const struct sparx5_port * port,struct sparx5_port_qos_pcp_rewr * qos)1219 int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
1220 				 struct sparx5_port_qos_pcp_rewr *qos)
1221 {
1222 	int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
1223 	struct sparx5 *sparx5 = port->sparx5;
1224 	u8 pcp, dei;
1225 
1226 	/* Use mapping table, with classified QoS as index, to map QoS and DP
1227 	 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
1228 	 * PCP. Classified PCP equals frame PCP.
1229 	 */
1230 	if (qos->enable)
1231 		mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
1232 
1233 	spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
1234 		 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
1235 		 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
1236 		 port->sparx5, REW_TAG_CTRL(port->portno));
1237 
1238 	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1239 		/* Extract PCP and DEI */
1240 		pcp = qos->map.map[i];
1241 		if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
1242 			dei = 1;
1243 		else
1244 			dei = 0;
1245 
1246 		/* Rewrite PCP and DEI, for each classified QoS class and DP
1247 		 * level. This table is only used if tag ctrl mode is set to
1248 		 * 'mapped'.
1249 		 *
1250 		 * 0:0nd   - prio=0 and dp:0 => pcp=0 and dei=0
1251 		 * 0:0de   - prio=0 and dp:1 => pcp=0 and dei=1
1252 		 */
1253 		if (dei) {
1254 			spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
1255 				 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
1256 				 REW_PCP_MAP_DE1(port->portno, i));
1257 
1258 			spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
1259 				 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
1260 				 REW_DEI_MAP_DE1(port->portno, i));
1261 		} else {
1262 			spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
1263 				 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
1264 				 REW_PCP_MAP_DE0(port->portno, i));
1265 
1266 			spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
1267 				 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
1268 				 REW_DEI_MAP_DE0(port->portno, i));
1269 		}
1270 	}
1271 
1272 	return 0;
1273 }
1274 
sparx5_port_qos_pcp_set(const struct sparx5_port * port,struct sparx5_port_qos_pcp * qos)1275 int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
1276 			    struct sparx5_port_qos_pcp *qos)
1277 {
1278 	struct sparx5 *sparx5 = port->sparx5;
1279 	u8 *pcp_itr = qos->map.map;
1280 	u8 pcp, dp;
1281 	int i;
1282 
1283 	/* Enable/disable pcp and dp for qos classification. */
1284 	spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
1285 		 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
1286 		 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
1287 		 sparx5, ANA_CL_QOS_CFG(port->portno));
1288 
1289 	/* Map each pcp and dei value to priority and dp */
1290 	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1291 		pcp = *(pcp_itr + i);
1292 		dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
1293 		spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
1294 			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
1295 			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
1296 			 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
1297 			 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
1298 	}
1299 
1300 	return 0;
1301 }
1302 
sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port * port,int mode)1303 void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
1304 					int mode)
1305 {
1306 	spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
1307 		 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
1308 		 ANA_CL_QOS_CFG(port->portno));
1309 }
1310 
sparx5_port_qos_dscp_rewr_set(const struct sparx5_port * port,struct sparx5_port_qos_dscp_rewr * qos)1311 int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
1312 				  struct sparx5_port_qos_dscp_rewr *qos)
1313 {
1314 	struct sparx5 *sparx5 = port->sparx5;
1315 	bool rewr = false;
1316 	u16 dscp;
1317 	int i;
1318 
1319 	/* On egress, rewrite DSCP value to either classified DSCP or frame
1320 	 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
1321 	 */
1322 	if (qos->enable)
1323 		rewr = true;
1324 
1325 	spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
1326 		 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
1327 		 REW_DSCP_MAP(port->portno));
1328 
1329 	/* On ingress, map each classified QoS class and DP to classified DSCP
1330 	 * value. This mapping table is global for all ports.
1331 	 */
1332 	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1333 		dscp = qos->map.map[i];
1334 		spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
1335 			 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
1336 			 ANA_CL_QOS_MAP_CFG(i));
1337 	}
1338 
1339 	return 0;
1340 }
1341 
sparx5_port_qos_dscp_set(const struct sparx5_port * port,struct sparx5_port_qos_dscp * qos)1342 int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
1343 			     struct sparx5_port_qos_dscp *qos)
1344 {
1345 	struct sparx5 *sparx5 = port->sparx5;
1346 	u8 *dscp = qos->map.map;
1347 	int i;
1348 
1349 	/* Enable/disable dscp and dp for qos classification.
1350 	 * Disable rewrite of dscp values for now.
1351 	 */
1352 	spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
1353 		 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
1354 		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
1355 		 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
1356 		 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
1357 		 ANA_CL_QOS_CFG(port->portno));
1358 
1359 	/* Map each dscp value to priority and dp */
1360 	for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1361 		spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
1362 			 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
1363 			 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
1364 			 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
1365 			 ANA_CL_DSCP_CFG(i));
1366 	}
1367 
1368 	/* Set per-dscp trust */
1369 	for (i = 0; i <  ARRAY_SIZE(qos->map.map); i++) {
1370 		if (qos->qos_enable) {
1371 			spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
1372 				 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
1373 				 ANA_CL_DSCP_CFG(i));
1374 		}
1375 	}
1376 
1377 	return 0;
1378 }
1379 
sparx5_port_qos_default_set(const struct sparx5_port * port,const struct sparx5_port_qos * qos)1380 int sparx5_port_qos_default_set(const struct sparx5_port *port,
1381 				const struct sparx5_port_qos *qos)
1382 {
1383 	struct sparx5 *sparx5 = port->sparx5;
1384 
1385 	/* Set default prio and dp level */
1386 	spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
1387 		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
1388 		 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
1389 		 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
1390 		 sparx5, ANA_CL_QOS_CFG(port->portno));
1391 
1392 	/* Set default pcp and dei for untagged frames */
1393 	spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
1394 		 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
1395 		 ANA_CL_VLAN_CTRL_PORT_PCP |
1396 		 ANA_CL_VLAN_CTRL_PORT_DEI,
1397 		 sparx5, ANA_CL_VLAN_CTRL(port->portno));
1398 
1399 	return 0;
1400 }
1401 
sparx5_get_internal_port(struct sparx5 * sparx5,int port)1402 int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
1403 {
1404 	return sparx5->data->consts->n_ports + port;
1405 }
1406