1 // SPDX-License-Identifier: GPL-2.0+
2 /* Microchip Sparx5 Switch driver
3 *
4 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
5 */
6
7 #include <linux/module.h>
8 #include <linux/phy/phy.h>
9 #include <net/dcbnl.h>
10
11 #include "sparx5_main_regs.h"
12 #include "sparx5_main.h"
13 #include "sparx5_port.h"
14
/* VLAN tag EtherTypes */
#define SPX5_ETYPE_TAG_C 0x8100	/* Customer tag (IEEE 802.1Q) */
#define SPX5_ETYPE_TAG_S 0x88a8	/* Service tag (IEEE 802.1ad) */

/* Sleep interval (us) between queue-flush polls */
#define SPX5_WAIT_US 1000
#define SPX5_WAIT_MAX_US 2000

/* Error categories reported through sparx5_port_error() */
enum port_error {
	SPX5_PERR_SPEED,	/* Speed not supported in this mode */
	SPX5_PERR_IFTYPE,	/* Interface type not supported on this port */
};

#define PAUSE_DISCARD 0xC
/* Maximum standard Ethernet frame: payload + L2 header + FCS */
#define ETH_MAXLEN (ETH_DATA_LEN + ETH_HLEN + ETH_FCS_LEN)
28
/* Decode the SGMII control word advertised by the link partner into
 * link/speed/duplex fields of @status.
 */
static void decode_sgmii_word(u16 lp_abil, struct sparx5_port_status *status)
{
	u16 spd = lp_abil & LPA_SGMII_SPD_MASK;

	status->an_complete = true;

	if (!(lp_abil & LPA_SGMII_LINK)) {
		status->link = false;
		return;
	}

	if (spd == LPA_SGMII_10) {
		status->speed = SPEED_10;
	} else if (spd == LPA_SGMII_100) {
		status->speed = SPEED_100;
	} else if (spd == LPA_SGMII_1000) {
		status->speed = SPEED_1000;
	} else {
		/* Unknown speed encoding: treat the link as down */
		status->link = false;
		return;
	}

	status->duplex = (lp_abil & LPA_SGMII_FULL_DUPLEX) ?
			 DUPLEX_FULL : DUPLEX_HALF;
}
56
/* Decode a Clause-37 (1000BaseX) autoneg result from the local (@ld_abil)
 * and link partner (@lp_abil) advertisement words into @status.
 *
 * Note: uses the kernel "u16" type consistently for both ability words
 * (the original mixed u16 and uint16_t in one signature).
 */
static void decode_cl37_word(u16 lp_abil, u16 ld_abil,
			     struct sparx5_port_status *status)
{
	/* A remote fault advertised by the partner takes the link down */
	status->link = !(lp_abil & ADVERTISE_RFAULT) && status->link;
	status->an_complete = true;
	status->duplex = (ADVERTISE_1000XFULL & lp_abil) ?
		DUPLEX_FULL : DUPLEX_UNKNOWN; /* 1G HDX not supported */

	/* Resolve pause: symmetric if both sides advertise pause,
	 * otherwise per-direction via the asymmetric-pause bits.
	 */
	if ((ld_abil & ADVERTISE_1000XPAUSE) &&
	    (lp_abil & ADVERTISE_1000XPAUSE)) {
		status->pause = MLO_PAUSE_RX | MLO_PAUSE_TX;
	} else if ((ld_abil & ADVERTISE_1000XPSE_ASYM) &&
		   (lp_abil & ADVERTISE_1000XPSE_ASYM)) {
		status->pause |= (lp_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_TX : 0;
		status->pause |= (ld_abil & ADVERTISE_1000XPAUSE) ?
			MLO_PAUSE_RX : 0;
	} else {
		status->pause = MLO_PAUSE_NONE;
	}
}
77
/* Read link status for a port on the DEV2G5 device
 * (SGMII/QSGMII/1000BaseX/2500BaseX modes): link/sync state, the sticky
 * link-down indication and - once in-band aneg has completed - the
 * negotiated speed/duplex/pause.  Always returns 0.
 */
static int sparx5_get_dev2g5_status(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_status *status)
{
	u32 portno = port->portno;
	u16 lp_adv, ld_adv;
	u32 value;

	/* Get PCS Link down sticky */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_STICKY(portno));
	status->link_down = DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_GET(value);
	if (status->link_down)	/* Clear the sticky */
		spx5_wr(value, sparx5, DEV2G5_PCS1G_STICKY(portno));

	/* Get both current Link and Sync status */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_LINK_STATUS(portno));
	status->link = DEV2G5_PCS1G_LINK_STATUS_LINK_STATUS_GET(value) &&
		       DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS_GET(value);

	/* BaseX modes run at a fixed speed */
	if (port->conf.portmode == PHY_INTERFACE_MODE_1000BASEX)
		status->speed = SPEED_1000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_2500BASEX)
		status->speed = SPEED_2500;

	status->duplex = DUPLEX_FULL;

	/* Get PCS ANEG status register */
	value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_STATUS(portno));

	/* Aneg complete provides more information */
	if (DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(value)) {
		lp_adv = DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_GET(value);
		if (port->conf.portmode == PHY_INTERFACE_MODE_SGMII) {
			/* SGMII: the word only carries partner abilities */
			decode_sgmii_word(lp_adv, status);
		} else {
			/* Clause-37: also need the local advertisement */
			value = spx5_rd(sparx5, DEV2G5_PCS1G_ANEG_CFG(portno));
			ld_adv = DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_GET(value);
			decode_cl37_word(lp_adv, ld_adv, status);
		}
	}
	return 0;
}
120
/* Read link status for a port in SFI (5G/10G/25G BaseR) mode.
 *
 * The MAC TX monitor sticky register is used as the link indication:
 * only the IDLE state bit set means the link is (and has stayed) up.
 * Returns 0, or -EINVAL if called for a non-BaseR port mode.
 */
static int sparx5_get_sfi_status(struct sparx5 *sparx5,
				 struct sparx5_port *port,
				 struct sparx5_port_status *status)
{
	bool high_speed_dev = sparx5_is_baser(port->conf.portmode);
	u32 portno = port->portno;
	u32 value, dev, tinst;
	void __iomem *inst;

	if (!high_speed_dev) {
		netdev_err(port->ndev, "error: low speed and SFI mode\n");
		return -EINVAL;
	}

	/* Resolve the high-speed device instance for this port */
	dev = sparx5_to_high_dev(sparx5, portno);
	tinst = sparx5_port_dev_index(sparx5, portno);
	inst = spx5_inst_get(sparx5, dev, tinst);

	value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	if (value != DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY) {
		/* The link is or has been down. Clear the sticky bit */
		status->link_down = 1;
		spx5_inst_wr(0xffffffff, inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
		value = spx5_inst_rd(inst, DEV10G_MAC_TX_MONITOR_STICKY(0));
	}
	/* Link is up when only the IDLE sticky remains set after clearing */
	status->link = (value == DEV10G_MAC_TX_MONITOR_STICKY_IDLE_STATE_STICKY);
	status->duplex = DUPLEX_FULL;
	/* Speed follows directly from the configured BaseR mode */
	if (port->conf.portmode == PHY_INTERFACE_MODE_5GBASER)
		status->speed = SPEED_5000;
	else if (port->conf.portmode == PHY_INTERFACE_MODE_10GBASER)
		status->speed = SPEED_10000;
	else
		status->speed = SPEED_25000;

	return 0;
}
157
158 /* Get link status of 1000Base-X/in-band and SFI ports.
159 */
sparx5_get_port_status(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_status * status)160 int sparx5_get_port_status(struct sparx5 *sparx5,
161 struct sparx5_port *port,
162 struct sparx5_port_status *status)
163 {
164 memset(status, 0, sizeof(*status));
165 status->speed = port->conf.speed;
166 if (port->conf.power_down) {
167 status->link = false;
168 return 0;
169 }
170 switch (port->conf.portmode) {
171 case PHY_INTERFACE_MODE_SGMII:
172 case PHY_INTERFACE_MODE_QSGMII:
173 case PHY_INTERFACE_MODE_1000BASEX:
174 case PHY_INTERFACE_MODE_2500BASEX:
175 return sparx5_get_dev2g5_status(sparx5, port, status);
176 case PHY_INTERFACE_MODE_5GBASER:
177 case PHY_INTERFACE_MODE_10GBASER:
178 case PHY_INTERFACE_MODE_25GBASER:
179 return sparx5_get_sfi_status(sparx5, port, status);
180 case PHY_INTERFACE_MODE_NA:
181 return 0;
182 default:
183 netdev_err(port->ndev, "Status not supported");
184 return -ENODEV;
185 }
186 return 0;
187 }
188
sparx5_port_error(struct sparx5_port * port,struct sparx5_port_config * conf,enum port_error errtype)189 static int sparx5_port_error(struct sparx5_port *port,
190 struct sparx5_port_config *conf,
191 enum port_error errtype)
192 {
193 switch (errtype) {
194 case SPX5_PERR_SPEED:
195 netdev_err(port->ndev,
196 "Interface does not support speed: %u: for %s\n",
197 conf->speed, phy_modes(conf->portmode));
198 break;
199 case SPX5_PERR_IFTYPE:
200 netdev_err(port->ndev,
201 "Switch port does not support interface type: %s\n",
202 phy_modes(conf->portmode));
203 break;
204 default:
205 netdev_err(port->ndev,
206 "Interface configuration error\n");
207 }
208
209 return -EINVAL;
210 }
211
sparx5_port_verify_speed(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)212 static int sparx5_port_verify_speed(struct sparx5 *sparx5,
213 struct sparx5_port *port,
214 struct sparx5_port_config *conf)
215 {
216 const struct sparx5_ops *ops = sparx5->data->ops;
217
218 if ((ops->is_port_2g5(port->portno) &&
219 conf->speed > SPEED_2500) ||
220 (ops->is_port_5g(port->portno) &&
221 conf->speed > SPEED_5000) ||
222 (ops->is_port_10g(port->portno) &&
223 conf->speed > SPEED_10000))
224 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
225
226 switch (conf->portmode) {
227 case PHY_INTERFACE_MODE_NA:
228 return -EINVAL;
229 case PHY_INTERFACE_MODE_1000BASEX:
230 if (conf->speed != SPEED_1000 ||
231 ops->is_port_2g5(port->portno))
232 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
233 if (ops->is_port_2g5(port->portno))
234 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
235 break;
236 case PHY_INTERFACE_MODE_2500BASEX:
237 if (conf->speed != SPEED_2500 ||
238 ops->is_port_2g5(port->portno))
239 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
240 break;
241 case PHY_INTERFACE_MODE_QSGMII:
242 if (port->portno > 47)
243 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
244 fallthrough;
245 case PHY_INTERFACE_MODE_SGMII:
246 if (conf->speed != SPEED_1000 &&
247 conf->speed != SPEED_100 &&
248 conf->speed != SPEED_10 &&
249 conf->speed != SPEED_2500)
250 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
251 break;
252 case PHY_INTERFACE_MODE_5GBASER:
253 case PHY_INTERFACE_MODE_10GBASER:
254 case PHY_INTERFACE_MODE_25GBASER:
255 if ((conf->speed != SPEED_5000 &&
256 conf->speed != SPEED_10000 &&
257 conf->speed != SPEED_25000))
258 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
259 break;
260 case PHY_INTERFACE_MODE_RGMII:
261 case PHY_INTERFACE_MODE_RGMII_ID:
262 case PHY_INTERFACE_MODE_RGMII_TXID:
263 case PHY_INTERFACE_MODE_RGMII_RXID:
264 if (conf->speed != SPEED_1000 &&
265 conf->speed != SPEED_100 &&
266 conf->speed != SPEED_10)
267 return sparx5_port_error(port, conf, SPX5_PERR_SPEED);
268 break;
269 default:
270 return sparx5_port_error(port, conf, SPX5_PERR_IFTYPE);
271 }
272 return 0;
273 }
274
sparx5_dev_change(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)275 static bool sparx5_dev_change(struct sparx5 *sparx5,
276 struct sparx5_port *port,
277 struct sparx5_port_config *conf)
278 {
279 return sparx5_is_baser(port->conf.portmode) ^
280 sparx5_is_baser(conf->portmode);
281 }
282
/* Poll the queue-system resource counters for @portno until its memory
 * consumption has drained, i.e. the port flush has completed.  Sleeps
 * SPX5_WAIT_US..SPX5_WAIT_MAX_US between polls and gives up after 2000
 * iterations, returning -EINVAL on timeout, 0 when empty.
 */
static int sparx5_port_flush_poll(struct sparx5 *sparx5, u32 portno)
{
	u32 value, resource, prio, delay_cnt = 0;
	bool poll_src = true;
	char *mem = "";

	/* Resource == 0: Memory tracked per source (SRC-MEM)
	 * Resource == 1: Frame references tracked per source (SRC-REF)
	 * Resource == 2: Memory tracked per destination (DST-MEM)
	 * Resource == 3: Frame references tracked per destination. (DST-REF)
	 */
	while (1) {
		bool empty = true;

		for (resource = 0; resource < (poll_src ? 2 : 1); resource++) {
			u32 base;

			/* Loop index 0 polls the DST-MEM group (stat base
			 * 2048), index 1 the SRC-MEM group (base 0).
			 */
			base = (resource == 0 ? 2048 : 0) + SPX5_PRIOS * portno;
			for (prio = 0; prio < SPX5_PRIOS; prio++) {
				value = spx5_rd(sparx5,
						QRES_RES_STAT(base + prio));
				if (value) {
					mem = resource == 0 ?
						"DST-MEM" : "SRC-MEM";
					empty = false;
				}
			}
		}

		if (empty)
			break;

		if (delay_cnt++ == 2000) {
			dev_err(sparx5->dev,
				"Flush timeout port %u. %s queue not empty\n",
				portno, mem);
			return -EINVAL;
		}

		usleep_range(SPX5_WAIT_US, SPX5_WAIT_MAX_US);
	}
	return 0;
}
326
/* Disable and flush a port device prior to reconfiguration.
 *
 * @high_spd_dev selects which device instance is disabled: the
 * 5G/10G/25G device or the DEV2G5 device.  Follows the numbered 12-step
 * sequence below: stop frame reception, stop switch forwarding, flush
 * the egress queues, hold MAC/PCS in reset and finally disable the PCS.
 * The step order is hardware-mandated and must not be changed.
 * Returns 0, or the error from the queue flush poll.
 */
static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, bool high_spd_dev)
{
	u32 tinst = high_spd_dev ?
		    sparx5_port_dev_index(sparx5, port->portno) : port->portno;
	u32 dev = high_spd_dev ?
		  sparx5_to_high_dev(sparx5, port->portno) : TARGET_DEV2G5;
	void __iomem *devinst = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 spd = port->conf.speed;
	u32 spd_prm;
	int err;

	if (high_spd_dev) {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      DEV10G_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV10G_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV10G_MAC_ENA_CFG(0));
	} else {
		/* 1: Reset the PCS Rx clock domain */
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
		/* 2: Disable MAC frame reception */
		spx5_inst_rmw(0,
			      DEV2G5_MAC_ENA_CFG_RX_ENA,
			      devinst,
			      DEV2G5_MAC_ENA_CFG(0));
	}
	/* 3: Disable traffic being sent to or from switch port->portno */
	spx5_rmw(0,
		 QFWD_SWITCH_PORT_MODE_PORT_ENA,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* 4: Disable dequeuing from the egress queues */
	spx5_rmw(HSCH_PORT_MODE_DEQUEUE_DIS,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 5: Disable Flowcontrol */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(0xFFF - 1),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Delay scales inversely with wire speed (values in microseconds) */
	spd_prm = spd == SPEED_10 ? 1000 : spd == SPEED_100 ? 100 : 10;
	/* 6: Wait while the last frame is exiting the queues */
	usleep_range(8 * spd_prm, 10 * spd_prm);

	/* 7: Flush the queues associated with the port->portno */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(1),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_DST |
		 HSCH_FLUSH_CTRL_FLUSH_SRC |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	/* 8: Enable dequeuing from the egress queues */
	spx5_rmw(0,
		 HSCH_PORT_MODE_DEQUEUE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* 9: Wait until flushing is complete */
	err = sparx5_port_flush_poll(sparx5, port->portno);
	if (err)
		return err;

	/* 10: Reset the  MAC clock domain */
	if (high_spd_dev) {
		spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(1) |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(1),
			      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
			      DEV10G_DEV_RST_CTRL_MAC_TX_RST,
			      devinst,
			      DEV10G_DEV_RST_CTRL(0));

	} else {
		spx5_inst_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(3) |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(1) |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(1),
			      DEV2G5_DEV_RST_CTRL_SPEED_SEL |
			      DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
			      DEV2G5_DEV_RST_CTRL_PCS_RX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
			      DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
			      devinst,
			      DEV2G5_DEV_RST_CTRL(0));
	}
	/* 11: Clear flushing */
	spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
		 HSCH_FLUSH_CTRL_FLUSH_ENA_SET(0),
		 HSCH_FLUSH_CTRL_FLUSH_PORT |
		 HSCH_FLUSH_CTRL_FLUSH_ENA,
		 sparx5,
		 HSCH_FLUSH_CTRL);

	if (high_spd_dev) {
		u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
		void __iomem *pcsinst = spx5_inst_get(sparx5, pcs, tinst);

		/* 12: Disable 5G/10G/25 BaseR PCS */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(0),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));

		if (ops->is_port_25g(port->portno))
			/* Disable 25G PCS */
			spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(0),
				 DEV25G_PCS25G_CFG_PCS25G_ENA,
				 sparx5,
				 DEV25G_PCS25G_CFG(tinst));
	} else {
		/* 12: Disable 1G PCS */
		spx5_rmw(DEV2G5_PCS1G_CFG_PCS_ENA_SET(0),
			 DEV2G5_PCS1G_CFG_PCS_ENA,
			 sparx5,
			 DEV2G5_PCS1G_CFG(port->portno));
	}

	/* The port is now flushed and disabled  */
	return 0;
}
468
/* Compute the DSM device-TX stop watermark for a port at a given speed,
 * from the core clock period, the per-speed MAC parameters and the
 * per-port taxi bus distance.  Returns 0 where no watermark is needed
 * (25G, or non-Sparx5 targets).
 *
 * NOTE(review): mac_per looks like the MAC clock period in the same
 * unit sparx5_clk_period() returns (presumably picoseconds) - confirm
 * against the datasheet when touching this formula.
 */
static int sparx5_port_fifo_sz(struct sparx5 *sparx5,
			       u32 portno, u32 speed)
{
	u32 sys_clk = sparx5_clk_period(sparx5->coreclock);
	/* Per-port taxi bus distance, indexed by port number */
	const u32 taxi_dist[SPX5_PORTS_ALL] = {
		6, 8, 10, 6, 8, 10, 6, 8, 10, 6, 8, 10,
		4, 4, 4, 4,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		11, 12, 13, 14, 15, 16, 17, 18,
		4, 6, 8, 4, 6, 8, 6, 8,
		2, 2, 2, 2, 2, 2, 2, 4, 2
	};
	u32 mac_per = 6400, tmp1, tmp2, tmp3;
	u32 fifo_width = 16;
	u32 mac_width = 8;
	u32 addition = 0;

	/* Only the original Sparx5 target needs this computation */
	if (!is_sparx5(sparx5))
		return 0;

	/* Select MAC period/width/addition for the given speed */
	switch (speed) {
	case SPEED_25000:
		return 0;
	case SPEED_10000:
		mac_per = 6400;
		mac_width = 8;
		addition = 1;
		break;
	case SPEED_5000:
		mac_per = 12800;
		mac_width = 8;
		addition = 0;
		break;
	case SPEED_2500:
		mac_per = 3200;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_1000:
		mac_per =  8000;
		mac_width = 1;
		addition = 0;
		break;
	case SPEED_100:
	case SPEED_10:
		return 1;
	default:
		break;
	}

	/* Scaled-by-1000 fixed point arithmetic; the final division
	 * rounds up (+999) and adds a 2-cell margin (+2000).
	 */
	tmp1 = 1000 * mac_width / fifo_width;
	tmp2 = 3000 + ((12000 + 2 * taxi_dist[portno] * 1000)
		       * sys_clk / mac_per);
	tmp3 = tmp1 * tmp2 / 1000;
	return  (tmp3 + 2000 + 999) / 1000 + addition;
}
527
528 /* Configure port muxing:
529 * QSGMII: 4x2G5 devices
530 */
sparx5_port_mux_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)531 int sparx5_port_mux_set(struct sparx5 *sparx5, struct sparx5_port *port,
532 struct sparx5_port_config *conf)
533 {
534 u32 portno = port->portno;
535 u32 inst;
536
537 if (port->conf.portmode == conf->portmode)
538 return 0; /* Nothing to do */
539
540 switch (conf->portmode) {
541 case PHY_INTERFACE_MODE_QSGMII: /* QSGMII: 4x2G5 devices. Mode Q' */
542 inst = (portno - portno % 4) / 4;
543 spx5_rmw(BIT(inst),
544 BIT(inst),
545 sparx5,
546 PORT_CONF_QSGMII_ENA);
547
548 if ((portno / 4 % 2) == 0) {
549 /* Affects d0-d3,d8-d11..d40-d43 */
550 spx5_rmw(PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(1) |
551 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM_SET(1) |
552 PORT_CONF_USGMII_CFG_QUAD_MODE_SET(1),
553 PORT_CONF_USGMII_CFG_BYPASS_SCRAM |
554 PORT_CONF_USGMII_CFG_BYPASS_DESCRAM |
555 PORT_CONF_USGMII_CFG_QUAD_MODE,
556 sparx5,
557 PORT_CONF_USGMII_CFG((portno / 8)));
558 }
559 break;
560 default:
561 break;
562 }
563 return 0;
564 }
565
/* Program the accepted number of VLAN tags and the tag EtherType for a
 * port, on the DEV2G5 MAC and - when the port also has a high-speed
 * device - on the DEV10G-family MAC.  Always returns 0.
 */
static int sparx5_port_max_tags_set(struct sparx5 *sparx5,
				    struct sparx5_port *port)
{
	enum sparx5_port_max_tags max_tags    = port->max_vlan_tags;
	/* Number of tags the high-speed MAC should parse (0, 1 or 2) */
	int tag_ct          = max_tags == SPX5_PORT_MAX_TAGS_ONE ? 1 :
			      max_tags == SPX5_PORT_MAX_TAGS_TWO ? 2 : 0;
	bool dtag           = max_tags == SPX5_PORT_MAX_TAGS_TWO;
	enum sparx5_vlan_port_type vlan_type  = port->vlan_type;
	bool dotag          = max_tags != SPX5_PORT_MAX_TAGS_NONE;
	u32 dev             = sparx5_to_high_dev(sparx5, port->portno);
	u32 tinst           = sparx5_port_dev_index(sparx5, port->portno);
	void __iomem *inst  = spx5_inst_get(sparx5, dev, tinst);
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 etype;

	/* Tag EtherType: custom for S-custom ports, otherwise the
	 * standard C-tag or S-tag TPID.
	 */
	etype = (vlan_type == SPX5_VLAN_PORT_TYPE_S_CUSTOM ?
		 port->custom_etype :
		 vlan_type == SPX5_VLAN_PORT_TYPE_C ?
		 SPX5_ETYPE_TAG_C : SPX5_ETYPE_TAG_S);

	spx5_wr(DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		DEV2G5_MAC_TAGS_CFG_PB_ENA_SET(dtag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(dotag) |
		DEV2G5_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(dotag),
		sparx5,
		DEV2G5_MAC_TAGS_CFG(port->portno));

	/* 2G5-only ports have no high-speed MAC to configure */
	if (ops->is_port_2g5(port->portno))
		return 0;

	spx5_inst_rmw(DEV10G_MAC_TAGS_CFG_TAG_ID_SET(etype) |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA_SET(dotag),
		      DEV10G_MAC_TAGS_CFG_TAG_ID |
		      DEV10G_MAC_TAGS_CFG_TAG_ENA,
		      inst,
		      DEV10G_MAC_TAGS_CFG(0, 0));

	spx5_inst_rmw(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(tag_ct),
		      DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS,
		      inst,
		      DEV10G_MAC_NUM_TAGS_CFG(0));

	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(dotag),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK,
		      inst,
		      DEV10G_MAC_MAXLEN_CFG(0));
	return 0;
}
614
/* Compute the forwarding urgency (in core clock cycles, minus one) for
 * the given port speed.  Speeds of 1G and below - and any unrecognized
 * value - use the largest urgency interval.
 */
int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed)
{
	u32 clk_period_ps = 1600; /* 625Mhz for now */
	u32 urg;

	switch (speed) {
	case SPEED_2500:
		urg = 270000;
		break;
	case SPEED_5000:
		urg = 135000;
		break;
	case SPEED_10000:
		urg = 67200;
		break;
	case SPEED_25000:
		urg = 27000;
		break;
	default:
		/* 10M/100M/1G and anything unrecognized */
		urg = 672000;
		break;
	}

	return urg / clk_period_ps - 1;
}
641
/* Encode a watermark value: values below 2048 are stored directly,
 * larger values are stored in units of 16 with an offset of 2048.
 */
static u16 sparx5_wm_enc(u16 value)
{
	if (value < 2048)
		return value;

	return 2048 + value / 16;
}
649
/* Configure flow control for a port: half-duplex backpressure, whether
 * received pause frames are obeyed, and whether pause frames are
 * generated (via the QSYS pause-stop watermark).  Always returns 0.
 */
static int sparx5_port_fc_setup(struct sparx5 *sparx5,
				struct sparx5_port *port,
				struct sparx5_port_config *conf)
{
	bool fc_obey = conf->pause & MLO_PAUSE_RX ? 1 : 0;
	u32 pause_stop = 0xFFF - 1; /* FC gen disabled */

	/* TX pause enabled: set a real stop watermark (in buffer cells) */
	if (conf->pause & MLO_PAUSE_TX)
		pause_stop = sparx5_wm_enc(4 * (ETH_MAXLEN /
						SPX5_BUFFER_CELL_SZ));

	/* Set HDX flowcontrol */
	spx5_rmw(DSM_MAC_CFG_HDX_BACKPREASSURE_SET(conf->duplex == DUPLEX_HALF),
		 DSM_MAC_CFG_HDX_BACKPREASSURE,
		 sparx5,
		 DSM_MAC_CFG(port->portno));

	/* Obey flowcontrol  */
	spx5_rmw(DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(fc_obey),
		 DSM_RX_PAUSE_CFG_RX_PAUSE_EN,
		 sparx5,
		 DSM_RX_PAUSE_CFG(port->portno));

	/* Disable forward pressure */
	spx5_rmw(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS_SET(fc_obey),
		 QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS,
		 sparx5,
		 QSYS_FWD_PRESSURE(port->portno));

	/* Generate pause frames */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop),
		 QSYS_PAUSE_CFG_PAUSE_STOP,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	return 0;
}
687
sparx5_get_aneg_word(struct sparx5_port_config * conf)688 static u16 sparx5_get_aneg_word(struct sparx5_port_config *conf)
689 {
690 if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX) /* cl-37 aneg */
691 return (conf->pause_adv | ADVERTISE_LPACK | ADVERTISE_1000XFULL);
692 else
693 return 1; /* Enable SGMII Aneg */
694 }
695
sparx5_serdes_set(struct sparx5 * sparx5,struct sparx5_port * port,struct sparx5_port_config * conf)696 int sparx5_serdes_set(struct sparx5 *sparx5,
697 struct sparx5_port *port,
698 struct sparx5_port_config *conf)
699 {
700 int portmode, err, speed = conf->speed;
701
702 if (conf->portmode == PHY_INTERFACE_MODE_QSGMII &&
703 ((port->portno % 4) != 0)) {
704 return 0;
705 }
706 if (sparx5_is_baser(conf->portmode)) {
707 if (conf->portmode == PHY_INTERFACE_MODE_25GBASER)
708 speed = SPEED_25000;
709 else if (conf->portmode == PHY_INTERFACE_MODE_10GBASER)
710 speed = SPEED_10000;
711 else
712 speed = SPEED_5000;
713 }
714
715 err = phy_set_media(port->serdes, conf->media);
716 if (err)
717 return err;
718 if (speed > 0) {
719 err = phy_set_speed(port->serdes, speed);
720 if (err)
721 return err;
722 }
723 if (conf->serdes_reset) {
724 err = phy_reset(port->serdes);
725 if (err)
726 return err;
727 }
728
729 /* Configure SerDes with port parameters
730 * For BaseR, the serdes driver supports 10GGBASE-R and speed 5G/10G/25G
731 */
732 portmode = conf->portmode;
733 if (sparx5_is_baser(conf->portmode))
734 portmode = PHY_INTERFACE_MODE_10GBASER;
735 err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET, portmode);
736 if (err)
737 return err;
738 conf->serdes_reset = false;
739 return err;
740 }
741
/* Configure the DEV2G5 PCS: select SGMII vs 1000BaseX/2500BaseX mode,
 * enable in-band autonegotiation where applicable, and release the PCS
 * from reset.  Returns 0, or -EINVAL if the serdes setup fails.
 */
static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
				   struct sparx5_port *port,
				   struct sparx5_port_config *conf)
{
	bool sgmii = false, inband_aneg = false;
	int err;

	if (conf->inband) {
		if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
		    conf->portmode == PHY_INTERFACE_MODE_QSGMII)
			inband_aneg = true; /* Cisco-SGMII in-band-aneg */
		else if (conf->portmode == PHY_INTERFACE_MODE_1000BASEX &&
			 conf->autoneg)
			inband_aneg = true; /* Clause-37 in-band-aneg */

		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return -EINVAL;
	} else {
		sgmii = true; /* Phy is connected to the MAC */
	}

	/* Choose SGMII or 1000BaseX/2500BaseX PCS mode */
	spx5_rmw(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(sgmii),
		 DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA,
		 sparx5,
		 DEV2G5_PCS1G_MODE_CFG(port->portno));

	/* Enable PCS */
	spx5_wr(DEV2G5_PCS1G_CFG_PCS_ENA_SET(1),
		sparx5,
		DEV2G5_PCS1G_CFG(port->portno));

	if (inband_aneg) {
		u16 abil = sparx5_get_aneg_word(conf);

		/* Enable in-band aneg */
		spx5_wr(DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(abil) |
			DEV2G5_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA_SET(1) |
			DEV2G5_PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT_SET(1),
			sparx5,
			DEV2G5_PCS1G_ANEG_CFG(port->portno));
	} else {
		/* No in-band aneg: clear the aneg configuration */
		spx5_wr(0, sparx5, DEV2G5_PCS1G_ANEG_CFG(port->portno));
	}

	/* Take PCS out of reset */
	/* SPEED_SEL=2 matches the 1000/2500 Mbps clock selection used in
	 * sparx5_port_config_low_set()
	 */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(2) |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_PCS_TX_RST |
		 DEV2G5_DEV_RST_CTRL_PCS_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	return 0;
}
801
/* Configure the high-speed (5G/10G/25G BaseR) PCS and MAC for a port:
 * set up the serdes, enable the proper PCS instance, enable the MAC and
 * release the device from reset.  Returns 0, or -EINVAL if the serdes
 * setup fails.
 */
static int sparx5_port_pcs_high_set(struct sparx5 *sparx5,
				    struct sparx5_port *port,
				    struct sparx5_port_config *conf)
{
	/* SPEED_SEL: 1 selects the 5G clock, 0 the 10G/25G clock */
	u32 clk_spd = conf->portmode == PHY_INTERFACE_MODE_5GBASER ? 1 : 0;
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 dev = sparx5_to_high_dev(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	devinst = spx5_inst_get(sparx5, dev, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/*  SFI : No in-band-aneg. Speeds 5G/10G/25G */
	err = sparx5_serdes_set(sparx5, port, conf);
	if (err)
		return -EINVAL;
	if (conf->portmode == PHY_INTERFACE_MODE_25GBASER) {
		/* Enable PCS for 25G device, speed 25G */
		spx5_rmw(DEV25G_PCS25G_CFG_PCS25G_ENA_SET(1),
			 DEV25G_PCS25G_CFG_PCS25G_ENA,
			 sparx5,
			 DEV25G_PCS25G_CFG(pix));
	} else {
		/* Enable PCS for 5G/10G/25G devices, speed 5G/10G */
		spx5_inst_rmw(PCS10G_BR_PCS_CFG_PCS_ENA_SET(1),
			      PCS10G_BR_PCS_CFG_PCS_ENA,
			      pcsinst,
			      PCS10G_BR_PCS_CFG(0));
	}

	/* Enable 5G/10G/25G MAC module */
	spx5_inst_wr(DEV10G_MAC_ENA_CFG_RX_ENA_SET(1) |
		     DEV10G_MAC_ENA_CFG_TX_ENA_SET(1),
		     devinst,
		     DEV10G_MAC_ENA_CFG(0));

	/* Take the device out of reset */
	spx5_inst_rmw(DEV10G_DEV_RST_CTRL_PCS_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd),
		      DEV10G_DEV_RST_CTRL_PCS_RX_RST |
		      DEV10G_DEV_RST_CTRL_PCS_TX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_RX_RST |
		      DEV10G_DEV_RST_CTRL_MAC_TX_RST |
		      DEV10G_DEV_RST_CTRL_SPEED_SEL,
		      devinst,
		      DEV10G_DEV_RST_CTRL(0));

	return 0;
}
857
858 /* Switch between 1G/2500 and 5G/10G/25G devices */
static void sparx5_dev_switch(struct sparx5 *sparx5, int port, bool hsd)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	int bt_indx;

	/* Bit position of this port in the PORT_CONF DEV*_MODES register */
	bt_indx = BIT(ops->get_port_dev_bit(sparx5, port));

	/* The mode bit is cleared when @hsd selects the high-speed device
	 * and set otherwise.  NOTE(review): looks like a set bit selects
	 * the DEV2G5 device - confirm against the PORT_CONF register spec.
	 */
	if (ops->is_port_5g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV5G_MODES);
	} else if (ops->is_port_10g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV10G_MODES);
	} else if (ops->is_port_25g(port)) {
		spx5_rmw(hsd ? 0 : bt_indx,
			 bt_indx,
			 sparx5,
			 PORT_CONF_DEV25G_MODES);
	}
}
883
884 /* Configure speed/duplex dependent registers */
static int sparx5_port_config_low_set(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      struct sparx5_port_config *conf)
{
	u32 clk_spd, gig_mode, tx_gap, hdx_gap_1, hdx_gap_2;
	bool fdx = conf->duplex == DUPLEX_FULL;
	int spd = conf->speed;

	/* clk_spd: 0 = 10M, 1 = 100M, 2 = 1000M and above */
	clk_spd = spd == SPEED_10 ? 0 : spd == SPEED_100 ? 1 : 2;
	gig_mode = spd == SPEED_1000 || spd == SPEED_2500;
	/* Inter-frame gaps depend on speed and duplex */
	tx_gap = spd == SPEED_1000 ? 4 : fdx ? 6 : 5;
	hdx_gap_1 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 1 : 2;
	hdx_gap_2 = spd == SPEED_1000 ? 0 : spd == SPEED_100 ? 4 : 1;

	/* GIG/FDX mode */
	spx5_rmw(DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA_SET(gig_mode) |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA_SET(fdx),
		 DEV2G5_MAC_MODE_CFG_GIGA_MODE_ENA |
		 DEV2G5_MAC_MODE_CFG_FDX_ENA,
		 sparx5,
		 DEV2G5_MAC_MODE_CFG(port->portno));

	/* Set MAC IFG Gaps */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(tx_gap) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(hdx_gap_1) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(hdx_gap_2),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	/* Disabling frame aging when in HDX (due to HDX issue) */
	spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(fdx == 0),
		 HSCH_PORT_MODE_AGE_DIS,
		 sparx5,
		 HSCH_PORT_MODE(port->portno));

	/* Enable MAC module */
	spx5_wr(DEV2G5_MAC_ENA_CFG_RX_ENA |
		DEV2G5_MAC_ENA_CFG_TX_ENA,
		sparx5,
		DEV2G5_MAC_ENA_CFG(port->portno));

	/* Select speed and take MAC out of reset */
	spx5_rmw(DEV2G5_DEV_RST_CTRL_SPEED_SEL_SET(clk_spd) |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST_SET(0) |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST_SET(0),
		 DEV2G5_DEV_RST_CTRL_SPEED_SEL |
		 DEV2G5_DEV_RST_CTRL_MAC_TX_RST |
		 DEV2G5_DEV_RST_CTRL_MAC_RX_RST,
		 sparx5,
		 DEV2G5_DEV_RST_CTRL(port->portno));

	/* Enable PHAD_CTRL for better timestamping */
	if (!is_sparx5(sparx5)) {
		for (int i = 0; i < 2; ++i) {
			/* Divide the port clock by three for the two
			 * phase detection registers.
			 */
			spx5_rmw(DEV2G5_PHAD_CTRL_DIV_CFG_SET(3) |
				 DEV2G5_PHAD_CTRL_PHAD_ENA_SET(1),
				 DEV2G5_PHAD_CTRL_DIV_CFG |
				 DEV2G5_PHAD_CTRL_PHAD_ENA,
				 sparx5, DEV2G5_PHAD_CTRL(port->portno, i));
		}
	}

	return 0;
}
952
/* Reconfigure the PCS of a port: switch between the low-speed (DEV2G5)
 * and high-speed (BaseR) device when needed, disable/flush before
 * reconfiguring, then apply the mode-specific PCS setup and save the
 * new configuration in port->conf.
 *
 * NOTE(review): errors from sparx5_port_disable()/pcs setup are partly
 * collapsed to -EINVAL here, masking the original error code.
 */
int sparx5_port_pcs_set(struct sparx5 *sparx5,
			struct sparx5_port *port,
			struct sparx5_port_config *conf)

{
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	int err;

	if (sparx5_dev_change(sparx5, port, conf)) {
		/* switch device */
		sparx5_dev_switch(sparx5, port->portno, high_speed_dev);

		/* Disable the not-in-use device */
		err = sparx5_port_disable(sparx5, port, !high_speed_dev);
		if (err)
			return err;
	}
	/* Disable the port before re-configuring */
	err = sparx5_port_disable(sparx5, port, high_speed_dev);
	if (err)
		return -EINVAL;

	if (high_speed_dev)
		err = sparx5_port_pcs_high_set(sparx5, port, conf);
	else
		err = sparx5_port_pcs_low_set(sparx5, port, conf);

	if (err)
		return -EINVAL;

	if (conf->inband) {
		/* Enable/disable 1G counters in ASM */
		spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 ASM_PORT_CFG_CSC_STAT_DIS,
			 sparx5,
			 ASM_PORT_CFG(port->portno));

		/* Enable/disable 1G counters in DSM */
		spx5_rmw(DSM_BUF_CFG_CSC_STAT_DIS_SET(high_speed_dev),
			 DSM_BUF_CFG_CSC_STAT_DIS,
			 sparx5,
			 DSM_BUF_CFG(port->portno));
	}

	port->conf = *conf;

	return 0;
}
1001
/* Apply a full port configuration: verify the requested speed, set up
 * RGMII or the low-speed MAC as appropriate, configure flow control,
 * program the DSM stop watermark and finally enable the port in the
 * queue system.  The new configuration is saved in port->conf.
 */
int sparx5_port_config(struct sparx5 *sparx5,
		       struct sparx5_port *port,
		       struct sparx5_port_config *conf)
{
	bool rgmii = phy_interface_mode_is_rgmii(conf->phy_mode);
	bool high_speed_dev = sparx5_is_baser(conf->portmode);
	const struct sparx5_ops *ops = sparx5->data->ops;
	int err, urgency, stop_wm;

	err = sparx5_port_verify_speed(sparx5, port, conf);
	if (err)
		return err;

	if (rgmii) {
		err = ops->port_config_rgmii(port, conf);
		if (err)
			return err;
	}

	/* high speed device is already configured */
	if (!rgmii && !high_speed_dev)
		sparx5_port_config_low_set(sparx5, port, conf);

	/* Configure flow control */
	err = sparx5_port_fc_setup(sparx5, port, conf);
	if (err)
		return err;

	/* On non-Sparx5 targets, a 10G port running below 10G uses the
	 * shadow watermark configuration.
	 */
	if (!is_sparx5(sparx5) && ops->is_port_10g(port->portno) &&
	    conf->speed < SPEED_10000)
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Set the DSM stop watermark */
	stop_wm = sparx5_port_fifo_sz(sparx5, port->portno, conf->speed);
	spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(stop_wm),
		 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
		 sparx5,
		 DSM_DEV_TX_STOP_WM_CFG(port->portno));

	/* Enable port in queue system */
	urgency = sparx5_port_fwd_urg(sparx5, conf->speed);
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA |
		 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
		 sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));

	/* Save the new values */
	port->conf = *conf;

	return 0;
}
1058
1059 /* Initialize port config to default */
/* One-time port initialization: set the port mux, program pause/tail-drop
 * watermarks and frame filters, then configure the MAC/PCS of the low
 * speed device and (for capable ports) the high speed device.
 *
 * Returns early for RGMII ports (nothing more to configure) and for
 * 2G5-only ports (no high speed device present).
 */
int sparx5_port_init(struct sparx5 *sparx5,
		     struct sparx5_port *port,
		     struct sparx5_port_config *conf)
{
	/* Pause frame generation starts at 6 max-length frames of buffer
	 * use; tail drop (ATOP) at 20 - both encoded as buffer cells
	 */
	u32 pause_start = sparx5_wm_enc(6  * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	u32 atop = sparx5_wm_enc(20 * (ETH_MAXLEN / SPX5_BUFFER_CELL_SZ));
	const struct sparx5_ops *ops = sparx5->data->ops;
	u32 devhigh = sparx5_to_high_dev(sparx5, port->portno);
	u32 pix = sparx5_port_dev_index(sparx5, port->portno);
	u32 pcs = sparx5_to_pcs_dev(sparx5, port->portno);
	bool sd_pol = port->signd_active_high;
	bool sd_sel = !port->signd_internal;
	bool sd_ena = port->signd_enable;
	u32 pause_stop = 0xFFF - 1; /* FC generate disabled */
	void __iomem *devinst;
	void __iomem *pcsinst;
	int err;

	/* Register instances of the high speed device and its PCS */
	devinst = spx5_inst_get(sparx5, devhigh, pix);
	pcsinst = spx5_inst_get(sparx5, pcs, pix);

	/* Set the mux port mode  */
	err = ops->set_port_mux(sparx5, port, conf);
	if (err)
		return err;

	/* Set Pause WM hysteresis */
	spx5_rmw(QSYS_PAUSE_CFG_PAUSE_START_SET(pause_start) |
		 QSYS_PAUSE_CFG_PAUSE_STOP_SET(pause_stop) |
		 QSYS_PAUSE_CFG_PAUSE_ENA_SET(1),
		 QSYS_PAUSE_CFG_PAUSE_START |
		 QSYS_PAUSE_CFG_PAUSE_STOP |
		 QSYS_PAUSE_CFG_PAUSE_ENA,
		 sparx5,
		 QSYS_PAUSE_CFG(port->portno));

	/* Port ATOP. Frames are tail dropped when this WM is hit */
	spx5_wr(QSYS_ATOP_ATOP_SET(atop),
		sparx5,
		QSYS_ATOP(port->portno));

	/* Discard pause frame 01-80-C2-00-00-01 */
	spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));

	/* Discard SMAC multicast */
	spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
		 ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
		 sparx5, ANA_CL_FILTER_CTRL(port->portno));

	if (ops->is_port_rgmii(port->portno))
		return 0; /* RGMII device - nothing more to configure */

	/* Configure MAC vlan awareness */
	err = sparx5_port_max_tags_set(sparx5, port);
	if (err)
		return err;

	/* Set Max Length */
	spx5_rmw(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		 DEV2G5_MAC_MAXLEN_CFG_MAX_LEN,
		 sparx5,
		 DEV2G5_MAC_MAXLEN_CFG(port->portno));

	/* 1G/2G5: Signal Detect configuration */
	spx5_wr(DEV2G5_PCS1G_SD_CFG_SD_POL_SET(sd_pol) |
		DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(sd_sel) |
		DEV2G5_PCS1G_SD_CFG_SD_ENA_SET(sd_ena),
		sparx5,
		DEV2G5_PCS1G_SD_CFG(port->portno));

	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
	    conf->portmode == PHY_INTERFACE_MODE_SGMII) {
		/* (Q)SGMII needs the serdes configured up front */
		err = sparx5_serdes_set(sparx5, port, conf);
		if (err)
			return err;

		if (!ops->is_port_2g5(port->portno))
			/* Enable shadow device */
			spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA_SET(1),
				 DSM_DEV_TX_STOP_WM_CFG_DEV10G_SHADOW_ENA,
				 sparx5,
				 DSM_DEV_TX_STOP_WM_CFG(port->portno));

		/* (Q)SGMII always runs on the low speed device */
		sparx5_dev_switch(sparx5, port->portno, false);
	}
	if (conf->portmode == PHY_INTERFACE_MODE_QSGMII) {
		// All ports must be PCS enabled in QSGMII mode
		spx5_rmw(DEV2G5_DEV_RST_CTRL_PCS_TX_RST_SET(0),
			 DEV2G5_DEV_RST_CTRL_PCS_TX_RST,
			 sparx5,
			 DEV2G5_DEV_RST_CTRL(port->portno));
	}
	/* Default IFGs for 1G */
	spx5_wr(DEV2G5_MAC_IFG_CFG_TX_IFG_SET(6) |
		DEV2G5_MAC_IFG_CFG_RX_IFG1_SET(0) |
		DEV2G5_MAC_IFG_CFG_RX_IFG2_SET(0),
		sparx5,
		DEV2G5_MAC_IFG_CFG(port->portno));

	if (ops->is_port_2g5(port->portno))
		return 0; /* Low speed device only - return */

	/* Now setup the high speed device */
	if (conf->portmode == PHY_INTERFACE_MODE_NA)
		conf->portmode = PHY_INTERFACE_MODE_10GBASER;

	if (sparx5_is_baser(conf->portmode))
		sparx5_dev_switch(sparx5, port->portno, true);

	/* Set Max Length */
	spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN),
		      DEV10G_MAC_MAXLEN_CFG_MAX_LEN,
		      devinst,
		      DEV10G_MAC_MAXLEN_CFG(0));

	/* Handle Signal Detect in 10G PCS */
	spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) |
		     PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(sd_sel) |
		     PCS10G_BR_PCS_SD_CFG_SD_ENA_SET(sd_ena),
		     pcsinst,
		     PCS10G_BR_PCS_SD_CFG(0));

	if (ops->is_port_25g(port->portno)) {
		/* Handle Signal Detect in 25G PCS */
		spx5_wr(DEV25G_PCS25G_SD_CFG_SD_POL_SET(sd_pol) |
			DEV25G_PCS25G_SD_CFG_SD_SEL_SET(sd_sel) |
			DEV25G_PCS25G_SD_CFG_SD_ENA_SET(sd_ena),
			sparx5,
			DEV25G_PCS25G_SD_CFG(pix));
	}

	if (!is_sparx5(sparx5)) {
		void __iomem *inst;
		u32 dev, tinst;

		/* NOTE(review): other instance-based accesses above index the
		 * register with 0 (instance already selected); here the
		 * stamper register is indexed with port->portno - confirm
		 * this is intended for these targets.
		 */
		if (ops->is_port_10g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV10G_PTP_STAMPER_CFG(port->portno));
		} else if (ops->is_port_5g(port->portno)) {
			dev = sparx5_to_high_dev(sparx5, port->portno);
			tinst = sparx5_port_dev_index(sparx5, port->portno);
			inst = spx5_inst_get(sparx5, dev, tinst);

			spx5_inst_wr(5, inst,
				     DEV5G_PTP_STAMPER_CFG(port->portno));
		}
	}

	return 0;
}
1214
/* Enable or disable frame forwarding for this port in the queue system */
void sparx5_port_enable(struct sparx5_port *port, bool enable)
{
	spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(enable),
		 QFWD_SWITCH_PORT_MODE_PORT_ENA, port->sparx5,
		 QFWD_SWITCH_PORT_MODE(port->portno));
}
1225
/* Apply the complete QoS configuration for a port: ingress classification
 * (DSCP, PCP), egress rewrite tables (PCP, DSCP) and the port defaults.
 * Always returns 0 (the helpers cannot fail).
 */
int sparx5_port_qos_set(struct sparx5_port *port,
			struct sparx5_port_qos *qos)
{
	sparx5_port_qos_dscp_set(port, &qos->dscp);
	sparx5_port_qos_pcp_set(port, &qos->pcp);
	sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
	sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
	sparx5_port_qos_default_set(port, qos);

	return 0;
}
1237
sparx5_port_qos_pcp_rewr_set(const struct sparx5_port * port,struct sparx5_port_qos_pcp_rewr * qos)1238 int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
1239 struct sparx5_port_qos_pcp_rewr *qos)
1240 {
1241 int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
1242 struct sparx5 *sparx5 = port->sparx5;
1243 u8 pcp, dei;
1244
1245 /* Use mapping table, with classified QoS as index, to map QoS and DP
1246 * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
1247 * PCP. Classified PCP equals frame PCP.
1248 */
1249 if (qos->enable)
1250 mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
1251
1252 spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
1253 REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
1254 REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
1255 port->sparx5, REW_TAG_CTRL(port->portno));
1256
1257 for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1258 /* Extract PCP and DEI */
1259 pcp = qos->map.map[i];
1260 if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
1261 dei = 1;
1262 else
1263 dei = 0;
1264
1265 /* Rewrite PCP and DEI, for each classified QoS class and DP
1266 * level. This table is only used if tag ctrl mode is set to
1267 * 'mapped'.
1268 *
1269 * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
1270 * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
1271 */
1272 if (dei) {
1273 spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
1274 REW_PCP_MAP_DE1_PCP_DE1, sparx5,
1275 REW_PCP_MAP_DE1(port->portno, i));
1276
1277 spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
1278 REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
1279 REW_DEI_MAP_DE1(port->portno, i));
1280 } else {
1281 spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
1282 REW_PCP_MAP_DE0_PCP_DE0, sparx5,
1283 REW_PCP_MAP_DE0(port->portno, i));
1284
1285 spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
1286 REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
1287 REW_DEI_MAP_DE0(port->portno, i));
1288 }
1289 }
1290
1291 return 0;
1292 }
1293
sparx5_port_qos_pcp_set(const struct sparx5_port * port,struct sparx5_port_qos_pcp * qos)1294 int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
1295 struct sparx5_port_qos_pcp *qos)
1296 {
1297 struct sparx5 *sparx5 = port->sparx5;
1298 u8 *pcp_itr = qos->map.map;
1299 u8 pcp, dp;
1300 int i;
1301
1302 /* Enable/disable pcp and dp for qos classification. */
1303 spx5_rmw(ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA_SET(qos->qos_enable) |
1304 ANA_CL_QOS_CFG_PCP_DEI_DP_ENA_SET(qos->dp_enable),
1305 ANA_CL_QOS_CFG_PCP_DEI_QOS_ENA | ANA_CL_QOS_CFG_PCP_DEI_DP_ENA,
1306 sparx5, ANA_CL_QOS_CFG(port->portno));
1307
1308 /* Map each pcp and dei value to priority and dp */
1309 for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1310 pcp = *(pcp_itr + i);
1311 dp = (i < SPARX5_PORT_QOS_PCP_COUNT) ? 0 : 1;
1312 spx5_rmw(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL_SET(pcp) |
1313 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(dp),
1314 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL |
1315 ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL, sparx5,
1316 ANA_CL_PCP_DEI_MAP_CFG(port->portno, i));
1317 }
1318
1319 return 0;
1320 }
1321
sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port * port,int mode)1322 void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
1323 int mode)
1324 {
1325 spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
1326 ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
1327 ANA_CL_QOS_CFG(port->portno));
1328 }
1329
sparx5_port_qos_dscp_rewr_set(const struct sparx5_port * port,struct sparx5_port_qos_dscp_rewr * qos)1330 int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
1331 struct sparx5_port_qos_dscp_rewr *qos)
1332 {
1333 struct sparx5 *sparx5 = port->sparx5;
1334 bool rewr = false;
1335 u16 dscp;
1336 int i;
1337
1338 /* On egress, rewrite DSCP value to either classified DSCP or frame
1339 * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
1340 */
1341 if (qos->enable)
1342 rewr = true;
1343
1344 spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
1345 REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
1346 REW_DSCP_MAP(port->portno));
1347
1348 /* On ingress, map each classified QoS class and DP to classified DSCP
1349 * value. This mapping table is global for all ports.
1350 */
1351 for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1352 dscp = qos->map.map[i];
1353 spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
1354 ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
1355 ANA_CL_QOS_MAP_CFG(i));
1356 }
1357
1358 return 0;
1359 }
1360
sparx5_port_qos_dscp_set(const struct sparx5_port * port,struct sparx5_port_qos_dscp * qos)1361 int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
1362 struct sparx5_port_qos_dscp *qos)
1363 {
1364 struct sparx5 *sparx5 = port->sparx5;
1365 u8 *dscp = qos->map.map;
1366 int i;
1367
1368 /* Enable/disable dscp and dp for qos classification.
1369 * Disable rewrite of dscp values for now.
1370 */
1371 spx5_rmw(ANA_CL_QOS_CFG_DSCP_QOS_ENA_SET(qos->qos_enable) |
1372 ANA_CL_QOS_CFG_DSCP_DP_ENA_SET(qos->dp_enable) |
1373 ANA_CL_QOS_CFG_DSCP_KEEP_ENA_SET(1),
1374 ANA_CL_QOS_CFG_DSCP_QOS_ENA | ANA_CL_QOS_CFG_DSCP_DP_ENA |
1375 ANA_CL_QOS_CFG_DSCP_KEEP_ENA, sparx5,
1376 ANA_CL_QOS_CFG(port->portno));
1377
1378 /* Map each dscp value to priority and dp */
1379 for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1380 spx5_rmw(ANA_CL_DSCP_CFG_DSCP_QOS_VAL_SET(*(dscp + i)) |
1381 ANA_CL_DSCP_CFG_DSCP_DP_VAL_SET(0),
1382 ANA_CL_DSCP_CFG_DSCP_QOS_VAL |
1383 ANA_CL_DSCP_CFG_DSCP_DP_VAL, sparx5,
1384 ANA_CL_DSCP_CFG(i));
1385 }
1386
1387 /* Set per-dscp trust */
1388 for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
1389 if (qos->qos_enable) {
1390 spx5_rmw(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_SET(1),
1391 ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, sparx5,
1392 ANA_CL_DSCP_CFG(i));
1393 }
1394 }
1395
1396 return 0;
1397 }
1398
sparx5_port_qos_default_set(const struct sparx5_port * port,const struct sparx5_port_qos * qos)1399 int sparx5_port_qos_default_set(const struct sparx5_port *port,
1400 const struct sparx5_port_qos *qos)
1401 {
1402 struct sparx5 *sparx5 = port->sparx5;
1403
1404 /* Set default prio and dp level */
1405 spx5_rmw(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL_SET(qos->default_prio) |
1406 ANA_CL_QOS_CFG_DEFAULT_DP_VAL_SET(0),
1407 ANA_CL_QOS_CFG_DEFAULT_QOS_VAL |
1408 ANA_CL_QOS_CFG_DEFAULT_DP_VAL,
1409 sparx5, ANA_CL_QOS_CFG(port->portno));
1410
1411 /* Set default pcp and dei for untagged frames */
1412 spx5_rmw(ANA_CL_VLAN_CTRL_PORT_PCP_SET(0) |
1413 ANA_CL_VLAN_CTRL_PORT_DEI_SET(0),
1414 ANA_CL_VLAN_CTRL_PORT_PCP |
1415 ANA_CL_VLAN_CTRL_PORT_DEI,
1416 sparx5, ANA_CL_VLAN_CTRL(port->portno));
1417
1418 return 0;
1419 }
1420
sparx5_get_internal_port(struct sparx5 * sparx5,int port)1421 int sparx5_get_internal_port(struct sparx5 *sparx5, int port)
1422 {
1423 return sparx5->data->consts->n_ports + port;
1424 }
1425