// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Furong Xu <0x1207@gmail.com>
 * stmmac FPE (IEEE 802.1Qbu / 802.3br frame preemption) handling
 */
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "dwxgmac2.h"

#define GMAC5_MAC_FPE_CTRL_STS		0x00000234
#define XGMAC_MAC_FPE_CTRL_STS		0x00000280

#define GMAC5_MTL_FPE_CTRL_STS		0x00000c90
#define XGMAC_MTL_FPE_CTRL_STS		0x00001090
/* Preemption Classification */
#define FPE_MTL_PREEMPTION_CLASS	GENMASK(15, 8)
/* Additional Fragment Size of preempted frames */
#define FPE_MTL_ADD_FRAG_SZ		GENMASK(1, 0)

#define STMMAC_MAC_FPE_CTRL_STS_TRSP	BIT(19)
#define STMMAC_MAC_FPE_CTRL_STS_TVER	BIT(18)
#define STMMAC_MAC_FPE_CTRL_STS_RRSP	BIT(17)
#define STMMAC_MAC_FPE_CTRL_STS_RVER	BIT(16)
#define STMMAC_MAC_FPE_CTRL_STS_SRSP	BIT(2)
#define STMMAC_MAC_FPE_CTRL_STS_SVER	BIT(1)
#define STMMAC_MAC_FPE_CTRL_STS_EFPE	BIT(0)

/* FPE link-partner hand-shaking mPacket type */
enum stmmac_mpacket_type {
	MPACKET_VERIFY = 0,
	MPACKET_RESPONSE = 1,
};

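/* Per-core FPE register layout. GMAC4/5 and XGMAC place the same FPE
 * fields at different offsets; this table keeps the common code below
 * core-agnostic. Instances live at the bottom of this file.
 */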
struct stmmac_fpe_reg {
	const u32 mac_fpe_reg;		/* offset of MAC_FPE_CTRL_STS */
	const u32 mtl_fpe_reg;		/* offset of MTL_FPE_CTRL_STS */
	const u32 rxq_ctrl1_reg;	/* offset of MAC_RxQ_Ctrl1 */
	const u32 fprq_mask;		/* Frame Preemption Residue Queue */
	const u32 int_en_reg;		/* offset of MAC_Interrupt_Enable */
	const u32 int_en_bit;		/* Frame Preemption Interrupt Enable */
};

bool stmmac_fpe_supported(struct stmmac_priv *priv)
{
	return priv->dma_cap.fpesel && priv->fpe_cfg.reg &&
		priv->hw->mac->fpe_map_preemption_class;
}

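/**
 * stmmac_fpe_configure - program EFPE and the FPE interrupt enable
 * @priv: driver private structure
 * @tx_enable: enable preemptible transmission (EFPE) and point the Frame
 *	       Preemption Residue Queue at the last RX queue
 * @pmac_enable: keep the FPE interrupt unmasked so that mPacket events
 *		 reach stmmac_fpe_irq_status()
 */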
static void stmmac_fpe_configure(struct stmmac_priv *priv, bool tx_enable,
				 bool pmac_enable)
{
	struct stmmac_fpe_cfg *cfg = &priv->fpe_cfg;
	const struct stmmac_fpe_reg *reg = cfg->reg;
	u32 num_rxq = priv->plat->rx_queues_to_use;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value;

	if (tx_enable) {
		cfg->fpe_csr = STMMAC_MAC_FPE_CTRL_STS_EFPE;
		value = readl(ioaddr + reg->rxq_ctrl1_reg);
		value &= ~reg->fprq_mask;
		/* Keep this SHIFT, FIELD_PREP() expects a constant mask :-/ */
		value |= (num_rxq - 1) << __ffs(reg->fprq_mask);
		writel(value, ioaddr + reg->rxq_ctrl1_reg);
	} else {
		cfg->fpe_csr = 0;
	}
	writel(cfg->fpe_csr, ioaddr + reg->mac_fpe_reg);

	value = readl(ioaddr + reg->int_en_reg);

	if (pmac_enable) {
		if (!(value & reg->int_en_bit)) {
			/* Dummy read to clear any pending masked interrupts */
			readl(ioaddr + reg->mac_fpe_reg);

			value |= reg->int_en_bit;
		}
	} else {
		value &= ~reg->int_en_bit;
	}

	writel(value, ioaddr + reg->int_en_reg);
}

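/* Request transmission of a single Verify or Response mPacket by setting
 * SVER/SRSP on top of the cached fpe_csr value. The cached copy is used
 * instead of a read-modify-write because the status bits of this register
 * are clear-on-read (see stmmac_fpe_irq_status()).
 */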
static void stmmac_fpe_send_mpacket(struct stmmac_priv *priv,
				    enum stmmac_mpacket_type type)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value = priv->fpe_cfg.fpe_csr;

	if (type == MPACKET_VERIFY)
		value |= STMMAC_MAC_FPE_CTRL_STS_SVER;
	else if (type == MPACKET_RESPONSE)
		value |= STMMAC_MAC_FPE_CTRL_STS_SRSP;

	writel(value, ioaddr + reg->mac_fpe_reg);
}

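/* Advance the 802.3br verification state machine from IRQ context: answer
 * a received Verify with a Response, move to VERIFYING once our own Verify
 * went out, and to SUCCEEDED once the partner's Response came back.
 */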
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;

	/* This is interrupt context, just spin_lock() */
	spin_lock(&fpe_cfg->lock);

	if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
		goto unlock_out;

	/* LP has sent verify mPacket */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
		stmmac_fpe_send_mpacket(priv, MPACKET_RESPONSE);

	/* Local has sent verify mPacket */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;

	/* LP has sent response mPacket */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
	    fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;

unlock_out:
	spin_unlock(&fpe_cfg->lock);
}

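/* Read (and thereby clear) the mPacket TX/RX status bits and feed the
 * decoded events to stmmac_fpe_event_status().
 */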
void stmmac_fpe_irq_status(struct stmmac_priv *priv)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;
	struct net_device *dev = priv->dev;
	int status = FPE_EVENT_UNKNOWN;
	u32 value;

	/* Reads from the MAC_FPE_CTRL_STS register should only be performed
	 * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read"
	 */
	value = readl(ioaddr + reg->mac_fpe_reg);

	if (value & STMMAC_MAC_FPE_CTRL_STS_TRSP) {
		status |= FPE_EVENT_TRSP;
		netdev_dbg(dev, "FPE: Response mPacket is transmitted\n");
	}

	if (value & STMMAC_MAC_FPE_CTRL_STS_TVER) {
		status |= FPE_EVENT_TVER;
		netdev_dbg(dev, "FPE: Verify mPacket is transmitted\n");
	}

	if (value & STMMAC_MAC_FPE_CTRL_STS_RRSP) {
		status |= FPE_EVENT_RRSP;
		netdev_dbg(dev, "FPE: Response mPacket is received\n");
	}

	if (value & STMMAC_MAC_FPE_CTRL_STS_RVER) {
		status |= FPE_EVENT_RVER;
		netdev_dbg(dev, "FPE: Verify mPacket is received\n");
	}

	stmmac_fpe_event_status(priv, status);
}

166 
167 /**
168  * stmmac_fpe_verify_timer - Timer for MAC Merge verification
169  * @t:  timer_list struct containing private info
170  *
171  * Verify the MAC Merge capability in the local TX direction, by
172  * transmitting Verify mPackets up to 3 times. Wait until link
173  * partner responds with a Response mPacket, otherwise fail.
174  */
175 static void stmmac_fpe_verify_timer(struct timer_list *t)
176 {
177 	struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
178 	struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
179 						fpe_cfg);
180 	unsigned long flags;
181 	bool rearm = false;
182 
183 	spin_lock_irqsave(&fpe_cfg->lock, flags);
184 
185 	switch (fpe_cfg->status) {
186 	case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
187 	case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
188 		if (fpe_cfg->verify_retries != 0) {
189 			stmmac_fpe_send_mpacket(priv, MPACKET_VERIFY);
190 			rearm = true;
191 		} else {
192 			fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
193 		}
194 
195 		fpe_cfg->verify_retries--;
196 		break;
197 
198 	case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
199 		stmmac_fpe_configure(priv, true, true);
200 		break;
201 
202 	default:
203 		break;
204 	}
205 
206 	if (rearm) {
207 		mod_timer(&fpe_cfg->verify_timer,
208 			  jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
209 	}
210 
211 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
212 }
213 
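/* Arm the verify timer for an immediate run. timer_setup() is repeated
 * here because stmmac_fpe_link_state_handle() uses timer_shutdown_sync(),
 * after which a plain mod_timer() would be silently ignored.
 */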
static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
{
	if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
	    fpe_cfg->verify_enabled &&
	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
		timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
		mod_timer(&fpe_cfg->verify_timer, jiffies);
	}
}

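/**
 * stmmac_fpe_init - one-time initialization of the FPE software state
 * @priv: driver private structure
 *
 * Seeds the verification parameters, timer and lock, and reports when the
 * hardware advertises FPE (dma_cap.fpesel) but this driver lacks a register
 * map or a preemption-class hook for the core.
 */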
void stmmac_fpe_init(struct stmmac_priv *priv)
{
	priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
	priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
	priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
	timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
	spin_lock_init(&priv->fpe_cfg.lock);

	if ((!priv->fpe_cfg.reg || !priv->hw->mac->fpe_map_preemption_class) &&
	    priv->dma_cap.fpesel)
		dev_info(priv->device, "FPE is not supported by driver.\n");
}

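/**
 * stmmac_fpe_apply - apply the user-requested MAC Merge configuration
 * @priv: driver private structure
 *
 * With verification disabled the hardware is programmed immediately;
 * otherwise the handshake is restarted and left to the verify timer.
 *
 * A sketch of how this path is typically exercised from userspace,
 * assuming an ethtool build with MAC Merge support:
 *
 *   ethtool --set-mm eth0 pmac-enabled on tx-enabled on verify-enabled on
 *   ethtool --show-mm eth0
 */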
void stmmac_fpe_apply(struct stmmac_priv *priv)
{
	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;

	/* If verification is disabled, configure FPE right away.
	 * Otherwise let the timer code do it.
	 */
	if (!fpe_cfg->verify_enabled) {
		stmmac_fpe_configure(priv, fpe_cfg->tx_enabled,
				     fpe_cfg->pmac_enabled);
	} else {
		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
		fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;

		if (netif_running(priv->dev))
			stmmac_fpe_verify_timer_arm(fpe_cfg);
	}
}

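/* React to link transitions: a fresh link may mean a fresh partner, so
 * restart the verification handshake with only the pMAC active until it
 * succeeds; on link loss turn EFPE off again.
 */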
void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
	unsigned long flags;

	timer_shutdown_sync(&fpe_cfg->verify_timer);

	spin_lock_irqsave(&fpe_cfg->lock, flags);

	if (is_up && fpe_cfg->pmac_enabled) {
		/* VERIFY process requires pmac enabled when NIC comes up */
		stmmac_fpe_configure(priv, false, true);

		/* New link => maybe new partner => new verification process */
		stmmac_fpe_apply(priv);
	} else {
		/* No link => turn off EFPE */
		stmmac_fpe_configure(priv, false, false);
	}

	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
}

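/* Accessors for the additional fragment size, exposed through ethtool's
 * tx-min-frag-size. Per IEEE 802.3br, an encoded value of n means that
 * non-final fragments are multiples of 64 octets and no smaller than
 * (n + 1) * 64 octets.
 */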
int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;

	return FIELD_GET(FPE_MTL_ADD_FRAG_SZ, readl(ioaddr + reg->mtl_fpe_reg));
}

void stmmac_fpe_set_add_frag_size(struct stmmac_priv *priv, u32 add_frag_size)
{
	const struct stmmac_fpe_reg *reg = priv->fpe_cfg.reg;
	void __iomem *ioaddr = priv->ioaddr;
	u32 value;

	value = readl(ioaddr + reg->mtl_fpe_reg);
	writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
	       ioaddr + reg->mtl_fpe_reg);
}

#define ALG_ERR_MSG "TX algorithm SP is not suitable for one-to-many mapping"
#define WEIGHT_ERR_MSG "TXQ weight %u differs across other TXQs in TC: [%u]"

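/* Translate the ethtool preemptible-TC bitmask @pclass into per-TXQ
 * preemption classification bits. DWMAC cores have a fixed 1:1 TXQ:DMA
 * channel mapping, so a TC spanning several TXQs is only accepted when a
 * weighted scheduling algorithm with equal weights makes those queues
 * interchangeable.
 */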
int dwmac5_fpe_map_preemption_class(struct net_device *ndev,
				    struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, queue_weight, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int num_tc = netdev_get_num_tc(ndev);

	if (!pclass)
		goto update_mapping;

	/* DWMAC CORE4+ cannot program the TC:TXQ mapping to hardware.
	 *
	 * Synopsys Databook:
	 * "The number of Tx DMA channels is equal to the number of Tx queues,
	 * and is direct one-to-one mapping."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		/* This is 1:1 mapping, go to next TC */
		if (count == 1)
			continue;

		if (priv->plat->tx_sched_algorithm == MTL_TX_ALGORITHM_SP) {
			NL_SET_ERR_MSG_MOD(extack, ALG_ERR_MSG);
			return -EINVAL;
		}

		queue_weight = priv->plat->tx_queues_cfg[offset].weight;

		for (u32 i = 1; i < count; i++) {
			if (priv->plat->tx_queues_cfg[offset + i].weight !=
			    queue_weight) {
				NL_SET_ERR_MSG_FMT_MOD(extack, WEIGHT_ERR_MSG,
						       queue_weight, tc);
				return -EINVAL;
			}
		}
	}

update_mapping:
	val = readl(priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
	       priv->ioaddr + GMAC5_MTL_FPE_CTRL_STS);

	return 0;
}

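/* XGMAC variant: here the TC:TXQ mapping is programmable via Q2TCMAP, so
 * every TXQ of a TC is bound to that TC (the hardware round-robins among
 * the queues of a class) before the classification mask is written.
 */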
int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
				      struct netlink_ext_ack *extack, u32 pclass)
{
	u32 val, offset, count, preemptible_txqs = 0;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int num_tc = netdev_get_num_tc(ndev);

	if (!num_tc) {
		/* Restore default TC:Queue mapping */
		for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
			writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
		}
	}

	/* Synopsys Databook:
	 * "All Queues within a traffic class are selected in a round robin
	 * fashion (when packets are available) when the traffic class is
	 * selected by the scheduler for packet transmission. This is true for
	 * any of the scheduling algorithms."
	 */
	for (u32 tc = 0; tc < num_tc; tc++) {
		count = ndev->tc_to_txq[tc].count;
		offset = ndev->tc_to_txq[tc].offset;

		if (pclass & BIT(tc))
			preemptible_txqs |= GENMASK(offset + count - 1, offset);

		for (u32 i = 0; i < count; i++) {
			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
			writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
		}
	}

	val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
	       priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);

	return 0;
}

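/* Per-core register maps, referenced through stmmac_fpe_cfg::reg */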
const struct stmmac_fpe_reg dwmac5_fpe_reg = {
	.mac_fpe_reg = GMAC5_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = GMAC5_MTL_FPE_CTRL_STS,
	.rxq_ctrl1_reg = GMAC_RXQ_CTRL1,
	.fprq_mask = GMAC_RXQCTRL_FPRQ,
	.int_en_reg = GMAC_INT_EN,
	.int_en_bit = GMAC_INT_FPE_EN,
};

const struct stmmac_fpe_reg dwxgmac3_fpe_reg = {
	.mac_fpe_reg = XGMAC_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = XGMAC_MTL_FPE_CTRL_STS,
	.rxq_ctrl1_reg = XGMAC_RXQ_CTRL1,
	.fprq_mask = XGMAC_FPRQ,
	.int_en_reg = XGMAC_INT_EN,
	.int_en_bit = XGMAC_FPEIE,
};
414