// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define FDMA_XTR_CHANNEL		6
#define FDMA_INJ_CHANNEL		0

#define FDMA_XTR_BUFFER_SIZE		2048
#define FDMA_WEIGHT			4

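/* TX dataptr callback: return the DMA address of the (dcb, db) data
 * buffer. TX uses one contiguous allocation in which the data buffers
 * follow the DCB array, hence the offset arithmetic below.
 */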
static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				     u64 *dataptr)
{
	*dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
		   ((dcb * fdma->n_dbs + db) * fdma->db_size);

	return 0;
}

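/* RX dataptr callback: allocate an skb for the (dcb, db) slot and hand
 * its data area to the FDMA. The skb is saved in rx->skb[][] so the
 * extraction path can find it again when the DB completes.
 */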
static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				     u64 *dataptr)
{
	struct sparx5 *sparx5 = fdma->priv;
	struct sparx5_rx *rx = &sparx5->rx;
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	*dataptr = virt_to_phys(skb->data);

	rx->skb[dcb][db] = skb;

	return 0;
}

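/* Set up and start the RX (extraction) channel: program the DCB list
 * address, configure the channel, raise the extraction watermark,
 * enable the DB interrupt and activate the channel.
 */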
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	/* Write the DCB list base address in the LLP and LLP1 regs */
	spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(fdma->channel_id));
	spx5_wr(((u64)fdma->dma) >> 32, sparx5,
		FDMA_DCB_LLP1(fdma->channel_id));

	/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
		sparx5, FDMA_CH_CFG(fdma->channel_id));

	/* Set the RX Watermark to max */
	spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
		 sparx5,
		 FDMA_XTR_CFG);

	/* Start RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Enable RX channel DB interrupt */
	spx5_rmw(BIT(fdma->channel_id),
		 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Activate the RX channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

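/* Stop the RX (extraction) channel and mask its DB interrupt */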
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	/* Deactivate the RX channel */
	spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);

	/* Disable RX channel DB interrupt */
	spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Stop RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));
}

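/* Set up and start the TX (injection) channel */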
static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	struct fdma *fdma = &tx->fdma;

	/* Write the DCB list base address in the LLP and LLP1 regs */
	spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(fdma->channel_id));
	spx5_wr(((u64)fdma->dma) >> 32, sparx5,
		FDMA_DCB_LLP1(fdma->channel_id));

	/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
		sparx5, FDMA_CH_CFG(fdma->channel_id));

	/* Start TX fdma */
	spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Activate the channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

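/* Deactivate the TX channel; the injection port itself is left running */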
static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Deactivate the TX channel */
	spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);
}

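/* Ring the reload doorbell so the FDMA re-reads the channel DCB list;
 * used by both the RX refill path and the TX xmit path.
 */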
static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
	/* Reload the channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
}

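/* Extract a single frame if the current RX DB is marked done: parse the
 * IFH to find the source port, strip the IFH (and normally the FCS),
 * timestamp the skb and pass it up the stack.
 */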
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct fdma_db *db_hw;
	struct frame_info fi;
	struct sk_buff *skb;

	/* Check if the next DB is done */
	db_hw = fdma_db_next_get(fdma);
	if (unlikely(!fdma_db_is_done(db_hw)))
		return false;
	skb = rx->skb[fdma->dcb_index][fdma->db_index];
	skb_put(skb, fdma_db_len_get(db_hw));
	/* Now do the normal processing of the skb */
	sparx5_ifh_parse(sparx5, (u32 *)skb->data, &fi);
	/* Map to port netdev */
	port = fi.src_port < sparx5->data->consts->n_ports ?
		       sparx5->ports[fi.src_port] :
		       NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, XTR_QUEUE);
		return false;
	}
	skb->dev = port->ndev;
	skb_pull(skb, IFH_LEN * sizeof(u32));
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;
	rx->packets++;
	netif_receive_skb(skb);
	return true;
}

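/* NAPI poll callback: consume up to "weight" completed DBs, re-arming
 * exhausted DCBs with fresh buffers along the way. When the queue is
 * drained, complete NAPI and unmask the channel DB interrupt.
 */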
static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	struct fdma *fdma = &rx->fdma;
	int counter = 0;

	while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
		fdma_db_advance(fdma);
		counter++;
		/* Check if the DCB can be reused */
		if (fdma_dcb_is_reusable(fdma))
			continue;
		fdma_dcb_add(fdma, fdma->dcb_index,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);
		fdma_db_reset(fdma);
		fdma_dcb_advance(fdma);
	}
	if (counter < weight) {
		napi_complete_done(&rx->napi, counter);
		spx5_rmw(BIT(fdma->channel_id),
			 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);
	}
	if (counter)
		sparx5_fdma_reload(sparx5, fdma);
	return counter;
}

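/* Inject a frame: copy the IFH and the frame data into the next free TX
 * DB and queue a DCB for it. The first transmission activates the TX
 * channel; subsequent ones only reload the DCB list.
 */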
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	static bool first_time = true;
	void *virt_addr;

	fdma_dcb_advance(fdma);
	if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
		return -EINVAL;

	/* Get the virtual address of the dataptr for the next DB */
	virt_addr = ((u8 *)fdma->dcbs +
		     (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
		     ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));

	memcpy(virt_addr, ifh, IFH_LEN * 4);
	memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);

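	/* The block length covers the IFH, the frame data and 4 trailing
	 * bytes, assumed to be room for the FCS.
	 */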
	fdma_dcb_add(fdma, fdma->dcb_index, 0,
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));

	if (first_time) {
		sparx5_fdma_tx_activate(sparx5, tx);
		first_time = false;
	} else {
		sparx5_fdma_reload(sparx5, fdma);
	}
	return NETDEV_TX_OK;
}

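/* Allocate the RX ring, arm all DCBs to interrupt on completion,
 * register the NAPI handler and start the RX channel.
 */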
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct fdma *fdma = &rx->fdma;
	int err;

	err = fdma_alloc_phys(fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
			      FDMA_WEIGHT);
	napi_enable(&rx->napi);
	sparx5_fdma_rx_activate(sparx5, rx);
	return 0;
}

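/* Allocate the TX ring; all DBs start out marked done, i.e. free for
 * injection.
 */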
static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	int err;

	err = fdma_alloc_phys(fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_DONE);

	return 0;
}

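/* Set the static RX channel parameters and borrow a port netdev for skb
 * allocation and NAPI.
 */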
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
				struct sparx5_rx *rx, int channel)
{
	struct fdma *fdma = &rx->fdma;
	int idx;

	fdma->channel_id = channel;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
	fdma->priv = sparx5;
	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
	fdma->size = fdma_get_size(&sparx5->rx.fdma);
	fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			rx->ndev = port->ndev;
			break;
		}
	}
}

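/* Set the static TX channel parameters; TX DCBs and data buffers share
 * one contiguous allocation.
 */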
static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
				struct sparx5_tx *tx, int channel)
{
	struct fdma *fdma = &tx->fdma;

	fdma->channel_id = channel;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
	fdma->priv = sparx5;
	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
	fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
	fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}

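/* FDMA interrupt handler: on a DB interrupt, mask further DB interrupts
 * and kick NAPI; error interrupts are logged (rate limited) and cleared.
 */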
irqreturn_t sparx5_fdma_handler(int irq, void *args)
{
	struct sparx5 *sparx5 = args;
	u32 db = 0, err = 0;

	db = spx5_rd(sparx5, FDMA_INTR_DB);
	err = spx5_rd(sparx5, FDMA_INTR_ERR);
	/* Clear interrupt */
	if (db) {
		spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
		spx5_wr(db, sparx5, FDMA_INTR_DB);
		napi_schedule(&sparx5->rx.napi);
	}
	if (err) {
		u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);

		dev_err_ratelimited(sparx5->dev,
				    "ERR: int: %#x, type: %#x\n",
				    err, err_type);
		spx5_wr(err, sparx5, FDMA_INTR_ERR);
		spx5_wr(err_type, sparx5, FDMA_ERRORS);
	}
	return IRQ_HANDLED;
}

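/* Switch the internal CPU ports from register-based to FDMA-based
 * extraction and injection, and prepare the ASM/DSM/QFWD/HSCH port
 * settings accordingly.
 */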
static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;
	int urgency;

	/* Change mode to FDMA extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
	     portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
	     portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable port in queue system */
		urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500);
		spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
			 QFWD_SWITCH_PORT_MODE_PORT_ENA |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
			 sparx5,
			 QFWD_SWITCH_PORT_MODE(portno));

		/* Disable Disassembler buffer underrun watchdog
		 * to avoid truncated packets in XTR
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));

		/* Disable frame aging */
		spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1),
			 HSCH_PORT_MODE_AGE_DIS,
			 sparx5,
			 HSCH_PORT_MODE(portno));
	}
}

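/* Bring up the FDMA: reset the block, set up ACP caching, select FDMA
 * mode for the CPU ports, then initialize, allocate and start both
 * channels.
 */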
int sparx5_fdma_start(struct sparx5 *sparx5)
{
	int err;

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	/* Force ACP caching but disable read/write allocation */
	spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) |
		 CPU_PROC_CTRL_ACP_AWCACHE_SET(0) |
		 CPU_PROC_CTRL_ACP_ARCACHE_SET(0),
		 CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA |
		 CPU_PROC_CTRL_ACP_AWCACHE |
		 CPU_PROC_CTRL_ACP_ARCACHE,
		 sparx5, CPU_PROC_CTRL);

	sparx5_fdma_injection_mode(sparx5);
	sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
	sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
	err = sparx5_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
		return err;
	}
	err = sparx5_fdma_tx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
		return err;
	}
	return err;
}

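/* Read op for read_poll_timeout() in sparx5_fdma_stop() */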
static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}

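/* Tear down the FDMA: stop NAPI, deactivate both channels, poll the
 * extraction buffer state briefly and free both rings.
 */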
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
	u32 val;

	napi_disable(&sparx5->rx.napi);
	/* Stop the fdma and channel interrupts */
	sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
	sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
	/* Wait for the RX channel to stop */
	read_poll_timeout(sparx5_fdma_port_ctrl, val,
			  FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
			  500, 10000, 0, sparx5);
	fdma_free_phys(&sparx5->rx.fdma);
	fdma_free_phys(&sparx5->tx.fdma);
	return 0;
}
467