1 // SPDX-License-Identifier: GPL-2.0
2 /* Renesas Ethernet Switch device driver
3 *
4 * Copyright (C) 2022 Renesas Electronics Corporation
5 */
6
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/err.h>
10 #include <linux/etherdevice.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/sys_soc.h>
26
27 #include "rswitch.h"
28
29 static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
30 {
31 u32 val;
32
33 return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
34 1, RSWITCH_TIMEOUT_US);
35 }
36
37 static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
38 {
39 iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
40 }
41
42 /* Common Agent block (COMA) */
43 static void rswitch_reset(struct rswitch_private *priv)
44 {
45 iowrite32(RRC_RR, priv->addr + RRC);
46 iowrite32(RRC_RR_CLR, priv->addr + RRC);
47 }
48
49 static void rswitch_clock_enable(struct rswitch_private *priv)
50 {
51 iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
52 }
53
54 static void rswitch_clock_disable(struct rswitch_private *priv)
55 {
56 iowrite32(RCDC_RCD, priv->addr + RCDC);
57 }
58
59 static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
60 unsigned int port)
61 {
62 u32 val = ioread32(coma_addr + RCEC);
63
64 if (val & RCEC_RCE)
65 return (val & BIT(port)) ? true : false;
66 else
67 return false;
68 }
69
70 static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
71 int enable)
72 {
73 u32 val;
74
75 if (enable) {
76 val = ioread32(coma_addr + RCEC);
77 iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
78 } else {
79 val = ioread32(coma_addr + RCDC);
80 iowrite32(val | BIT(port), coma_addr + RCDC);
81 }
82 }
83
84 static int rswitch_bpool_config(struct rswitch_private *priv)
85 {
86 u32 val;
87
88 val = ioread32(priv->addr + CABPIRM);
89 if (val & CABPIRM_BPR)
90 return 0;
91
92 iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);
93
94 return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
95 }
96
97 static void rswitch_coma_init(struct rswitch_private *priv)
98 {
99 iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
100 }
101
102 /* R-Switch-2 block (TOP) */
103 static void rswitch_top_init(struct rswitch_private *priv)
104 {
105 unsigned int i;
106
107 for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
108 iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
109 }
110
111 /* Forwarding engine block (MFWD) */
112 static void rswitch_fwd_init(struct rswitch_private *priv)
113 {
114 u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
115 unsigned int i;
116
117 /* Start with empty configuration */
118 for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
119 /* Disable all port features */
120 iowrite32(0, priv->addr + FWPC0(i));
121 /* Disallow L3 forwarding and direct descriptor forwarding */
122 iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
123 priv->addr + FWPC1(i));
124 /* Disallow L2 forwarding */
125 iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
126 priv->addr + FWPC2(i));
127 /* Disallow port based forwarding */
128 iowrite32(0, priv->addr + FWPBFC(i));
129 }
130
131 /* For enabled ETHA ports, setup port based forwarding */
132 rswitch_for_each_enabled_port(priv, i) {
133 /* Port based forwarding from port i to GWCA port */
134 rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
135 FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
136 /* Within GWCA port, forward to Rx queue for port i */
137 iowrite32(priv->rdev[i]->rx_queue->index,
138 priv->addr + FWPBFCSDC(GWCA_INDEX, i));
139 }
140
141 /* For GWCA port, allow direct descriptor forwarding */
142 rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
143 }
144
145 /* Gateway CPU agent block (GWCA) */
146 static int rswitch_gwca_change_mode(struct rswitch_private *priv,
147 enum rswitch_gwca_mode mode)
148 {
149 int ret;
150
151 if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
152 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);
153
154 iowrite32(mode, priv->addr + GWMC);
155
156 ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);
157
158 if (mode == GWMC_OPC_DISABLE)
159 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);
160
161 return ret;
162 }
163
164 static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
165 {
166 iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);
167
168 return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
169 }
170
171 static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
172 {
173 iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);
174
175 return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
176 }
177
178 static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
179 {
180 u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
181 unsigned int i;
182
183 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
184 if (dis[i] & mask[i])
185 return true;
186 }
187
188 return false;
189 }
190
191 static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
192 {
193 unsigned int i;
194
195 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
196 dis[i] = ioread32(priv->addr + GWDIS(i));
197 dis[i] &= ioread32(priv->addr + GWDIE(i));
198 }
199 }
200
201 static void rswitch_enadis_data_irq(struct rswitch_private *priv,
202 unsigned int index, bool enable)
203 {
204 u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
205
206 iowrite32(BIT(index % 32), priv->addr + offs);
207 }
208
209 static void rswitch_ack_data_irq(struct rswitch_private *priv,
210 unsigned int index)
211 {
212 u32 offs = GWDIS(index / 32);
213
214 iowrite32(BIT(index % 32), priv->addr + offs);
215 }
216
217 static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
218 bool cur, unsigned int num)
219 {
220 unsigned int index = cur ? gq->cur : gq->dirty;
221
222 if (index + num >= gq->ring_size)
223 index = (index + num) % gq->ring_size;
224 else
225 index += num;
226
227 return index;
228 }
229
230 static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
231 {
232 if (gq->cur >= gq->dirty)
233 return gq->cur - gq->dirty;
234 else
235 return gq->ring_size - gq->dirty + gq->cur;
236 }
237
238 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
239 {
240 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
241
242 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
243 return true;
244
245 return false;
246 }
247
248 static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
249 unsigned int start_index,
250 unsigned int num)
251 {
252 unsigned int i, index;
253
254 for (i = 0; i < num; i++) {
255 index = (i + start_index) % gq->ring_size;
256 if (gq->rx_bufs[index])
257 continue;
258 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
259 if (!gq->rx_bufs[index])
260 goto err;
261 }
262
263 return 0;
264
265 err:
266 for (; i-- > 0; ) {
267 index = (i + start_index) % gq->ring_size;
268 skb_free_frag(gq->rx_bufs[index]);
269 gq->rx_bufs[index] = NULL;
270 }
271
272 return -ENOMEM;
273 }
274
275 static void rswitch_gwca_queue_free(struct net_device *ndev,
276 struct rswitch_gwca_queue *gq)
277 {
278 unsigned int i;
279
280 if (!gq->dir_tx) {
281 dma_free_coherent(ndev->dev.parent,
282 sizeof(struct rswitch_ext_ts_desc) *
283 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
284 gq->rx_ring = NULL;
285
286 for (i = 0; i < gq->ring_size; i++)
287 skb_free_frag(gq->rx_bufs[i]);
288 kfree(gq->rx_bufs);
289 gq->rx_bufs = NULL;
290 } else {
291 dma_free_coherent(ndev->dev.parent,
292 sizeof(struct rswitch_ext_desc) *
293 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
294 gq->tx_ring = NULL;
295 kfree(gq->skbs);
296 gq->skbs = NULL;
297 kfree(gq->unmap_addrs);
298 gq->unmap_addrs = NULL;
299 }
300 }
301
302 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
303 {
304 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
305
306 dma_free_coherent(&priv->pdev->dev,
307 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
308 gq->ts_ring, gq->ring_dma);
309 gq->ts_ring = NULL;
310 }
311
312 static int rswitch_gwca_queue_alloc(struct net_device *ndev,
313 struct rswitch_private *priv,
314 struct rswitch_gwca_queue *gq,
315 bool dir_tx, unsigned int ring_size)
316 {
317 unsigned int i, bit;
318
319 gq->dir_tx = dir_tx;
320 gq->ring_size = ring_size;
321 gq->ndev = ndev;
322
323 if (!dir_tx) {
324 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
325 if (!gq->rx_bufs)
326 return -ENOMEM;
327 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
328 goto out;
329
330 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
331 sizeof(struct rswitch_ext_ts_desc) *
332 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
333 } else {
334 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
335 if (!gq->skbs)
336 return -ENOMEM;
337 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
338 if (!gq->unmap_addrs)
339 goto out;
340 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
341 sizeof(struct rswitch_ext_desc) *
342 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
343 }
344
345 if (!gq->rx_ring && !gq->tx_ring)
346 goto out;
347
348 i = gq->index / 32;
349 bit = BIT(gq->index % 32);
350 if (dir_tx)
351 priv->gwca.tx_irq_bits[i] |= bit;
352 else
353 priv->gwca.rx_irq_bits[i] |= bit;
354
355 return 0;
356
357 out:
358 rswitch_gwca_queue_free(ndev, gq);
359
360 return -ENOMEM;
361 }
362
363 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
364 {
365 desc->dptrl = cpu_to_le32(lower_32_bits(addr));
366 desc->dptrh = upper_32_bits(addr) & 0xff;
367 }
368
369 static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
370 {
371 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
372 }
373
374 static int rswitch_gwca_queue_format(struct net_device *ndev,
375 struct rswitch_private *priv,
376 struct rswitch_gwca_queue *gq)
377 {
378 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
379 struct rswitch_ext_desc *desc;
380 struct rswitch_desc *linkfix;
381 dma_addr_t dma_addr;
382 unsigned int i;
383
384 memset(gq->tx_ring, 0, ring_size);
385 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
386 if (!gq->dir_tx) {
387 dma_addr = dma_map_single(ndev->dev.parent,
388 gq->rx_bufs[i] + RSWITCH_HEADROOM,
389 RSWITCH_MAP_BUF_SIZE,
390 DMA_FROM_DEVICE);
391 if (dma_mapping_error(ndev->dev.parent, dma_addr))
392 goto err;
393
394 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
395 rswitch_desc_set_dptr(&desc->desc, dma_addr);
396 desc->desc.die_dt = DT_FEMPTY | DIE;
397 } else {
398 desc->desc.die_dt = DT_EEMPTY | DIE;
399 }
400 }
401 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
402 desc->desc.die_dt = DT_LINKFIX;
403
404 linkfix = &priv->gwca.linkfix_table[gq->index];
405 linkfix->die_dt = DT_LINKFIX;
406 rswitch_desc_set_dptr(linkfix, gq->ring_dma);
407
408 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
409 priv->addr + GWDCC_OFFS(gq->index));
410
411 return 0;
412
413 err:
414 if (!gq->dir_tx) {
415 for (desc = gq->tx_ring; i-- > 0; desc++) {
416 dma_addr = rswitch_desc_get_dptr(&desc->desc);
417 dma_unmap_single(ndev->dev.parent, dma_addr,
418 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
419 }
420 }
421
422 return -ENOMEM;
423 }
424
425 static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
426 unsigned int start_index,
427 unsigned int num)
428 {
429 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
430 struct rswitch_ts_desc *desc;
431 unsigned int i, index;
432
433 for (i = 0; i < num; i++) {
434 index = (i + start_index) % gq->ring_size;
435 desc = &gq->ts_ring[index];
436 desc->desc.die_dt = DT_FEMPTY_ND | DIE;
437 }
438 }
439
440 static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
441 struct rswitch_gwca_queue *gq,
442 unsigned int start_index,
443 unsigned int num)
444 {
445 struct rswitch_device *rdev = netdev_priv(ndev);
446 struct rswitch_ext_ts_desc *desc;
447 unsigned int i, index;
448 dma_addr_t dma_addr;
449
450 for (i = 0; i < num; i++) {
451 index = (i + start_index) % gq->ring_size;
452 desc = &gq->rx_ring[index];
453 if (!gq->dir_tx) {
454 dma_addr = dma_map_single(ndev->dev.parent,
455 gq->rx_bufs[index] + RSWITCH_HEADROOM,
456 RSWITCH_MAP_BUF_SIZE,
457 DMA_FROM_DEVICE);
458 if (dma_mapping_error(ndev->dev.parent, dma_addr))
459 goto err;
460
461 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
462 rswitch_desc_set_dptr(&desc->desc, dma_addr);
463 dma_wmb();
464 desc->desc.die_dt = DT_FEMPTY | DIE;
465 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
466 } else {
467 desc->desc.die_dt = DT_EEMPTY | DIE;
468 }
469 }
470
471 return 0;
472
473 err:
474 if (!gq->dir_tx) {
475 for (; i-- > 0; ) {
476 index = (i + start_index) % gq->ring_size;
477 desc = &gq->rx_ring[index];
478 dma_addr = rswitch_desc_get_dptr(&desc->desc);
479 dma_unmap_single(ndev->dev.parent, dma_addr,
480 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
481 }
482 }
483
484 return -ENOMEM;
485 }
486
487 static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
488 struct rswitch_private *priv,
489 struct rswitch_gwca_queue *gq)
490 {
491 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
492 struct rswitch_ext_ts_desc *desc;
493 struct rswitch_desc *linkfix;
494 int err;
495
496 memset(gq->rx_ring, 0, ring_size);
497 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
498 if (err < 0)
499 return err;
500
501 desc = &gq->rx_ring[gq->ring_size]; /* Last */
502 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
503 desc->desc.die_dt = DT_LINKFIX;
504
505 linkfix = &priv->gwca.linkfix_table[gq->index];
506 linkfix->die_dt = DT_LINKFIX;
507 rswitch_desc_set_dptr(linkfix, gq->ring_dma);
508
509 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
510 GWDCC_ETS | GWDCC_EDE,
511 priv->addr + GWDCC_OFFS(gq->index));
512
513 return 0;
514 }
515
516 static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
517 {
518 unsigned int i, num_queues = priv->gwca.num_queues;
519 struct rswitch_gwca *gwca = &priv->gwca;
520 struct device *dev = &priv->pdev->dev;
521
522 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
523 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
524 &gwca->linkfix_table_dma, GFP_KERNEL);
525 if (!gwca->linkfix_table)
526 return -ENOMEM;
527 for (i = 0; i < num_queues; i++)
528 gwca->linkfix_table[i].die_dt = DT_EOS;
529
530 return 0;
531 }
532
533 static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
534 {
535 struct rswitch_gwca *gwca = &priv->gwca;
536
537 if (gwca->linkfix_table)
538 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
539 gwca->linkfix_table, gwca->linkfix_table_dma);
540 gwca->linkfix_table = NULL;
541 }
542
543 static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
544 {
545 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
546 struct rswitch_ts_desc *desc;
547
548 gq->ring_size = TS_RING_SIZE;
549 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
550 sizeof(struct rswitch_ts_desc) *
551 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
552
553 if (!gq->ts_ring)
554 return -ENOMEM;
555
556 rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
557 desc = &gq->ts_ring[gq->ring_size];
558 desc->desc.die_dt = DT_LINKFIX;
559 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
560
561 return 0;
562 }
563
564 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
565 {
566 struct rswitch_gwca_queue *gq;
567 unsigned int index;
568
569 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
570 if (index >= priv->gwca.num_queues)
571 return NULL;
572 set_bit(index, priv->gwca.used);
573 gq = &priv->gwca.queues[index];
574 memset(gq, 0, sizeof(*gq));
575 gq->index = index;
576
577 return gq;
578 }
579
580 static void rswitch_gwca_put(struct rswitch_private *priv,
581 struct rswitch_gwca_queue *gq)
582 {
583 clear_bit(gq->index, priv->gwca.used);
584 }
585
586 static int rswitch_txdmac_alloc(struct net_device *ndev)
587 {
588 struct rswitch_device *rdev = netdev_priv(ndev);
589 struct rswitch_private *priv = rdev->priv;
590 int err;
591
592 rdev->tx_queue = rswitch_gwca_get(priv);
593 if (!rdev->tx_queue)
594 return -EBUSY;
595
596 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
597 if (err < 0) {
598 rswitch_gwca_put(priv, rdev->tx_queue);
599 return err;
600 }
601
602 return 0;
603 }
604
605 static void rswitch_txdmac_free(struct net_device *ndev)
606 {
607 struct rswitch_device *rdev = netdev_priv(ndev);
608
609 rswitch_gwca_queue_free(ndev, rdev->tx_queue);
610 rswitch_gwca_put(rdev->priv, rdev->tx_queue);
611 }
612
613 static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
614 {
615 struct rswitch_device *rdev = priv->rdev[index];
616
617 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
618 }
619
620 static int rswitch_rxdmac_alloc(struct net_device *ndev)
621 {
622 struct rswitch_device *rdev = netdev_priv(ndev);
623 struct rswitch_private *priv = rdev->priv;
624 int err;
625
626 rdev->rx_queue = rswitch_gwca_get(priv);
627 if (!rdev->rx_queue)
628 return -EBUSY;
629
630 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
631 if (err < 0) {
632 rswitch_gwca_put(priv, rdev->rx_queue);
633 return err;
634 }
635
636 return 0;
637 }
638
639 static void rswitch_rxdmac_free(struct net_device *ndev)
640 {
641 struct rswitch_device *rdev = netdev_priv(ndev);
642
643 rswitch_gwca_queue_free(ndev, rdev->rx_queue);
644 rswitch_gwca_put(rdev->priv, rdev->rx_queue);
645 }
646
647 static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
648 {
649 struct rswitch_device *rdev = priv->rdev[index];
650 struct net_device *ndev = rdev->ndev;
651
652 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
653 }
654
655 static int rswitch_gwca_hw_init(struct rswitch_private *priv)
656 {
657 unsigned int i;
658 int err;
659
660 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
661 if (err < 0)
662 return err;
663 err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
664 if (err < 0)
665 return err;
666
667 err = rswitch_gwca_mcast_table_reset(priv);
668 if (err < 0)
669 return err;
670 err = rswitch_gwca_axi_ram_reset(priv);
671 if (err < 0)
672 return err;
673
674 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
675 iowrite32(0, priv->addr + GWTTFC);
676 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
677 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
678 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
679 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
680 iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
681 priv->addr + GWMDNC);
682 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
683
684 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);
685
686 for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
687 err = rswitch_rxdmac_init(priv, i);
688 if (err < 0)
689 return err;
690 err = rswitch_txdmac_init(priv, i);
691 if (err < 0)
692 return err;
693 }
694
695 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
696 if (err < 0)
697 return err;
698 return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
699 }
700
701 static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
702 {
703 int err;
704
705 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
706 if (err < 0)
707 return err;
708 err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
709 if (err < 0)
710 return err;
711
712 return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
713 }
714
715 static int rswitch_gwca_halt(struct rswitch_private *priv)
716 {
717 int err;
718
719 priv->gwca_halt = true;
720 err = rswitch_gwca_hw_deinit(priv);
721 dev_err(&priv->pdev->dev, "halted (%d)\n", err);
722
723 return err;
724 }
725
726 static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
727 struct rswitch_gwca_queue *gq,
728 struct rswitch_ext_ts_desc *desc)
729 {
730 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
731 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
732 u8 die_dt = desc->desc.die_dt & DT_MASK;
733 struct sk_buff *skb = NULL;
734
735 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
736 DMA_FROM_DEVICE);
737
738 /* The RX descriptor order will be one of the following:
739 * - FSINGLE
740 * - FSTART -> FEND
741 * - FSTART -> FMID -> FEND
742 */
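/* Descriptive note: a frame that fits in a single RX buffer is delivered as
 * one FSINGLE descriptor; a frame larger than one buffer is spread across
 * several descriptors, which is why the FSTART/FMID/FEND cases below
 * reassemble the fragments into a single skb.
 */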
743
744 /* Check whether the descriptor arrived in an unexpected order */
745 switch (die_dt) {
746 case DT_FSTART:
747 case DT_FSINGLE:
748 if (gq->skb_fstart) {
749 dev_kfree_skb_any(gq->skb_fstart);
750 gq->skb_fstart = NULL;
751 ndev->stats.rx_dropped++;
752 }
753 break;
754 case DT_FMID:
755 case DT_FEND:
756 if (!gq->skb_fstart) {
757 ndev->stats.rx_dropped++;
758 return NULL;
759 }
760 break;
761 default:
762 break;
763 }
764
765 /* Handle the descriptor */
766 switch (die_dt) {
767 case DT_FSTART:
768 case DT_FSINGLE:
769 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
770 if (skb) {
771 skb_reserve(skb, RSWITCH_HEADROOM);
772 skb_put(skb, pkt_len);
773 gq->pkt_len = pkt_len;
774 if (die_dt == DT_FSTART) {
775 gq->skb_fstart = skb;
776 skb = NULL;
777 }
778 }
779 break;
780 case DT_FMID:
781 case DT_FEND:
782 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
783 virt_to_page(gq->rx_bufs[gq->cur]),
784 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
785 pkt_len, RSWITCH_BUF_SIZE);
786 if (die_dt == DT_FEND) {
787 skb = gq->skb_fstart;
788 gq->skb_fstart = NULL;
789 }
790 gq->pkt_len += pkt_len;
791 break;
792 default:
793 netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
794 break;
795 }
796
797 return skb;
798 }
799
800 static bool rswitch_rx(struct net_device *ndev, int *quota)
801 {
802 struct rswitch_device *rdev = netdev_priv(ndev);
803 struct rswitch_gwca_queue *gq = rdev->rx_queue;
804 struct rswitch_ext_ts_desc *desc;
805 int limit, boguscnt, ret;
806 struct sk_buff *skb;
807 unsigned int num;
808 u32 get_ts;
809
810 if (*quota <= 0)
811 return true;
812
813 boguscnt = min_t(int, gq->ring_size, *quota);
814 limit = boguscnt;
815
816 desc = &gq->rx_ring[gq->cur];
817 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
818 dma_rmb();
819 skb = rswitch_rx_handle_desc(ndev, gq, desc);
820 if (!skb)
821 goto out;
822
823 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
824 if (get_ts) {
825 struct skb_shared_hwtstamps *shhwtstamps;
826 struct timespec64 ts;
827
828 shhwtstamps = skb_hwtstamps(skb);
829 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
830 ts.tv_sec = __le32_to_cpu(desc->ts_sec);
831 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
832 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
833 }
834 skb->protocol = eth_type_trans(skb, ndev);
835 napi_gro_receive(&rdev->napi, skb);
836 rdev->ndev->stats.rx_packets++;
837 rdev->ndev->stats.rx_bytes += gq->pkt_len;
838
839 out:
840 gq->rx_bufs[gq->cur] = NULL;
841 gq->cur = rswitch_next_queue_index(gq, true, 1);
842 desc = &gq->rx_ring[gq->cur];
843
844 if (--boguscnt <= 0)
845 break;
846 }
847
848 num = rswitch_get_num_cur_queues(gq);
849 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
850 if (ret < 0)
851 goto err;
852 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
853 if (ret < 0)
854 goto err;
855 gq->dirty = rswitch_next_queue_index(gq, false, num);
856
857 *quota -= limit - boguscnt;
858
859 return boguscnt <= 0;
860
861 err:
862 rswitch_gwca_halt(rdev->priv);
863
864 return 0;
865 }
866
867 static void rswitch_tx_free(struct net_device *ndev)
868 {
869 struct rswitch_device *rdev = netdev_priv(ndev);
870 struct rswitch_gwca_queue *gq = rdev->tx_queue;
871 struct rswitch_ext_desc *desc;
872 struct sk_buff *skb;
873
874 desc = &gq->tx_ring[gq->dirty];
875 while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
876 dma_rmb();
877
878 skb = gq->skbs[gq->dirty];
879 if (skb) {
880 rdev->ndev->stats.tx_packets++;
881 rdev->ndev->stats.tx_bytes += skb->len;
882 dma_unmap_single(ndev->dev.parent,
883 gq->unmap_addrs[gq->dirty],
884 skb->len, DMA_TO_DEVICE);
885 dev_kfree_skb_any(gq->skbs[gq->dirty]);
886 gq->skbs[gq->dirty] = NULL;
887 }
888
889 desc->desc.die_dt = DT_EEMPTY;
890 gq->dirty = rswitch_next_queue_index(gq, false, 1);
891 desc = &gq->tx_ring[gq->dirty];
892 }
893 }
894
895 static int rswitch_poll(struct napi_struct *napi, int budget)
896 {
897 struct net_device *ndev = napi->dev;
898 struct rswitch_private *priv;
899 struct rswitch_device *rdev;
900 unsigned long flags;
901 int quota = budget;
902
903 rdev = netdev_priv(ndev);
904 priv = rdev->priv;
905
906 retry:
907 rswitch_tx_free(ndev);
908
909 if (rswitch_rx(ndev, &quota))
910 goto out;
911 else if (rdev->priv->gwca_halt)
912 goto err;
913 else if (rswitch_is_queue_rxed(rdev->rx_queue))
914 goto retry;
915
916 netif_wake_subqueue(ndev, 0);
917
918 if (napi_complete_done(napi, budget - quota)) {
919 spin_lock_irqsave(&priv->lock, flags);
920 if (test_bit(rdev->port, priv->opened_ports)) {
921 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
922 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
923 }
924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926
927 out:
928 return budget - quota;
929
930 err:
931 napi_complete(napi);
932
933 return 0;
934 }
935
936 static void rswitch_queue_interrupt(struct net_device *ndev)
937 {
938 struct rswitch_device *rdev = netdev_priv(ndev);
939
940 if (napi_schedule_prep(&rdev->napi)) {
941 spin_lock(&rdev->priv->lock);
942 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
943 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
944 spin_unlock(&rdev->priv->lock);
945 __napi_schedule(&rdev->napi);
946 }
947 }
948
949 static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
950 {
951 struct rswitch_gwca_queue *gq;
952 unsigned int i, index, bit;
953
954 for (i = 0; i < priv->gwca.num_queues; i++) {
955 gq = &priv->gwca.queues[i];
956 index = gq->index / 32;
957 bit = BIT(gq->index % 32);
958 if (!(dis[index] & bit))
959 continue;
960
961 rswitch_ack_data_irq(priv, gq->index);
962 rswitch_queue_interrupt(gq->ndev);
963 }
964
965 return IRQ_HANDLED;
966 }
967
968 static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
969 {
970 struct rswitch_private *priv = dev_id;
971 u32 dis[RSWITCH_NUM_IRQ_REGS];
972 irqreturn_t ret = IRQ_NONE;
973
974 rswitch_get_data_irq_status(priv, dis);
975
976 if (rswitch_is_any_data_irq(priv, dis, true) ||
977 rswitch_is_any_data_irq(priv, dis, false))
978 ret = rswitch_data_irq(priv, dis);
979
980 return ret;
981 }
982
983 static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
984 {
985 char *resource_name, *irq_name;
986 int i, ret, irq;
987
988 for (i = 0; i < GWCA_NUM_IRQS; i++) {
989 resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
990 if (!resource_name)
991 return -ENOMEM;
992
993 irq = platform_get_irq_byname(priv->pdev, resource_name);
994 kfree(resource_name);
995 if (irq < 0)
996 return irq;
997
998 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
999 GWCA_IRQ_NAME, i);
1000 if (!irq_name)
1001 return -ENOMEM;
1002
1003 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
1004 0, irq_name, priv);
1005 if (ret < 0)
1006 return ret;
1007 }
1008
1009 return 0;
1010 }
1011
1012 static void rswitch_ts(struct rswitch_private *priv)
1013 {
1014 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
1015 struct skb_shared_hwtstamps shhwtstamps;
1016 struct rswitch_ts_desc *desc;
1017 struct rswitch_device *rdev;
1018 struct sk_buff *ts_skb;
1019 struct timespec64 ts;
1020 unsigned int num;
1021 u32 tag, port;
1022
1023 desc = &gq->ts_ring[gq->cur];
1024 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
1025 dma_rmb();
1026
1027 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
1028 if (unlikely(port >= RSWITCH_NUM_PORTS))
1029 goto next;
1030 rdev = priv->rdev[port];
1031
1032 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
1033 if (unlikely(tag >= TS_TAGS_PER_PORT))
1034 goto next;
1035 ts_skb = xchg(&rdev->ts_skb[tag], NULL);
1036 smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
1037 clear_bit(tag, rdev->ts_skb_used);
1038
1039 if (unlikely(!ts_skb))
1040 goto next;
1041
1042 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1043 ts.tv_sec = __le32_to_cpu(desc->ts_sec);
1044 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
1045 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
1046 skb_tstamp_tx(ts_skb, &shhwtstamps);
1047 dev_consume_skb_irq(ts_skb);
1048
1049 next:
1050 gq->cur = rswitch_next_queue_index(gq, true, 1);
1051 desc = &gq->ts_ring[gq->cur];
1052 }
1053
1054 num = rswitch_get_num_cur_queues(gq);
1055 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
1056 gq->dirty = rswitch_next_queue_index(gq, false, num);
1057 }
1058
1059 static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
1060 {
1061 struct rswitch_private *priv = dev_id;
1062
1063 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
1064 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
1065 rswitch_ts(priv);
1066
1067 return IRQ_HANDLED;
1068 }
1069
1070 return IRQ_NONE;
1071 }
1072
1073 static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
1074 {
1075 int irq;
1076
1077 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
1078 if (irq < 0)
1079 return irq;
1080
1081 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
1082 0, GWCA_TS_IRQ_NAME, priv);
1083 }
1084
1085 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
1086 static int rswitch_etha_change_mode(struct rswitch_etha *etha,
1087 enum rswitch_etha_mode mode)
1088 {
1089 int ret;
1090
1091 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
1092 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);
1093
1094 iowrite32(mode, etha->addr + EAMC);
1095
1096 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);
1097
1098 if (mode == EAMC_OPC_DISABLE)
1099 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);
1100
1101 return ret;
1102 }
1103
1104 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
1105 {
1106 u32 mrmac0 = ioread32(etha->addr + MRMAC0);
1107 u32 mrmac1 = ioread32(etha->addr + MRMAC1);
1108 u8 *mac = &etha->mac_addr[0];
1109
1110 mac[0] = (mrmac0 >> 8) & 0xFF;
1111 mac[1] = (mrmac0 >> 0) & 0xFF;
1112 mac[2] = (mrmac1 >> 24) & 0xFF;
1113 mac[3] = (mrmac1 >> 16) & 0xFF;
1114 mac[4] = (mrmac1 >> 8) & 0xFF;
1115 mac[5] = (mrmac1 >> 0) & 0xFF;
1116 }
1117
1118 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
1119 {
1120 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
1121 iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1122 etha->addr + MRMAC1);
1123 }
1124
1125 static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
1126 {
1127 iowrite32(MLVC_PLV, etha->addr + MLVC);
1128
1129 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
1130 }
1131
1132 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
1133 {
1134 u32 pis, lsc;
1135
1136 rswitch_etha_write_mac_address(etha, mac);
1137
1138 switch (etha->phy_interface) {
1139 case PHY_INTERFACE_MODE_SGMII:
1140 pis = MPIC_PIS_GMII;
1141 break;
1142 case PHY_INTERFACE_MODE_USXGMII:
1143 case PHY_INTERFACE_MODE_5GBASER:
1144 pis = MPIC_PIS_XGMII;
1145 break;
1146 default:
1147 pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
1148 break;
1149 }
1150
1151 switch (etha->speed) {
1152 case 100:
1153 lsc = MPIC_LSC_100M;
1154 break;
1155 case 1000:
1156 lsc = MPIC_LSC_1G;
1157 break;
1158 case 2500:
1159 lsc = MPIC_LSC_2_5G;
1160 break;
1161 default:
1162 lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
1163 break;
1164 }
1165
1166 rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
1167 FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
1168 }
1169
1170 static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
1171 {
1172 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
1173 FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
1174 FIELD_PREP(MPIC_PSMHT, 0x06));
1175 }
1176
1177 static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
1178 {
1179 int err;
1180
1181 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
1182 if (err < 0)
1183 return err;
1184 err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
1185 if (err < 0)
1186 return err;
1187
1188 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
1189 rswitch_rmac_setting(etha, mac);
1190 rswitch_etha_enable_mii(etha);
1191
1192 err = rswitch_etha_wait_link_verification(etha);
1193 if (err < 0)
1194 return err;
1195
1196 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
1197 if (err < 0)
1198 return err;
1199
1200 return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
1201 }
1202
1203 static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
1204 unsigned int mmf, unsigned int pda,
1205 unsigned int pra, unsigned int pop,
1206 unsigned int prd)
1207 {
1208 u32 val;
1209 int ret;
1210
1211 val = MPSM_PSME |
1212 FIELD_PREP(MPSM_MFF, mmf) |
1213 FIELD_PREP(MPSM_PDA, pda) |
1214 FIELD_PREP(MPSM_PRA, pra) |
1215 FIELD_PREP(MPSM_POP, pop) |
1216 FIELD_PREP(MPSM_PRD, prd);
1217 iowrite32(val, etha->addr + MPSM);
1218
1219 ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
1220 if (ret)
1221 return ret;
1222
1223 if (read) {
1224 val = ioread32(etha->addr + MPSM);
1225 ret = FIELD_GET(MPSM_PRD, val);
1226 }
1227
1228 return ret;
1229 }
1230
1231 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
1232 int regad)
1233 {
1234 struct rswitch_etha *etha = bus->priv;
1235 int ret;
1236
1237 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1238 MPSM_POP_ADDRESS, regad);
1239 if (ret)
1240 return ret;
1241
1242 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
1243 MPSM_POP_READ_C45, 0);
1244 }
1245
1246 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
1247 int regad, u16 val)
1248 {
1249 struct rswitch_etha *etha = bus->priv;
1250 int ret;
1251
1252 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1253 MPSM_POP_ADDRESS, regad);
1254 if (ret)
1255 return ret;
1256
1257 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1258 MPSM_POP_WRITE, val);
1259 }
1260
1261 static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
1262 {
1263 struct rswitch_etha *etha = bus->priv;
1264
1265 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
1266 MPSM_POP_READ_C22, 0);
1267 }
1268
1269 static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
1270 int regad, u16 val)
1271 {
1272 struct rswitch_etha *etha = bus->priv;
1273
1274 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
1275 MPSM_POP_WRITE, val);
1276 }
1277
1278 /* The caller must call of_node_put() on the returned node when done */
1279 static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
1280 {
1281 struct device_node *ports, *port;
1282 int err = 0;
1283 u32 index;
1284
1285 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
1286 "ethernet-ports");
1287 if (!ports)
1288 return NULL;
1289
1290 for_each_child_of_node(ports, port) {
1291 err = of_property_read_u32(port, "reg", &index);
1292 if (err < 0) {
1293 port = NULL;
1294 goto out;
1295 }
1296 if (index == rdev->etha->index) {
1297 if (!of_device_is_available(port))
1298 port = NULL;
1299 break;
1300 }
1301 }
1302
1303 out:
1304 of_node_put(ports);
1305
1306 return port;
1307 }
1308
1309 static int rswitch_etha_get_params(struct rswitch_device *rdev)
1310 {
1311 u32 max_speed;
1312 int err;
1313
1314 if (!rdev->np_port)
1315 return 0; /* ignored */
1316
1317 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
1318 if (err)
1319 return err;
1320
1321 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
1322 if (!err) {
1323 rdev->etha->speed = max_speed;
1324 return 0;
1325 }
1326
1327 /* if no "max-speed" property, let's use default speed */
1328 switch (rdev->etha->phy_interface) {
1329 case PHY_INTERFACE_MODE_MII:
1330 rdev->etha->speed = SPEED_100;
1331 break;
1332 case PHY_INTERFACE_MODE_SGMII:
1333 rdev->etha->speed = SPEED_1000;
1334 break;
1335 case PHY_INTERFACE_MODE_USXGMII:
1336 rdev->etha->speed = SPEED_2500;
1337 break;
1338 default:
1339 return -EINVAL;
1340 }
1341
1342 return 0;
1343 }
1344
1345 static int rswitch_mii_register(struct rswitch_device *rdev)
1346 {
1347 struct device_node *mdio_np;
1348 struct mii_bus *mii_bus;
1349 int err;
1350
1351 mii_bus = mdiobus_alloc();
1352 if (!mii_bus)
1353 return -ENOMEM;
1354
1355 mii_bus->name = "rswitch_mii";
1356 sprintf(mii_bus->id, "etha%d", rdev->etha->index);
1357 mii_bus->priv = rdev->etha;
1358 mii_bus->read_c45 = rswitch_etha_mii_read_c45;
1359 mii_bus->write_c45 = rswitch_etha_mii_write_c45;
1360 mii_bus->read = rswitch_etha_mii_read_c22;
1361 mii_bus->write = rswitch_etha_mii_write_c22;
1362 mii_bus->parent = &rdev->priv->pdev->dev;
1363
1364 mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
1365 err = of_mdiobus_register(mii_bus, mdio_np);
1366 if (err < 0) {
1367 mdiobus_free(mii_bus);
1368 goto out;
1369 }
1370
1371 rdev->etha->mii = mii_bus;
1372
1373 out:
1374 of_node_put(mdio_np);
1375
1376 return err;
1377 }
1378
1379 static void rswitch_mii_unregister(struct rswitch_device *rdev)
1380 {
1381 if (rdev->etha->mii) {
1382 mdiobus_unregister(rdev->etha->mii);
1383 mdiobus_free(rdev->etha->mii);
1384 rdev->etha->mii = NULL;
1385 }
1386 }
1387
1388 static void rswitch_adjust_link(struct net_device *ndev)
1389 {
1390 struct rswitch_device *rdev = netdev_priv(ndev);
1391 struct phy_device *phydev = ndev->phydev;
1392
1393 if (phydev->link != rdev->etha->link) {
1394 phy_print_status(phydev);
1395 if (phydev->link)
1396 phy_power_on(rdev->serdes);
1397 else if (rdev->serdes->power_count)
1398 phy_power_off(rdev->serdes);
1399
1400 rdev->etha->link = phydev->link;
1401
1402 if (!rdev->priv->etha_no_runtime_change &&
1403 phydev->speed != rdev->etha->speed) {
1404 rdev->etha->speed = phydev->speed;
1405
1406 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1407 phy_set_speed(rdev->serdes, rdev->etha->speed);
1408 }
1409 }
1410 }
1411
1412 static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
1413 struct phy_device *phydev)
1414 {
1415 if (!rdev->priv->etha_no_runtime_change)
1416 return;
1417
1418 switch (rdev->etha->speed) {
1419 case SPEED_2500:
1420 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
1421 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
1422 break;
1423 case SPEED_1000:
1424 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
1425 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
1426 break;
1427 case SPEED_100:
1428 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
1429 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
1430 break;
1431 default:
1432 break;
1433 }
1434
1435 phy_set_max_speed(phydev, rdev->etha->speed);
1436 }
1437
1438 static int rswitch_phy_device_init(struct rswitch_device *rdev)
1439 {
1440 struct phy_device *phydev;
1441 struct device_node *phy;
1442 int err = -ENOENT;
1443
1444 if (!rdev->np_port)
1445 return -ENODEV;
1446
1447 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
1448 if (!phy)
1449 return -ENODEV;
1450
1451 /* Set phydev->host_interfaces before calling of_phy_connect() to
1452 * configure the PHY with the information of host_interfaces.
1453 */
1454 phydev = of_phy_find_device(phy);
1455 if (!phydev)
1456 goto out;
1457 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
1458 phydev->mac_managed_pm = true;
1459
1460 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
1461 rdev->etha->phy_interface);
1462 if (!phydev)
1463 goto out;
1464
1465 phy_set_max_speed(phydev, SPEED_2500);
1466 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1467 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1468 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1469 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1470 rswitch_phy_remove_link_mode(rdev, phydev);
1471
1472 phy_attached_info(phydev);
1473
1474 err = 0;
1475 out:
1476 of_node_put(phy);
1477
1478 return err;
1479 }
1480
1481 static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
1482 {
1483 if (rdev->ndev->phydev)
1484 phy_disconnect(rdev->ndev->phydev);
1485 }
1486
1487 static int rswitch_serdes_set_params(struct rswitch_device *rdev)
1488 {
1489 int err;
1490
1491 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
1492 rdev->etha->phy_interface);
1493 if (err < 0)
1494 return err;
1495
1496 return phy_set_speed(rdev->serdes, rdev->etha->speed);
1497 }
1498
1499 static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
1500 {
1501 int err;
1502
1503 if (!rdev->etha->operated) {
1504 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1505 if (err < 0)
1506 return err;
1507 if (rdev->priv->etha_no_runtime_change)
1508 rdev->etha->operated = true;
1509 }
1510
1511 err = rswitch_mii_register(rdev);
1512 if (err < 0)
1513 return err;
1514
1515 err = rswitch_phy_device_init(rdev);
1516 if (err < 0)
1517 goto err_phy_device_init;
1518
1519 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
1520 if (IS_ERR(rdev->serdes)) {
1521 err = PTR_ERR(rdev->serdes);
1522 goto err_serdes_phy_get;
1523 }
1524
1525 err = rswitch_serdes_set_params(rdev);
1526 if (err < 0)
1527 goto err_serdes_set_params;
1528
1529 return 0;
1530
1531 err_serdes_set_params:
1532 err_serdes_phy_get:
1533 rswitch_phy_device_deinit(rdev);
1534
1535 err_phy_device_init:
1536 rswitch_mii_unregister(rdev);
1537
1538 return err;
1539 }
1540
1541 static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
1542 {
1543 rswitch_phy_device_deinit(rdev);
1544 rswitch_mii_unregister(rdev);
1545 }
1546
1547 static int rswitch_ether_port_init_all(struct rswitch_private *priv)
1548 {
1549 unsigned int i;
1550 int err;
1551
1552 rswitch_for_each_enabled_port(priv, i) {
1553 err = rswitch_ether_port_init_one(priv->rdev[i]);
1554 if (err)
1555 goto err_init_one;
1556 }
1557
1558 rswitch_for_each_enabled_port(priv, i) {
1559 err = phy_init(priv->rdev[i]->serdes);
1560 if (err)
1561 goto err_serdes;
1562 }
1563
1564 return 0;
1565
1566 err_serdes:
1567 rswitch_for_each_enabled_port_continue_reverse(priv, i)
1568 phy_exit(priv->rdev[i]->serdes);
1569 i = RSWITCH_NUM_PORTS;
1570
1571 err_init_one:
1572 rswitch_for_each_enabled_port_continue_reverse(priv, i)
1573 rswitch_ether_port_deinit_one(priv->rdev[i]);
1574
1575 return err;
1576 }
1577
1578 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
1579 {
1580 unsigned int i;
1581
1582 rswitch_for_each_enabled_port(priv, i) {
1583 phy_exit(priv->rdev[i]->serdes);
1584 rswitch_ether_port_deinit_one(priv->rdev[i]);
1585 }
1586 }
1587
1588 static int rswitch_open(struct net_device *ndev)
1589 {
1590 struct rswitch_device *rdev = netdev_priv(ndev);
1591 unsigned long flags;
1592
1593 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1594 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
1595
1596 napi_enable(&rdev->napi);
1597
1598 spin_lock_irqsave(&rdev->priv->lock, flags);
1599 bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
1600 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
1601 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
1602 spin_unlock_irqrestore(&rdev->priv->lock, flags);
1603
1604 phy_start(ndev->phydev);
1605
1606 netif_start_queue(ndev);
1607
1608 return 0;
1609 };
1610
1611 static int rswitch_stop(struct net_device *ndev)
1612 {
1613 struct rswitch_device *rdev = netdev_priv(ndev);
1614 struct sk_buff *ts_skb;
1615 unsigned long flags;
1616 unsigned int tag;
1617
1618 netif_tx_stop_all_queues(ndev);
1619
1620 phy_stop(ndev->phydev);
1621
1622 spin_lock_irqsave(&rdev->priv->lock, flags);
1623 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
1624 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
1625 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
1626 spin_unlock_irqrestore(&rdev->priv->lock, flags);
1627
1628 napi_disable(&rdev->napi);
1629
1630 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1631 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
1632
1633 for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
1634 tag < TS_TAGS_PER_PORT;
1635 tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
1636 ts_skb = xchg(&rdev->ts_skb[tag], NULL);
1637 clear_bit(tag, rdev->ts_skb_used);
1638 if (ts_skb)
1639 dev_kfree_skb(ts_skb);
1640 }
1641
1642 return 0;
1643 };
1644
1645 static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
1646 struct sk_buff *skb,
1647 struct rswitch_ext_desc *desc)
1648 {
1649 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
1650 INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
1651 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1652 unsigned int tag;
1653
1654 tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
1655 if (tag == TS_TAGS_PER_PORT)
1656 return false;
1657 smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
1658 rdev->ts_skb[tag] = skb_get(skb);
1659 set_bit(tag, rdev->ts_skb_used);
1660
1661 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1662 desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
1663
1664 skb_tx_timestamp(skb);
1665 }
1666
1667 return true;
1668 }
1669
1670 static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
1671 struct sk_buff *skb,
1672 struct rswitch_ext_desc *desc,
1673 dma_addr_t dma_addr, u16 len, u8 die_dt)
1674 {
1675 rswitch_desc_set_dptr(&desc->desc, dma_addr);
1676 desc->desc.info_ds = cpu_to_le16(len);
1677 if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
1678 return false;
1679
1680 dma_wmb();
1681
1682 desc->desc.die_dt = die_dt;
1683
1684 return true;
1685 }
1686
1687 static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
1688 {
1689 if (nr_desc == 1)
1690 return DT_FSINGLE | DIE;
1691 if (index == 0)
1692 return DT_FSTART;
1693 if (nr_desc - 1 == index)
1694 return DT_FEND | DIE;
1695 return DT_FMID;
1696 }
1697
1698 static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
1699 {
1700 switch (die_dt & DT_MASK) {
1701 case DT_FSINGLE:
1702 case DT_FEND:
1703 return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
1704 case DT_FSTART:
1705 case DT_FMID:
1706 return RSWITCH_DESC_BUF_SIZE;
1707 default:
1708 return 0;
1709 }
1710 }
1711
1712 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1713 {
1714 struct rswitch_device *rdev = netdev_priv(ndev);
1715 struct rswitch_gwca_queue *gq = rdev->tx_queue;
1716 dma_addr_t dma_addr, dma_addr_orig;
1717 netdev_tx_t ret = NETDEV_TX_OK;
1718 struct rswitch_ext_desc *desc;
1719 unsigned int i, nr_desc;
1720 u8 die_dt;
1721 u16 len;
1722
1723 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
1724 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
1725 netif_stop_subqueue(ndev, 0);
1726 return NETDEV_TX_BUSY;
1727 }
1728
1729 if (skb_put_padto(skb, ETH_ZLEN))
1730 return ret;
1731
1732 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
1733 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
1734 goto err_kfree;
1735
1736 /* Store the skb in the last descriptor's slot so it is not freed before the hardware has finished sending */
1737 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
1738 gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
1739
1740 dma_wmb();
1741
1742 /* DT_FSTART must be written last, so the hardware does not see a valid FSTART before the rest of the chain; hence the descriptors are filled in reverse order. */
1743 for (i = nr_desc; i-- > 0; ) {
1744 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
1745 die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
1746 dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
1747 len = rswitch_ext_desc_get_len(die_dt, skb->len);
1748 if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
1749 goto err_unmap;
1750 }
1751
1752 gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
1753 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
1754
1755 return ret;
1756
1757 err_unmap:
1758 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
1759 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
1760
1761 err_kfree:
1762 dev_kfree_skb_any(skb);
1763
1764 return ret;
1765 }
1766
1767 static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
1768 {
1769 return &ndev->stats;
1770 }
1771
1772 static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
1773 {
1774 struct rswitch_device *rdev = netdev_priv(ndev);
1775 struct rcar_gen4_ptp_private *ptp_priv;
1776 struct hwtstamp_config config;
1777
1778 ptp_priv = rdev->priv->ptp_priv;
1779
1780 config.flags = 0;
1781 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1782 HWTSTAMP_TX_OFF;
1783 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
1784 case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
1785 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1786 break;
1787 case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
1788 config.rx_filter = HWTSTAMP_FILTER_ALL;
1789 break;
1790 default:
1791 config.rx_filter = HWTSTAMP_FILTER_NONE;
1792 break;
1793 }
1794
1795 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1796 }
1797
1798 static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
1799 {
1800 struct rswitch_device *rdev = netdev_priv(ndev);
1801 u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
1802 struct hwtstamp_config config;
1803 u32 tstamp_tx_ctrl;
1804
1805 if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1806 return -EFAULT;
1807
1808 if (config.flags)
1809 return -EINVAL;
1810
1811 switch (config.tx_type) {
1812 case HWTSTAMP_TX_OFF:
1813 tstamp_tx_ctrl = 0;
1814 break;
1815 case HWTSTAMP_TX_ON:
1816 tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
1817 break;
1818 default:
1819 return -ERANGE;
1820 }
1821
1822 switch (config.rx_filter) {
1823 case HWTSTAMP_FILTER_NONE:
1824 tstamp_rx_ctrl = 0;
1825 break;
1826 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1827 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
1828 break;
1829 default:
1830 config.rx_filter = HWTSTAMP_FILTER_ALL;
1831 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
1832 break;
1833 }
1834
1835 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1836 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1837
1838 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1839 }
1840
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
	 * The PSMCS value is calculated for an MDC frequency of 2.5 MHz, so
	 * multiply both the numerator and the denominator by 10 to stay in
	 * integer arithmetic.
	 */
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
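	/* Worked example (hypothetical clock rate, not taken from this file):
	 * with a 320 MHz peripheral clock, clk_get_rate() returns 320000000,
	 * so 320000000 / 100000 = 3200 (the clock in 0.1 MHz units) and
	 * 3200 / 50 - 1 = 63, i.e. PSMCS = 63 for a 2.5 MHz MDC.
	 */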
}

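/* Allocate a per-port net_device ("tsn<N>"), wire up its netdev/ethtool ops
 * and NAPI context, pick a MAC address (device tree first, then the address
 * read from the ETHA block, then a random one), and set up the port's RX/TX
 * DMA chains.
 */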
static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;
	ndev->max_mtu = RSWITCH_MAX_MTU;
	ndev->min_mtu = ETH_MIN_MTU;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

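/* One-time hardware bring-up: reset the switch, initialize the TOP and
 * buffer-pool blocks, allocate the GWCA link-fix table and timestamp queue,
 * create one net_device per port, initialize the forwarding engine and the
 * gPTP clock, request the GWCA interrupts, and finally register the
 * net_devices of all enabled ports.
 */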
static int rswitch_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (; i-- > 0; )
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
				     clk_get_rate(priv->clk));
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};

static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

static void rswitch_deinit(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	rswitch_for_each_enabled_port(priv, i) {
		struct rswitch_device *rdev = priv->rdev[i];

		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static void renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
}

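/* System sleep: suspend detaches and stops every running port and powers
 * down its SerDes PHY (if it had been initialized); resume powers the
 * SerDes back up and reopens the ports that were running before suspend.
 */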
static int renesas_eth_sw_suspend(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			rswitch_stop(ndev);
		}
		if (priv->rdev[i]->serdes->init_count)
			phy_exit(priv->rdev[i]->serdes);
	}

	return 0;
}

static int renesas_eth_sw_resume(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		phy_init(priv->rdev[i]->serdes);
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			rswitch_open(ndev);
			netif_device_attach(ndev);
		}
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
				renesas_eth_sw_resume);

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");