// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>

#include "rswitch.h"
28
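/* Busy-poll a register until (value & mask) == expected, giving up after
 * RSWITCH_TIMEOUT_US microseconds.
 */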
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
30 {
31 u32 val;
32
33 return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
34 1, RSWITCH_TIMEOUT_US);
35 }
36
static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
38 {
39 iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
40 }
41
42 /* Common Agent block (COMA) */
static void rswitch_reset(struct rswitch_private *priv)
44 {
45 iowrite32(RRC_RR, priv->addr + RRC);
46 iowrite32(RRC_RR_CLR, priv->addr + RRC);
47 }
48
static void rswitch_clock_enable(struct rswitch_private *priv)
50 {
51 iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
52 }
53
static void rswitch_clock_disable(struct rswitch_private *priv)
55 {
56 iowrite32(RCDC_RCD, priv->addr + RCDC);
57 }
58
static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr,
					   unsigned int port)
61 {
62 u32 val = ioread32(coma_addr + RCEC);
63
64 if (val & RCEC_RCE)
65 return (val & BIT(port)) ? true : false;
66 else
67 return false;
68 }
69
static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, unsigned int port,
				     int enable)
72 {
73 u32 val;
74
75 if (enable) {
76 val = ioread32(coma_addr + RCEC);
77 iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
78 } else {
79 val = ioread32(coma_addr + RCDC);
80 iowrite32(val | BIT(port), coma_addr + RCDC);
81 }
82 }
83
static int rswitch_bpool_config(struct rswitch_private *priv)
85 {
86 u32 val;
87
88 val = ioread32(priv->addr + CABPIRM);
89 if (val & CABPIRM_BPR)
90 return 0;
91
92 iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);
93
94 return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
95 }
96
static void rswitch_coma_init(struct rswitch_private *priv)
98 {
99 iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
100 }
101
102 /* R-Switch-2 block (TOP) */
static void rswitch_top_init(struct rswitch_private *priv)
104 {
105 unsigned int i;
106
107 for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
108 iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
109 }
110
111 /* Forwarding engine block (MFWD) */
static void rswitch_fwd_init(struct rswitch_private *priv)
113 {
114 u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
115 unsigned int i;
116
117 /* Start with empty configuration */
118 for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
119 /* Disable all port features */
120 iowrite32(0, priv->addr + FWPC0(i));
121 /* Disallow L3 forwarding and direct descriptor forwarding */
122 iowrite32(FIELD_PREP(FWCP1_LTHFW, all_ports_mask),
123 priv->addr + FWPC1(i));
124 /* Disallow L2 forwarding */
125 iowrite32(FIELD_PREP(FWCP2_LTWFW, all_ports_mask),
126 priv->addr + FWPC2(i));
127 /* Disallow port based forwarding */
128 iowrite32(0, priv->addr + FWPBFC(i));
129 }
130
131 /* For enabled ETHA ports, setup port based forwarding */
132 rswitch_for_each_enabled_port(priv, i) {
133 /* Port based forwarding from port i to GWCA port */
134 rswitch_modify(priv->addr, FWPBFC(i), FWPBFC_PBDV,
135 FIELD_PREP(FWPBFC_PBDV, BIT(priv->gwca.index)));
136 /* Within GWCA port, forward to Rx queue for port i */
137 iowrite32(priv->rdev[i]->rx_queue->index,
138 priv->addr + FWPBFCSDC(GWCA_INDEX, i));
139 }
140
141 /* For GWCA port, allow direct descriptor forwarding */
142 rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
143 }
144
145 /* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
148 {
149 int ret;
150
151 if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
152 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);
153
154 iowrite32(mode, priv->addr + GWMC);
155
156 ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);
157
158 if (mode == GWMC_OPC_DISABLE)
159 rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);
160
161 return ret;
162 }
163
static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
165 {
166 iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);
167
168 return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
169 }
170
static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
172 {
173 iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);
174
175 return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
176 }
177
static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
179 {
180 u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
181 unsigned int i;
182
183 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
184 if (dis[i] & mask[i])
185 return true;
186 }
187
188 return false;
189 }
190
static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
192 {
193 unsigned int i;
194
195 for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
196 dis[i] = ioread32(priv->addr + GWDIS(i));
197 dis[i] &= ioread32(priv->addr + GWDIE(i));
198 }
199 }
200
static void rswitch_enadis_data_irq(struct rswitch_private *priv,
				    unsigned int index, bool enable)
203 {
204 u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);
205
206 iowrite32(BIT(index % 32), priv->addr + offs);
207 }
208
static void rswitch_ack_data_irq(struct rswitch_private *priv,
				 unsigned int index)
211 {
212 u32 offs = GWDIS(index / 32);
213
214 iowrite32(BIT(index % 32), priv->addr + offs);
215 }
216
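/* Advance a ring index (gq->cur or gq->dirty) by @num entries, wrapping
 * around at gq->ring_size.
 */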
static unsigned int rswitch_next_queue_index(struct rswitch_gwca_queue *gq,
					     bool cur, unsigned int num)
219 {
220 unsigned int index = cur ? gq->cur : gq->dirty;
221
222 if (index + num >= gq->ring_size)
223 index = (index + num) % gq->ring_size;
224 else
225 index += num;
226
227 return index;
228 }
229
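/* Number of descriptors currently in use, i.e. between gq->dirty and gq->cur. */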
static unsigned int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
231 {
232 if (gq->cur >= gq->dirty)
233 return gq->cur - gq->dirty;
234 else
235 return gq->ring_size - gq->dirty + gq->cur;
236 }
237
static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
239 {
240 struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
241
242 if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
243 return true;
244
245 return false;
246 }
247
static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
					   unsigned int start_index,
					   unsigned int num)
251 {
252 unsigned int i, index;
253
254 for (i = 0; i < num; i++) {
255 index = (i + start_index) % gq->ring_size;
256 if (gq->rx_bufs[index])
257 continue;
258 gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
259 if (!gq->rx_bufs[index])
260 goto err;
261 }
262
263 return 0;
264
265 err:
266 for (; i-- > 0; ) {
267 index = (i + start_index) % gq->ring_size;
268 skb_free_frag(gq->rx_bufs[index]);
269 gq->rx_bufs[index] = NULL;
270 }
271
272 return -ENOMEM;
273 }
274
static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
277 {
278 unsigned int i;
279
280 if (!gq->dir_tx) {
281 dma_free_coherent(ndev->dev.parent,
282 sizeof(struct rswitch_ext_ts_desc) *
283 (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
284 gq->rx_ring = NULL;
285
286 for (i = 0; i < gq->ring_size; i++)
287 skb_free_frag(gq->rx_bufs[i]);
288 kfree(gq->rx_bufs);
289 gq->rx_bufs = NULL;
290 } else {
291 dma_free_coherent(ndev->dev.parent,
292 sizeof(struct rswitch_ext_desc) *
293 (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
294 gq->tx_ring = NULL;
295 kfree(gq->skbs);
296 gq->skbs = NULL;
297 kfree(gq->unmap_addrs);
298 gq->unmap_addrs = NULL;
299 }
300 }
301
static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
303 {
304 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
305
306 dma_free_coherent(&priv->pdev->dev,
307 sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
308 gq->ts_ring, gq->ring_dma);
309 gq->ts_ring = NULL;
310 }
311
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
				    struct rswitch_private *priv,
				    struct rswitch_gwca_queue *gq,
				    bool dir_tx, unsigned int ring_size)
316 {
317 unsigned int i, bit;
318
319 gq->dir_tx = dir_tx;
320 gq->ring_size = ring_size;
321 gq->ndev = ndev;
322
323 if (!dir_tx) {
324 gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
325 if (!gq->rx_bufs)
326 return -ENOMEM;
327 if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
328 goto out;
329
330 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
331 sizeof(struct rswitch_ext_ts_desc) *
332 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
333 } else {
334 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
335 if (!gq->skbs)
336 return -ENOMEM;
337 gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
338 if (!gq->unmap_addrs)
339 goto out;
340 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
341 sizeof(struct rswitch_ext_desc) *
342 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
343 }
344
345 if (!gq->rx_ring && !gq->tx_ring)
346 goto out;
347
348 i = gq->index / 32;
349 bit = BIT(gq->index % 32);
350 if (dir_tx)
351 priv->gwca.tx_irq_bits[i] |= bit;
352 else
353 priv->gwca.rx_irq_bits[i] |= bit;
354
355 return 0;
356
357 out:
358 rswitch_gwca_queue_free(ndev, gq);
359
360 return -ENOMEM;
361 }
362
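/* Descriptor pointers are 40 bits wide: low 32 bits in dptrl, upper 8 bits in dptrh. */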
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
364 {
365 desc->dptrl = cpu_to_le32(lower_32_bits(addr));
366 desc->dptrh = upper_32_bits(addr) & 0xff;
367 }
368
static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
370 {
371 return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
372 }
373
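/* Format a descriptor ring for hardware use: map buffers, mark every entry
 * empty, terminate the ring with a DT_LINKFIX descriptor pointing back to the
 * ring base, and program the queue's GWDCC control register.
 */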
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
377 {
378 unsigned int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
379 struct rswitch_ext_desc *desc;
380 struct rswitch_desc *linkfix;
381 dma_addr_t dma_addr;
382 unsigned int i;
383
384 memset(gq->tx_ring, 0, ring_size);
385 for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
386 if (!gq->dir_tx) {
387 dma_addr = dma_map_single(ndev->dev.parent,
388 gq->rx_bufs[i] + RSWITCH_HEADROOM,
389 RSWITCH_MAP_BUF_SIZE,
390 DMA_FROM_DEVICE);
391 if (dma_mapping_error(ndev->dev.parent, dma_addr))
392 goto err;
393
394 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
395 rswitch_desc_set_dptr(&desc->desc, dma_addr);
396 desc->desc.die_dt = DT_FEMPTY | DIE;
397 } else {
398 desc->desc.die_dt = DT_EEMPTY | DIE;
399 }
400 }
401 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
402 desc->desc.die_dt = DT_LINKFIX;
403
404 linkfix = &priv->gwca.linkfix_table[gq->index];
405 linkfix->die_dt = DT_LINKFIX;
406 rswitch_desc_set_dptr(linkfix, gq->ring_dma);
407
408 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
409 priv->addr + GWDCC_OFFS(gq->index));
410
411 return 0;
412
413 err:
414 if (!gq->dir_tx) {
415 for (desc = gq->tx_ring; i-- > 0; desc++) {
416 dma_addr = rswitch_desc_get_dptr(&desc->desc);
417 dma_unmap_single(ndev->dev.parent, dma_addr,
418 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
419 }
420 }
421
422 return -ENOMEM;
423 }
424
static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
				       unsigned int start_index,
				       unsigned int num)
428 {
429 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
430 struct rswitch_ts_desc *desc;
431 unsigned int i, index;
432
433 for (i = 0; i < num; i++) {
434 index = (i + start_index) % gq->ring_size;
435 desc = &gq->ts_ring[index];
436 desc->desc.die_dt = DT_FEMPTY_ND | DIE;
437 }
438 }
439
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  unsigned int start_index,
					  unsigned int num)
444 {
445 struct rswitch_device *rdev = netdev_priv(ndev);
446 struct rswitch_ext_ts_desc *desc;
447 unsigned int i, index;
448 dma_addr_t dma_addr;
449
450 for (i = 0; i < num; i++) {
451 index = (i + start_index) % gq->ring_size;
452 desc = &gq->rx_ring[index];
453 if (!gq->dir_tx) {
454 dma_addr = dma_map_single(ndev->dev.parent,
455 gq->rx_bufs[index] + RSWITCH_HEADROOM,
456 RSWITCH_MAP_BUF_SIZE,
457 DMA_FROM_DEVICE);
458 if (dma_mapping_error(ndev->dev.parent, dma_addr))
459 goto err;
460
461 desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
462 rswitch_desc_set_dptr(&desc->desc, dma_addr);
463 dma_wmb();
464 desc->desc.die_dt = DT_FEMPTY | DIE;
465 desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
466 } else {
467 desc->desc.die_dt = DT_EEMPTY | DIE;
468 }
469 }
470
471 return 0;
472
473 err:
474 if (!gq->dir_tx) {
475 for (; i-- > 0; ) {
476 index = (i + start_index) % gq->ring_size;
477 desc = &gq->rx_ring[index];
478 dma_addr = rswitch_desc_get_dptr(&desc->desc);
479 dma_unmap_single(ndev->dev.parent, dma_addr,
480 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
481 }
482 }
483
484 return -ENOMEM;
485 }
486
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
490 {
491 unsigned int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
492 struct rswitch_ext_ts_desc *desc;
493 struct rswitch_desc *linkfix;
494 int err;
495
496 memset(gq->rx_ring, 0, ring_size);
497 err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
498 if (err < 0)
499 return err;
500
501 desc = &gq->rx_ring[gq->ring_size]; /* Last */
502 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
503 desc->desc.die_dt = DT_LINKFIX;
504
505 linkfix = &priv->gwca.linkfix_table[gq->index];
506 linkfix->die_dt = DT_LINKFIX;
507 rswitch_desc_set_dptr(linkfix, gq->ring_dma);
508
509 iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
510 GWDCC_ETS | GWDCC_EDE,
511 priv->addr + GWDCC_OFFS(gq->index));
512
513 return 0;
514 }
515
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
517 {
518 unsigned int i, num_queues = priv->gwca.num_queues;
519 struct rswitch_gwca *gwca = &priv->gwca;
520 struct device *dev = &priv->pdev->dev;
521
522 gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
523 gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
524 &gwca->linkfix_table_dma, GFP_KERNEL);
525 if (!gwca->linkfix_table)
526 return -ENOMEM;
527 for (i = 0; i < num_queues; i++)
528 gwca->linkfix_table[i].die_dt = DT_EOS;
529
530 return 0;
531 }
532
static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
534 {
535 struct rswitch_gwca *gwca = &priv->gwca;
536
537 if (gwca->linkfix_table)
538 dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
539 gwca->linkfix_table, gwca->linkfix_table_dma);
540 gwca->linkfix_table = NULL;
541 }
542
static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
544 {
545 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
546 struct rswitch_ts_desc *desc;
547
548 gq->ring_size = TS_RING_SIZE;
549 gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
550 sizeof(struct rswitch_ts_desc) *
551 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
552
553 if (!gq->ts_ring)
554 return -ENOMEM;
555
556 rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
557 desc = &gq->ts_ring[gq->ring_size];
558 desc->desc.die_dt = DT_LINKFIX;
559 rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
560
561 return 0;
562 }
563
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
565 {
566 struct rswitch_gwca_queue *gq;
567 unsigned int index;
568
569 index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
570 if (index >= priv->gwca.num_queues)
571 return NULL;
572 set_bit(index, priv->gwca.used);
573 gq = &priv->gwca.queues[index];
574 memset(gq, 0, sizeof(*gq));
575 gq->index = index;
576
577 return gq;
578 }
579
static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
582 {
583 clear_bit(gq->index, priv->gwca.used);
584 }
585
static int rswitch_txdmac_alloc(struct net_device *ndev)
587 {
588 struct rswitch_device *rdev = netdev_priv(ndev);
589 struct rswitch_private *priv = rdev->priv;
590 int err;
591
592 rdev->tx_queue = rswitch_gwca_get(priv);
593 if (!rdev->tx_queue)
594 return -EBUSY;
595
596 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
597 if (err < 0) {
598 rswitch_gwca_put(priv, rdev->tx_queue);
599 return err;
600 }
601
602 return 0;
603 }
604
static void rswitch_txdmac_free(struct net_device *ndev)
606 {
607 struct rswitch_device *rdev = netdev_priv(ndev);
608
609 rswitch_gwca_queue_free(ndev, rdev->tx_queue);
610 rswitch_gwca_put(rdev->priv, rdev->tx_queue);
611 }
612
static int rswitch_txdmac_init(struct rswitch_private *priv, unsigned int index)
614 {
615 struct rswitch_device *rdev = priv->rdev[index];
616
617 return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
618 }
619
static int rswitch_rxdmac_alloc(struct net_device *ndev)
621 {
622 struct rswitch_device *rdev = netdev_priv(ndev);
623 struct rswitch_private *priv = rdev->priv;
624 int err;
625
626 rdev->rx_queue = rswitch_gwca_get(priv);
627 if (!rdev->rx_queue)
628 return -EBUSY;
629
630 err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
631 if (err < 0) {
632 rswitch_gwca_put(priv, rdev->rx_queue);
633 return err;
634 }
635
636 return 0;
637 }
638
static void rswitch_rxdmac_free(struct net_device *ndev)
640 {
641 struct rswitch_device *rdev = netdev_priv(ndev);
642
643 rswitch_gwca_queue_free(ndev, rdev->rx_queue);
644 rswitch_gwca_put(rdev->priv, rdev->rx_queue);
645 }
646
static int rswitch_rxdmac_init(struct rswitch_private *priv, unsigned int index)
648 {
649 struct rswitch_device *rdev = priv->rdev[index];
650 struct net_device *ndev = rdev->ndev;
651
652 return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
653 }
654
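/* Bring the GWCA into operation: switch to CONFIG mode, reset the multicast
 * table and AXI RAM, program the descriptor base and timestamp ring addresses,
 * format the per-port RX/TX rings, then enter OPERATION mode.
 */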
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
656 {
657 unsigned int i;
658 int err;
659
660 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
661 if (err < 0)
662 return err;
663 err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
664 if (err < 0)
665 return err;
666
667 err = rswitch_gwca_mcast_table_reset(priv);
668 if (err < 0)
669 return err;
670 err = rswitch_gwca_axi_ram_reset(priv);
671 if (err < 0)
672 return err;
673
674 iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
675 iowrite32(0, priv->addr + GWTTFC);
676 iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
677 iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
678 iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
679 iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
680 iowrite32(GWMDNC_TSDMN(1) | GWMDNC_TXDMN(0x1e) | GWMDNC_RXDMN(0x1f),
681 priv->addr + GWMDNC);
682 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
683
684 iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);
685
686 for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
687 err = rswitch_rxdmac_init(priv, i);
688 if (err < 0)
689 return err;
690 err = rswitch_txdmac_init(priv, i);
691 if (err < 0)
692 return err;
693 }
694
695 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
696 if (err < 0)
697 return err;
698 return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
699 }
700
static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
702 {
703 int err;
704
705 err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
706 if (err < 0)
707 return err;
708 err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
709 if (err < 0)
710 return err;
711
712 return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
713 }
714
static int rswitch_gwca_halt(struct rswitch_private *priv)
716 {
717 int err;
718
719 priv->gwca_halt = true;
720 err = rswitch_gwca_hw_deinit(priv);
721 dev_err(&priv->pdev->dev, "halted (%d)\n", err);
722
723 return err;
724 }
725
static struct sk_buff *rswitch_rx_handle_desc(struct net_device *ndev,
					      struct rswitch_gwca_queue *gq,
					      struct rswitch_ext_ts_desc *desc)
729 {
730 dma_addr_t dma_addr = rswitch_desc_get_dptr(&desc->desc);
731 u16 pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
732 u8 die_dt = desc->desc.die_dt & DT_MASK;
733 struct sk_buff *skb = NULL;
734
735 dma_unmap_single(ndev->dev.parent, dma_addr, RSWITCH_MAP_BUF_SIZE,
736 DMA_FROM_DEVICE);
737
738 /* The RX descriptor order will be one of the following:
739 * - FSINGLE
740 * - FSTART -> FEND
741 * - FSTART -> FMID -> FEND
742 */
743
/* Check whether the descriptor arrived in an unexpected order */
745 switch (die_dt) {
746 case DT_FSTART:
747 case DT_FSINGLE:
748 if (gq->skb_fstart) {
749 dev_kfree_skb_any(gq->skb_fstart);
750 gq->skb_fstart = NULL;
751 ndev->stats.rx_dropped++;
752 }
753 break;
754 case DT_FMID:
755 case DT_FEND:
756 if (!gq->skb_fstart) {
757 ndev->stats.rx_dropped++;
758 return NULL;
759 }
760 break;
761 default:
762 break;
763 }
764
765 /* Handle the descriptor */
766 switch (die_dt) {
767 case DT_FSTART:
768 case DT_FSINGLE:
769 skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
770 if (skb) {
771 skb_reserve(skb, RSWITCH_HEADROOM);
772 skb_put(skb, pkt_len);
773 gq->pkt_len = pkt_len;
774 if (die_dt == DT_FSTART) {
775 gq->skb_fstart = skb;
776 skb = NULL;
777 }
778 }
779 break;
780 case DT_FMID:
781 case DT_FEND:
782 skb_add_rx_frag(gq->skb_fstart, skb_shinfo(gq->skb_fstart)->nr_frags,
783 virt_to_page(gq->rx_bufs[gq->cur]),
784 offset_in_page(gq->rx_bufs[gq->cur]) + RSWITCH_HEADROOM,
785 pkt_len, RSWITCH_BUF_SIZE);
786 if (die_dt == DT_FEND) {
787 skb = gq->skb_fstart;
788 gq->skb_fstart = NULL;
789 }
790 gq->pkt_len += pkt_len;
791 break;
792 default:
793 netdev_err(ndev, "%s: unexpected value (%x)\n", __func__, die_dt);
794 break;
795 }
796
797 return skb;
798 }
799
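/* NAPI receive: walk the RX ring until an empty descriptor or the quota is
 * exhausted, pass completed frames to the stack, then refill the ring.
 */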
static bool rswitch_rx(struct net_device *ndev, int *quota)
801 {
802 struct rswitch_device *rdev = netdev_priv(ndev);
803 struct rswitch_gwca_queue *gq = rdev->rx_queue;
804 struct rswitch_ext_ts_desc *desc;
805 int limit, boguscnt, ret;
806 struct sk_buff *skb;
807 unsigned int num;
808 u32 get_ts;
809
810 if (*quota <= 0)
811 return true;
812
813 boguscnt = min_t(int, gq->ring_size, *quota);
814 limit = boguscnt;
815
816 desc = &gq->rx_ring[gq->cur];
817 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
818 dma_rmb();
819 skb = rswitch_rx_handle_desc(ndev, gq, desc);
820 if (!skb)
821 goto out;
822
823 get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
824 if (get_ts) {
825 struct skb_shared_hwtstamps *shhwtstamps;
826 struct timespec64 ts;
827
828 shhwtstamps = skb_hwtstamps(skb);
829 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
830 ts.tv_sec = __le32_to_cpu(desc->ts_sec);
831 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
832 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
833 }
834 skb->protocol = eth_type_trans(skb, ndev);
835 napi_gro_receive(&rdev->napi, skb);
836 rdev->ndev->stats.rx_packets++;
837 rdev->ndev->stats.rx_bytes += gq->pkt_len;
838
839 out:
840 gq->rx_bufs[gq->cur] = NULL;
841 gq->cur = rswitch_next_queue_index(gq, true, 1);
842 desc = &gq->rx_ring[gq->cur];
843
844 if (--boguscnt <= 0)
845 break;
846 }
847
848 num = rswitch_get_num_cur_queues(gq);
849 ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
850 if (ret < 0)
851 goto err;
852 ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
853 if (ret < 0)
854 goto err;
855 gq->dirty = rswitch_next_queue_index(gq, false, num);
856
857 *quota -= limit - boguscnt;
858
859 return boguscnt <= 0;
860
861 err:
862 rswitch_gwca_halt(rdev->priv);
863
864 return 0;
865 }
866
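/* Reclaim completed TX descriptors: update stats, unmap the buffer and free
 * the skb stored at the frame's last descriptor.
 */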
static void rswitch_tx_free(struct net_device *ndev)
868 {
869 struct rswitch_device *rdev = netdev_priv(ndev);
870 struct rswitch_gwca_queue *gq = rdev->tx_queue;
871 struct rswitch_ext_desc *desc;
872 struct sk_buff *skb;
873
874 desc = &gq->tx_ring[gq->dirty];
875 while ((desc->desc.die_dt & DT_MASK) == DT_FEMPTY) {
876 dma_rmb();
877
878 skb = gq->skbs[gq->dirty];
879 if (skb) {
880 rdev->ndev->stats.tx_packets++;
881 rdev->ndev->stats.tx_bytes += skb->len;
882 dma_unmap_single(ndev->dev.parent,
883 gq->unmap_addrs[gq->dirty],
884 skb->len, DMA_TO_DEVICE);
885 dev_kfree_skb_any(gq->skbs[gq->dirty]);
886 gq->skbs[gq->dirty] = NULL;
887 }
888
889 desc->desc.die_dt = DT_EEMPTY;
890 gq->dirty = rswitch_next_queue_index(gq, false, 1);
891 desc = &gq->tx_ring[gq->dirty];
892 }
893 }
894
static int rswitch_poll(struct napi_struct *napi, int budget)
896 {
897 struct net_device *ndev = napi->dev;
898 struct rswitch_private *priv;
899 struct rswitch_device *rdev;
900 unsigned long flags;
901 int quota = budget;
902
903 rdev = netdev_priv(ndev);
904 priv = rdev->priv;
905
906 retry:
907 rswitch_tx_free(ndev);
908
909 if (rswitch_rx(ndev, "a))
910 goto out;
911 else if (rdev->priv->gwca_halt)
912 goto err;
913 else if (rswitch_is_queue_rxed(rdev->rx_queue))
914 goto retry;
915
916 netif_wake_subqueue(ndev, 0);
917
918 if (napi_complete_done(napi, budget - quota)) {
919 spin_lock_irqsave(&priv->lock, flags);
920 if (test_bit(rdev->port, priv->opened_ports)) {
921 rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
922 rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
923 }
924 spin_unlock_irqrestore(&priv->lock, flags);
925 }
926
927 out:
928 return budget - quota;
929
930 err:
931 napi_complete(napi);
932
933 return 0;
934 }
935
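/* Mask this port's TX/RX data interrupts and hand processing over to NAPI. */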
static void rswitch_queue_interrupt(struct net_device *ndev)
937 {
938 struct rswitch_device *rdev = netdev_priv(ndev);
939
940 if (napi_schedule_prep(&rdev->napi)) {
941 spin_lock(&rdev->priv->lock);
942 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
943 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
944 spin_unlock(&rdev->priv->lock);
945 __napi_schedule(&rdev->napi);
946 }
947 }
948
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
950 {
951 struct rswitch_gwca_queue *gq;
952 unsigned int i, index, bit;
953
954 for (i = 0; i < priv->gwca.num_queues; i++) {
955 gq = &priv->gwca.queues[i];
956 index = gq->index / 32;
957 bit = BIT(gq->index % 32);
958 if (!(dis[index] & bit))
959 continue;
960
961 rswitch_ack_data_irq(priv, gq->index);
962 rswitch_queue_interrupt(gq->ndev);
963 }
964
965 return IRQ_HANDLED;
966 }
967
static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
969 {
970 struct rswitch_private *priv = dev_id;
971 u32 dis[RSWITCH_NUM_IRQ_REGS];
972 irqreturn_t ret = IRQ_NONE;
973
974 rswitch_get_data_irq_status(priv, dis);
975
976 if (rswitch_is_any_data_irq(priv, dis, true) ||
977 rswitch_is_any_data_irq(priv, dis, false))
978 ret = rswitch_data_irq(priv, dis);
979
980 return ret;
981 }
982
static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
984 {
985 char *resource_name, *irq_name;
986 int i, ret, irq;
987
988 for (i = 0; i < GWCA_NUM_IRQS; i++) {
989 resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
990 if (!resource_name)
991 return -ENOMEM;
992
993 irq = platform_get_irq_byname(priv->pdev, resource_name);
994 kfree(resource_name);
995 if (irq < 0)
996 return irq;
997
998 irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
999 GWCA_IRQ_NAME, i);
1000 if (!irq_name)
1001 return -ENOMEM;
1002
1003 ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
1004 0, irq_name, priv);
1005 if (ret < 0)
1006 return ret;
1007 }
1008
1009 return 0;
1010 }
1011
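/* Drain the timestamp descriptor ring: match each completed descriptor to the
 * skb saved under the same port/tag in rswitch_ext_desc_set_info1() and
 * deliver the hardware TX timestamp.
 */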
static void rswitch_ts(struct rswitch_private *priv)
1013 {
1014 struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
1015 struct skb_shared_hwtstamps shhwtstamps;
1016 struct rswitch_ts_desc *desc;
1017 struct rswitch_device *rdev;
1018 struct sk_buff *ts_skb;
1019 struct timespec64 ts;
1020 unsigned int num;
1021 u32 tag, port;
1022
1023 desc = &gq->ts_ring[gq->cur];
1024 while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
1025 dma_rmb();
1026
1027 port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
1028 if (unlikely(port >= RSWITCH_NUM_PORTS))
1029 goto next;
1030 rdev = priv->rdev[port];
1031
1032 tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
1033 if (unlikely(tag >= TS_TAGS_PER_PORT))
1034 goto next;
1035 ts_skb = xchg(&rdev->ts_skb[tag], NULL);
1036 smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
1037 clear_bit(tag, rdev->ts_skb_used);
1038
1039 if (unlikely(!ts_skb))
1040 goto next;
1041
1042 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1043 ts.tv_sec = __le32_to_cpu(desc->ts_sec);
1044 ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
1045 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
1046 skb_tstamp_tx(ts_skb, &shhwtstamps);
1047 dev_consume_skb_irq(ts_skb);
1048
1049 next:
1050 gq->cur = rswitch_next_queue_index(gq, true, 1);
1051 desc = &gq->ts_ring[gq->cur];
1052 }
1053
1054 num = rswitch_get_num_cur_queues(gq);
1055 rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
1056 gq->dirty = rswitch_next_queue_index(gq, false, num);
1057 }
1058
static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
1060 {
1061 struct rswitch_private *priv = dev_id;
1062
1063 if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
1064 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
1065 rswitch_ts(priv);
1066
1067 return IRQ_HANDLED;
1068 }
1069
1070 return IRQ_NONE;
1071 }
1072
static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
1074 {
1075 int irq;
1076
1077 irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
1078 if (irq < 0)
1079 return irq;
1080
1081 return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
1082 0, GWCA_TS_IRQ_NAME, priv);
1083 }
1084
1085 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
				    enum rswitch_etha_mode mode)
1088 {
1089 int ret;
1090
1091 if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
1092 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);
1093
1094 iowrite32(mode, etha->addr + EAMC);
1095
1096 ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);
1097
1098 if (mode == EAMC_OPC_DISABLE)
1099 rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);
1100
1101 return ret;
1102 }
1103
static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
1105 {
1106 u32 mrmac0 = ioread32(etha->addr + MRMAC0);
1107 u32 mrmac1 = ioread32(etha->addr + MRMAC1);
1108 u8 *mac = ða->mac_addr[0];
1109
1110 mac[0] = (mrmac0 >> 8) & 0xFF;
1111 mac[1] = (mrmac0 >> 0) & 0xFF;
1112 mac[2] = (mrmac1 >> 24) & 0xFF;
1113 mac[3] = (mrmac1 >> 16) & 0xFF;
1114 mac[4] = (mrmac1 >> 8) & 0xFF;
1115 mac[5] = (mrmac1 >> 0) & 0xFF;
1116 }
1117
static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
1119 {
1120 iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
1121 iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1122 etha->addr + MRMAC1);
1123 }
1124
static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
1126 {
1127 iowrite32(MLVC_PLV, etha->addr + MLVC);
1128
1129 return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
1130 }
1131
static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
1133 {
1134 u32 pis, lsc;
1135
1136 rswitch_etha_write_mac_address(etha, mac);
1137
1138 switch (etha->phy_interface) {
1139 case PHY_INTERFACE_MODE_SGMII:
1140 pis = MPIC_PIS_GMII;
1141 break;
1142 case PHY_INTERFACE_MODE_USXGMII:
1143 case PHY_INTERFACE_MODE_5GBASER:
1144 pis = MPIC_PIS_XGMII;
1145 break;
1146 default:
1147 pis = FIELD_GET(MPIC_PIS, ioread32(etha->addr + MPIC));
1148 break;
1149 }
1150
1151 switch (etha->speed) {
1152 case 100:
1153 lsc = MPIC_LSC_100M;
1154 break;
1155 case 1000:
1156 lsc = MPIC_LSC_1G;
1157 break;
1158 case 2500:
1159 lsc = MPIC_LSC_2_5G;
1160 break;
1161 default:
1162 lsc = FIELD_GET(MPIC_LSC, ioread32(etha->addr + MPIC));
1163 break;
1164 }
1165
1166 rswitch_modify(etha->addr, MPIC, MPIC_PIS | MPIC_LSC,
1167 FIELD_PREP(MPIC_PIS, pis) | FIELD_PREP(MPIC_LSC, lsc));
1168 }
1169
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
1171 {
1172 rswitch_modify(etha->addr, MPIC, MPIC_PSMCS | MPIC_PSMHT,
1173 FIELD_PREP(MPIC_PSMCS, etha->psmcs) |
1174 FIELD_PREP(MPIC_PSMHT, 0x06));
1175 }
1176
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
1178 {
1179 int err;
1180
1181 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
1182 if (err < 0)
1183 return err;
1184 err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
1185 if (err < 0)
1186 return err;
1187
1188 iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
1189 rswitch_rmac_setting(etha, mac);
1190 rswitch_etha_enable_mii(etha);
1191
1192 err = rswitch_etha_wait_link_verification(etha);
1193 if (err < 0)
1194 return err;
1195
1196 err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
1197 if (err < 0)
1198 return err;
1199
1200 return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
1201 }
1202
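/* Issue a single MDIO management frame via the MPSM register and wait for it
 * to complete; for read operations the result is returned from the PRD field.
 */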
static int rswitch_etha_mpsm_op(struct rswitch_etha *etha, bool read,
				unsigned int mmf, unsigned int pda,
				unsigned int pra, unsigned int pop,
				unsigned int prd)
1207 {
1208 u32 val;
1209 int ret;
1210
1211 val = MPSM_PSME |
1212 FIELD_PREP(MPSM_MFF, mmf) |
1213 FIELD_PREP(MPSM_PDA, pda) |
1214 FIELD_PREP(MPSM_PRA, pra) |
1215 FIELD_PREP(MPSM_POP, pop) |
1216 FIELD_PREP(MPSM_PRD, prd);
1217 iowrite32(val, etha->addr + MPSM);
1218
1219 ret = rswitch_reg_wait(etha->addr, MPSM, MPSM_PSME, 0);
1220 if (ret)
1221 return ret;
1222
1223 if (read) {
1224 val = ioread32(etha->addr + MPSM);
1225 ret = FIELD_GET(MPSM_PRD, val);
1226 }
1227
1228 return ret;
1229 }
1230
static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
				     int regad)
1233 {
1234 struct rswitch_etha *etha = bus->priv;
1235 int ret;
1236
1237 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1238 MPSM_POP_ADDRESS, regad);
1239 if (ret)
1240 return ret;
1241
1242 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C45, addr, devad,
1243 MPSM_POP_READ_C45, 0);
1244 }
1245
static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
				      int regad, u16 val)
1248 {
1249 struct rswitch_etha *etha = bus->priv;
1250 int ret;
1251
1252 ret = rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1253 MPSM_POP_ADDRESS, regad);
1254 if (ret)
1255 return ret;
1256
1257 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C45, addr, devad,
1258 MPSM_POP_WRITE, val);
1259 }
1260
static int rswitch_etha_mii_read_c22(struct mii_bus *bus, int phyad, int regad)
1262 {
1263 struct rswitch_etha *etha = bus->priv;
1264
1265 return rswitch_etha_mpsm_op(etha, true, MPSM_MMF_C22, phyad, regad,
1266 MPSM_POP_READ_C22, 0);
1267 }
1268
static int rswitch_etha_mii_write_c22(struct mii_bus *bus, int phyad,
				      int regad, u16 val)
1271 {
1272 struct rswitch_etha *etha = bus->priv;
1273
1274 return rswitch_etha_mpsm_op(etha, false, MPSM_MMF_C22, phyad, regad,
1275 MPSM_POP_WRITE, val);
1276 }
1277
/* The caller must call of_node_put() on the returned node when done */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
1280 {
1281 struct device_node *ports, *port;
1282 int err = 0;
1283 u32 index;
1284
1285 ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
1286 "ethernet-ports");
1287 if (!ports)
1288 return NULL;
1289
1290 for_each_available_child_of_node(ports, port) {
1291 err = of_property_read_u32(port, "reg", &index);
1292 if (err < 0) {
1293 port = NULL;
1294 goto out;
1295 }
1296 if (index == rdev->etha->index)
1297 break;
1298 }
1299
1300 out:
1301 of_node_put(ports);
1302
1303 return port;
1304 }
1305
static int rswitch_etha_get_params(struct rswitch_device *rdev)
1307 {
1308 u32 max_speed;
1309 int err;
1310
1311 if (!rdev->np_port)
1312 return 0; /* ignored */
1313
1314 err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
1315 if (err)
1316 return err;
1317
1318 err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
1319 if (!err) {
1320 rdev->etha->speed = max_speed;
1321 return 0;
1322 }
1323
/* If there is no "max-speed" property, use the default speed for the PHY mode */
1325 switch (rdev->etha->phy_interface) {
1326 case PHY_INTERFACE_MODE_MII:
1327 rdev->etha->speed = SPEED_100;
1328 break;
1329 case PHY_INTERFACE_MODE_SGMII:
1330 rdev->etha->speed = SPEED_1000;
1331 break;
1332 case PHY_INTERFACE_MODE_USXGMII:
1333 rdev->etha->speed = SPEED_2500;
1334 break;
1335 default:
1336 return -EINVAL;
1337 }
1338
1339 return 0;
1340 }
1341
static int rswitch_mii_register(struct rswitch_device *rdev)
1343 {
1344 struct device_node *mdio_np;
1345 struct mii_bus *mii_bus;
1346 int err;
1347
1348 mii_bus = mdiobus_alloc();
1349 if (!mii_bus)
1350 return -ENOMEM;
1351
1352 mii_bus->name = "rswitch_mii";
1353 sprintf(mii_bus->id, "etha%d", rdev->etha->index);
1354 mii_bus->priv = rdev->etha;
1355 mii_bus->read_c45 = rswitch_etha_mii_read_c45;
1356 mii_bus->write_c45 = rswitch_etha_mii_write_c45;
1357 mii_bus->read = rswitch_etha_mii_read_c22;
1358 mii_bus->write = rswitch_etha_mii_write_c22;
1359 mii_bus->parent = &rdev->priv->pdev->dev;
1360
1361 mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
1362 err = of_mdiobus_register(mii_bus, mdio_np);
1363 if (err < 0) {
1364 mdiobus_free(mii_bus);
1365 goto out;
1366 }
1367
1368 rdev->etha->mii = mii_bus;
1369
1370 out:
1371 of_node_put(mdio_np);
1372
1373 return err;
1374 }
1375
static void rswitch_mii_unregister(struct rswitch_device *rdev)
1377 {
1378 if (rdev->etha->mii) {
1379 mdiobus_unregister(rdev->etha->mii);
1380 mdiobus_free(rdev->etha->mii);
1381 rdev->etha->mii = NULL;
1382 }
1383 }
1384
static void rswitch_adjust_link(struct net_device *ndev)
1386 {
1387 struct rswitch_device *rdev = netdev_priv(ndev);
1388 struct phy_device *phydev = ndev->phydev;
1389
1390 if (phydev->link != rdev->etha->link) {
1391 phy_print_status(phydev);
1392 if (phydev->link)
1393 phy_power_on(rdev->serdes);
1394 else if (rdev->serdes->power_count)
1395 phy_power_off(rdev->serdes);
1396
1397 rdev->etha->link = phydev->link;
1398
1399 if (!rdev->priv->etha_no_runtime_change &&
1400 phydev->speed != rdev->etha->speed) {
1401 rdev->etha->speed = phydev->speed;
1402
1403 rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1404 phy_set_speed(rdev->serdes, rdev->etha->speed);
1405 }
1406 }
1407 }
1408
static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
					 struct phy_device *phydev)
1411 {
1412 if (!rdev->priv->etha_no_runtime_change)
1413 return;
1414
1415 switch (rdev->etha->speed) {
1416 case SPEED_2500:
1417 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
1418 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
1419 break;
1420 case SPEED_1000:
1421 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
1422 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
1423 break;
1424 case SPEED_100:
1425 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
1426 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
1427 break;
1428 default:
1429 break;
1430 }
1431
1432 phy_set_max_speed(phydev, rdev->etha->speed);
1433 }
1434
static int rswitch_phy_device_init(struct rswitch_device *rdev)
1436 {
1437 struct phy_device *phydev;
1438 struct device_node *phy;
1439 int err = -ENOENT;
1440
1441 if (!rdev->np_port)
1442 return -ENODEV;
1443
1444 phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
1445 if (!phy)
1446 return -ENODEV;
1447
1448 /* Set phydev->host_interfaces before calling of_phy_connect() to
1449 * configure the PHY with the information of host_interfaces.
1450 */
1451 phydev = of_phy_find_device(phy);
1452 if (!phydev)
1453 goto out;
1454 __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
1455 phydev->mac_managed_pm = true;
1456
1457 phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
1458 rdev->etha->phy_interface);
1459 if (!phydev)
1460 goto out;
1461
1462 phy_set_max_speed(phydev, SPEED_2500);
1463 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1464 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
1465 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1466 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1467 rswitch_phy_remove_link_mode(rdev, phydev);
1468
1469 phy_attached_info(phydev);
1470
1471 err = 0;
1472 out:
1473 of_node_put(phy);
1474
1475 return err;
1476 }
1477
static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
1479 {
1480 if (rdev->ndev->phydev)
1481 phy_disconnect(rdev->ndev->phydev);
1482 }
1483
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
1485 {
1486 int err;
1487
1488 err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
1489 rdev->etha->phy_interface);
1490 if (err < 0)
1491 return err;
1492
1493 return phy_set_speed(rdev->serdes, rdev->etha->speed);
1494 }
1495
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
1497 {
1498 int err;
1499
1500 if (!rdev->etha->operated) {
1501 err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
1502 if (err < 0)
1503 return err;
1504 if (rdev->priv->etha_no_runtime_change)
1505 rdev->etha->operated = true;
1506 }
1507
1508 err = rswitch_mii_register(rdev);
1509 if (err < 0)
1510 return err;
1511
1512 err = rswitch_phy_device_init(rdev);
1513 if (err < 0)
1514 goto err_phy_device_init;
1515
1516 rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
1517 if (IS_ERR(rdev->serdes)) {
1518 err = PTR_ERR(rdev->serdes);
1519 goto err_serdes_phy_get;
1520 }
1521
1522 err = rswitch_serdes_set_params(rdev);
1523 if (err < 0)
1524 goto err_serdes_set_params;
1525
1526 return 0;
1527
1528 err_serdes_set_params:
1529 err_serdes_phy_get:
1530 rswitch_phy_device_deinit(rdev);
1531
1532 err_phy_device_init:
1533 rswitch_mii_unregister(rdev);
1534
1535 return err;
1536 }
1537
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
1539 {
1540 rswitch_phy_device_deinit(rdev);
1541 rswitch_mii_unregister(rdev);
1542 }
1543
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
1545 {
1546 unsigned int i;
1547 int err;
1548
1549 rswitch_for_each_enabled_port(priv, i) {
1550 err = rswitch_ether_port_init_one(priv->rdev[i]);
1551 if (err)
1552 goto err_init_one;
1553 }
1554
1555 rswitch_for_each_enabled_port(priv, i) {
1556 err = phy_init(priv->rdev[i]->serdes);
1557 if (err)
1558 goto err_serdes;
1559 }
1560
1561 return 0;
1562
1563 err_serdes:
1564 rswitch_for_each_enabled_port_continue_reverse(priv, i)
1565 phy_exit(priv->rdev[i]->serdes);
1566 i = RSWITCH_NUM_PORTS;
1567
1568 err_init_one:
1569 rswitch_for_each_enabled_port_continue_reverse(priv, i)
1570 rswitch_ether_port_deinit_one(priv->rdev[i]);
1571
1572 return err;
1573 }
1574
static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
1576 {
1577 unsigned int i;
1578
1579 rswitch_for_each_enabled_port(priv, i) {
1580 phy_exit(priv->rdev[i]->serdes);
1581 rswitch_ether_port_deinit_one(priv->rdev[i]);
1582 }
1583 }
1584
static int rswitch_open(struct net_device *ndev)
1586 {
1587 struct rswitch_device *rdev = netdev_priv(ndev);
1588 unsigned long flags;
1589
1590 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1591 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
1592
1593 napi_enable(&rdev->napi);
1594
1595 spin_lock_irqsave(&rdev->priv->lock, flags);
1596 bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
1597 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
1598 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
1599 spin_unlock_irqrestore(&rdev->priv->lock, flags);
1600
1601 phy_start(ndev->phydev);
1602
1603 netif_start_queue(ndev);
1604
1605 return 0;
1606 };
1607
static int rswitch_stop(struct net_device *ndev)
1609 {
1610 struct rswitch_device *rdev = netdev_priv(ndev);
1611 struct sk_buff *ts_skb;
1612 unsigned long flags;
1613 unsigned int tag;
1614
1615 netif_tx_stop_all_queues(ndev);
1616
1617 phy_stop(ndev->phydev);
1618
1619 spin_lock_irqsave(&rdev->priv->lock, flags);
1620 rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
1621 rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
1622 bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
1623 spin_unlock_irqrestore(&rdev->priv->lock, flags);
1624
1625 napi_disable(&rdev->napi);
1626
1627 if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1628 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
1629
1630 for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
1631 tag < TS_TAGS_PER_PORT;
1632 tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
1633 ts_skb = xchg(&rdev->ts_skb[tag], NULL);
1634 clear_bit(tag, rdev->ts_skb_used);
1635 if (ts_skb)
1636 dev_kfree_skb(ts_skb);
1637 }
1638
1639 return 0;
1640 };
1641
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
				       struct sk_buff *skb,
				       struct rswitch_ext_desc *desc)
1645 {
1646 desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
1647 INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
1648 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1649 unsigned int tag;
1650
1651 tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
1652 if (tag == TS_TAGS_PER_PORT)
1653 return false;
1654 smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
1655 rdev->ts_skb[tag] = skb_get(skb);
1656 set_bit(tag, rdev->ts_skb_used);
1657
1658 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1659 desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
1660
1661 skb_tx_timestamp(skb);
1662 }
1663
1664 return true;
1665 }
1666
static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
				 struct sk_buff *skb,
				 struct rswitch_ext_desc *desc,
				 dma_addr_t dma_addr, u16 len, u8 die_dt)
1671 {
1672 rswitch_desc_set_dptr(&desc->desc, dma_addr);
1673 desc->desc.info_ds = cpu_to_le16(len);
1674 if (!rswitch_ext_desc_set_info1(rdev, skb, desc))
1675 return false;
1676
1677 dma_wmb();
1678
1679 desc->desc.die_dt = die_dt;
1680
1681 return true;
1682 }
1683
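/* Pick the descriptor type by position within a frame: DT_FSINGLE for a
 * one-descriptor frame, otherwise DT_FSTART/DT_FMID/DT_FEND; DIE requests a
 * completion interrupt on the last descriptor.
 */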
static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
1685 {
1686 if (nr_desc == 1)
1687 return DT_FSINGLE | DIE;
1688 if (index == 0)
1689 return DT_FSTART;
1690 if (nr_desc - 1 == index)
1691 return DT_FEND | DIE;
1692 return DT_FMID;
1693 }
1694
static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
1696 {
1697 switch (die_dt & DT_MASK) {
1698 case DT_FSINGLE:
1699 case DT_FEND:
1700 return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
1701 case DT_FSTART:
1702 case DT_FMID:
1703 return RSWITCH_DESC_BUF_SIZE;
1704 default:
1705 return 0;
1706 }
1707 }
1708
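/* Transmit one skb, splitting it across RSWITCH_DESC_BUF_SIZE-sized
 * descriptors. The skb and its DMA mapping are recorded at the frame's last
 * descriptor so rswitch_tx_free() can release them once the send completes.
 */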
static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1710 {
1711 struct rswitch_device *rdev = netdev_priv(ndev);
1712 struct rswitch_gwca_queue *gq = rdev->tx_queue;
1713 dma_addr_t dma_addr, dma_addr_orig;
1714 netdev_tx_t ret = NETDEV_TX_OK;
1715 struct rswitch_ext_desc *desc;
1716 unsigned int i, nr_desc;
1717 u8 die_dt;
1718 u16 len;
1719
1720 nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
1721 if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
1722 netif_stop_subqueue(ndev, 0);
1723 return NETDEV_TX_BUSY;
1724 }
1725
1726 if (skb_put_padto(skb, ETH_ZLEN))
1727 return ret;
1728
1729 dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
1730 if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
1731 goto err_kfree;
1732
/* Store the skb at the last descriptor so it is not freed before the hardware completes the send */
1734 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
1735 gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
1736
1737 dma_wmb();
1738
/* DT_FSTART must be written last, so fill the descriptors in reverse order. */
1740 for (i = nr_desc; i-- > 0; ) {
1741 desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
1742 die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
1743 dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
1744 len = rswitch_ext_desc_get_len(die_dt, skb->len);
1745 if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
1746 goto err_unmap;
1747 }
1748
1749 gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
1750 rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
1751
1752 return ret;
1753
1754 err_unmap:
1755 gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = NULL;
1756 dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
1757
1758 err_kfree:
1759 dev_kfree_skb_any(skb);
1760
1761 return ret;
1762 }
1763
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
1765 {
1766 return &ndev->stats;
1767 }
1768
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
1770 {
1771 struct rswitch_device *rdev = netdev_priv(ndev);
1772 struct rcar_gen4_ptp_private *ptp_priv;
1773 struct hwtstamp_config config;
1774
1775 ptp_priv = rdev->priv->ptp_priv;
1776
1777 config.flags = 0;
1778 config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
1779 HWTSTAMP_TX_OFF;
1780 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
1781 case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
1782 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1783 break;
1784 case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
1785 config.rx_filter = HWTSTAMP_FILTER_ALL;
1786 break;
1787 default:
1788 config.rx_filter = HWTSTAMP_FILTER_NONE;
1789 break;
1790 }
1791
1792 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1793 }
1794
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
1796 {
1797 struct rswitch_device *rdev = netdev_priv(ndev);
1798 u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
1799 struct hwtstamp_config config;
1800 u32 tstamp_tx_ctrl;
1801
1802 if (copy_from_user(&config, req->ifr_data, sizeof(config)))
1803 return -EFAULT;
1804
1805 if (config.flags)
1806 return -EINVAL;
1807
1808 switch (config.tx_type) {
1809 case HWTSTAMP_TX_OFF:
1810 tstamp_tx_ctrl = 0;
1811 break;
1812 case HWTSTAMP_TX_ON:
1813 tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
1814 break;
1815 default:
1816 return -ERANGE;
1817 }
1818
1819 switch (config.rx_filter) {
1820 case HWTSTAMP_FILTER_NONE:
1821 tstamp_rx_ctrl = 0;
1822 break;
1823 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1824 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
1825 break;
1826 default:
1827 config.rx_filter = HWTSTAMP_FILTER_ALL;
1828 tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
1829 break;
1830 }
1831
1832 rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1833 rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
1834
1835 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
1836 }
1837
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return rswitch_hwstamp_get(ndev, req);
	case SIOCSHWTSTAMP:
		return rswitch_hwstamp_set(ndev, req);
	default:
		return phy_mii_ioctl(ndev->phydev, req, cmd);
	}
}

static const struct net_device_ops rswitch_netdev_ops = {
	.ndo_open = rswitch_open,
	.ndo_stop = rswitch_stop,
	.ndo_start_xmit = rswitch_start_xmit,
	.ndo_get_stats = rswitch_get_stats,
	.ndo_eth_ioctl = rswitch_eth_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

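/* Report the PTP clock index and the supported timestamping modes to
 * ethtool (see rswitch_hwstamp_set() for the accepted filters).
 */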
static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops rswitch_ethtool_ops = {
	.get_ts_info = rswitch_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct of_device_id renesas_eth_sw_of_table[] = {
	{ .compatible = "renesas,r8a779f0-ether-switch", },
	{ }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);

static void rswitch_etha_init(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_etha *etha = &priv->etha[index];

	memset(etha, 0, sizeof(*etha));
	etha->index = index;
	etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
	etha->coma_addr = priv->addr;

	/* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
	 * PSMCS is calculated for an MDC frequency of 2.5 MHz, so multiply
	 * both the numerator and the denominator by 10 to keep the
	 * computation in integer arithmetic.
	 */
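	/* For example, assuming an illustrative 320 MHz peripheral clock:
	 * 320000000 / 100000 / (25 * 2) - 1 = 63, i.e.
	 * MDC = 320 MHz / (2 * (63 + 1)) = 2.5 MHz.
	 */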
	etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}

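/* Allocate the net_device for one Ethernet port, attach it to its ETHA and
 * register window, and allocate its RX/TX DMA descriptor chains.
 */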
static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index)
{
	struct platform_device *pdev = priv->pdev;
	struct rswitch_device *rdev;
	struct net_device *ndev;
	int err;

	if (index >= RSWITCH_NUM_PORTS)
		return -EINVAL;

	ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ether_setup(ndev);

	rdev = netdev_priv(ndev);
	rdev->ndev = ndev;
	rdev->priv = priv;
	priv->rdev[index] = rdev;
	rdev->port = index;
	rdev->etha = &priv->etha[index];
	rdev->addr = priv->addr;

	ndev->base_addr = (unsigned long)rdev->addr;
	snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
	ndev->netdev_ops = &rswitch_netdev_ops;
	ndev->ethtool_ops = &rswitch_ethtool_ops;
	ndev->max_mtu = RSWITCH_MAX_MTU;
	ndev->min_mtu = ETH_MIN_MTU;

	netif_napi_add(ndev, &rdev->napi, rswitch_poll);

	rdev->np_port = rswitch_get_port_node(rdev);
	rdev->disabled = !rdev->np_port;
	err = of_get_ethdev_address(rdev->np_port, ndev);
	if (err) {
		if (is_valid_ether_addr(rdev->etha->mac_addr))
			eth_hw_addr_set(ndev, rdev->etha->mac_addr);
		else
			eth_hw_addr_random(ndev);
	}

	err = rswitch_etha_get_params(rdev);
	if (err < 0)
		goto out_get_params;

	err = rswitch_rxdmac_alloc(ndev);
	if (err < 0)
		goto out_rxdmac;

	err = rswitch_txdmac_alloc(ndev);
	if (err < 0)
		goto out_txdmac;

	return 0;

out_txdmac:
	rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);

	return err;
}

static void rswitch_device_free(struct rswitch_private *priv, unsigned int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	rswitch_txdmac_free(ndev);
	rswitch_rxdmac_free(ndev);
	of_node_put(rdev->np_port);
	netif_napi_del(&rdev->napi);
	free_netdev(ndev);
}

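/* One-time hardware and software initialization: set up the ETHA ports and
 * the common agent, allocate the GWCA link-fix table, timestamp queue and
 * per-port devices, configure forwarding, PTP and interrupts, and finally
 * register the net_devices of the enabled ports.
 */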
static int rswitch_init(struct rswitch_private *priv)
{
	unsigned int i;
	int err;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_init(priv, i);

	rswitch_clock_enable(priv);
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_etha_read_mac_address(&priv->etha[i]);

	rswitch_reset(priv);

	rswitch_clock_enable(priv);
	rswitch_top_init(priv);
	err = rswitch_bpool_config(priv);
	if (err < 0)
		return err;

	rswitch_coma_init(priv);

	err = rswitch_gwca_linkfix_alloc(priv);
	if (err < 0)
		return -ENOMEM;

	err = rswitch_gwca_ts_queue_alloc(priv);
	if (err < 0)
		goto err_ts_queue_alloc;

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_device_alloc(priv, i);
		if (err < 0) {
			for (; i-- > 0; )
				rswitch_device_free(priv, i);
			goto err_device_alloc;
		}
	}

	rswitch_fwd_init(priv);

	err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
				     clk_get_rate(priv->clk));
	if (err < 0)
		goto err_ptp_register;

	err = rswitch_gwca_request_irqs(priv);
	if (err < 0)
		goto err_gwca_request_irq;

	err = rswitch_gwca_ts_request_irqs(priv);
	if (err < 0)
		goto err_gwca_ts_request_irq;

	err = rswitch_gwca_hw_init(priv);
	if (err < 0)
		goto err_gwca_hw_init;

	err = rswitch_ether_port_init_all(priv);
	if (err)
		goto err_ether_port_init_all;

	rswitch_for_each_enabled_port(priv, i) {
		err = register_netdev(priv->rdev[i]->ndev);
		if (err) {
			rswitch_for_each_enabled_port_continue_reverse(priv, i)
				unregister_netdev(priv->rdev[i]->ndev);
			goto err_register_netdev;
		}
	}

	rswitch_for_each_enabled_port(priv, i)
		netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
			    priv->rdev[i]->ndev->dev_addr);

	return 0;

err_register_netdev:
	rswitch_ether_port_deinit_all(priv);

err_ether_port_init_all:
	rswitch_gwca_hw_deinit(priv);

err_gwca_hw_init:
err_gwca_ts_request_irq:
err_gwca_request_irq:
	rcar_gen4_ptp_unregister(priv->ptp_priv);

err_ptp_register:
	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

err_device_alloc:
	rswitch_gwca_ts_queue_free(priv);

err_ts_queue_alloc:
	rswitch_gwca_linkfix_free(priv);

	return err;
}

static const struct soc_device_attribute rswitch_soc_no_speed_change[] = {
	{ .soc_id = "r8a779f0", .revision = "ES1.0" },
	{ /* Sentinel */ }
};

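/* Probe: map the "secure_base" register region, acquire the clock, allocate
 * the gPTP state, set the DMA mask (40-bit with a 32-bit fallback) and the
 * GWCA queue bookkeeping, then initialize the switch with runtime PM held.
 */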
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *attr;
	struct rswitch_private *priv;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
	if (!res) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->lock);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	attr = soc_device_match(rswitch_soc_no_speed_change);
	if (attr)
		priv->etha_no_runtime_change = true;

	priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
	if (!priv->ptp_priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->addr))
		return PTR_ERR(priv->addr);

	priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret < 0) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret < 0)
			return ret;
	}

	priv->gwca.index = AGENT_INDEX_GWCA;
	priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
				    RSWITCH_MAX_NUM_QUEUES);
	priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
					 sizeof(*priv->gwca.queues), GFP_KERNEL);
	if (!priv->gwca.queues)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = rswitch_init(priv);
	if (ret < 0) {
		pm_runtime_put(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	device_set_wakeup_capable(&pdev->dev, 1);

	return ret;
}

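/* Undo rswitch_init(): deinitialize the GWCA and PTP blocks, unregister and
 * free the per-port devices, release the queues and disable the clocks.
 */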
static void rswitch_deinit(struct rswitch_private *priv)
{
	unsigned int i;

	rswitch_gwca_hw_deinit(priv);
	rcar_gen4_ptp_unregister(priv->ptp_priv);

	rswitch_for_each_enabled_port(priv, i) {
		struct rswitch_device *rdev = priv->rdev[i];

		unregister_netdev(rdev->ndev);
		rswitch_ether_port_deinit_one(rdev);
		phy_exit(priv->rdev[i]->serdes);
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++)
		rswitch_device_free(priv, i);

	rswitch_gwca_ts_queue_free(priv);
	rswitch_gwca_linkfix_free(priv);

	rswitch_clock_disable(priv);
}

static void renesas_eth_sw_remove(struct platform_device *pdev)
{
	struct rswitch_private *priv = platform_get_drvdata(pdev);

	rswitch_deinit(priv);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
}

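/* System suspend: detach and stop every running interface and power down the
 * SerDes PHYs that have been initialized.
 */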
static int renesas_eth_sw_suspend(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			rswitch_stop(ndev);
		}
		if (priv->rdev[i]->serdes->init_count)
			phy_exit(priv->rdev[i]->serdes);
	}

	return 0;
}

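/* System resume: re-initialize the SerDes PHYs and reopen the interfaces
 * that were running when the system was suspended.
 */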
static int renesas_eth_sw_resume(struct device *dev)
{
	struct rswitch_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev;
	unsigned int i;

	rswitch_for_each_enabled_port(priv, i) {
		phy_init(priv->rdev[i]->serdes);
		ndev = priv->rdev[i]->ndev;
		if (netif_running(ndev)) {
			rswitch_open(ndev);
			netif_device_attach(ndev);
		}
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
				renesas_eth_sw_resume);

static struct platform_driver renesas_eth_sw_driver_platform = {
	.probe = renesas_eth_sw_probe,
	.remove = renesas_eth_sw_remove,
	.driver = {
		.name = "renesas_eth_sw",
		.pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
		.of_match_table = renesas_eth_sw_of_table,
	}
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");