// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

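/* Advance a CPU-side ring index by one descriptor, wrapping back to
 * zero at the end of the ring.
 */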
static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

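/* The DMA pointers exchanged with hardware come in two flavors: "valid"
 * pointers reference the last byte of a descriptor while "read" pointers
 * reference the first byte, so the two helpers below advance one
 * descriptor at a time with the wrap behavior appropriate to each.
 */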
/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

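/* TX checksum offload: for TCP or UDP over IPv4/IPv6, prepend a
 * struct bcmasp_pkt_offload describing the L2/L3/L4 header sizes so that
 * hardware can insert the L4 checksum. Any other packet type, or a
 * failure to grow headroom, falls back to software checksumming via
 * skb_checksum_help().
 */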
static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0);
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1);
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L4;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L4;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
{
	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
}

static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
}

static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
}

static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
{
	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
}

static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
}

static const struct bcmasp_intf_ops bcmasp_intf_ops = {
	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
	.tx_read = bcmasp_tx_spb_dma_rq,
	.tx_write = bcmasp_tx_spb_dma_wq,
};

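/* Queue a frame for transmission: verify ring space, optionally prepend
 * the checksum offload header, DMA-map the linear data and every
 * fragment, fill one descriptor per mapping (SOF on the first, EOF on
 * the last), then publish the new "valid" pointer to hardware.
 */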
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;

	skb_tx_timestamp(skb);

	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

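/* Walk the TX ring from the last cleaned descriptor up to the hardware
 * read pointer, unmapping buffers and crediting stats once per skb (on
 * its last fragment). Returns the number of descriptors released.
 */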
static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = bcmasp_intf_tx_read(intf);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	return released;
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	int released = 0;

	released = bcmasp_tx_reclaim(intf);

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

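/* RX NAPI poll. Descriptors point into a single contiguous ring buffer
 * that hardware fills, so each frame is copied into a freshly allocated
 * skb and the buffer space is recycled by advancing the buffer-read
 * pointer past the consumed bytes.
 */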
static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

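	/* The hardware write pointer references the last byte of the
	 * newest descriptor; add one so it compares against first-byte
	 * (one-past-the-end) addresses, wrapping at the ring end.
	 */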
	valid = bcmasp_intf_rx_desc_read(intf) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

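		/* Strip the two leading bytes that hardware prepends,
		 * presumably padding so the IP header lands on a 4-byte
		 * boundary.
		 */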
		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
						   desc->size));

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);

	if (processed < budget && napi_complete_done(&intf->rx_napi, processed))
		bcmasp_enable_rx_irq(intf, 1);

	return processed;
}

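/* PHY link adjustment callback: mirror the negotiated speed, duplex,
 * pause and EEE settings into the UniMAC (releasing its software reset
 * on the first link-up) and reflect link state to the RGMII block.
 */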
static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			 UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			 UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
		reg = umac_rl(intf, UMC_EEE_CTRL);
		if (phydev->enable_tx_lpi)
			reg |= EEE_EN;
		else
			reg &= ~EEE_EN;
		umac_wl(intf, reg, UMC_EEE_CTRL);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

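/* Allocate the per-interface DMA state: one page-order RX packet buffer
 * (streaming-mapped; hardware writes frames into it), coherent RX and TX
 * descriptor rings, and the CPU-side TX control blocks.
 */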
static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr, GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

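/* Program the RX ring buffer and EDPKT descriptor ring base/end/valid
 * registers, restarting both from index 0. The channel itself is left
 * disabled; bcmasp_enable_rx() turns it on.
 */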
static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			       RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
			      (RX_EDPKT_CFG_CFG0_64_ALN <<
			       RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
			      (RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);

	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
		   UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;
	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);

	if (intf->parent->tx_chan_offset)
		tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}

static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

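/* Quiesce the datapath, roughly the reverse of bcmasp_netif_init():
 * stop TX and flush the TX FIFO before stopping the PHY, then drain and
 * disable RX, and finally mask interrupts and delete the NAPI contexts.
 */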
static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	bcmasp_tx_reclaim(intf);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 *		(requires PCB or receiver-side delay)
		 * RGMII: Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;

		/* Set phylib's copy of the LPI timer */
		phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open		= bcmasp_open,
	.ndo_stop		= bcmasp_stop,
	.ndo_start_xmit		= bcmasp_xmit,
	.ndo_tx_timeout		= bcmasp_tx_timeout,
	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= bcmasp_get_stats64,
};

static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (UMAC2FB_OFFSET + priv->rx_ctrl_offset +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	int ch, port, ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_free_netdev;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	intf->ops = &bcmasp_intf_ops;
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	netdev_sw_irq_coalesce_default_on(ndev);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

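/* Arm Wake-on-LAN: enable magic-packet detection (optionally with the
 * 6-byte SecureOn password split across two registers), keep the UniMAC
 * receiver running in promiscuous mode, and unmask the wake-up interrupt.
 */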
static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
	    intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}