// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "stmmac.h"
#include "stmmac_fpe.h"
#include "stmmac_pcs.h"
#include "stmmac_vlan.h"
#include "dwmac4.h"
#include "dwmac5.h"

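/* Program the base MAC configuration: the GMAC_CORE_INIT defaults, the fixed
 * port speed when hw->ps is set, the LPI 1us tick counter derived from the
 * CSR clock rate, and the default MAC interrupt enables.
 */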
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);
	unsigned long clk_rate;

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);

	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}

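/* Half-duplex is only advertised when a single TX queue is in use; with
 * multiple TX queues the half-duplex capabilities are cleared.
 */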
static void dwmac4_update_caps(struct stmmac_priv *priv)
{
	if (priv->plat->tx_queues_to_use > 1)
		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

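/* Map a priority to an RX queue: queues 0-3 are described in GMAC_RXQ_CTRL2
 * and queues 4-7 in GMAC_RXQ_CTRL3. The priority is cleared from every queue
 * first so that it is never mapped to more than one of them.
 */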
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);

	/* The software must ensure that the same priority
	 * is not mapped to multiple Rx queues
	 */
	for (i = 0; i < 4; i++)
		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
			       GMAC_RXQCTRL_PSRQX_MASK(i));

	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* First assign new priorities to a queue, then
	 * clear them from the other queues
	 */
	if (queue < 4) {
		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
			 GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
	} else {
		queue -= 4;

		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
			 GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
	}
}

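/* Map a priority to a TX queue: queues 0-3 are programmed through
 * GMAC_TXQ_PRTY_MAP0 and queues 4-7 through GMAC_TXQ_PRTY_MAP1.
 */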
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
		 GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

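/* Route a packet type to an RX queue. The table below is indexed by
 * (packet - 1); AV control and multicast/broadcast routing additionally
 * need their dedicated enable bits set.
 */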
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					   struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
							     queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
}

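/* Map an RX queue onto a DMA channel: queues 0-3 live in MTL_RXQ_DMA_MAP0,
 * queues 4-7 in MTL_RXQ_DMA_MAP1.
 */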
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4) {
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	} else {
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
	}
}

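/* Program the Credit Based Shaper for an AVB queue: enable the AV algorithm
 * with credit control, then set the send slope, the idle slope (which shares
 * a register with the TX queue weight), and the high and low credits.
 */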
static void dwmac4_config_cbs(struct stmmac_priv *priv,
			      struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));

	/* configure send slope */
	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));

	/* configure low credit */
	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

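/* Program the LPI control/status register: LPI can be disabled or enabled,
 * optionally armed behind the hardware entry timer, with optional clock
 * gating of the TX path while in LPI.
 */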
static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
			       enum stmmac_lpi_mode mode,
			       bool en_tx_lpi_clockgating, u32 et)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, mask;

	if (mode == STMMAC_LPI_DISABLE) {
		value = 0;
	} else {
		value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;

		if (mode == STMMAC_LPI_TIMER) {
			/* Return ERANGE if the timer is larger than the
			 * register field.
			 */
			if (et > STMMAC_ET_MAX)
				return -ERANGE;

			/* Set the hardware LPI entry timer */
			writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);

			/* Interpret a zero LPI entry timer to mean
			 * immediate entry into LPI mode.
			 */
			if (et)
				value |= LPI_CTRL_STATUS_LPIATE;
		}

		if (en_tx_lpi_clockgating)
			value |= LPI_CTRL_STATUS_LPITCSE;
	}

	mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
	       LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;

	value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);

	return 0;
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= LPI_CTRL_STATUS_PLS;
	else
		value &= ~LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *     status from PHY should be ok before transmitting
	 *     the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *     after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

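/* Program the RX packet filter from the netdev state: promiscuous and
 * all-multicast modes, hash-based multicast filtering, perfect unicast
 * address filtering and, when enabled, VLAN filtering.
 */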
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
		value &= ~GMAC_PACKET_FILTER_VTFE;
	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(struct stmmac_priv *priv, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(priv->ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

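/* Check the per-queue MTL interrupt status; only the RX overflow event is
 * acknowledged and reported back to the caller.
 */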
static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
				 struct mac_device_info *hw, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
							      chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Unused events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
			 struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			     >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6, the SA and DA filters cannot both be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

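/* Configure an L4 (TCP/UDP) port filter: the match value is programmed into
 * either the source or the destination port field and can be inverted.
 */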
static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.update_caps = dwmac4_update_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_lpi_mode = dwmac4_set_lpi_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.sarc_configure = dwmac4_sarc_configure,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.fpe_map_preemption_class = dwmac5_fpe_map_preemption_class,
};

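/* Fill the mac_device_info with the dwmac4/dwmac5 link capabilities, fixed
 * speed configuration bits and MDIO register layout used by the common
 * stmmac code.
 */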
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = stmmac_get_num_vlan(priv->ioaddr);

	return 0;
}