1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020, Loongson Corporation
3 */
4
5 #include <linux/clk-provider.h>
6 #include <linux/pci.h>
7 #include <linux/dmi.h>
8 #include <linux/device.h>
9 #include <linux/of_irq.h>
10 #include "stmmac.h"
11 #include "dwmac_dma.h"
12 #include "dwmac1000.h"
13
#define DRIVER_NAME "dwmac-loongson-pci"

/* The Loongson multi-channel controllers split the DMA normal/abnormal
 * interrupt summary bits per direction (TX/RX) at custom positions,
 * unlike the stock DW GMAC layout in dwmac_dma.h.
 */

/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON	0x00040000
/* Normal Loongson Rx Summary */
#define DMA_INTR_ENA_NIE_RX_LOONGSON	0x00020000

#define DMA_INTR_NORMAL_LOONGSON	(DMA_INTR_ENA_NIE_TX_LOONGSON | \
					 DMA_INTR_ENA_NIE_RX_LOONGSON | \
					 DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)

/* Abnormal Loongson Tx Summary */
#define DMA_INTR_ENA_AIE_TX_LOONGSON	0x00010000
/* Abnormal Loongson Rx Summary */
#define DMA_INTR_ENA_AIE_RX_LOONGSON	0x00008000

#define DMA_INTR_ABNORMAL_LOONGSON	(DMA_INTR_ENA_AIE_TX_LOONGSON | \
					 DMA_INTR_ENA_AIE_RX_LOONGSON | \
					 DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)

/* Default CSR7 value: both summaries plus RX/TX complete, fatal bus
 * error and TX underflow interrupts.
 */
#define DMA_INTR_DEFAULT_MASK_LOONGSON	(DMA_INTR_NORMAL_LOONGSON | \
					 DMA_INTR_ABNORMAL_LOONGSON)

/* Normal Loongson Tx Interrupt Summary */
#define DMA_STATUS_NIS_TX_LOONGSON	0x00040000
/* Normal Loongson Rx Interrupt Summary */
#define DMA_STATUS_NIS_RX_LOONGSON	0x00020000

/* Abnormal Loongson Tx Interrupt Summary */
#define DMA_STATUS_AIS_TX_LOONGSON	0x00010000
/* Abnormal Loongson Rx Interrupt Summary */
#define DMA_STATUS_AIS_RX_LOONGSON	0x00008000

/* Fatal Loongson Tx Bus Error Interrupt */
#define DMA_STATUS_FBI_TX_LOONGSON	0x00002000
/* Fatal Loongson Rx Bus Error Interrupt */
#define DMA_STATUS_FBI_RX_LOONGSON	0x00001000

#define DMA_STATUS_MSK_COMMON_LOONGSON	(DMA_STATUS_NIS_TX_LOONGSON | \
					 DMA_STATUS_NIS_RX_LOONGSON | \
					 DMA_STATUS_AIS_TX_LOONGSON | \
					 DMA_STATUS_AIS_RX_LOONGSON | \
					 DMA_STATUS_FBI_TX_LOONGSON | \
					 DMA_STATUS_FBI_RX_LOONGSON)

/* Per-direction CSR5 masks used to filter the status before decoding */
#define DMA_STATUS_MSK_RX_LOONGSON	(DMA_STATUS_ERI | DMA_STATUS_RWT | \
					 DMA_STATUS_RPS | DMA_STATUS_RU | \
					 DMA_STATUS_RI | DMA_STATUS_OVF | \
					 DMA_STATUS_MSK_COMMON_LOONGSON)

#define DMA_STATUS_MSK_TX_LOONGSON	(DMA_STATUS_ETI | DMA_STATUS_UNF | \
					 DMA_STATUS_TJT | DMA_STATUS_TU | \
					 DMA_STATUS_TPS | DMA_STATUS_TI | \
					 DMA_STATUS_MSK_COMMON_LOONGSON)

#define PCI_DEVICE_ID_LOONGSON_GMAC	0x7a03
#define PCI_DEVICE_ID_LOONGSON_GNET	0x7a13
#define DWMAC_CORE_LS_MULTICHAN	0x10	/* Loongson custom ID */
#define CHANNEL_NUM	8
73
/* Driver-private state, reachable from the stmmac callbacks through
 * plat->bsp_priv.
 */
struct loongson_data {
	u32 loongson_id;	/* GMAC_VERSION low byte; 0x10 marks multi-channel HW */
	struct device *dev;	/* PCI device, used to reach the net_device drvdata */
};
78
/* Per-device-ID hook table, stashed in pci_device_id::driver_data */
struct stmmac_pci_info {
	/* Fill the platform data for this specific device variant */
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
82
loongson_default_data(struct pci_dev * pdev,struct plat_stmmacenet_data * plat)83 static void loongson_default_data(struct pci_dev *pdev,
84 struct plat_stmmacenet_data *plat)
85 {
86 /* Get bus_id, this can be overwritten later */
87 plat->bus_id = pci_dev_id(pdev);
88
89 plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
90 plat->has_gmac = 1;
91 plat->force_sf_dma_mode = 1;
92
93 /* Set default value for multicast hash bins */
94 plat->multicast_filter_bins = 256;
95
96 plat->mac_interface = PHY_INTERFACE_MODE_NA;
97
98 /* Set default value for unicast filter entries */
99 plat->unicast_filter_entries = 1;
100
101 /* Set the maxmtu to a default of JUMBO_LEN */
102 plat->maxmtu = JUMBO_LEN;
103
104 /* Disable Priority config by default */
105 plat->tx_queues_cfg[0].use_prio = false;
106 plat->rx_queues_cfg[0].use_prio = false;
107
108 /* Disable RX queues routing by default */
109 plat->rx_queues_cfg[0].pkt_route = 0x0;
110
111 plat->clk_ref_rate = 125000000;
112 plat->clk_ptp_rate = 125000000;
113
114 /* Default to phy auto-detection */
115 plat->phy_addr = -1;
116
117 plat->dma_cfg->pbl = 32;
118 plat->dma_cfg->pblx8 = true;
119 }
120
loongson_gmac_data(struct pci_dev * pdev,struct plat_stmmacenet_data * plat)121 static int loongson_gmac_data(struct pci_dev *pdev,
122 struct plat_stmmacenet_data *plat)
123 {
124 struct loongson_data *ld;
125 int i;
126
127 ld = plat->bsp_priv;
128
129 loongson_default_data(pdev, plat);
130
131 if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
132 plat->rx_queues_to_use = CHANNEL_NUM;
133 plat->tx_queues_to_use = CHANNEL_NUM;
134
135 /* Only channel 0 supports checksum,
136 * so turn off checksum to enable multiple channels.
137 */
138 for (i = 1; i < CHANNEL_NUM; i++)
139 plat->tx_queues_cfg[i].coe_unsupported = 1;
140 } else {
141 plat->tx_queues_to_use = 1;
142 plat->rx_queues_to_use = 1;
143 }
144
145 plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
146
147 return 0;
148 }
149
/* Hooks for PCI_DEVICE_ID_LOONGSON_GMAC (0x7a03) */
static struct stmmac_pci_info loongson_gmac_pci_info = {
	.setup = loongson_gmac_data,
};
153
loongson_gnet_fix_speed(void * priv,unsigned int speed,unsigned int mode)154 static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
155 unsigned int mode)
156 {
157 struct loongson_data *ld = (struct loongson_data *)priv;
158 struct net_device *ndev = dev_get_drvdata(ld->dev);
159 struct stmmac_priv *ptr = netdev_priv(ndev);
160
161 /* The integrated PHY has a weird problem with switching from the low
162 * speeds to 1000Mbps mode. The speedup procedure requires the PHY-link
163 * re-negotiation.
164 */
165 if (speed == SPEED_1000) {
166 if (readl(ptr->ioaddr + MAC_CTRL_REG) &
167 GMAC_CONTROL_PS)
168 /* Word around hardware bug, restart autoneg */
169 phy_restart_aneg(ndev->phydev);
170 }
171 }
172
loongson_gnet_data(struct pci_dev * pdev,struct plat_stmmacenet_data * plat)173 static int loongson_gnet_data(struct pci_dev *pdev,
174 struct plat_stmmacenet_data *plat)
175 {
176 struct loongson_data *ld;
177 int i;
178
179 ld = plat->bsp_priv;
180
181 loongson_default_data(pdev, plat);
182
183 if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
184 plat->rx_queues_to_use = CHANNEL_NUM;
185 plat->tx_queues_to_use = CHANNEL_NUM;
186
187 /* Only channel 0 supports checksum,
188 * so turn off checksum to enable multiple channels.
189 */
190 for (i = 1; i < CHANNEL_NUM; i++)
191 plat->tx_queues_cfg[i].coe_unsupported = 1;
192 } else {
193 plat->tx_queues_to_use = 1;
194 plat->rx_queues_to_use = 1;
195 }
196
197 plat->phy_interface = PHY_INTERFACE_MODE_GMII;
198 plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
199 plat->fix_mac_speed = loongson_gnet_fix_speed;
200
201 return 0;
202 }
203
/* Hooks for PCI_DEVICE_ID_LOONGSON_GNET (0x7a13) */
static struct stmmac_pci_info loongson_gnet_pci_info = {
	.setup = loongson_gnet_data,
};
207
/* Program the per-channel DMA bus-mode register (CSR0) and interrupt
 * enable register (CSR7) for the Loongson multi-channel layout.
 * Mirrors the dwmac1000 single-channel init, but addresses the
 * registers via DMA_CHAN_BUS_MODE(chan)/DMA_CHAN_INTR_ENA(chan) and
 * uses the Loongson-specific interrupt summary bits.
 */
static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
					    void __iomem *ioaddr,
					    struct stmmac_dma_cfg *dma_cfg,
					    u32 chan)
{
	/* Fall back to the common PBL when no separate TX/RX PBL is set */
	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
	u32 value;

	/* Read-modify-write so reserved bits stay untouched */
	value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));

	if (dma_cfg->pblx8)
		value |= DMA_BUS_MODE_MAXPBL;

	/* USP: use separate PBL fields for RX (RPBL) and TX (PBL) */
	value |= DMA_BUS_MODE_USP;
	value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
	value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
	value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_BUS_MODE_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_BUS_MODE_MB;

	/* Alternate (extended) descriptor size */
	if (dma_cfg->atds)
		value |= DMA_BUS_MODE_ATDS;

	/* Address-aligned beats */
	if (dma_cfg->aal)
		value |= DMA_BUS_MODE_AAL;

	writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));

	/* Mask interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
	       DMA_CHAN_INTR_ENA(chan));
}
247
/* Decode and acknowledge one channel's DMA status register (CSR5).
 *
 * Like dwmac_dma_interrupt(), but the normal/abnormal/fatal summary
 * flags sit at the Loongson-specific per-direction positions, so the
 * raw status is first filtered by @dir and the summaries are derived
 * from the custom bits. Returns a combination of handle_rx/handle_tx
 * and/or a tx_hard_error* code.
 */
static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
					void __iomem *ioaddr,
					struct stmmac_extra_stats *x,
					u32 chan, u32 dir)
{
	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
	u32 abnor_intr_status;
	u32 nor_intr_status;
	u32 fb_intr_status;
	u32 intr_status;
	int ret = 0;

	/* read the status register (CSR5) */
	intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));

	/* Only look at the bits relevant to the requested direction */
	if (dir == DMA_DIR_RX)
		intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
	else if (dir == DMA_DIR_TX)
		intr_status &= DMA_STATUS_MSK_TX_LOONGSON;

	/* Split out the Loongson per-direction summary flags */
	nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
					 DMA_STATUS_NIS_RX_LOONGSON);
	abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
					   DMA_STATUS_AIS_RX_LOONGSON);
	fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
					DMA_STATUS_FBI_RX_LOONGSON);

	/* ABNORMAL interrupts */
	if (unlikely(abnor_intr_status)) {
		if (unlikely(intr_status & DMA_STATUS_UNF)) {
			/* TX underflow: bump the threshold (caller's job) */
			ret = tx_hard_error_bump_tc;
			x->tx_undeflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_TJT))
			x->tx_jabber_irq++;
		if (unlikely(intr_status & DMA_STATUS_OVF))
			x->rx_overflow_irq++;
		if (unlikely(intr_status & DMA_STATUS_RU))
			x->rx_buf_unav_irq++;
		if (unlikely(intr_status & DMA_STATUS_RPS))
			x->rx_process_stopped_irq++;
		if (unlikely(intr_status & DMA_STATUS_RWT))
			x->rx_watchdog_irq++;
		if (unlikely(intr_status & DMA_STATUS_ETI))
			x->tx_early_irq++;
		if (unlikely(intr_status & DMA_STATUS_TPS)) {
			x->tx_process_stopped_irq++;
			ret = tx_hard_error;
		}
		/* Fatal bus error (either direction) forces a restart */
		if (unlikely(fb_intr_status)) {
			x->fatal_bus_error_irq++;
			ret = tx_hard_error;
		}
	}
	/* TX/RX NORMAL interrupts */
	if (likely(nor_intr_status)) {
		if (likely(intr_status & DMA_STATUS_RI)) {
			u32 value = readl(ioaddr + DMA_INTR_ENA);
			/* to schedule NAPI on real RIE event. */
			if (likely(value & DMA_INTR_ENA_RIE)) {
				u64_stats_update_begin(&stats->syncp);
				u64_stats_inc(&stats->rx_normal_irq_n[chan]);
				u64_stats_update_end(&stats->syncp);
				ret |= handle_rx;
			}
		}
		if (likely(intr_status & DMA_STATUS_TI)) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_normal_irq_n[chan]);
			u64_stats_update_end(&stats->syncp);
			ret |= handle_tx;
		}
		if (unlikely(intr_status & DMA_STATUS_ERI))
			x->rx_early_irq++;
	}
	/* Optional hardware blocks, interrupts should be disabled */
	if (unlikely(intr_status &
		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
		pr_warn("%s: unexpected status %08x\n", __func__, intr_status);

	/* Clear the interrupt by writing a logic 1 to the CSR5[19-0]
	 * NOTE(review): 0x7ffff actually covers bits [18:0] — confirm
	 * the intended acknowledge width against the HW manual.
	 */
	writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));

	return ret;
}
333
/* plat->setup() callback: build the mac_device_info by hand instead of
 * relying on stmmac's version-based HW-interface auto-detection, since
 * the multi-channel Loongson cores report a custom SNPSVER value.
 * Returns NULL on allocation failure (devm-managed, no manual free).
 */
static struct mac_device_info *loongson_dwmac_setup(void *apriv)
{
	struct stmmac_priv *priv = apriv;
	struct mac_device_info *mac;
	struct stmmac_dma_ops *dma;
	struct loongson_data *ld;
	struct pci_dev *pdev;

	ld = priv->plat->bsp_priv;
	pdev = to_pci_dev(priv->device);

	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return NULL;

	dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	/* The Loongson GMAC and GNET devices are based on the DW GMAC
	 * v3.50a and v3.73a IP-cores. But the HW designers have changed the
	 * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
	 * network controllers with the multi-channels feature
	 * available to emphasize the differences: multiple DMA-channels,
	 * AV feature and GMAC_INT_STATUS CSR flags layout. Get back the
	 * original value so the correct HW-interface would be selected.
	 */
	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
		priv->synopsys_id = DWMAC_CORE_3_70;
		/* Start from the stock dwmac1000 DMA ops and override the
		 * channel init and interrupt handlers with the
		 * Loongson-specific multi-channel variants.
		 */
		*dma = dwmac1000_dma_ops;
		dma->init_chan = loongson_dwmac_dma_init_channel;
		dma->dma_interrupt = loongson_dwmac_dma_interrupt;
		mac->dma = dma;
	}

	priv->dev->priv_flags |= IFF_UNICAST_FLT;

	/* Pre-initialize the respective "mac" fields as it's done in
	 * dwmac1000_setup()
	 */
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Loongson GMAC doesn't support the flow control. LS2K2000
	 * GNET doesn't support the half-duplex link mode.
	 */
	if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
		mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
	} else {
		if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
			mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
					 MAC_10 | MAC_100 | MAC_1000;
		else
			mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
					 MAC_10FD | MAC_100FD | MAC_1000FD;
	}

	/* Standard dwmac1000 MAC_CTRL_REG speed/duplex encoding and
	 * GMAC_MII_ADDR field layout, as in dwmac1000_setup().
	 */
	mac->link.duplex = GMAC_CONTROL_DM;
	mac->link.speed10 = GMAC_CONTROL_PS;
	mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
	mac->mii.addr = GMAC_MII_ADDR;
	mac->mii.data = GMAC_MII_DATA;
	mac->mii.addr_shift = 11;
	mac->mii.addr_mask = 0x0000F800;
	mac->mii.reg_shift = 6;
	mac->mii.reg_mask = 0x000007C0;
	mac->mii.clk_csr_shift = 2;
	mac->mii.clk_csr_mask = GENMASK(5, 2);

	return mac;
}
412
/* Allocate per-channel MSI vectors for the multi-channel controllers.
 *
 * Vector 0 is the common MAC IRQ; RX channel i uses vector 1 + 2*i and
 * TX channel i uses vector 2 + 2*i. Note the res->rx_irq/tx_irq arrays
 * are filled from index CHANNEL_NUM-1 downwards — presumably to match
 * the HW channel-to-vector wiring; TODO confirm against the manual.
 * Returns 0 on success or a negative errno if the vectors couldn't be
 * allocated (the caller then falls back to the single common IRQ).
 */
static int loongson_dwmac_msi_config(struct pci_dev *pdev,
				     struct plat_stmmacenet_data *plat,
				     struct stmmac_resources *res)
{
	int i, ret, vecs;

	/* 2 vectors per channel (RX+TX) plus the common MAC vector,
	 * rounded up as required for MSI allocation.
	 */
	vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
	ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[CHANNEL_NUM - 1 - i] =
			pci_irq_vector(pdev, 1 + i * 2);
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[CHANNEL_NUM - 1 - i] =
			pci_irq_vector(pdev, 2 + i * 2);
	}

	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;

	return 0;
}
442
/* Undo loongson_dwmac_msi_config(); safe to call even if the MSI
 * allocation previously failed.
 */
static void loongson_dwmac_msi_clear(struct pci_dev *pdev)
{
	pci_free_irq_vectors(pdev);
}
447
/* Devicetree-based configuration: MDIO subnode, bus alias, IRQs and
 * PHY mode. Returns 0 on success or -ENODEV, releasing the mdio_node
 * reference on failure. On success the caller must eventually call
 * loongson_dwmac_dt_clear() to drop that reference.
 */
static int loongson_dwmac_dt_config(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	struct device_node *np = dev_of_node(&pdev->dev);
	int ret;

	plat->mdio_node = of_get_child_by_name(np, "mdio");
	if (plat->mdio_node) {
		dev_info(&pdev->dev, "Found MDIO subnode\n");
		plat->mdio_bus_data->needs_reset = true;
	}

	/* An "ethernet" alias overrides the PCI-derived bus_id */
	ret = of_alias_get_id(np, "ethernet");
	if (ret >= 0)
		plat->bus_id = ret;

	/* The main MAC IRQ is mandatory */
	res->irq = of_irq_get_byname(np, "macirq");
	if (res->irq < 0) {
		dev_err(&pdev->dev, "IRQ macirq not found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	/* Wake-on-LAN IRQ is optional: fall back to the main IRQ */
	res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
	if (res->wol_irq < 0) {
		dev_info(&pdev->dev,
			 "IRQ eth_wake_irq not found, using macirq\n");
		res->wol_irq = res->irq;
	}

	res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
	if (res->lpi_irq < 0) {
		dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = device_get_phy_mode(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "phy_mode not found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	plat->phy_interface = ret;

	return 0;

err_put_node:
	of_node_put(plat->mdio_node);

	return ret;
}
502
/* Drop the mdio_node reference taken in loongson_dwmac_dt_config();
 * of_node_put(NULL) is a no-op, so this is safe when no MDIO subnode
 * was found.
 */
static void loongson_dwmac_dt_clear(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat)
{
	of_node_put(plat->mdio_node);
}
508
loongson_dwmac_acpi_config(struct pci_dev * pdev,struct plat_stmmacenet_data * plat,struct stmmac_resources * res)509 static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
510 struct plat_stmmacenet_data *plat,
511 struct stmmac_resources *res)
512 {
513 if (!pdev->irq)
514 return -EINVAL;
515
516 res->irq = pdev->irq;
517
518 return 0;
519 }
520
/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + DMA_BUS_MODE);

	/* Trigger the software reset, then poll until the self-clearing
	 * bit drops (10us interval, 2s total timeout); returns 0 or
	 * -ETIMEDOUT from readl_poll_timeout().
	 */
	value |= DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + DMA_BUS_MODE);

	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
				  !(value & DMA_BUS_MODE_SFT_RESET),
				  10000, 2000000);
}
533
/* PCI probe: allocate the platform data, map the register BAR, detect
 * the core variant, run the per-device setup and DT/ACPI config, then
 * hand over to the common stmmac probe. Error paths unwind in reverse
 * order via the labels at the bottom.
 */
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_pci_info *info;
	struct stmmac_resources res;
	struct loongson_data *ld;
	int ret, i;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
	if (!ld)
		return -ENOMEM;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
		return ret;
	}

	pci_set_master(pdev);

	/* Get the base address of device
	 * NOTE(review): the loop stops at the first non-empty BAR but
	 * always maps BIT(0) and reads iomap_table[0] below, i.e. it
	 * assumes the registers are in BAR 0 — confirm for all variants.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
		if (ret)
			goto err_disable_device;
		break;
	}

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	plat->bsp_priv = ld;
	plat->setup = loongson_dwmac_setup;
	plat->fix_soc_reset = loongson_dwmac_fix_reset;
	ld->dev = &pdev->dev;
	/* SNPSVER field: 0x10 flags the multi-channel custom cores */
	ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;

	/* Run the GMAC/GNET-specific platform setup */
	info = (struct stmmac_pci_info *)id->driver_data;
	ret = info->setup(pdev, plat);
	if (ret)
		goto err_disable_device;

	if (dev_of_node(&pdev->dev))
		ret = loongson_dwmac_dt_config(pdev, plat, &res);
	else
		ret = loongson_dwmac_acpi_config(pdev, plat, &res);
	if (ret)
		goto err_disable_device;

	/* Use the common MAC IRQ if per-channel MSIs allocation failed */
	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
		loongson_dwmac_msi_config(pdev, plat, &res);

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret)
		goto err_plat_clear;

	return 0;

err_plat_clear:
	if (dev_of_node(&pdev->dev))
		loongson_dwmac_dt_clear(pdev, plat);
	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
		loongson_dwmac_msi_clear(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}
619
/* PCI remove: tear down the stmmac device, then release the DT node
 * reference, MSI vectors and the mapped BAR in reverse-probe order.
 */
static void loongson_dwmac_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct loongson_data *ld;
	int i;

	/* Grab bsp_priv before the core teardown (plat is devm-managed,
	 * so it stays valid until the device itself goes away).
	 */
	ld = priv->plat->bsp_priv;
	stmmac_dvr_remove(&pdev->dev);

	if (dev_of_node(&pdev->dev))
		loongson_dwmac_dt_clear(pdev, priv->plat);

	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
		loongson_dwmac_msi_clear(pdev);

	/* Unmap the first non-empty BAR.
	 * NOTE(review): probe maps BIT(0) regardless of the loop index,
	 * while this unmaps BIT(i) — asymmetric if BAR 0 is empty; verify.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		pcim_iounmap_regions(pdev, BIT(i));
		break;
	}

	pci_disable_device(pdev);
}
645
/* System suspend: quiesce the stmmac core first, then save PCI config
 * space, disable the device and arm it for D3 wakeup.
 */
static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}
663
/* System resume: restore PCI state and power, re-enable bus mastering,
 * then bring the stmmac core back up.
 */
static int __maybe_unused loongson_dwmac_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}
680
static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
			 loongson_dwmac_resume);

/* Supported devices; driver_data points at the per-variant setup hooks */
static const struct pci_device_id loongson_dwmac_id_table[] = {
	{ PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
	{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
690
static struct pci_driver loongson_dwmac_driver = {
	.name = DRIVER_NAME,
	.id_table = loongson_dwmac_id_table,
	.probe = loongson_dwmac_probe,
	.remove = loongson_dwmac_remove,
	.driver = {
		.pm = &loongson_dwmac_pm_ops,
	},
};

module_pci_driver(loongson_dwmac_driver);

MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
MODULE_LICENSE("GPL v2");
707