// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Loongson Corporation
 */

#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/of_irq.h>
#include "stmmac.h"
#include "dwmac_dma.h"
#include "dwmac1000.h"

/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON	0x00040000
/* Normal Loongson Rx Summary */
#define DMA_INTR_ENA_NIE_RX_LOONGSON	0x00020000

#define DMA_INTR_NORMAL_LOONGSON	(DMA_INTR_ENA_NIE_TX_LOONGSON | \
					 DMA_INTR_ENA_NIE_RX_LOONGSON | \
					 DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE)

/* Abnormal Loongson Tx Summary */
#define DMA_INTR_ENA_AIE_TX_LOONGSON	0x00010000
/* Abnormal Loongson Rx Summary */
#define DMA_INTR_ENA_AIE_RX_LOONGSON	0x00008000

#define DMA_INTR_ABNORMAL_LOONGSON	(DMA_INTR_ENA_AIE_TX_LOONGSON | \
					 DMA_INTR_ENA_AIE_RX_LOONGSON | \
					 DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE)

#define DMA_INTR_DEFAULT_MASK_LOONGSON	(DMA_INTR_NORMAL_LOONGSON | \
					 DMA_INTR_ABNORMAL_LOONGSON)

/* Normal Loongson Tx Interrupt Summary */
#define DMA_STATUS_NIS_TX_LOONGSON	0x00040000
/* Normal Loongson Rx Interrupt Summary */
#define DMA_STATUS_NIS_RX_LOONGSON	0x00020000

/* Abnormal Loongson Tx Interrupt Summary */
#define DMA_STATUS_AIS_TX_LOONGSON	0x00010000
/* Abnormal Loongson Rx Interrupt Summary */
#define DMA_STATUS_AIS_RX_LOONGSON	0x00008000

/* Fatal Loongson Tx Bus Error Interrupt */
#define DMA_STATUS_FBI_TX_LOONGSON	0x00002000
/* Fatal Loongson Rx Bus Error Interrupt */
#define DMA_STATUS_FBI_RX_LOONGSON	0x00001000

#define DMA_STATUS_MSK_COMMON_LOONGSON	(DMA_STATUS_NIS_TX_LOONGSON | \
					 DMA_STATUS_NIS_RX_LOONGSON | \
					 DMA_STATUS_AIS_TX_LOONGSON | \
					 DMA_STATUS_AIS_RX_LOONGSON | \
					 DMA_STATUS_FBI_TX_LOONGSON | \
					 DMA_STATUS_FBI_RX_LOONGSON)

#define DMA_STATUS_MSK_RX_LOONGSON	(DMA_STATUS_ERI | DMA_STATUS_RWT | \
					 DMA_STATUS_RPS | DMA_STATUS_RU  | \
					 DMA_STATUS_RI  | DMA_STATUS_OVF | \
					 DMA_STATUS_MSK_COMMON_LOONGSON)

#define DMA_STATUS_MSK_TX_LOONGSON	(DMA_STATUS_ETI | DMA_STATUS_UNF | \
					 DMA_STATUS_TJT | DMA_STATUS_TU  | \
					 DMA_STATUS_TPS | DMA_STATUS_TI  | \
					 DMA_STATUS_MSK_COMMON_LOONGSON)
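
/*
 * Note on the custom masks above: on the multi-channel Loongson
 * controllers the single NIS/AIS summary and FBI flags of the stock
 * DesignWare DMA are replaced by per-direction Tx/Rx summaries
 * (bits 18:15 for normal/abnormal, bits 13:12 for fatal bus errors),
 * so the generic interrupt-enable and status masks from dwmac_dma.h
 * cannot be reused as-is by this glue driver.
 */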

#define PCI_DEVICE_ID_LOONGSON_GMAC	0x7a03
#define PCI_DEVICE_ID_LOONGSON_GNET	0x7a13
#define DWMAC_CORE_LS_MULTICHAN	0x10	/* Loongson custom ID */
#define CHANNEL_NUM			8

struct loongson_data {
	u32 loongson_id;
	struct device *dev;
};

struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
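
/*
 * Per-device setup() callbacks. loongson_dwmac_probe() fetches the
 * matching stmmac_pci_info from pci_device_id::driver_data and calls
 * ->setup() to fill in the GMAC- or GNET-specific platform data.
 */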

static void loongson_default_data(struct pci_dev *pdev,
				  struct plat_stmmacenet_data *plat)
{
	/* Get bus_id, this can be overwritten later */
	plat->bus_id = pci_dev_id(pdev);

	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = 256;

	plat->mac_interface = PHY_INTERFACE_MODE_NA;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;

	plat->clk_ref_rate = 125000000;
	plat->clk_ptp_rate = 125000000;

	/* Default to phy auto-detection */
	plat->phy_addr = -1;

	plat->dma_cfg->pbl = 32;
	plat->dma_cfg->pblx8 = true;
}

static int loongson_gmac_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	struct loongson_data *ld;
	int i;

	ld = plat->bsp_priv;

	loongson_default_data(pdev, plat);

	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
		plat->rx_queues_to_use = CHANNEL_NUM;
		plat->tx_queues_to_use = CHANNEL_NUM;

		/* Only channel 0 supports checksum,
		 * so turn off checksum to enable multiple channels.
		 */
		for (i = 1; i < CHANNEL_NUM; i++)
			plat->tx_queues_cfg[i].coe_unsupported = 1;
	} else {
		plat->tx_queues_to_use = 1;
		plat->rx_queues_to_use = 1;
	}

	plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;

	return 0;
}

static struct stmmac_pci_info loongson_gmac_pci_info = {
	.setup = loongson_gmac_data,
};

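/*
 * fix_mac_speed() hook for the GNET integrated PHY. GMAC_CONTROL_PS is
 * the MAC port-select bit (set = MII/10-100 path): if it is still set
 * when the PHY reports a 1000Mbps link, the MAC has not switched over
 * to the GMII path, so auto-negotiation is restarted as a workaround.
 */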
static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
				    unsigned int mode)
{
	struct loongson_data *ld = (struct loongson_data *)priv;
	struct net_device *ndev = dev_get_drvdata(ld->dev);
	struct stmmac_priv *ptr = netdev_priv(ndev);

	/* The integrated PHY has a weird problem with switching from the low
	 * speeds to 1000Mbps mode. The speedup procedure requires the PHY-link
	 * re-negotiation.
	 */
	if (speed == SPEED_1000) {
		if (readl(ptr->ioaddr + MAC_CTRL_REG) &
		    GMAC_CONTROL_PS)
			/* Work around hardware bug, restart autoneg */
			phy_restart_aneg(ndev->phydev);
	}
}

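/*
 * GNET-specific platform data: the controller comes with an internal
 * GMII PHY which only answers at MDIO address 2, hence the phy_mask
 * that hides every other address from the MDIO scan, plus the
 * fix_mac_speed() workaround registered above.
 */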
static int loongson_gnet_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	struct loongson_data *ld;
	int i;

	ld = plat->bsp_priv;

	loongson_default_data(pdev, plat);

	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
		plat->rx_queues_to_use = CHANNEL_NUM;
		plat->tx_queues_to_use = CHANNEL_NUM;

		/* Only channel 0 supports checksum,
		 * so turn off checksum to enable multiple channels.
		 */
		for (i = 1; i < CHANNEL_NUM; i++)
			plat->tx_queues_cfg[i].coe_unsupported = 1;
	} else {
		plat->tx_queues_to_use = 1;
		plat->rx_queues_to_use = 1;
	}

	plat->phy_interface = PHY_INTERFACE_MODE_GMII;
	plat->mdio_bus_data->phy_mask = ~(u32)BIT(2);
	plat->fix_mac_speed = loongson_gnet_fix_speed;

	return 0;
}

static struct stmmac_pci_info loongson_gnet_pci_info = {
	.setup = loongson_gnet_data,
};

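/*
 * Per-channel variant of the dwmac1000 DMA bus-mode setup. The
 * multi-channel Loongson controller exposes one bus-mode/interrupt
 * register block per DMA channel (DMA_CHAN_BUS_MODE()/
 * DMA_CHAN_INTR_ENA()), and the interrupt-enable mask written here
 * uses the relocated Tx/Rx summary bits defined at the top of this
 * file.
 */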
static void loongson_dwmac_dma_init_channel(struct stmmac_priv *priv,
					    void __iomem *ioaddr,
					    struct stmmac_dma_cfg *dma_cfg,
					    u32 chan)
{
	int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
	u32 value;

	value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan));

	if (dma_cfg->pblx8)
		value |= DMA_BUS_MODE_MAXPBL;

	value |= DMA_BUS_MODE_USP;
	value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
	value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
	value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);

	/* Set the Fixed burst mode */
	if (dma_cfg->fixed_burst)
		value |= DMA_BUS_MODE_FB;

	/* Mixed Burst has no effect when fb is set */
	if (dma_cfg->mixed_burst)
		value |= DMA_BUS_MODE_MB;

	if (dma_cfg->atds)
		value |= DMA_BUS_MODE_ATDS;

	if (dma_cfg->aal)
		value |= DMA_BUS_MODE_AAL;

	writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan));

	/* Enable the default set of DMA interrupts by writing to CSR7 */
	writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr +
	       DMA_CHAN_INTR_ENA(chan));
}

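/*
 * Per-channel DMA interrupt handler, mirroring the dwmac1000
 * dma_interrupt() logic but decoding the split Tx/Rx normal, abnormal
 * and fatal-bus-error summary bits. The return value is the usual
 * stmmac bitmask (handle_rx/handle_tx or a tx_hard_error* code) that
 * the core uses to schedule NAPI or recover from errors.
 */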
static int loongson_dwmac_dma_interrupt(struct stmmac_priv *priv,
					void __iomem *ioaddr,
					struct stmmac_extra_stats *x,
					u32 chan, u32 dir)
{
	struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
	u32 abnor_intr_status;
	u32 nor_intr_status;
	u32 fb_intr_status;
	u32 intr_status;
	int ret = 0;

	/* read the status register (CSR5) */
	intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));

	if (dir == DMA_DIR_RX)
		intr_status &= DMA_STATUS_MSK_RX_LOONGSON;
	else if (dir == DMA_DIR_TX)
		intr_status &= DMA_STATUS_MSK_TX_LOONGSON;

	nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON |
		DMA_STATUS_NIS_RX_LOONGSON);
	abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON |
		DMA_STATUS_AIS_RX_LOONGSON);
	fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON |
		DMA_STATUS_FBI_RX_LOONGSON);

	/* ABNORMAL interrupts */
	if (unlikely(abnor_intr_status)) {
		if (unlikely(intr_status & DMA_STATUS_UNF)) {
			ret = tx_hard_error_bump_tc;
			x->tx_undeflow_irq++;
		}
		if (unlikely(intr_status & DMA_STATUS_TJT))
			x->tx_jabber_irq++;
		if (unlikely(intr_status & DMA_STATUS_OVF))
			x->rx_overflow_irq++;
		if (unlikely(intr_status & DMA_STATUS_RU))
			x->rx_buf_unav_irq++;
		if (unlikely(intr_status & DMA_STATUS_RPS))
			x->rx_process_stopped_irq++;
		if (unlikely(intr_status & DMA_STATUS_RWT))
			x->rx_watchdog_irq++;
		if (unlikely(intr_status & DMA_STATUS_ETI))
			x->tx_early_irq++;
		if (unlikely(intr_status & DMA_STATUS_TPS)) {
			x->tx_process_stopped_irq++;
			ret = tx_hard_error;
		}
		if (unlikely(fb_intr_status)) {
			x->fatal_bus_error_irq++;
			ret = tx_hard_error;
		}
	}
	/* TX/RX NORMAL interrupts */
	if (likely(nor_intr_status)) {
		if (likely(intr_status & DMA_STATUS_RI)) {
			u32 value = readl(ioaddr + DMA_INTR_ENA);
			/* to schedule NAPI on real RIE event. */
			if (likely(value & DMA_INTR_ENA_RIE)) {
				u64_stats_update_begin(&stats->syncp);
				u64_stats_inc(&stats->rx_normal_irq_n[chan]);
				u64_stats_update_end(&stats->syncp);
				ret |= handle_rx;
			}
		}
		if (likely(intr_status & DMA_STATUS_TI)) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_normal_irq_n[chan]);
			u64_stats_update_end(&stats->syncp);
			ret |= handle_tx;
		}
		if (unlikely(intr_status & DMA_STATUS_ERI))
			x->rx_early_irq++;
	}
	/* Optional hardware blocks, interrupts should be disabled */
	if (unlikely(intr_status &
		     (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
		pr_warn("%s: unexpected status %08x\n", __func__, intr_status);

	/* Clear the interrupt by writing a logic 1 to the CSR5[18-0] */
	writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan));

	return ret;
}

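/*
 * plat->setup() hook: instead of the stock dwmac1000_setup(), the glue
 * driver builds the mac_device_info itself so that it can keep the
 * regular GMAC callbacks while overriding the two per-channel DMA
 * operations above on the multi-channel (SNPSVER == 0x10) devices.
 */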
static struct mac_device_info *loongson_dwmac_setup(void *apriv)
{
	struct stmmac_priv *priv = apriv;
	struct mac_device_info *mac;
	struct stmmac_dma_ops *dma;
	struct loongson_data *ld;
	struct pci_dev *pdev;

	ld = priv->plat->bsp_priv;
	pdev = to_pci_dev(priv->device);

	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return NULL;

	dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	/* The Loongson GMAC and GNET devices are based on the DW GMAC
	 * v3.50a and v3.73a IP-cores, but the HW designers changed the
	 * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the
	 * network controllers with the multi-channel feature to emphasize
	 * the differences: multiple DMA-channels, the AV feature and the
	 * GMAC_INT_STATUS CSR flags layout. Restore the original value here
	 * so that the correct HW interface is selected.
	 */
	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) {
		priv->synopsys_id = DWMAC_CORE_3_70;
		*dma = dwmac1000_dma_ops;
		dma->init_chan = loongson_dwmac_dma_init_channel;
		dma->dma_interrupt = loongson_dwmac_dma_interrupt;
		mac->dma = dma;
	}

	priv->dev->priv_flags |= IFF_UNICAST_FLT;

	/* Pre-initialize the respective "mac" fields as it's done in
	 * dwmac1000_setup()
	 */
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Loongson GMAC doesn't support the flow control. LS2K2000
	 * GNET doesn't support the half-duplex link mode.
	 */
	if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) {
		mac->link.caps = MAC_10 | MAC_100 | MAC_1000;
	} else {
		if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
			mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
					 MAC_10 | MAC_100 | MAC_1000;
		else
			mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
					 MAC_10FD | MAC_100FD | MAC_1000FD;
	}

	mac->link.duplex = GMAC_CONTROL_DM;
	mac->link.speed10 = GMAC_CONTROL_PS;
	mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
	mac->mii.addr = GMAC_MII_ADDR;
	mac->mii.data = GMAC_MII_DATA;
	mac->mii.addr_shift = 11;
	mac->mii.addr_mask = 0x0000F800;
	mac->mii.reg_shift = 6;
	mac->mii.reg_mask = 0x000007C0;
	mac->mii.clk_csr_shift = 2;
	mac->mii.clk_csr_mask = GENMASK(5, 2);

	return mac;
}

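/*
 * MSI vector layout on the multi-channel devices: vector 0 carries the
 * common MAC interrupt, then each channel gets an Rx/Tx pair (odd
 * vectors Rx, even vectors Tx). Note that the rx_irq[]/tx_irq[] slots
 * are filled from the top down (CHANNEL_NUM - 1 - i), presumably to
 * match the controller's channel-to-vector wiring.
 */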
static int loongson_dwmac_msi_config(struct pci_dev *pdev,
				     struct plat_stmmacenet_data *plat,
				     struct stmmac_resources *res)
{
	int i, ret, vecs;

	vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1);
	ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to allocate MSI IRQs\n");
		return ret;
	}

	res->irq = pci_irq_vector(pdev, 0);

	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[CHANNEL_NUM - 1 - i] =
			pci_irq_vector(pdev, 1 + i * 2);
	}

	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[CHANNEL_NUM - 1 - i] =
			pci_irq_vector(pdev, 2 + i * 2);
	}

	plat->flags |= STMMAC_FLAG_MULTI_MSI_EN;

	return 0;
}

static void loongson_dwmac_msi_clear(struct pci_dev *pdev)
{
	pci_free_irq_vectors(pdev);
}

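/*
 * Devicetree probe path. The properties parsed below follow the
 * generic snps,dwmac binding conventions; a node is roughly expected
 * to provide (illustrative sketch only, exact values depend on the
 * board DT):
 *
 *	ethernet@... {
 *		interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 *		phy-mode = "...";
 *		mdio { ... };
 *	};
 *
 * plus an "ethernet<N>" alias used to pick a stable bus_id.
 */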
static int loongson_dwmac_dt_config(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat,
				    struct stmmac_resources *res)
{
	struct device_node *np = dev_of_node(&pdev->dev);
	int ret;

	plat->mdio_node = of_get_child_by_name(np, "mdio");
	if (plat->mdio_node) {
		dev_info(&pdev->dev, "Found MDIO subnode\n");
		plat->mdio_bus_data->needs_reset = true;
	}

	ret = of_alias_get_id(np, "ethernet");
	if (ret >= 0)
		plat->bus_id = ret;

	res->irq = of_irq_get_byname(np, "macirq");
	if (res->irq < 0) {
		dev_err(&pdev->dev, "IRQ macirq not found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	res->wol_irq = of_irq_get_byname(np, "eth_wake_irq");
	if (res->wol_irq < 0) {
		dev_info(&pdev->dev,
			 "IRQ eth_wake_irq not found, using macirq\n");
		res->wol_irq = res->irq;
	}

	res->lpi_irq = of_irq_get_byname(np, "eth_lpi");
	if (res->lpi_irq < 0) {
		dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = device_get_phy_mode(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "phy_mode not found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	plat->phy_interface = ret;

	return 0;

err_put_node:
	of_node_put(plat->mdio_node);

	return ret;
}

static void loongson_dwmac_dt_clear(struct pci_dev *pdev,
				    struct plat_stmmacenet_data *plat)
{
	of_node_put(plat->mdio_node);
}

static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
				      struct plat_stmmacenet_data *plat,
				      struct stmmac_resources *res)
{
	if (!pdev->irq)
		return -EINVAL;

	res->irq = pdev->irq;

	return 0;
}

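/*
 * Common PCI probe path: enable the device, map the first non-empty
 * BAR (the GMAC register window), read the SNPSVER field of
 * GMAC_VERSION to tell the multi-channel parts apart, run the
 * per-device setup() from driver_data, pick up the IRQ resources from
 * DT or ACPI, optionally switch to per-channel MSI vectors, and
 * finally register the device with stmmac_dvr_probe().
 */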
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_pci_info *info;
	struct stmmac_resources res;
	struct loongson_data *ld;
	int ret, i;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL);
	if (!ld)
		return -ENOMEM;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
		return ret;
	}

	pci_set_master(pdev);

	/* Get the base address of device */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
		if (ret)
			goto err_disable_device;
		break;
	}

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	plat->bsp_priv = ld;
	plat->setup = loongson_dwmac_setup;
	ld->dev = &pdev->dev;
	ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;

	info = (struct stmmac_pci_info *)id->driver_data;
	ret = info->setup(pdev, plat);
	if (ret)
		goto err_disable_device;

	if (dev_of_node(&pdev->dev))
		ret = loongson_dwmac_dt_config(pdev, plat, &res);
	else
		ret = loongson_dwmac_acpi_config(pdev, plat, &res);
	if (ret)
		goto err_disable_device;

	/* Fall back to the common MAC IRQ if per-channel MSI allocation fails */
	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
		loongson_dwmac_msi_config(pdev, plat, &res);

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret)
		goto err_plat_clear;

	return 0;

err_plat_clear:
	if (dev_of_node(&pdev->dev))
		loongson_dwmac_dt_clear(pdev, plat);
	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
		loongson_dwmac_msi_clear(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return ret;
}

static void loongson_dwmac_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	struct loongson_data *ld;
	int i;

	ld = priv->plat->bsp_priv;
	stmmac_dvr_remove(&pdev->dev);

	if (dev_of_node(&pdev->dev))
		loongson_dwmac_dt_clear(pdev, priv->plat);

	if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
		loongson_dwmac_msi_clear(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		pcim_iounmap_regions(pdev, BIT(i));
		break;
	}

	pci_disable_device(pdev);
}

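/*
 * Legacy PCI power management: suspend lets the stmmac core quiesce
 * the MAC first, then saves the PCI state and arms wake-from-D3;
 * resume restores config space and bus mastering before
 * stmmac_resume() reprograms the controller.
 */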
static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	pci_disable_device(pdev);
	pci_wake_from_d3(pdev, true);
	return 0;
}

static int __maybe_unused loongson_dwmac_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	pci_restore_state(pdev);
	pci_set_power_state(pdev, PCI_D0);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
			 loongson_dwmac_resume);

static const struct pci_device_id loongson_dwmac_id_table[] = {
	{ PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) },
	{ PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);

static struct pci_driver loongson_dwmac_driver = {
	.name = "dwmac-loongson-pci",
	.id_table = loongson_dwmac_id_table,
	.probe = loongson_dwmac_probe,
	.remove = loongson_dwmac_remove,
	.driver = {
		.pm = &loongson_dwmac_pm_ops,
	},
};

module_pci_driver(loongson_dwmac_driver);

MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
MODULE_AUTHOR("Yanteng Si <siyanteng@loongson.cn>");
MODULE_LICENSE("GPL v2");