xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c (revision 4201c9260a8d3c4ef238e51692a7e9b4e1e29efe)
1 /*******************************************************************************
2   This contains the functions to handle the platform driver.
3 
4   Copyright (C) 2007-2011  STMicroelectronics Ltd
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   The full GNU General Public License is included in this distribution in
16   the file called "COPYING".
17 
18   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
19 *******************************************************************************/
20 
21 #include <linux/platform_device.h>
22 #include <linux/module.h>
23 #include <linux/io.h>
24 #include <linux/of.h>
25 #include <linux/of_net.h>
26 #include <linux/of_device.h>
27 #include <linux/of_mdio.h>
28 
29 #include "stmmac.h"
30 #include "stmmac_platform.h"
31 
32 #ifdef CONFIG_OF
33 
34 /**
35  * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
36  * @mcast_bins: Multicast filtering bins
37  * Description:
38  * this function validates the number of Multicast filtering bins specified
39  * by the configuration through the device tree. The Synopsys GMAC supports
40  * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
41  * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
42  * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
43  * invalid and will cause the filtering algorithm to use Multicast
44  * promiscuous mode.
45  */
46 static int dwmac1000_validate_mcast_bins(int mcast_bins)
47 {
48 	int x = mcast_bins;
49 
50 	switch (x) {
51 	case HASH_TABLE_SIZE:
52 	case 128:
53 	case 256:
54 		break;
55 	default:
56 		x = 0;
57 		pr_info("Hash table entries set to unexpected value %d",
58 			mcast_bins);
59 		break;
60 	}
61 	return x;
62 }
63 
/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
 * logic. This function validates a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
	int entries = ucast_entries;

	/* Valid configurations: any value in 1..32, or exactly 64 or 128. */
	if ((entries < 1 || entries > 32) && entries != 64 && entries != 128) {
		entries = 1;
		pr_info("Unicast table entries set to unexpected value %d\n",
			ucast_entries);
	}
	return entries;
}
92 
93 /**
94  * stmmac_axi_setup - parse DT parameters for programming the AXI register
95  * @pdev: platform device
96  * Description:
97  * if required, from device-tree the AXI internal register can be tuned
98  * by using platform parameters.
99  */
100 static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
101 {
102 	struct device_node *np;
103 	struct stmmac_axi *axi;
104 
105 	np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
106 	if (!np)
107 		return NULL;
108 
109 	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
110 	if (!axi) {
111 		of_node_put(np);
112 		return ERR_PTR(-ENOMEM);
113 	}
114 
115 	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
116 	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
117 	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
118 	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
119 	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
120 	axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
121 
122 	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
123 		axi->axi_wr_osr_lmt = 1;
124 	if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
125 		axi->axi_rd_osr_lmt = 1;
126 	of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
127 	of_node_put(np);
128 
129 	return axi;
130 }
131 
132 /**
133  * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
134  * @pdev: platform device
135  */
136 static int stmmac_mtl_setup(struct platform_device *pdev,
137 			    struct plat_stmmacenet_data *plat)
138 {
139 	struct device_node *q_node;
140 	struct device_node *rx_node;
141 	struct device_node *tx_node;
142 	u8 queue = 0;
143 	int ret = 0;
144 
145 	/* For backwards-compatibility with device trees that don't have any
146 	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
147 	 * to one RX and TX queues each.
148 	 */
149 	plat->rx_queues_to_use = 1;
150 	plat->tx_queues_to_use = 1;
151 
152 	/* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
153 	 * to always set this, otherwise Queue will be classified as AVB
154 	 * (because MTL_QUEUE_AVB = 0).
155 	 */
156 	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
157 	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
158 
159 	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
160 	if (!rx_node)
161 		return ret;
162 
163 	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
164 	if (!tx_node) {
165 		of_node_put(rx_node);
166 		return ret;
167 	}
168 
169 	/* Processing RX queues common config */
170 	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
171 				 &plat->rx_queues_to_use))
172 		plat->rx_queues_to_use = 1;
173 
174 	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
175 		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
176 	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
177 		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
178 	else
179 		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
180 
181 	/* Processing individual RX queue config */
182 	for_each_child_of_node(rx_node, q_node) {
183 		if (queue >= plat->rx_queues_to_use)
184 			break;
185 
186 		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
187 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
188 		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
189 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
190 		else
191 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
192 
193 		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
194 					 &plat->rx_queues_cfg[queue].chan))
195 			plat->rx_queues_cfg[queue].chan = queue;
196 		/* TODO: Dynamic mapping to be included in the future */
197 
198 		if (of_property_read_u32(q_node, "snps,priority",
199 					&plat->rx_queues_cfg[queue].prio)) {
200 			plat->rx_queues_cfg[queue].prio = 0;
201 			plat->rx_queues_cfg[queue].use_prio = false;
202 		} else {
203 			plat->rx_queues_cfg[queue].use_prio = true;
204 		}
205 
206 		/* RX queue specific packet type routing */
207 		if (of_property_read_bool(q_node, "snps,route-avcp"))
208 			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
209 		else if (of_property_read_bool(q_node, "snps,route-ptp"))
210 			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
211 		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
212 			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
213 		else if (of_property_read_bool(q_node, "snps,route-up"))
214 			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
215 		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
216 			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
217 		else
218 			plat->rx_queues_cfg[queue].pkt_route = 0x0;
219 
220 		queue++;
221 	}
222 	if (queue != plat->rx_queues_to_use) {
223 		ret = -EINVAL;
224 		dev_err(&pdev->dev, "Not all RX queues were configured\n");
225 		goto out;
226 	}
227 
228 	/* Processing TX queues common config */
229 	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
230 				 &plat->tx_queues_to_use))
231 		plat->tx_queues_to_use = 1;
232 
233 	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
234 		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
235 	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
236 		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
237 	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
238 		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
239 	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
240 		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
241 	else
242 		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
243 
244 	queue = 0;
245 
246 	/* Processing individual TX queue config */
247 	for_each_child_of_node(tx_node, q_node) {
248 		if (queue >= plat->tx_queues_to_use)
249 			break;
250 
251 		if (of_property_read_u32(q_node, "snps,weight",
252 					 &plat->tx_queues_cfg[queue].weight))
253 			plat->tx_queues_cfg[queue].weight = 0x10 + queue;
254 
255 		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
256 			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
257 		} else if (of_property_read_bool(q_node,
258 						 "snps,avb-algorithm")) {
259 			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
260 
261 			/* Credit Base Shaper parameters used by AVB */
262 			if (of_property_read_u32(q_node, "snps,send_slope",
263 				&plat->tx_queues_cfg[queue].send_slope))
264 				plat->tx_queues_cfg[queue].send_slope = 0x0;
265 			if (of_property_read_u32(q_node, "snps,idle_slope",
266 				&plat->tx_queues_cfg[queue].idle_slope))
267 				plat->tx_queues_cfg[queue].idle_slope = 0x0;
268 			if (of_property_read_u32(q_node, "snps,high_credit",
269 				&plat->tx_queues_cfg[queue].high_credit))
270 				plat->tx_queues_cfg[queue].high_credit = 0x0;
271 			if (of_property_read_u32(q_node, "snps,low_credit",
272 				&plat->tx_queues_cfg[queue].low_credit))
273 				plat->tx_queues_cfg[queue].low_credit = 0x0;
274 		} else {
275 			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
276 		}
277 
278 		if (of_property_read_u32(q_node, "snps,priority",
279 					&plat->tx_queues_cfg[queue].prio)) {
280 			plat->tx_queues_cfg[queue].prio = 0;
281 			plat->tx_queues_cfg[queue].use_prio = false;
282 		} else {
283 			plat->tx_queues_cfg[queue].use_prio = true;
284 		}
285 
286 		queue++;
287 	}
288 	if (queue != plat->tx_queues_to_use) {
289 		ret = -EINVAL;
290 		dev_err(&pdev->dev, "Not all TX queues were configured\n");
291 		goto out;
292 	}
293 
294 out:
295 	of_node_put(rx_node);
296 	of_node_put(tx_node);
297 	of_node_put(q_node);
298 
299 	return ret;
300 }
301 
302 /**
303  * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
304  * @plat: driver data platform structure
305  * @np: device tree node
306  * @dev: device pointer
307  * Description:
308  * The mdio bus will be allocated in case of a phy transceiver is on board;
309  * it will be NULL if the fixed-link is configured.
310  * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
311  * in any case (for DSA, mdio must be registered even if fixed-link).
312  * The table below sums the supported configurations:
313  *	-------------------------------
314  *	snps,phy-addr	|     Y
315  *	-------------------------------
316  *	phy-handle	|     Y
317  *	-------------------------------
318  *	fixed-link	|     N
319  *	-------------------------------
320  *	snps,dwmac-mdio	|
321  *	  even if	|     Y
322  *	fixed-link	|
323  *	-------------------------------
324  *
325  * It returns 0 in case of success otherwise -ENODEV.
326  */
327 static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
328 			 struct device_node *np, struct device *dev)
329 {
330 	bool mdio = true;
331 	static const struct of_device_id need_mdio_ids[] = {
332 		{ .compatible = "snps,dwc-qos-ethernet-4.10" },
333 		{},
334 	};
335 
336 	if (of_match_node(need_mdio_ids, np)) {
337 		plat->mdio_node = of_get_child_by_name(np, "mdio");
338 	} else {
339 		/**
340 		 * If snps,dwmac-mdio is passed from DT, always register
341 		 * the MDIO
342 		 */
343 		for_each_child_of_node(np, plat->mdio_node) {
344 			if (of_device_is_compatible(plat->mdio_node,
345 						    "snps,dwmac-mdio"))
346 				break;
347 		}
348 	}
349 
350 	if (plat->mdio_node) {
351 		dev_dbg(dev, "Found MDIO subnode\n");
352 		mdio = true;
353 	}
354 
355 	if (mdio)
356 		plat->mdio_bus_data =
357 			devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
358 				     GFP_KERNEL);
359 	return 0;
360 }
361 
362 /**
363  * stmmac_probe_config_dt - parse device-tree driver parameters
364  * @pdev: platform_device structure
365  * @mac: MAC address to use
366  * Description:
367  * this function is to read the driver parameters from device-tree and
368  * set some private fields that will be used by the main at runtime.
369  */
370 struct plat_stmmacenet_data *
371 stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
372 {
373 	struct device_node *np = pdev->dev.of_node;
374 	struct plat_stmmacenet_data *plat;
375 	struct stmmac_dma_cfg *dma_cfg;
376 	int rc;
377 
378 	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
379 	if (!plat)
380 		return ERR_PTR(-ENOMEM);
381 
382 	*mac = of_get_mac_address(np);
383 	plat->interface = of_get_phy_mode(np);
384 
385 	/* Some wrapper drivers still rely on phy_node. Let's save it while
386 	 * they are not converted to phylink. */
387 	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
388 
389 	/* PHYLINK automatically parses the phy-handle property */
390 	plat->phylink_node = np;
391 
392 	/* Get max speed of operation from device tree */
393 	if (of_property_read_u32(np, "max-speed", &plat->max_speed))
394 		plat->max_speed = -1;
395 
396 	plat->bus_id = of_alias_get_id(np, "ethernet");
397 	if (plat->bus_id < 0)
398 		plat->bus_id = 0;
399 
400 	/* Default to phy auto-detection */
401 	plat->phy_addr = -1;
402 
403 	/* Default to get clk_csr from stmmac_clk_crs_set(),
404 	 * or get clk_csr from device tree.
405 	 */
406 	plat->clk_csr = -1;
407 	of_property_read_u32(np, "clk_csr", &plat->clk_csr);
408 
409 	/* "snps,phy-addr" is not a standard property. Mark it as deprecated
410 	 * and warn of its use. Remove this when phy node support is added.
411 	 */
412 	if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
413 		dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
414 
415 	/* To Configure PHY by using all device-tree supported properties */
416 	rc = stmmac_dt_phy(plat, np, &pdev->dev);
417 	if (rc)
418 		return ERR_PTR(rc);
419 
420 	of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
421 
422 	of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);
423 
424 	plat->force_sf_dma_mode =
425 		of_property_read_bool(np, "snps,force_sf_dma_mode");
426 
427 	plat->en_tx_lpi_clockgating =
428 		of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
429 
430 	/* Set the maxmtu to a default of JUMBO_LEN in case the
431 	 * parameter is not present in the device tree.
432 	 */
433 	plat->maxmtu = JUMBO_LEN;
434 
435 	/* Set default value for multicast hash bins */
436 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
437 
438 	/* Set default value for unicast filter entries */
439 	plat->unicast_filter_entries = 1;
440 
441 	/*
442 	 * Currently only the properties needed on SPEAr600
443 	 * are provided. All other properties should be added
444 	 * once needed on other platforms.
445 	 */
446 	if (of_device_is_compatible(np, "st,spear600-gmac") ||
447 		of_device_is_compatible(np, "snps,dwmac-3.50a") ||
448 		of_device_is_compatible(np, "snps,dwmac-3.70a") ||
449 		of_device_is_compatible(np, "snps,dwmac")) {
450 		/* Note that the max-frame-size parameter as defined in the
451 		 * ePAPR v1.1 spec is defined as max-frame-size, it's
452 		 * actually used as the IEEE definition of MAC Client
453 		 * data, or MTU. The ePAPR specification is confusing as
454 		 * the definition is max-frame-size, but usage examples
455 		 * are clearly MTUs
456 		 */
457 		of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
458 		of_property_read_u32(np, "snps,multicast-filter-bins",
459 				     &plat->multicast_filter_bins);
460 		of_property_read_u32(np, "snps,perfect-filter-entries",
461 				     &plat->unicast_filter_entries);
462 		plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
463 					       plat->unicast_filter_entries);
464 		plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
465 					      plat->multicast_filter_bins);
466 		plat->has_gmac = 1;
467 		plat->pmt = 1;
468 	}
469 
470 	if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
471 	    of_device_is_compatible(np, "snps,dwmac-4.10a") ||
472 	    of_device_is_compatible(np, "snps,dwmac-4.20a")) {
473 		plat->has_gmac4 = 1;
474 		plat->has_gmac = 0;
475 		plat->pmt = 1;
476 		plat->tso_en = of_property_read_bool(np, "snps,tso");
477 	}
478 
479 	if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
480 		of_device_is_compatible(np, "snps,dwmac-3.710")) {
481 		plat->enh_desc = 1;
482 		plat->bugged_jumbo = 1;
483 		plat->force_sf_dma_mode = 1;
484 	}
485 
486 	if (of_device_is_compatible(np, "snps,dwxgmac")) {
487 		plat->has_xgmac = 1;
488 		plat->pmt = 1;
489 		plat->tso_en = of_property_read_bool(np, "snps,tso");
490 	}
491 
492 	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
493 			       GFP_KERNEL);
494 	if (!dma_cfg) {
495 		stmmac_remove_config_dt(pdev, plat);
496 		return ERR_PTR(-ENOMEM);
497 	}
498 	plat->dma_cfg = dma_cfg;
499 
500 	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
501 	if (!dma_cfg->pbl)
502 		dma_cfg->pbl = DEFAULT_DMA_PBL;
503 	of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
504 	of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
505 	dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");
506 
507 	dma_cfg->aal = of_property_read_bool(np, "snps,aal");
508 	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
509 	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
510 
511 	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
512 	if (plat->force_thresh_dma_mode) {
513 		plat->force_sf_dma_mode = 0;
514 		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
515 	}
516 
517 	of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
518 
519 	plat->axi = stmmac_axi_setup(pdev);
520 
521 	rc = stmmac_mtl_setup(pdev, plat);
522 	if (rc) {
523 		stmmac_remove_config_dt(pdev, plat);
524 		return ERR_PTR(rc);
525 	}
526 
527 	/* clock setup */
528 	plat->stmmac_clk = devm_clk_get(&pdev->dev,
529 					STMMAC_RESOURCE_NAME);
530 	if (IS_ERR(plat->stmmac_clk)) {
531 		dev_warn(&pdev->dev, "Cannot get CSR clock\n");
532 		plat->stmmac_clk = NULL;
533 	}
534 	clk_prepare_enable(plat->stmmac_clk);
535 
536 	plat->pclk = devm_clk_get(&pdev->dev, "pclk");
537 	if (IS_ERR(plat->pclk)) {
538 		if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
539 			goto error_pclk_get;
540 
541 		plat->pclk = NULL;
542 	}
543 	clk_prepare_enable(plat->pclk);
544 
545 	/* Fall-back to main clock in case of no PTP ref is passed */
546 	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
547 	if (IS_ERR(plat->clk_ptp_ref)) {
548 		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
549 		plat->clk_ptp_ref = NULL;
550 		dev_warn(&pdev->dev, "PTP uses main clock\n");
551 	} else {
552 		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
553 		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
554 	}
555 
556 	plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
557 						  STMMAC_RESOURCE_NAME);
558 	if (IS_ERR(plat->stmmac_rst)) {
559 		if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
560 			goto error_hw_init;
561 
562 		dev_info(&pdev->dev, "no reset control found\n");
563 		plat->stmmac_rst = NULL;
564 	}
565 
566 	return plat;
567 
568 error_hw_init:
569 	clk_disable_unprepare(plat->pclk);
570 error_pclk_get:
571 	clk_disable_unprepare(plat->stmmac_clk);
572 
573 	return ERR_PTR(-EPROBE_DEFER);
574 }
575 
/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	/* Drop the device_node references taken via of_parse_phandle() and
	 * the mdio-node lookup in stmmac_dt_phy(); of_node_put(NULL) is a
	 * no-op, so this is safe when either node was never found.
	 */
	of_node_put(plat->phy_node);
	of_node_put(plat->mdio_node);
}
589 #else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
	/* Without CONFIG_OF there is no device tree to parse. */
	return ERR_PTR(-EINVAL);
}
595 
void stmmac_remove_config_dt(struct platform_device *pdev,
			     struct plat_stmmacenet_data *plat)
{
	/* Nothing to release: the !CONFIG_OF probe stub never allocates. */
}
600 #endif /* CONFIG_OF */
601 EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
602 EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
603 
604 int stmmac_get_platform_resources(struct platform_device *pdev,
605 				  struct stmmac_resources *stmmac_res)
606 {
607 	struct resource *res;
608 
609 	memset(stmmac_res, 0, sizeof(*stmmac_res));
610 
611 	/* Get IRQ information early to have an ability to ask for deferred
612 	 * probe if needed before we went too far with resource allocation.
613 	 */
614 	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
615 	if (stmmac_res->irq < 0) {
616 		if (stmmac_res->irq != -EPROBE_DEFER) {
617 			dev_err(&pdev->dev,
618 				"MAC IRQ configuration information not found\n");
619 		}
620 		return stmmac_res->irq;
621 	}
622 
623 	/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
624 	 * The external wake up irq can be passed through the platform code
625 	 * named as "eth_wake_irq"
626 	 *
627 	 * In case the wake up interrupt is not passed from the platform
628 	 * so the driver will continue to use the mac irq (ndev->irq)
629 	 */
630 	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
631 	if (stmmac_res->wol_irq < 0) {
632 		if (stmmac_res->wol_irq == -EPROBE_DEFER)
633 			return -EPROBE_DEFER;
634 		stmmac_res->wol_irq = stmmac_res->irq;
635 	}
636 
637 	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
638 	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
639 		return -EPROBE_DEFER;
640 
641 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
642 	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
643 
644 	return PTR_ERR_OR_ZERO(stmmac_res->addr);
645 }
646 EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
647 
648 /**
649  * stmmac_pltfr_remove
650  * @pdev: platform device pointer
651  * Description: this function calls the main to free the net resources
652  * and calls the platforms hook and release the resources (e.g. mem).
653  */
654 int stmmac_pltfr_remove(struct platform_device *pdev)
655 {
656 	struct net_device *ndev = platform_get_drvdata(pdev);
657 	struct stmmac_priv *priv = netdev_priv(ndev);
658 	struct plat_stmmacenet_data *plat = priv->plat;
659 	int ret = stmmac_dvr_remove(&pdev->dev);
660 
661 	if (plat->exit)
662 		plat->exit(pdev, plat->bsp_priv);
663 
664 	stmmac_remove_config_dt(pdev, plat);
665 
666 	return ret;
667 }
668 EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
669 
670 #ifdef CONFIG_PM_SLEEP
671 /**
672  * stmmac_pltfr_suspend
673  * @dev: device pointer
674  * Description: this function is invoked when suspend the driver and it direcly
675  * call the main suspend function and then, if required, on some platform, it
676  * can call an exit helper.
677  */
678 static int stmmac_pltfr_suspend(struct device *dev)
679 {
680 	int ret;
681 	struct net_device *ndev = dev_get_drvdata(dev);
682 	struct stmmac_priv *priv = netdev_priv(ndev);
683 	struct platform_device *pdev = to_platform_device(dev);
684 
685 	ret = stmmac_suspend(dev);
686 	if (priv->plat->exit)
687 		priv->plat->exit(pdev, priv->plat->bsp_priv);
688 
689 	return ret;
690 }
691 
692 /**
693  * stmmac_pltfr_resume
694  * @dev: device pointer
695  * Description: this function is invoked when resume the driver before calling
696  * the main resume function, on some platforms, it can call own init helper
697  * if required.
698  */
699 static int stmmac_pltfr_resume(struct device *dev)
700 {
701 	struct net_device *ndev = dev_get_drvdata(dev);
702 	struct stmmac_priv *priv = netdev_priv(ndev);
703 	struct platform_device *pdev = to_platform_device(dev);
704 
705 	if (priv->plat->init)
706 		priv->plat->init(pdev, priv->plat->bsp_priv);
707 
708 	return stmmac_resume(dev);
709 }
710 #endif /* CONFIG_PM_SLEEP */
711 
712 SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
713 				       stmmac_pltfr_resume);
714 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
715 
716 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
717 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
718 MODULE_LICENSE("GPL");
719