/*******************************************************************************
  This contains the functions to handle the platform driver.

  Copyright (C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>

#include "stmmac.h"
#include "stmmac_platform.h"

#ifdef CONFIG_OF

/**
 * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
 * @mcast_bins: Multicast filtering bins
 * Description:
 * this function validates the number of Multicast filtering bins specified
 * by the configuration through the device tree. The Synopsys GMAC supports
 * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of the CRC
 * number space. 64 bins correspond to 6 bits of the CRC, 128 bins to
 * 7 bits, and 256 bins to 8 bits of the CRC. Any other setting is
 * invalid and will cause the filtering algorithm to use Multicast
 * promiscuous mode.
 */
static int dwmac1000_validate_mcast_bins(int mcast_bins)
{
        int x = mcast_bins;

        switch (x) {
        case HASH_TABLE_SIZE:
        case 128:
        case 256:
                break;
        default:
                x = 0;
                pr_info("Hash table entries set to unexpected value %d\n",
                        mcast_bins);
                break;
        }
        return x;
}

/**
 * dwmac1000_validate_ucast_entries - validate the Unicast address entries
 * @ucast_entries: number of Unicast address entries
 * Description:
 * This function validates the number of Unicast address entries supported
 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
 * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
 * logic. This function ensures a valid, supported configuration is
 * selected, and defaults to 1 Unicast address if an unsupported
 * configuration is selected.
 */
static int dwmac1000_validate_ucast_entries(int ucast_entries)
{
        int x = ucast_entries;

        switch (x) {
        case 1 ... 32:
        case 64:
        case 128:
                break;
        default:
                x = 1;
                pr_info("Unicast table entries set to unexpected value %d\n",
                        ucast_entries);
                break;
        }
        return x;
}

/**
 * stmmac_axi_setup - parse DT parameters for programming the AXI register
 * @pdev: platform device
 * Description:
 * if required, the AXI internal register can be tuned through platform
 * parameters read from the device tree.
 */
static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
{
        struct device_node *np;
        struct stmmac_axi *axi;

        np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
        if (!np)
                return NULL;

        axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
        if (!axi) {
                of_node_put(np);
                return ERR_PTR(-ENOMEM);
        }

        axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
        axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
        axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
        axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
        axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
        axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");

        if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
                axi->axi_wr_osr_lmt = 1;
        if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
                axi->axi_rd_osr_lmt = 1;
        of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
        of_node_put(np);

        return axi;
}

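/*
 * Illustrative only: a device-tree fragment of the kind stmmac_axi_setup()
 * expects to find behind the "snps,axi-config" phandle. The label, node name
 * and values below are examples, not a reference binding.
 *
 *      stmmac_axi: stmmac-axi-config {
 *              snps,wr_osr_lmt = <0xf>;
 *              snps,rd_osr_lmt = <0xf>;
 *              snps,blen = <256 128 64 32 0 0 0>;
 *      };
 *
 *      &ethernet {
 *              snps,axi-config = <&stmmac_axi>;
 *      };
 */
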
/**
 * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
 * @pdev: platform device
 * @plat: driver data platform structure
 */
static int stmmac_mtl_setup(struct platform_device *pdev,
                            struct plat_stmmacenet_data *plat)
{
        struct device_node *q_node;
        struct device_node *rx_node;
        struct device_node *tx_node;
        u8 queue = 0;
        int ret = 0;

        /* For backwards-compatibility with device trees that don't have any
         * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
         * to one RX queue and one TX queue.
         */
        plat->rx_queues_to_use = 1;
        plat->tx_queues_to_use = 1;

        /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
         * to always set this, otherwise Queue will be classified as AVB
         * (because MTL_QUEUE_AVB = 0).
         */
        plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
        plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;

        rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
        if (!rx_node)
                return ret;

        tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
        if (!tx_node) {
                of_node_put(rx_node);
                return ret;
        }

        /* Processing RX queues common config */
        if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
                                 &plat->rx_queues_to_use))
                plat->rx_queues_to_use = 1;

        if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
                plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
        else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
                plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
        else
                plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

        /* Processing individual RX queue config */
        for_each_child_of_node(rx_node, q_node) {
                if (queue >= plat->rx_queues_to_use)
                        break;

                if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
                        plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
                else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
                        plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
                else
                        plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;

                if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
                                         &plat->rx_queues_cfg[queue].chan))
                        plat->rx_queues_cfg[queue].chan = queue;
                /* TODO: Dynamic mapping to be included in the future */

                if (of_property_read_u32(q_node, "snps,priority",
                                         &plat->rx_queues_cfg[queue].prio)) {
                        plat->rx_queues_cfg[queue].prio = 0;
                        plat->rx_queues_cfg[queue].use_prio = false;
                } else {
                        plat->rx_queues_cfg[queue].use_prio = true;
                }

                /* RX queue specific packet type routing */
                if (of_property_read_bool(q_node, "snps,route-avcp"))
                        plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
                else if (of_property_read_bool(q_node, "snps,route-ptp"))
                        plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
                else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
                        plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
                else if (of_property_read_bool(q_node, "snps,route-up"))
                        plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
                else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
                        plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
                else
                        plat->rx_queues_cfg[queue].pkt_route = 0x0;

                queue++;
        }
        if (queue != plat->rx_queues_to_use) {
                ret = -EINVAL;
                dev_err(&pdev->dev, "Not all RX queues were configured\n");
                goto out;
        }

        /* Processing TX queues common config */
        if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
                                 &plat->tx_queues_to_use))
                plat->tx_queues_to_use = 1;

        if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
                plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
        else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
                plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
        else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
                plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
        else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
                plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
        else
                plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;

        queue = 0;

        /* Processing individual TX queue config */
        for_each_child_of_node(tx_node, q_node) {
                if (queue >= plat->tx_queues_to_use)
                        break;

                if (of_property_read_u32(q_node, "snps,weight",
                                         &plat->tx_queues_cfg[queue].weight))
                        plat->tx_queues_cfg[queue].weight = 0x10 + queue;

                if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
                        plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
                } else if (of_property_read_bool(q_node,
                                                 "snps,avb-algorithm")) {
                        plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;

                        /* Credit Based Shaper parameters used by AVB */
                        if (of_property_read_u32(q_node, "snps,send_slope",
                                                 &plat->tx_queues_cfg[queue].send_slope))
                                plat->tx_queues_cfg[queue].send_slope = 0x0;
                        if (of_property_read_u32(q_node, "snps,idle_slope",
                                                 &plat->tx_queues_cfg[queue].idle_slope))
                                plat->tx_queues_cfg[queue].idle_slope = 0x0;
                        if (of_property_read_u32(q_node, "snps,high_credit",
                                                 &plat->tx_queues_cfg[queue].high_credit))
                                plat->tx_queues_cfg[queue].high_credit = 0x0;
                        if (of_property_read_u32(q_node, "snps,low_credit",
                                                 &plat->tx_queues_cfg[queue].low_credit))
                                plat->tx_queues_cfg[queue].low_credit = 0x0;
                } else {
                        plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
                }

                if (of_property_read_u32(q_node, "snps,priority",
                                         &plat->tx_queues_cfg[queue].prio)) {
                        plat->tx_queues_cfg[queue].prio = 0;
                        plat->tx_queues_cfg[queue].use_prio = false;
                } else {
                        plat->tx_queues_cfg[queue].use_prio = true;
                }

                queue++;
        }
        if (queue != plat->tx_queues_to_use) {
                ret = -EINVAL;
                dev_err(&pdev->dev, "Not all TX queues were configured\n");
                goto out;
        }

out:
        of_node_put(rx_node);
        of_node_put(tx_node);
        of_node_put(q_node);

        return ret;
}

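/*
 * Illustrative only: device-tree fragments of the kind stmmac_mtl_setup()
 * parses. Labels, node names and values are examples; the per-queue child
 * nodes are matched by position, not by name.
 *
 *      mtl_rx_setup: rx-queues-config {
 *              snps,rx-queues-to-use = <2>;
 *              snps,rx-sched-sp;
 *              queue0 {
 *                      snps,dcb-algorithm;
 *                      snps,map-to-dma-channel = <0>;
 *              };
 *              queue1 {
 *                      snps,avb-algorithm;
 *                      snps,map-to-dma-channel = <1>;
 *                      snps,route-ptp;
 *              };
 *      };
 *
 *      mtl_tx_setup: tx-queues-config {
 *              snps,tx-queues-to-use = <2>;
 *              snps,tx-sched-wrr;
 *              queue0 {
 *                      snps,weight = <0x10>;
 *                      snps,dcb-algorithm;
 *              };
 *              queue1 {
 *                      snps,avb-algorithm;
 *                      snps,send_slope = <0x1000>;
 *                      snps,idle_slope = <0x1000>;
 *                      snps,high_credit = <0x3e800>;
 *                      snps,low_credit = <0xffc18000>;
 *              };
 *      };
 *
 *      &ethernet {
 *              snps,mtl-rx-config = <&mtl_rx_setup>;
 *              snps,mtl-tx-config = <&mtl_tx_setup>;
 *      };
 */
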
/**
 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
 * @plat: driver data platform structure
 * @np: device tree node
 * @dev: device pointer
 * Description:
 * The MDIO bus will be allocated when a PHY transceiver is on board;
 * it will be NULL if only a fixed-link is configured.
 * If the "snps,dwmac-mdio" sub-node is present, the MDIO bus will be
 * allocated in any case (for DSA, MDIO must be registered even with a
 * fixed-link).
 * The table below sums up the supported configurations:
 *	-------------------------------
 *	snps,phy-addr	|     Y
 *	-------------------------------
 *	phy-handle	|     Y
 *	-------------------------------
 *	fixed-link	|     N
 *	-------------------------------
 *	snps,dwmac-mdio	|
 *	  even if	|     Y
 *	fixed-link	|
 *	-------------------------------
 *
 * It returns 0 in case of success, otherwise -ENODEV.
 */
static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
                         struct device_node *np, struct device *dev)
{
        bool mdio = true;
        static const struct of_device_id need_mdio_ids[] = {
                { .compatible = "snps,dwc-qos-ethernet-4.10" },
                {},
        };

        /* If phy-handle property is passed from DT, use it as the PHY */
        plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
        if (plat->phy_node)
                dev_dbg(dev, "Found phy-handle subnode\n");

        /* If phy-handle is not specified, check if we have a fixed-phy */
        if (!plat->phy_node && of_phy_is_fixed_link(np)) {
                if ((of_phy_register_fixed_link(np) < 0))
                        return -ENODEV;

                dev_dbg(dev, "Found fixed-link subnode\n");
                plat->phy_node = of_node_get(np);
                mdio = false;
        }

        if (of_match_node(need_mdio_ids, np)) {
                plat->mdio_node = of_get_child_by_name(np, "mdio");
        } else {
                /* If snps,dwmac-mdio is passed from DT, always register
                 * the MDIO
                 */
                for_each_child_of_node(np, plat->mdio_node) {
                        if (of_device_is_compatible(plat->mdio_node,
                                                    "snps,dwmac-mdio"))
                                break;
                }
        }

        if (plat->mdio_node) {
                dev_dbg(dev, "Found MDIO subnode\n");
                mdio = true;
        }

        if (mdio)
                plat->mdio_bus_data =
                        devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
                                     GFP_KERNEL);
        return 0;
}

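/*
 * Illustrative only: two device-tree shapes accepted by stmmac_dt_phy().
 * Labels, node names and the PHY address are examples.
 *
 * a) PHY behind the embedded MDIO controller:
 *
 *      &ethernet {
 *              phy-handle = <&phy0>;
 *              mdio0 {
 *                      compatible = "snps,dwmac-mdio";
 *                      #address-cells = <1>;
 *                      #size-cells = <0>;
 *                      phy0: ethernet-phy@0 {
 *                              reg = <0>;
 *                      };
 *              };
 *      };
 *
 * b) fixed-link (no PHY); a "snps,dwmac-mdio" sub-node may still be added
 *    when an MDIO bus must be registered anyway (e.g. for DSA switches):
 *
 *      &ethernet {
 *              fixed-link {
 *                      speed = <1000>;
 *                      full-duplex;
 *              };
 *      };
 */
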
/**
 * stmmac_probe_config_dt - parse device-tree driver parameters
 * @pdev: platform_device structure
 * @mac: MAC address to use
 * Description:
 * this function reads the driver parameters from the device tree and
 * sets some private fields that will be used by the main driver at runtime.
 */
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
        struct device_node *np = pdev->dev.of_node;
        struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
        int rc;

        plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
        if (!plat)
                return ERR_PTR(-ENOMEM);

        *mac = of_get_mac_address(np);
        plat->interface = of_get_phy_mode(np);

        /* Get max speed of operation from device tree */
        if (of_property_read_u32(np, "max-speed", &plat->max_speed))
                plat->max_speed = -1;

        plat->bus_id = of_alias_get_id(np, "ethernet");
        if (plat->bus_id < 0)
                plat->bus_id = 0;

        /* Default to phy auto-detection */
        plat->phy_addr = -1;

        /* Default to get clk_csr from stmmac_clk_csr_set(),
         * or get clk_csr from device tree.
         */
        plat->clk_csr = -1;
        of_property_read_u32(np, "clk_csr", &plat->clk_csr);

        /* "snps,phy-addr" is not a standard property. Mark it as deprecated
         * and warn of its use. Remove this when phy node support is added.
         */
        if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
                dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");

        /* Configure the PHY using all supported device-tree properties */
        rc = stmmac_dt_phy(plat, np, &pdev->dev);
        if (rc)
                return ERR_PTR(rc);

        of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);

        of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);

        plat->force_sf_dma_mode =
                of_property_read_bool(np, "snps,force_sf_dma_mode");

        plat->en_tx_lpi_clockgating =
                of_property_read_bool(np, "snps,en-tx-lpi-clockgating");

        /* Set the maxmtu to a default of JUMBO_LEN in case the
         * parameter is not present in the device tree.
         */
        plat->maxmtu = JUMBO_LEN;

        /* Set default value for multicast hash bins */
        plat->multicast_filter_bins = HASH_TABLE_SIZE;

        /* Set default value for unicast filter entries */
        plat->unicast_filter_entries = 1;

        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
         * once needed on other platforms.
         */
        if (of_device_is_compatible(np, "st,spear600-gmac") ||
            of_device_is_compatible(np, "snps,dwmac-3.50a") ||
            of_device_is_compatible(np, "snps,dwmac-3.70a") ||
            of_device_is_compatible(np, "snps,dwmac")) {
                /* Note that the max-frame-size parameter as defined in the
                 * ePAPR v1.1 spec is used as the IEEE definition of MAC
                 * Client data, or MTU. The ePAPR specification is confusing
                 * as the definition is max-frame-size, but usage examples
                 * are clearly MTUs
                 */
                of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
                of_property_read_u32(np, "snps,multicast-filter-bins",
                                     &plat->multicast_filter_bins);
                of_property_read_u32(np, "snps,perfect-filter-entries",
                                     &plat->unicast_filter_entries);
                plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
                                               plat->unicast_filter_entries);
                plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
                                              plat->multicast_filter_bins);
                plat->has_gmac = 1;
                plat->pmt = 1;
        }

        if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
            of_device_is_compatible(np, "snps,dwmac-4.10a") ||
            of_device_is_compatible(np, "snps,dwmac-4.20a")) {
                plat->has_gmac4 = 1;
                plat->has_gmac = 0;
                plat->pmt = 1;
                plat->tso_en = of_property_read_bool(np, "snps,tso");
        }

        if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
            of_device_is_compatible(np, "snps,dwmac-3.710")) {
                plat->enh_desc = 1;
                plat->bugged_jumbo = 1;
                plat->force_sf_dma_mode = 1;
        }

        if (of_device_is_compatible(np, "snps,dwxgmac")) {
                plat->has_xgmac = 1;
                plat->pmt = 1;
                plat->tso_en = of_property_read_bool(np, "snps,tso");
        }

        dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
                               GFP_KERNEL);
        if (!dma_cfg) {
                stmmac_remove_config_dt(pdev, plat);
                return ERR_PTR(-ENOMEM);
        }
        plat->dma_cfg = dma_cfg;

        of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
        if (!dma_cfg->pbl)
                dma_cfg->pbl = DEFAULT_DMA_PBL;
        of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
        of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
        dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");

        dma_cfg->aal = of_property_read_bool(np, "snps,aal");
        dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
        dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");

        plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
        if (plat->force_thresh_dma_mode) {
                plat->force_sf_dma_mode = 0;
                pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
        }

        of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);

        plat->axi = stmmac_axi_setup(pdev);

        rc = stmmac_mtl_setup(pdev, plat);
        if (rc) {
                stmmac_remove_config_dt(pdev, plat);
                return ERR_PTR(rc);
        }

        /* clock setup */
        plat->stmmac_clk = devm_clk_get(&pdev->dev,
                                        STMMAC_RESOURCE_NAME);
        if (IS_ERR(plat->stmmac_clk)) {
                dev_warn(&pdev->dev, "Cannot get CSR clock\n");
                plat->stmmac_clk = NULL;
        }
        clk_prepare_enable(plat->stmmac_clk);

        plat->pclk = devm_clk_get(&pdev->dev, "pclk");
        if (IS_ERR(plat->pclk)) {
                if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
                        goto error_pclk_get;

                plat->pclk = NULL;
        }
        clk_prepare_enable(plat->pclk);

        /* Fall back to the main clock if no PTP reference clock is passed */
        plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
        if (IS_ERR(plat->clk_ptp_ref)) {
                plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
                plat->clk_ptp_ref = NULL;
                dev_warn(&pdev->dev, "PTP uses main clock\n");
        } else {
                plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
                dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
        }

        plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
                                                  STMMAC_RESOURCE_NAME);
        if (IS_ERR(plat->stmmac_rst)) {
                if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
                        goto error_hw_init;

                dev_info(&pdev->dev, "no reset control found\n");
                plat->stmmac_rst = NULL;
        }

        return plat;

error_hw_init:
        clk_disable_unprepare(plat->pclk);
error_pclk_get:
        clk_disable_unprepare(plat->stmmac_clk);

        return ERR_PTR(-EPROBE_DEFER);
}

/**
 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
 * @pdev: platform_device structure
 * @plat: driver data platform structure
 *
 * Release resources claimed by stmmac_probe_config_dt().
 */
void stmmac_remove_config_dt(struct platform_device *pdev,
                             struct plat_stmmacenet_data *plat)
{
        struct device_node *np = pdev->dev.of_node;

        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(plat->phy_node);
        of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
{
        return ERR_PTR(-EINVAL);
}

void stmmac_remove_config_dt(struct platform_device *pdev,
                             struct plat_stmmacenet_data *plat)
{
}
#endif /* CONFIG_OF */
EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);

int stmmac_get_platform_resources(struct platform_device *pdev,
                                  struct stmmac_resources *stmmac_res)
{
        struct resource *res;

        memset(stmmac_res, 0, sizeof(*stmmac_res));

        /* Get IRQ information early so we can ask for a deferred probe if
         * needed, before we go too far with resource allocation.
         */
        stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
        if (stmmac_res->irq < 0) {
                if (stmmac_res->irq != -EPROBE_DEFER) {
                        dev_err(&pdev->dev,
                                "MAC IRQ configuration information not found\n");
                }
                return stmmac_res->irq;
        }

        /* On some platforms, e.g. SPEAr, the wake-up IRQ differs from the
         * MAC IRQ. The external wake-up IRQ can be passed through the
         * platform code, named "eth_wake_irq".
         *
         * If the wake-up interrupt is not passed from the platform,
         * the driver will continue to use the MAC IRQ (ndev->irq).
         */
        stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
        if (stmmac_res->wol_irq < 0) {
                if (stmmac_res->wol_irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                stmmac_res->wol_irq = stmmac_res->irq;
        }

        stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
        if (stmmac_res->lpi_irq == -EPROBE_DEFER)
                return -EPROBE_DEFER;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);

        return PTR_ERR_OR_ZERO(stmmac_res->addr);
}
EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);

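/*
 * Illustrative only: the named platform resources looked up above are
 * typically described in the device tree as follows. The unit address,
 * register size and interrupt specifiers are examples; only "macirq" is
 * mandatory, the wake-up and LPI lines are optional.
 *
 *      ethernet@e0800000 {
 *              compatible = "snps,dwmac-3.70a", "snps,dwmac";
 *              reg = <0xe0800000 0x8000>;
 *              interrupts = <0 48 4>, <0 49 4>, <0 50 4>;
 *              interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
 *      };
 */
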
/**
 * stmmac_pltfr_remove
 * @pdev: platform device pointer
 * Description: this function calls the main driver to free the net resources
 * and then calls the platform exit hook to release its resources (e.g. mem).
 */
int stmmac_pltfr_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct plat_stmmacenet_data *plat = priv->plat;
        int ret = stmmac_dvr_remove(&pdev->dev);

        if (plat->exit)
                plat->exit(pdev, plat->bsp_priv);

        stmmac_remove_config_dt(pdev, plat);

        return ret;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);

#ifdef CONFIG_PM_SLEEP
/**
 * stmmac_pltfr_suspend
 * @dev: device pointer
 * Description: this function is invoked when the driver is suspended; it
 * directly calls the main suspend function and then, if required on some
 * platforms, it can call a platform exit helper.
 */
static int stmmac_pltfr_suspend(struct device *dev)
{
        int ret;
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct platform_device *pdev = to_platform_device(dev);

        ret = stmmac_suspend(dev);
        if (priv->plat->exit)
                priv->plat->exit(pdev, priv->plat->bsp_priv);

        return ret;
}

/**
 * stmmac_pltfr_resume
 * @dev: device pointer
 * Description: this function is invoked when the driver is resumed; before
 * calling the main resume function it can, on some platforms, call a
 * platform init helper if required.
 */
static int stmmac_pltfr_resume(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);
        struct platform_device *pdev = to_platform_device(dev);

        if (priv->plat->init)
                priv->plat->init(pdev, priv->plat->bsp_priv);

        return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */

SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
                                       stmmac_pltfr_resume);
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
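
/*
 * A minimal sketch (kept in a comment so it is not built) of how a platform
 * glue driver typically wires up the helpers exported by this file together
 * with stmmac_dvr_probe() from the core. All "foo_*" names and the match
 * table are placeholders; real glue drivers (see dwmac-*.c) also set up
 * their own clocks, resets and plat_dat callbacks before probing the core.
 *
 *      static int foo_dwmac_probe(struct platform_device *pdev)
 *      {
 *              struct plat_stmmacenet_data *plat_dat;
 *              struct stmmac_resources stmmac_res;
 *              int ret;
 *
 *              ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 *              if (ret)
 *                      return ret;
 *
 *              plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
 *              if (IS_ERR(plat_dat))
 *                      return PTR_ERR(plat_dat);
 *
 *              ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *              if (ret)
 *                      stmmac_remove_config_dt(pdev, plat_dat);
 *
 *              return ret;
 *      }
 *
 *      static struct platform_driver foo_dwmac_driver = {
 *              .probe  = foo_dwmac_probe,
 *              .remove = stmmac_pltfr_remove,
 *              .driver = {
 *                      .name           = "foo-dwmac",
 *                      .pm             = &stmmac_pltfr_pm_ops,
 *                      .of_match_table = foo_dwmac_match,
 *              },
 *      };
 *      module_platform_driver(foo_dwmac_driver);
 */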