/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
#include <linux/mdio.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, " Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);

static const u32 xgbe_serdes_blwc[] = {
	XGBE_SPEED_1000_BLWC,
	XGBE_SPEED_2500_BLWC,
	XGBE_SPEED_10000_BLWC,
};

static const u32 xgbe_serdes_cdr_rate[] = {
	XGBE_SPEED_1000_CDR,
	XGBE_SPEED_2500_CDR,
	XGBE_SPEED_10000_CDR,
};

static const u32 xgbe_serdes_pq_skew[] = {
	XGBE_SPEED_1000_PQ,
	XGBE_SPEED_2500_PQ,
	XGBE_SPEED_10000_PQ,
};

static const u32 xgbe_serdes_tx_amp[] = {
	XGBE_SPEED_1000_TXAMP,
	XGBE_SPEED_2500_TXAMP,
	XGBE_SPEED_10000_TXAMP,
};

static const u32 xgbe_serdes_dfe_tap_cfg[] = {
	XGBE_SPEED_1000_DFE_TAP_CONFIG,
	XGBE_SPEED_2500_DFE_TAP_CONFIG,
	XGBE_SPEED_10000_DFE_TAP_CONFIG,
};

static const u32 xgbe_serdes_dfe_tap_ena[] = {
	XGBE_SPEED_1000_DFE_TAP_ENABLE,
	XGBE_SPEED_2500_DFE_TAP_ENABLE,
	XGBE_SPEED_10000_DFE_TAP_ENABLE,
};

static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_16;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_16;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);
}

#ifdef CONFIG_ACPI
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;
	u32 property;
	int ret;

	/* Obtain the system clock setting */
	ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
	if (ret) {
		dev_err(dev, "unable to obtain %s property\n",
			XGBE_ACPI_DMA_FREQ);
		return ret;
	}
	pdata->sysclk_rate = property;

	/* Obtain the PTP clock setting */
	ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
	if (ret) {
		dev_err(dev, "unable to obtain %s property\n",
			XGBE_ACPI_PTP_FREQ);
		return ret;
	}
	pdata->ptpclk_rate = property;

	return 0;
}
#else	/* CONFIG_ACPI */
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}
#endif	/* CONFIG_ACPI */

#ifdef CONFIG_OF
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;

	/* Obtain the system clock setting */
	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
	if (IS_ERR(pdata->sysclk)) {
		dev_err(dev, "dma devm_clk_get failed\n");
		return PTR_ERR(pdata->sysclk);
	}
	pdata->sysclk_rate = clk_get_rate(pdata->sysclk);

	/* Obtain the PTP clock setting */
	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
	if (IS_ERR(pdata->ptpclk)) {
		dev_err(dev, "ptp devm_clk_get failed\n");
		return PTR_ERR(pdata->ptpclk);
	}
	pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);

	return 0;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;
	struct device_node *phy_node;
	struct platform_device *phy_pdev;

	phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
	if (phy_node) {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_pdev = of_find_device_by_node(phy_node);
		of_node_put(phy_node);
	} else {
		/* New style device tree:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		get_device(dev);
		phy_pdev = pdata->pdev;
	}

	return phy_pdev;
}
#else	/* CONFIG_OF */
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	return -EINVAL;
}

static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	return NULL;
}
#endif	/* CONFIG_OF */

static unsigned int xgbe_resource_count(struct platform_device *pdev,
					unsigned int type)
{
	unsigned int count;
	int i;

	for (i = 0, count = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];

		if (type == resource_type(res))
			count++;
	}

	return count;
}

static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
{
	struct platform_device *phy_pdev;

	if (pdata->use_acpi) {
		get_device(pdata->dev);
		phy_pdev = pdata->pdev;
	} else {
		phy_pdev = xgbe_of_get_phy_pdev(pdata);
	}

	return phy_pdev;
}

static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;
	struct device *dev = &pdev->dev, *phy_dev;
	struct platform_device *phy_pdev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i, phy_memnum, phy_irqnum;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
	}

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir0_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir1_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Retrieve the PHY speedset */
	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
				       &pdata->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		goto err_io;
	}

	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
	case XGBE_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY configuration properties */
	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_BLWC_PROPERTY,
						     pdata->serdes_blwc,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_BLWC_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
		       sizeof(pdata->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_CDR_RATE_PROPERTY,
						     pdata->serdes_cdr_rate,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_CDR_RATE_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
		       sizeof(pdata->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PQ_SKEW_PROPERTY,
						     pdata->serdes_pq_skew,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PQ_SKEW_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
		       sizeof(pdata->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_TX_AMP_PROPERTY,
						     pdata->serdes_tx_amp,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_TX_AMP_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
		       sizeof(pdata->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_CFG_PROPERTY,
						     pdata->serdes_dfe_tap_cfg,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_CFG_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
		       sizeof(pdata->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_ENA_PROPERTY,
						     pdata->serdes_dfe_tap_ena,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_ENA_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
		       sizeof(pdata->serdes_dfe_tap_ena));
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	pdata->coherent = device_dma_is_coherent(pdata->dev);
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq phy 0 failed\n");
		goto err_io;
	}
	pdata->an_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Issue software reset to device */
	pdata->hw_if.exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->phy_if.phy_init(pdata);

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
	}

	/* Create the PHY/ANEG name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	platform_device_put(phy_pdev);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}

static int xgbe_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	DBGPR("-->xgbe_remove\n");

	xgbe_debugfs_exit(pdata);

	xgbe_ptp_unregister(pdata);

	flush_workqueue(pdata->an_workqueue);
	destroy_workqueue(pdata->an_workqueue);

	flush_workqueue(pdata->dev_workqueue);
	destroy_workqueue(pdata->dev_workqueue);

	unregister_netdev(netdev);

	free_netdev(netdev);

	DBGPR("<--xgbe_remove\n");

	return 0;
}

#ifdef CONFIG_PM
static int xgbe_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret = 0;

	DBGPR("-->xgbe_suspend\n");

	if (netif_running(netdev))
		ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);

	pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
	pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	DBGPR("<--xgbe_suspend\n");

	return ret;
}

static int xgbe_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret = 0;

	DBGPR("-->xgbe_resume\n");

	pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
	XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

	if (netif_running(netdev))
		ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);

	DBGPR("<--xgbe_resume\n");

	return ret;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
	{ "AMDI8001", 0 },
	{},
};

MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
	{ .compatible = "amd,xgbe-seattle-v1a", },
	{},
};

MODULE_DEVICE_TABLE(of, xgbe_of_match);
#endif

static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);

static struct platform_driver xgbe_driver = {
	.driver = {
		.name = "amd-xgbe",
#ifdef CONFIG_ACPI
		.acpi_match_table = xgbe_acpi_match,
#endif
#ifdef CONFIG_OF
		.of_match_table = xgbe_of_match,
#endif
		.pm = &xgbe_pm_ops,
	},
	.probe = xgbe_probe,
	.remove = xgbe_remove,
};

module_platform_driver(xgbe_driver);