/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2012 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
	defined(CONFIG_ARCH_R8A7740)
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		pr_warn("PHY interface mode was not setup. 
Set to MII.\n"); 72 value = 0x1; 73 break; 74 } 75 76 sh_eth_write(ndev, value, RMII_MII); 77 } 78 #endif 79 80 /* There is CPU dependent code */ 81 #if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) 82 #define SH_ETH_RESET_DEFAULT 1 83 static void sh_eth_set_duplex(struct net_device *ndev) 84 { 85 struct sh_eth_private *mdp = netdev_priv(ndev); 86 87 if (mdp->duplex) /* Full */ 88 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 89 else /* Half */ 90 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 91 } 92 93 static void sh_eth_set_rate(struct net_device *ndev) 94 { 95 struct sh_eth_private *mdp = netdev_priv(ndev); 96 unsigned int bits = ECMR_RTM; 97 98 #if defined(CONFIG_ARCH_R8A7779) 99 bits |= ECMR_ELB; 100 #endif 101 102 switch (mdp->speed) { 103 case 10: /* 10BASE */ 104 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); 105 break; 106 case 100:/* 100BASE */ 107 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); 108 break; 109 default: 110 break; 111 } 112 } 113 114 /* SH7724 */ 115 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 116 .set_duplex = sh_eth_set_duplex, 117 .set_rate = sh_eth_set_rate, 118 119 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 120 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 121 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 122 123 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 124 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 125 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 126 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 127 128 .apr = 1, 129 .mpr = 1, 130 .tpauser = 1, 131 .hw_swap = 1, 132 .rpadir = 1, 133 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 134 }; 135 #elif defined(CONFIG_CPU_SUBTYPE_SH7757) 136 #define SH_ETH_HAS_BOTH_MODULES 1 137 #define SH_ETH_HAS_TSU 1 138 static int sh_eth_check_reset(struct net_device *ndev); 139 140 static void sh_eth_set_duplex(struct net_device *ndev) 141 { 142 struct sh_eth_private *mdp = netdev_priv(ndev); 143 144 if (mdp->duplex) /* Full */ 145 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 146 else /* Half */ 147 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 148 } 149 150 static void sh_eth_set_rate(struct net_device *ndev) 151 { 152 struct sh_eth_private *mdp = netdev_priv(ndev); 153 154 switch (mdp->speed) { 155 case 10: /* 10BASE */ 156 sh_eth_write(ndev, 0, RTRATE); 157 break; 158 case 100:/* 100BASE */ 159 sh_eth_write(ndev, 1, RTRATE); 160 break; 161 default: 162 break; 163 } 164 } 165 166 /* SH7757 */ 167 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 168 .set_duplex = sh_eth_set_duplex, 169 .set_rate = sh_eth_set_rate, 170 171 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 172 .rmcr_value = 0x00000001, 173 174 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 175 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 176 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 177 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 178 179 .apr = 1, 180 .mpr = 1, 181 .tpauser = 1, 182 .hw_swap = 1, 183 .no_ade = 1, 184 .rpadir = 1, 185 .rpadir_value = 2 << 16, 186 }; 187 188 #define SH_GIGA_ETH_BASE 0xfee00000 189 #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) 190 #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) 191 static void sh_eth_chip_reset_giga(struct net_device *ndev) 192 { 193 int i; 194 unsigned long mahr[2], malr[2]; 
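	/*
	 * The ARSTR software reset below appears to clear the MAC address
	 * registers of both ports, so MAHR/MALR are saved here and written
	 * back once the reset is done.  Fixed GIGA_MAHR()/GIGA_MALR()
	 * addresses are used because both ports (0 and 1) have to be
	 * handled from whichever port instance performs the reset.
	 */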
195 196 /* save MAHR and MALR */ 197 for (i = 0; i < 2; i++) { 198 malr[i] = ioread32((void *)GIGA_MALR(i)); 199 mahr[i] = ioread32((void *)GIGA_MAHR(i)); 200 } 201 202 /* reset device */ 203 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); 204 mdelay(1); 205 206 /* restore MAHR and MALR */ 207 for (i = 0; i < 2; i++) { 208 iowrite32(malr[i], (void *)GIGA_MALR(i)); 209 iowrite32(mahr[i], (void *)GIGA_MAHR(i)); 210 } 211 } 212 213 static int sh_eth_is_gether(struct sh_eth_private *mdp); 214 static int sh_eth_reset(struct net_device *ndev) 215 { 216 struct sh_eth_private *mdp = netdev_priv(ndev); 217 int ret = 0; 218 219 if (sh_eth_is_gether(mdp)) { 220 sh_eth_write(ndev, 0x03, EDSR); 221 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 222 EDMR); 223 224 ret = sh_eth_check_reset(ndev); 225 if (ret) 226 goto out; 227 228 /* Table Init */ 229 sh_eth_write(ndev, 0x0, TDLAR); 230 sh_eth_write(ndev, 0x0, TDFAR); 231 sh_eth_write(ndev, 0x0, TDFXR); 232 sh_eth_write(ndev, 0x0, TDFFR); 233 sh_eth_write(ndev, 0x0, RDLAR); 234 sh_eth_write(ndev, 0x0, RDFAR); 235 sh_eth_write(ndev, 0x0, RDFXR); 236 sh_eth_write(ndev, 0x0, RDFFR); 237 } else { 238 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, 239 EDMR); 240 mdelay(3); 241 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 242 EDMR); 243 } 244 245 out: 246 return ret; 247 } 248 249 static void sh_eth_set_duplex_giga(struct net_device *ndev) 250 { 251 struct sh_eth_private *mdp = netdev_priv(ndev); 252 253 if (mdp->duplex) /* Full */ 254 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 255 else /* Half */ 256 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 257 } 258 259 static void sh_eth_set_rate_giga(struct net_device *ndev) 260 { 261 struct sh_eth_private *mdp = netdev_priv(ndev); 262 263 switch (mdp->speed) { 264 case 10: /* 10BASE */ 265 sh_eth_write(ndev, 0x00000000, GECMR); 266 break; 267 case 100:/* 100BASE */ 268 sh_eth_write(ndev, 0x00000010, GECMR); 269 break; 270 case 1000: /* 1000BASE */ 271 sh_eth_write(ndev, 0x00000020, GECMR); 272 break; 273 default: 274 break; 275 } 276 } 277 278 /* SH7757(GETHERC) */ 279 static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { 280 .chip_reset = sh_eth_chip_reset_giga, 281 .set_duplex = sh_eth_set_duplex_giga, 282 .set_rate = sh_eth_set_rate_giga, 283 284 .ecsr_value = ECSR_ICD | ECSR_MPD, 285 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 286 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 287 288 .tx_check = EESR_TC1 | EESR_FTC, 289 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 290 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 291 EESR_ECI, 292 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 293 EESR_TFE, 294 .fdr_value = 0x0000072f, 295 .rmcr_value = 0x00000001, 296 297 .apr = 1, 298 .mpr = 1, 299 .tpauser = 1, 300 .bculr = 1, 301 .hw_swap = 1, 302 .rpadir = 1, 303 .rpadir_value = 2 << 16, 304 .no_trimd = 1, 305 .no_ade = 1, 306 .tsu = 1, 307 }; 308 309 static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) 310 { 311 if (sh_eth_is_gether(mdp)) 312 return &sh_eth_my_cpu_data_giga; 313 else 314 return &sh_eth_my_cpu_data; 315 } 316 317 #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 318 #define SH_ETH_HAS_TSU 1 319 static int sh_eth_check_reset(struct net_device *ndev); 320 static void sh_eth_reset_hw_crc(struct net_device *ndev); 321 322 static void sh_eth_chip_reset(struct net_device *ndev) 323 { 324 
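	/*
	 * Note that ARSTR is written through the TSU register mapping
	 * (sh_eth_tsu_write()) rather than sh_eth_write(), presumably
	 * because it sits outside the per-port E-DMAC/E-MAC register
	 * window on these SoCs.
	 */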
struct sh_eth_private *mdp = netdev_priv(ndev); 325 326 /* reset device */ 327 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 328 mdelay(1); 329 } 330 331 static void sh_eth_set_duplex(struct net_device *ndev) 332 { 333 struct sh_eth_private *mdp = netdev_priv(ndev); 334 335 if (mdp->duplex) /* Full */ 336 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 337 else /* Half */ 338 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 339 } 340 341 static void sh_eth_set_rate(struct net_device *ndev) 342 { 343 struct sh_eth_private *mdp = netdev_priv(ndev); 344 345 switch (mdp->speed) { 346 case 10: /* 10BASE */ 347 sh_eth_write(ndev, GECMR_10, GECMR); 348 break; 349 case 100:/* 100BASE */ 350 sh_eth_write(ndev, GECMR_100, GECMR); 351 break; 352 case 1000: /* 1000BASE */ 353 sh_eth_write(ndev, GECMR_1000, GECMR); 354 break; 355 default: 356 break; 357 } 358 } 359 360 /* sh7763 */ 361 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 362 .chip_reset = sh_eth_chip_reset, 363 .set_duplex = sh_eth_set_duplex, 364 .set_rate = sh_eth_set_rate, 365 366 .ecsr_value = ECSR_ICD | ECSR_MPD, 367 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 368 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 369 370 .tx_check = EESR_TC1 | EESR_FTC, 371 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 372 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 373 EESR_ECI, 374 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 375 EESR_TFE, 376 377 .apr = 1, 378 .mpr = 1, 379 .tpauser = 1, 380 .bculr = 1, 381 .hw_swap = 1, 382 .no_trimd = 1, 383 .no_ade = 1, 384 .tsu = 1, 385 #if defined(CONFIG_CPU_SUBTYPE_SH7734) 386 .hw_crc = 1, 387 .select_mii = 1, 388 #endif 389 }; 390 391 static int sh_eth_reset(struct net_device *ndev) 392 { 393 int ret = 0; 394 395 sh_eth_write(ndev, EDSR_ENALL, EDSR); 396 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 397 398 ret = sh_eth_check_reset(ndev); 399 if (ret) 400 goto out; 401 402 /* Table Init */ 403 sh_eth_write(ndev, 0x0, TDLAR); 404 sh_eth_write(ndev, 0x0, TDFAR); 405 sh_eth_write(ndev, 0x0, TDFXR); 406 sh_eth_write(ndev, 0x0, TDFFR); 407 sh_eth_write(ndev, 0x0, RDLAR); 408 sh_eth_write(ndev, 0x0, RDFAR); 409 sh_eth_write(ndev, 0x0, RDFXR); 410 sh_eth_write(ndev, 0x0, RDFFR); 411 412 /* Reset HW CRC register */ 413 sh_eth_reset_hw_crc(ndev); 414 415 /* Select MII mode */ 416 if (sh_eth_my_cpu_data.select_mii) 417 sh_eth_select_mii(ndev); 418 out: 419 return ret; 420 } 421 422 static void sh_eth_reset_hw_crc(struct net_device *ndev) 423 { 424 if (sh_eth_my_cpu_data.hw_crc) 425 sh_eth_write(ndev, 0x0, CSMR); 426 } 427 428 #elif defined(CONFIG_ARCH_R8A7740) 429 #define SH_ETH_HAS_TSU 1 430 static int sh_eth_check_reset(struct net_device *ndev); 431 432 static void sh_eth_chip_reset(struct net_device *ndev) 433 { 434 struct sh_eth_private *mdp = netdev_priv(ndev); 435 436 /* reset device */ 437 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 438 mdelay(1); 439 440 sh_eth_select_mii(ndev); 441 } 442 443 static int sh_eth_reset(struct net_device *ndev) 444 { 445 int ret = 0; 446 447 sh_eth_write(ndev, EDSR_ENALL, EDSR); 448 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 449 450 ret = sh_eth_check_reset(ndev); 451 if (ret) 452 goto out; 453 454 /* Table Init */ 455 sh_eth_write(ndev, 0x0, TDLAR); 456 sh_eth_write(ndev, 0x0, TDFAR); 457 sh_eth_write(ndev, 0x0, TDFXR); 458 sh_eth_write(ndev, 0x0, TDFFR); 459 sh_eth_write(ndev, 0x0, RDLAR); 460 sh_eth_write(ndev, 0x0, RDFAR); 461 
	sh_eth_write(ndev, 0x0, RDFXR);
	sh_eth_write(ndev, 0x0, RDFFR);

out:
	return ret;
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* R8A7740 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT	1
#define SH_ETH_HAS_TSU	1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->rmcr_value)
		cd->rmcr_value = DEFAULT_RMCR_VALUE;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->tx_error_check)
		cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static int sh_eth_reset(struct net_device *ndev)
{
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
	mdelay(3);
	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);

	return 0;
}
#else
static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	/* on timeout the loop leaves cnt at 0, never negative */
	if (cnt <= 0) {
		printk(KERN_ERR "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}
#endif
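
/*
 * Summary of the reset schemes selected above: parts that define
 * SH_ETH_RESET_DEFAULT simply toggle EDMR_SRST_ETHER around a 3 ms delay,
 * while the GETHER-type parts set EDMR_SRST_GETHER and then poll the
 * self-clearing reset bits with sh_eth_check_reset() (roughly 100 ms at
 * most) before re-initialising the TDLAR/RDLAR descriptor table registers.
 */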
602 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 603 static void sh_eth_set_receive_align(struct sk_buff *skb) 604 { 605 int reserve; 606 607 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); 608 if (reserve) 609 skb_reserve(skb, reserve); 610 } 611 #else 612 static void sh_eth_set_receive_align(struct sk_buff *skb) 613 { 614 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); 615 } 616 #endif 617 618 619 /* CPU <-> EDMAC endian convert */ 620 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 621 { 622 switch (mdp->edmac_endian) { 623 case EDMAC_LITTLE_ENDIAN: 624 return cpu_to_le32(x); 625 case EDMAC_BIG_ENDIAN: 626 return cpu_to_be32(x); 627 } 628 return x; 629 } 630 631 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 632 { 633 switch (mdp->edmac_endian) { 634 case EDMAC_LITTLE_ENDIAN: 635 return le32_to_cpu(x); 636 case EDMAC_BIG_ENDIAN: 637 return be32_to_cpu(x); 638 } 639 return x; 640 } 641 642 /* 643 * Program the hardware MAC address from dev->dev_addr. 644 */ 645 static void update_mac_address(struct net_device *ndev) 646 { 647 sh_eth_write(ndev, 648 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 649 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); 650 sh_eth_write(ndev, 651 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 652 } 653 654 /* 655 * Get MAC address from SuperH MAC address register 656 * 657 * SuperH's Ethernet device doesn't have 'ROM' to MAC address. 658 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). 659 * When you want use this device, you must set MAC address in bootloader. 660 * 661 */ 662 static void read_mac_address(struct net_device *ndev, unsigned char *mac) 663 { 664 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 665 memcpy(ndev->dev_addr, mac, 6); 666 } else { 667 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); 668 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; 669 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; 670 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); 671 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; 672 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); 673 } 674 } 675 676 static int sh_eth_is_gether(struct sh_eth_private *mdp) 677 { 678 if (mdp->reg_offset == sh_eth_offset_gigabit) 679 return 1; 680 else 681 return 0; 682 } 683 684 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 685 { 686 if (sh_eth_is_gether(mdp)) 687 return EDTRR_TRNS_GETHER; 688 else 689 return EDTRR_TRNS_ETHER; 690 } 691 692 struct bb_info { 693 void (*set_gate)(void *addr); 694 struct mdiobb_ctrl ctrl; 695 void *addr; 696 u32 mmd_msk;/* MMD */ 697 u32 mdo_msk; 698 u32 mdi_msk; 699 u32 mdc_msk; 700 }; 701 702 /* PHY bit set */ 703 static void bb_set(void *addr, u32 msk) 704 { 705 iowrite32(ioread32(addr) | msk, addr); 706 } 707 708 /* PHY bit clear */ 709 static void bb_clr(void *addr, u32 msk) 710 { 711 iowrite32((ioread32(addr) & ~msk), addr); 712 } 713 714 /* PHY bit read */ 715 static int bb_read(void *addr, u32 msk) 716 { 717 return (ioread32(addr) & msk) != 0; 718 } 719 720 /* Data I/O pin control */ 721 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 722 { 723 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 724 725 if (bitbang->set_gate) 726 bitbang->set_gate(bitbang->addr); 727 728 if (bit) 729 bb_set(bitbang->addr, bitbang->mmd_msk); 730 else 731 bb_clr(bitbang->addr, bitbang->mmd_msk); 732 } 733 734 /* Set bit data*/ 735 static void 
sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) 736 { 737 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 738 739 if (bitbang->set_gate) 740 bitbang->set_gate(bitbang->addr); 741 742 if (bit) 743 bb_set(bitbang->addr, bitbang->mdo_msk); 744 else 745 bb_clr(bitbang->addr, bitbang->mdo_msk); 746 } 747 748 /* Get bit data*/ 749 static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 750 { 751 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 752 753 if (bitbang->set_gate) 754 bitbang->set_gate(bitbang->addr); 755 756 return bb_read(bitbang->addr, bitbang->mdi_msk); 757 } 758 759 /* MDC pin control */ 760 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) 761 { 762 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 763 764 if (bitbang->set_gate) 765 bitbang->set_gate(bitbang->addr); 766 767 if (bit) 768 bb_set(bitbang->addr, bitbang->mdc_msk); 769 else 770 bb_clr(bitbang->addr, bitbang->mdc_msk); 771 } 772 773 /* mdio bus control struct */ 774 static struct mdiobb_ops bb_ops = { 775 .owner = THIS_MODULE, 776 .set_mdc = sh_mdc_ctrl, 777 .set_mdio_dir = sh_mmd_ctrl, 778 .set_mdio_data = sh_set_mdio, 779 .get_mdio_data = sh_get_mdio, 780 }; 781 782 /* free skb and descriptor buffer */ 783 static void sh_eth_ring_free(struct net_device *ndev) 784 { 785 struct sh_eth_private *mdp = netdev_priv(ndev); 786 int i; 787 788 /* Free Rx skb ringbuffer */ 789 if (mdp->rx_skbuff) { 790 for (i = 0; i < mdp->num_rx_ring; i++) { 791 if (mdp->rx_skbuff[i]) 792 dev_kfree_skb(mdp->rx_skbuff[i]); 793 } 794 } 795 kfree(mdp->rx_skbuff); 796 mdp->rx_skbuff = NULL; 797 798 /* Free Tx skb ringbuffer */ 799 if (mdp->tx_skbuff) { 800 for (i = 0; i < mdp->num_tx_ring; i++) { 801 if (mdp->tx_skbuff[i]) 802 dev_kfree_skb(mdp->tx_skbuff[i]); 803 } 804 } 805 kfree(mdp->tx_skbuff); 806 mdp->tx_skbuff = NULL; 807 } 808 809 /* format skb and descriptor buffer */ 810 static void sh_eth_ring_format(struct net_device *ndev) 811 { 812 struct sh_eth_private *mdp = netdev_priv(ndev); 813 int i; 814 struct sk_buff *skb; 815 struct sh_eth_rxdesc *rxdesc = NULL; 816 struct sh_eth_txdesc *txdesc = NULL; 817 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 818 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 819 820 mdp->cur_rx = mdp->cur_tx = 0; 821 mdp->dirty_rx = mdp->dirty_tx = 0; 822 823 memset(mdp->rx_ring, 0, rx_ringsize); 824 825 /* build Rx ring buffer */ 826 for (i = 0; i < mdp->num_rx_ring; i++) { 827 /* skb */ 828 mdp->rx_skbuff[i] = NULL; 829 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 830 mdp->rx_skbuff[i] = skb; 831 if (skb == NULL) 832 break; 833 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 834 DMA_FROM_DEVICE); 835 sh_eth_set_receive_align(skb); 836 837 /* RX descriptor */ 838 rxdesc = &mdp->rx_ring[i]; 839 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 840 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 841 842 /* The size of the buffer is 16 byte boundary. */ 843 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 844 /* Rx descriptor address set */ 845 if (i == 0) { 846 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 847 if (sh_eth_is_gether(mdp)) 848 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); 849 } 850 } 851 852 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); 853 854 /* Mark the last entry as wrapping the ring. 
*/ 855 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 856 857 memset(mdp->tx_ring, 0, tx_ringsize); 858 859 /* build Tx ring buffer */ 860 for (i = 0; i < mdp->num_tx_ring; i++) { 861 mdp->tx_skbuff[i] = NULL; 862 txdesc = &mdp->tx_ring[i]; 863 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 864 txdesc->buffer_length = 0; 865 if (i == 0) { 866 /* Tx descriptor address set */ 867 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); 868 if (sh_eth_is_gether(mdp)) 869 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); 870 } 871 } 872 873 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 874 } 875 876 /* Get skb and descriptor buffer */ 877 static int sh_eth_ring_init(struct net_device *ndev) 878 { 879 struct sh_eth_private *mdp = netdev_priv(ndev); 880 int rx_ringsize, tx_ringsize, ret = 0; 881 882 /* 883 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 884 * card needs room to do 8 byte alignment, +2 so we can reserve 885 * the first 2 bytes, and +16 gets room for the status word from the 886 * card. 887 */ 888 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : 889 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); 890 if (mdp->cd->rpadir) 891 mdp->rx_buf_sz += NET_IP_ALIGN; 892 893 /* Allocate RX and TX skb rings */ 894 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring, 895 GFP_KERNEL); 896 if (!mdp->rx_skbuff) { 897 dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); 898 ret = -ENOMEM; 899 return ret; 900 } 901 902 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring, 903 GFP_KERNEL); 904 if (!mdp->tx_skbuff) { 905 dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); 906 ret = -ENOMEM; 907 goto skb_ring_free; 908 } 909 910 /* Allocate all Rx descriptors. */ 911 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 912 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 913 GFP_KERNEL); 914 915 if (!mdp->rx_ring) { 916 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n", 917 rx_ringsize); 918 ret = -ENOMEM; 919 goto desc_ring_free; 920 } 921 922 mdp->dirty_rx = 0; 923 924 /* Allocate all Tx descriptors. 
*/ 925 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 926 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 927 GFP_KERNEL); 928 if (!mdp->tx_ring) { 929 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n", 930 tx_ringsize); 931 ret = -ENOMEM; 932 goto desc_ring_free; 933 } 934 return ret; 935 936 desc_ring_free: 937 /* free DMA buffer */ 938 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); 939 940 skb_ring_free: 941 /* Free Rx and Tx skb ring buffer */ 942 sh_eth_ring_free(ndev); 943 mdp->tx_ring = NULL; 944 mdp->rx_ring = NULL; 945 946 return ret; 947 } 948 949 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp) 950 { 951 int ringsize; 952 953 if (mdp->rx_ring) { 954 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 955 dma_free_coherent(NULL, ringsize, mdp->rx_ring, 956 mdp->rx_desc_dma); 957 mdp->rx_ring = NULL; 958 } 959 960 if (mdp->tx_ring) { 961 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 962 dma_free_coherent(NULL, ringsize, mdp->tx_ring, 963 mdp->tx_desc_dma); 964 mdp->tx_ring = NULL; 965 } 966 } 967 968 static int sh_eth_dev_init(struct net_device *ndev, bool start) 969 { 970 int ret = 0; 971 struct sh_eth_private *mdp = netdev_priv(ndev); 972 u32 val; 973 974 /* Soft Reset */ 975 ret = sh_eth_reset(ndev); 976 if (ret) 977 goto out; 978 979 /* Descriptor format */ 980 sh_eth_ring_format(ndev); 981 if (mdp->cd->rpadir) 982 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); 983 984 /* all sh_eth int mask */ 985 sh_eth_write(ndev, 0, EESIPR); 986 987 #if defined(__LITTLE_ENDIAN) 988 if (mdp->cd->hw_swap) 989 sh_eth_write(ndev, EDMR_EL, EDMR); 990 else 991 #endif 992 sh_eth_write(ndev, 0, EDMR); 993 994 /* FIFO size set */ 995 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 996 sh_eth_write(ndev, 0, TFTR); 997 998 /* Frame recv control */ 999 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 1000 1001 sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); 1002 1003 if (mdp->cd->bculr) 1004 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ 1005 1006 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); 1007 1008 if (!mdp->cd->no_trimd) 1009 sh_eth_write(ndev, 0, TRIMD); 1010 1011 /* Recv frame limit set register */ 1012 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, 1013 RFLR); 1014 1015 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 1016 if (start) 1017 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1018 1019 /* PAUSE Prohibition */ 1020 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 1021 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 1022 1023 sh_eth_write(ndev, val, ECMR); 1024 1025 if (mdp->cd->set_rate) 1026 mdp->cd->set_rate(ndev); 1027 1028 /* E-MAC Status Register clear */ 1029 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 1030 1031 /* E-MAC Interrupt Enable register */ 1032 if (start) 1033 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 1034 1035 /* Set MAC address */ 1036 update_mac_address(ndev); 1037 1038 /* mask reset */ 1039 if (mdp->cd->apr) 1040 sh_eth_write(ndev, APR_AP, APR); 1041 if (mdp->cd->mpr) 1042 sh_eth_write(ndev, MPR_MP, MPR); 1043 if (mdp->cd->tpauser) 1044 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 1045 1046 if (start) { 1047 /* Setting the Rx mode will start the Rx process. 
*/ 1048 sh_eth_write(ndev, EDRRR_R, EDRRR); 1049 1050 netif_start_queue(ndev); 1051 } 1052 1053 out: 1054 return ret; 1055 } 1056 1057 /* free Tx skb function */ 1058 static int sh_eth_txfree(struct net_device *ndev) 1059 { 1060 struct sh_eth_private *mdp = netdev_priv(ndev); 1061 struct sh_eth_txdesc *txdesc; 1062 int freeNum = 0; 1063 int entry = 0; 1064 1065 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1066 entry = mdp->dirty_tx % mdp->num_tx_ring; 1067 txdesc = &mdp->tx_ring[entry]; 1068 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1069 break; 1070 /* Free the original skb. */ 1071 if (mdp->tx_skbuff[entry]) { 1072 dma_unmap_single(&ndev->dev, txdesc->addr, 1073 txdesc->buffer_length, DMA_TO_DEVICE); 1074 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1075 mdp->tx_skbuff[entry] = NULL; 1076 freeNum++; 1077 } 1078 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1079 if (entry >= mdp->num_tx_ring - 1) 1080 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1081 1082 ndev->stats.tx_packets++; 1083 ndev->stats.tx_bytes += txdesc->buffer_length; 1084 } 1085 return freeNum; 1086 } 1087 1088 /* Packet receive function */ 1089 static int sh_eth_rx(struct net_device *ndev, u32 intr_status) 1090 { 1091 struct sh_eth_private *mdp = netdev_priv(ndev); 1092 struct sh_eth_rxdesc *rxdesc; 1093 1094 int entry = mdp->cur_rx % mdp->num_rx_ring; 1095 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; 1096 struct sk_buff *skb; 1097 u16 pkt_len = 0; 1098 u32 desc_status; 1099 1100 rxdesc = &mdp->rx_ring[entry]; 1101 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1102 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1103 pkt_len = rxdesc->frame_length; 1104 1105 #if defined(CONFIG_ARCH_R8A7740) 1106 desc_status >>= 16; 1107 #endif 1108 1109 if (--boguscnt < 0) 1110 break; 1111 1112 if (!(desc_status & RDFEND)) 1113 ndev->stats.rx_length_errors++; 1114 1115 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1116 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1117 ndev->stats.rx_errors++; 1118 if (desc_status & RD_RFS1) 1119 ndev->stats.rx_crc_errors++; 1120 if (desc_status & RD_RFS2) 1121 ndev->stats.rx_frame_errors++; 1122 if (desc_status & RD_RFS3) 1123 ndev->stats.rx_length_errors++; 1124 if (desc_status & RD_RFS4) 1125 ndev->stats.rx_length_errors++; 1126 if (desc_status & RD_RFS6) 1127 ndev->stats.rx_missed_errors++; 1128 if (desc_status & RD_RFS10) 1129 ndev->stats.rx_over_errors++; 1130 } else { 1131 if (!mdp->cd->hw_swap) 1132 sh_eth_soft_swap( 1133 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1134 pkt_len + 2); 1135 skb = mdp->rx_skbuff[entry]; 1136 mdp->rx_skbuff[entry] = NULL; 1137 if (mdp->cd->rpadir) 1138 skb_reserve(skb, NET_IP_ALIGN); 1139 skb_put(skb, pkt_len); 1140 skb->protocol = eth_type_trans(skb, ndev); 1141 netif_rx(skb); 1142 ndev->stats.rx_packets++; 1143 ndev->stats.rx_bytes += pkt_len; 1144 } 1145 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 1146 entry = (++mdp->cur_rx) % mdp->num_rx_ring; 1147 rxdesc = &mdp->rx_ring[entry]; 1148 } 1149 1150 /* Refill the Rx ring buffers. */ 1151 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1152 entry = mdp->dirty_rx % mdp->num_rx_ring; 1153 rxdesc = &mdp->rx_ring[entry]; 1154 /* The size of the buffer is 16 byte boundary. */ 1155 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1156 1157 if (mdp->rx_skbuff[entry] == NULL) { 1158 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1159 mdp->rx_skbuff[entry] = skb; 1160 if (skb == NULL) 1161 break; /* Better luck next round. 
*/ 1162 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 1163 DMA_FROM_DEVICE); 1164 sh_eth_set_receive_align(skb); 1165 1166 skb_checksum_none_assert(skb); 1167 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1168 } 1169 if (entry >= mdp->num_rx_ring - 1) 1170 rxdesc->status |= 1171 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1172 else 1173 rxdesc->status |= 1174 cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1175 } 1176 1177 /* Restart Rx engine if stopped. */ 1178 /* If we don't need to check status, don't. -KDU */ 1179 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1180 /* fix the values for the next receiving if RDE is set */ 1181 if (intr_status & EESR_RDE) 1182 mdp->cur_rx = mdp->dirty_rx = 1183 (sh_eth_read(ndev, RDFAR) - 1184 sh_eth_read(ndev, RDLAR)) >> 4; 1185 sh_eth_write(ndev, EDRRR_R, EDRRR); 1186 } 1187 1188 return 0; 1189 } 1190 1191 static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1192 { 1193 /* disable tx and rx */ 1194 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & 1195 ~(ECMR_RE | ECMR_TE), ECMR); 1196 } 1197 1198 static void sh_eth_rcv_snd_enable(struct net_device *ndev) 1199 { 1200 /* enable tx and rx */ 1201 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | 1202 (ECMR_RE | ECMR_TE), ECMR); 1203 } 1204 1205 /* error control function */ 1206 static void sh_eth_error(struct net_device *ndev, int intr_status) 1207 { 1208 struct sh_eth_private *mdp = netdev_priv(ndev); 1209 u32 felic_stat; 1210 u32 link_stat; 1211 u32 mask; 1212 1213 if (intr_status & EESR_ECI) { 1214 felic_stat = sh_eth_read(ndev, ECSR); 1215 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1216 if (felic_stat & ECSR_ICD) 1217 ndev->stats.tx_carrier_errors++; 1218 if (felic_stat & ECSR_LCHNG) { 1219 /* Link Changed */ 1220 if (mdp->cd->no_psr || mdp->no_ether_link) { 1221 if (mdp->link == PHY_DOWN) 1222 link_stat = 0; 1223 else 1224 link_stat = PHY_ST_LINK; 1225 } else { 1226 link_stat = (sh_eth_read(ndev, PSR)); 1227 if (mdp->ether_link_active_low) 1228 link_stat = ~link_stat; 1229 } 1230 if (!(link_stat & PHY_ST_LINK)) 1231 sh_eth_rcv_snd_disable(ndev); 1232 else { 1233 /* Link Up */ 1234 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & 1235 ~DMAC_M_ECI, EESIPR); 1236 /*clear int */ 1237 sh_eth_write(ndev, sh_eth_read(ndev, ECSR), 1238 ECSR); 1239 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | 1240 DMAC_M_ECI, EESIPR); 1241 /* enable tx and rx */ 1242 sh_eth_rcv_snd_enable(ndev); 1243 } 1244 } 1245 } 1246 1247 if (intr_status & EESR_TWB) { 1248 /* Write buck end. 
unused write back interrupt */ 1249 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1250 ndev->stats.tx_aborted_errors++; 1251 if (netif_msg_tx_err(mdp)) 1252 dev_err(&ndev->dev, "Transmit Abort\n"); 1253 } 1254 1255 if (intr_status & EESR_RABT) { 1256 /* Receive Abort int */ 1257 if (intr_status & EESR_RFRMER) { 1258 /* Receive Frame Overflow int */ 1259 ndev->stats.rx_frame_errors++; 1260 if (netif_msg_rx_err(mdp)) 1261 dev_err(&ndev->dev, "Receive Abort\n"); 1262 } 1263 } 1264 1265 if (intr_status & EESR_TDE) { 1266 /* Transmit Descriptor Empty int */ 1267 ndev->stats.tx_fifo_errors++; 1268 if (netif_msg_tx_err(mdp)) 1269 dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); 1270 } 1271 1272 if (intr_status & EESR_TFE) { 1273 /* FIFO under flow */ 1274 ndev->stats.tx_fifo_errors++; 1275 if (netif_msg_tx_err(mdp)) 1276 dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); 1277 } 1278 1279 if (intr_status & EESR_RDE) { 1280 /* Receive Descriptor Empty int */ 1281 ndev->stats.rx_over_errors++; 1282 1283 if (netif_msg_rx_err(mdp)) 1284 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1285 } 1286 1287 if (intr_status & EESR_RFE) { 1288 /* Receive FIFO Overflow int */ 1289 ndev->stats.rx_fifo_errors++; 1290 if (netif_msg_rx_err(mdp)) 1291 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1292 } 1293 1294 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1295 /* Address Error */ 1296 ndev->stats.tx_fifo_errors++; 1297 if (netif_msg_tx_err(mdp)) 1298 dev_err(&ndev->dev, "Address Error\n"); 1299 } 1300 1301 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1302 if (mdp->cd->no_ade) 1303 mask &= ~EESR_ADE; 1304 if (intr_status & mask) { 1305 /* Tx error */ 1306 u32 edtrr = sh_eth_read(ndev, EDTRR); 1307 /* dmesg */ 1308 dev_err(&ndev->dev, "TX error. 
status=%8.8x cur_tx=%8.8x ", 1309 intr_status, mdp->cur_tx); 1310 dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1311 mdp->dirty_tx, (u32) ndev->state, edtrr); 1312 /* dirty buffer free */ 1313 sh_eth_txfree(ndev); 1314 1315 /* SH7712 BUG */ 1316 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1317 /* tx dma start */ 1318 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1319 } 1320 /* wakeup */ 1321 netif_wake_queue(ndev); 1322 } 1323 } 1324 1325 static irqreturn_t sh_eth_interrupt(int irq, void *netdev) 1326 { 1327 struct net_device *ndev = netdev; 1328 struct sh_eth_private *mdp = netdev_priv(ndev); 1329 struct sh_eth_cpu_data *cd = mdp->cd; 1330 irqreturn_t ret = IRQ_NONE; 1331 u32 intr_status = 0; 1332 1333 spin_lock(&mdp->lock); 1334 1335 /* Get interrpt stat */ 1336 intr_status = sh_eth_read(ndev, EESR); 1337 /* Clear interrupt */ 1338 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1339 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1340 cd->tx_check | cd->eesr_err_check)) { 1341 sh_eth_write(ndev, intr_status, EESR); 1342 ret = IRQ_HANDLED; 1343 } else 1344 goto other_irq; 1345 1346 if (intr_status & (EESR_FRC | /* Frame recv*/ 1347 EESR_RMAF | /* Multi cast address recv*/ 1348 EESR_RRF | /* Bit frame recv */ 1349 EESR_RTLF | /* Long frame recv*/ 1350 EESR_RTSF | /* short frame recv */ 1351 EESR_PRE | /* PHY-LSI recv error */ 1352 EESR_CERF)){ /* recv frame CRC error */ 1353 sh_eth_rx(ndev, intr_status); 1354 } 1355 1356 /* Tx Check */ 1357 if (intr_status & cd->tx_check) { 1358 sh_eth_txfree(ndev); 1359 netif_wake_queue(ndev); 1360 } 1361 1362 if (intr_status & cd->eesr_err_check) 1363 sh_eth_error(ndev, intr_status); 1364 1365 other_irq: 1366 spin_unlock(&mdp->lock); 1367 1368 return ret; 1369 } 1370 1371 /* PHY state control function */ 1372 static void sh_eth_adjust_link(struct net_device *ndev) 1373 { 1374 struct sh_eth_private *mdp = netdev_priv(ndev); 1375 struct phy_device *phydev = mdp->phydev; 1376 int new_state = 0; 1377 1378 if (phydev->link != PHY_DOWN) { 1379 if (phydev->duplex != mdp->duplex) { 1380 new_state = 1; 1381 mdp->duplex = phydev->duplex; 1382 if (mdp->cd->set_duplex) 1383 mdp->cd->set_duplex(ndev); 1384 } 1385 1386 if (phydev->speed != mdp->speed) { 1387 new_state = 1; 1388 mdp->speed = phydev->speed; 1389 if (mdp->cd->set_rate) 1390 mdp->cd->set_rate(ndev); 1391 } 1392 if (mdp->link == PHY_DOWN) { 1393 sh_eth_write(ndev, 1394 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); 1395 new_state = 1; 1396 mdp->link = phydev->link; 1397 } 1398 } else if (mdp->link) { 1399 new_state = 1; 1400 mdp->link = PHY_DOWN; 1401 mdp->speed = 0; 1402 mdp->duplex = -1; 1403 } 1404 1405 if (new_state && netif_msg_link(mdp)) 1406 phy_print_status(phydev); 1407 } 1408 1409 /* PHY init function */ 1410 static int sh_eth_phy_init(struct net_device *ndev) 1411 { 1412 struct sh_eth_private *mdp = netdev_priv(ndev); 1413 char phy_id[MII_BUS_ID_SIZE + 3]; 1414 struct phy_device *phydev = NULL; 1415 1416 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1417 mdp->mii_bus->id , mdp->phy_id); 1418 1419 mdp->link = PHY_DOWN; 1420 mdp->speed = 0; 1421 mdp->duplex = -1; 1422 1423 /* Try connect to PHY */ 1424 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1425 0, mdp->phy_interface); 1426 if (IS_ERR(phydev)) { 1427 dev_err(&ndev->dev, "phy_connect failed\n"); 1428 return PTR_ERR(phydev); 1429 } 1430 1431 dev_info(&ndev->dev, "attached phy %i to driver %s\n", 1432 phydev->addr, phydev->drv->name); 1433 1434 mdp->phydev = phydev; 1435 1436 return 0; 1437 } 1438 1439 /* 
PHY control start function */ 1440 static int sh_eth_phy_start(struct net_device *ndev) 1441 { 1442 struct sh_eth_private *mdp = netdev_priv(ndev); 1443 int ret; 1444 1445 ret = sh_eth_phy_init(ndev); 1446 if (ret) 1447 return ret; 1448 1449 /* reset phy - this also wakes it from PDOWN */ 1450 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET); 1451 phy_start(mdp->phydev); 1452 1453 return 0; 1454 } 1455 1456 static int sh_eth_get_settings(struct net_device *ndev, 1457 struct ethtool_cmd *ecmd) 1458 { 1459 struct sh_eth_private *mdp = netdev_priv(ndev); 1460 unsigned long flags; 1461 int ret; 1462 1463 spin_lock_irqsave(&mdp->lock, flags); 1464 ret = phy_ethtool_gset(mdp->phydev, ecmd); 1465 spin_unlock_irqrestore(&mdp->lock, flags); 1466 1467 return ret; 1468 } 1469 1470 static int sh_eth_set_settings(struct net_device *ndev, 1471 struct ethtool_cmd *ecmd) 1472 { 1473 struct sh_eth_private *mdp = netdev_priv(ndev); 1474 unsigned long flags; 1475 int ret; 1476 1477 spin_lock_irqsave(&mdp->lock, flags); 1478 1479 /* disable tx and rx */ 1480 sh_eth_rcv_snd_disable(ndev); 1481 1482 ret = phy_ethtool_sset(mdp->phydev, ecmd); 1483 if (ret) 1484 goto error_exit; 1485 1486 if (ecmd->duplex == DUPLEX_FULL) 1487 mdp->duplex = 1; 1488 else 1489 mdp->duplex = 0; 1490 1491 if (mdp->cd->set_duplex) 1492 mdp->cd->set_duplex(ndev); 1493 1494 error_exit: 1495 mdelay(1); 1496 1497 /* enable tx and rx */ 1498 sh_eth_rcv_snd_enable(ndev); 1499 1500 spin_unlock_irqrestore(&mdp->lock, flags); 1501 1502 return ret; 1503 } 1504 1505 static int sh_eth_nway_reset(struct net_device *ndev) 1506 { 1507 struct sh_eth_private *mdp = netdev_priv(ndev); 1508 unsigned long flags; 1509 int ret; 1510 1511 spin_lock_irqsave(&mdp->lock, flags); 1512 ret = phy_start_aneg(mdp->phydev); 1513 spin_unlock_irqrestore(&mdp->lock, flags); 1514 1515 return ret; 1516 } 1517 1518 static u32 sh_eth_get_msglevel(struct net_device *ndev) 1519 { 1520 struct sh_eth_private *mdp = netdev_priv(ndev); 1521 return mdp->msg_enable; 1522 } 1523 1524 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) 1525 { 1526 struct sh_eth_private *mdp = netdev_priv(ndev); 1527 mdp->msg_enable = value; 1528 } 1529 1530 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1531 "rx_current", "tx_current", 1532 "rx_dirty", "tx_dirty", 1533 }; 1534 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1535 1536 static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1537 { 1538 switch (sset) { 1539 case ETH_SS_STATS: 1540 return SH_ETH_STATS_LEN; 1541 default: 1542 return -EOPNOTSUPP; 1543 } 1544 } 1545 1546 static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1547 struct ethtool_stats *stats, u64 *data) 1548 { 1549 struct sh_eth_private *mdp = netdev_priv(ndev); 1550 int i = 0; 1551 1552 /* device-specific stats */ 1553 data[i++] = mdp->cur_rx; 1554 data[i++] = mdp->cur_tx; 1555 data[i++] = mdp->dirty_rx; 1556 data[i++] = mdp->dirty_tx; 1557 } 1558 1559 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1560 { 1561 switch (stringset) { 1562 case ETH_SS_STATS: 1563 memcpy(data, *sh_eth_gstrings_stats, 1564 sizeof(sh_eth_gstrings_stats)); 1565 break; 1566 } 1567 } 1568 1569 static void sh_eth_get_ringparam(struct net_device *ndev, 1570 struct ethtool_ringparam *ring) 1571 { 1572 struct sh_eth_private *mdp = netdev_priv(ndev); 1573 1574 ring->rx_max_pending = RX_RING_MAX; 1575 ring->tx_max_pending = TX_RING_MAX; 1576 ring->rx_pending = mdp->num_rx_ring; 1577 ring->tx_pending = 
mdp->num_tx_ring; 1578 } 1579 1580 static int sh_eth_set_ringparam(struct net_device *ndev, 1581 struct ethtool_ringparam *ring) 1582 { 1583 struct sh_eth_private *mdp = netdev_priv(ndev); 1584 int ret; 1585 1586 if (ring->tx_pending > TX_RING_MAX || 1587 ring->rx_pending > RX_RING_MAX || 1588 ring->tx_pending < TX_RING_MIN || 1589 ring->rx_pending < RX_RING_MIN) 1590 return -EINVAL; 1591 if (ring->rx_mini_pending || ring->rx_jumbo_pending) 1592 return -EINVAL; 1593 1594 if (netif_running(ndev)) { 1595 netif_tx_disable(ndev); 1596 /* Disable interrupts by clearing the interrupt mask. */ 1597 sh_eth_write(ndev, 0x0000, EESIPR); 1598 /* Stop the chip's Tx and Rx processes. */ 1599 sh_eth_write(ndev, 0, EDTRR); 1600 sh_eth_write(ndev, 0, EDRRR); 1601 synchronize_irq(ndev->irq); 1602 } 1603 1604 /* Free all the skbuffs in the Rx queue. */ 1605 sh_eth_ring_free(ndev); 1606 /* Free DMA buffer */ 1607 sh_eth_free_dma_buffer(mdp); 1608 1609 /* Set new parameters */ 1610 mdp->num_rx_ring = ring->rx_pending; 1611 mdp->num_tx_ring = ring->tx_pending; 1612 1613 ret = sh_eth_ring_init(ndev); 1614 if (ret < 0) { 1615 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__); 1616 return ret; 1617 } 1618 ret = sh_eth_dev_init(ndev, false); 1619 if (ret < 0) { 1620 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__); 1621 return ret; 1622 } 1623 1624 if (netif_running(ndev)) { 1625 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 1626 /* Setting the Rx mode will start the Rx process. */ 1627 sh_eth_write(ndev, EDRRR_R, EDRRR); 1628 netif_wake_queue(ndev); 1629 } 1630 1631 return 0; 1632 } 1633 1634 static const struct ethtool_ops sh_eth_ethtool_ops = { 1635 .get_settings = sh_eth_get_settings, 1636 .set_settings = sh_eth_set_settings, 1637 .nway_reset = sh_eth_nway_reset, 1638 .get_msglevel = sh_eth_get_msglevel, 1639 .set_msglevel = sh_eth_set_msglevel, 1640 .get_link = ethtool_op_get_link, 1641 .get_strings = sh_eth_get_strings, 1642 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1643 .get_sset_count = sh_eth_get_sset_count, 1644 .get_ringparam = sh_eth_get_ringparam, 1645 .set_ringparam = sh_eth_set_ringparam, 1646 }; 1647 1648 /* network device open function */ 1649 static int sh_eth_open(struct net_device *ndev) 1650 { 1651 int ret = 0; 1652 struct sh_eth_private *mdp = netdev_priv(ndev); 1653 1654 pm_runtime_get_sync(&mdp->pdev->dev); 1655 1656 ret = request_irq(ndev->irq, sh_eth_interrupt, 1657 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1658 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1659 defined(CONFIG_CPU_SUBTYPE_SH7757) 1660 IRQF_SHARED, 1661 #else 1662 0, 1663 #endif 1664 ndev->name, ndev); 1665 if (ret) { 1666 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1667 return ret; 1668 } 1669 1670 /* Descriptor set */ 1671 ret = sh_eth_ring_init(ndev); 1672 if (ret) 1673 goto out_free_irq; 1674 1675 /* device init */ 1676 ret = sh_eth_dev_init(ndev, true); 1677 if (ret) 1678 goto out_free_irq; 1679 1680 /* PHY control start*/ 1681 ret = sh_eth_phy_start(ndev); 1682 if (ret) 1683 goto out_free_irq; 1684 1685 return ret; 1686 1687 out_free_irq: 1688 free_irq(ndev->irq, ndev); 1689 pm_runtime_put_sync(&mdp->pdev->dev); 1690 return ret; 1691 } 1692 1693 /* Timeout function */ 1694 static void sh_eth_tx_timeout(struct net_device *ndev) 1695 { 1696 struct sh_eth_private *mdp = netdev_priv(ndev); 1697 struct sh_eth_rxdesc *rxdesc; 1698 int i; 1699 1700 netif_stop_queue(ndev); 1701 1702 if (netif_msg_timer(mdp)) 1703 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," 1704 " 
resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); 1705 1706 /* tx_errors count up */ 1707 ndev->stats.tx_errors++; 1708 1709 /* Free all the skbuffs in the Rx queue. */ 1710 for (i = 0; i < mdp->num_rx_ring; i++) { 1711 rxdesc = &mdp->rx_ring[i]; 1712 rxdesc->status = 0; 1713 rxdesc->addr = 0xBADF00D0; 1714 if (mdp->rx_skbuff[i]) 1715 dev_kfree_skb(mdp->rx_skbuff[i]); 1716 mdp->rx_skbuff[i] = NULL; 1717 } 1718 for (i = 0; i < mdp->num_tx_ring; i++) { 1719 if (mdp->tx_skbuff[i]) 1720 dev_kfree_skb(mdp->tx_skbuff[i]); 1721 mdp->tx_skbuff[i] = NULL; 1722 } 1723 1724 /* device init */ 1725 sh_eth_dev_init(ndev, true); 1726 } 1727 1728 /* Packet transmit function */ 1729 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1730 { 1731 struct sh_eth_private *mdp = netdev_priv(ndev); 1732 struct sh_eth_txdesc *txdesc; 1733 u32 entry; 1734 unsigned long flags; 1735 1736 spin_lock_irqsave(&mdp->lock, flags); 1737 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 1738 if (!sh_eth_txfree(ndev)) { 1739 if (netif_msg_tx_queued(mdp)) 1740 dev_warn(&ndev->dev, "TxFD exhausted.\n"); 1741 netif_stop_queue(ndev); 1742 spin_unlock_irqrestore(&mdp->lock, flags); 1743 return NETDEV_TX_BUSY; 1744 } 1745 } 1746 spin_unlock_irqrestore(&mdp->lock, flags); 1747 1748 entry = mdp->cur_tx % mdp->num_tx_ring; 1749 mdp->tx_skbuff[entry] = skb; 1750 txdesc = &mdp->tx_ring[entry]; 1751 /* soft swap. */ 1752 if (!mdp->cd->hw_swap) 1753 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 1754 skb->len + 2); 1755 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 1756 DMA_TO_DEVICE); 1757 if (skb->len < ETHERSMALL) 1758 txdesc->buffer_length = ETHERSMALL; 1759 else 1760 txdesc->buffer_length = skb->len; 1761 1762 if (entry >= mdp->num_tx_ring - 1) 1763 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 1764 else 1765 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 1766 1767 mdp->cur_tx++; 1768 1769 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) 1770 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1771 1772 return NETDEV_TX_OK; 1773 } 1774 1775 /* device close function */ 1776 static int sh_eth_close(struct net_device *ndev) 1777 { 1778 struct sh_eth_private *mdp = netdev_priv(ndev); 1779 1780 netif_stop_queue(ndev); 1781 1782 /* Disable interrupts by clearing the interrupt mask. */ 1783 sh_eth_write(ndev, 0x0000, EESIPR); 1784 1785 /* Stop the chip's Tx and Rx processes. */ 1786 sh_eth_write(ndev, 0, EDTRR); 1787 sh_eth_write(ndev, 0, EDRRR); 1788 1789 /* PHY Disconnect */ 1790 if (mdp->phydev) { 1791 phy_stop(mdp->phydev); 1792 phy_disconnect(mdp->phydev); 1793 } 1794 1795 free_irq(ndev->irq, ndev); 1796 1797 /* Free all the skbuffs in the Rx queue. 
*/ 1798 sh_eth_ring_free(ndev); 1799 1800 /* free DMA buffer */ 1801 sh_eth_free_dma_buffer(mdp); 1802 1803 pm_runtime_put_sync(&mdp->pdev->dev); 1804 1805 return 0; 1806 } 1807 1808 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 1809 { 1810 struct sh_eth_private *mdp = netdev_priv(ndev); 1811 1812 pm_runtime_get_sync(&mdp->pdev->dev); 1813 1814 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); 1815 sh_eth_write(ndev, 0, TROCR); /* (write clear) */ 1816 ndev->stats.collisions += sh_eth_read(ndev, CDCR); 1817 sh_eth_write(ndev, 0, CDCR); /* (write clear) */ 1818 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); 1819 sh_eth_write(ndev, 0, LCCR); /* (write clear) */ 1820 if (sh_eth_is_gether(mdp)) { 1821 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); 1822 sh_eth_write(ndev, 0, CERCR); /* (write clear) */ 1823 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); 1824 sh_eth_write(ndev, 0, CEECR); /* (write clear) */ 1825 } else { 1826 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); 1827 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ 1828 } 1829 pm_runtime_put_sync(&mdp->pdev->dev); 1830 1831 return &ndev->stats; 1832 } 1833 1834 /* ioctl to device function */ 1835 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, 1836 int cmd) 1837 { 1838 struct sh_eth_private *mdp = netdev_priv(ndev); 1839 struct phy_device *phydev = mdp->phydev; 1840 1841 if (!netif_running(ndev)) 1842 return -EINVAL; 1843 1844 if (!phydev) 1845 return -ENODEV; 1846 1847 return phy_mii_ioctl(phydev, rq, cmd); 1848 } 1849 1850 #if defined(SH_ETH_HAS_TSU) 1851 /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ 1852 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, 1853 int entry) 1854 { 1855 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); 1856 } 1857 1858 static u32 sh_eth_tsu_get_post_mask(int entry) 1859 { 1860 return 0x0f << (28 - ((entry % 8) * 4)); 1861 } 1862 1863 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) 1864 { 1865 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); 1866 } 1867 1868 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, 1869 int entry) 1870 { 1871 struct sh_eth_private *mdp = netdev_priv(ndev); 1872 u32 tmp; 1873 void *reg_offset; 1874 1875 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 1876 tmp = ioread32(reg_offset); 1877 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); 1878 } 1879 1880 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, 1881 int entry) 1882 { 1883 struct sh_eth_private *mdp = netdev_priv(ndev); 1884 u32 post_mask, ref_mask, tmp; 1885 void *reg_offset; 1886 1887 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 1888 post_mask = sh_eth_tsu_get_post_mask(entry); 1889 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; 1890 1891 tmp = ioread32(reg_offset); 1892 iowrite32(tmp & ~post_mask, reg_offset); 1893 1894 /* If other port enables, the function returns "true" */ 1895 return tmp & ref_mask; 1896 } 1897 1898 static int sh_eth_tsu_busy(struct net_device *ndev) 1899 { 1900 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; 1901 struct sh_eth_private *mdp = netdev_priv(ndev); 1902 1903 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { 1904 udelay(10); 1905 timeout--; 1906 if (timeout <= 0) { 1907 dev_err(&ndev->dev, "%s: timeout\n", __func__); 1908 return -ETIMEDOUT; 1909 } 1910 } 1911 1912 return 0; 1913 } 1914 
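
/*
 * Layout notes for the TSU address CAM used by the helpers around here:
 * each entry occupies 8 bytes starting at TSU_ADRH0 -- the first word
 * holds MAC octets 0..3 and the low 16 bits of the second word hold
 * octets 4..5 -- and every table write must wait for TSU_ADSBSY to clear
 * (sh_eth_tsu_busy() above).  The TSU_POSTn helpers earlier pack four
 * bits per CAM entry, eight entries per register.  Worked example for
 * entry 10 on port 1:
 *
 *   register offset:  TSU_POST1 + (10 / 8) * 4     ->  TSU_POST1 + 4
 *   entry mask:       0x0f << (28 - (10 % 8) * 4)  ->  0x00f00000
 *   port enable bit:  (0x08 >> (1 << 1)) << 20     ->  0x00200000
 */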
1915 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, 1916 const u8 *addr) 1917 { 1918 u32 val; 1919 1920 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; 1921 iowrite32(val, reg); 1922 if (sh_eth_tsu_busy(ndev) < 0) 1923 return -EBUSY; 1924 1925 val = addr[4] << 8 | addr[5]; 1926 iowrite32(val, reg + 4); 1927 if (sh_eth_tsu_busy(ndev) < 0) 1928 return -EBUSY; 1929 1930 return 0; 1931 } 1932 1933 static void sh_eth_tsu_read_entry(void *reg, u8 *addr) 1934 { 1935 u32 val; 1936 1937 val = ioread32(reg); 1938 addr[0] = (val >> 24) & 0xff; 1939 addr[1] = (val >> 16) & 0xff; 1940 addr[2] = (val >> 8) & 0xff; 1941 addr[3] = val & 0xff; 1942 val = ioread32(reg + 4); 1943 addr[4] = (val >> 8) & 0xff; 1944 addr[5] = val & 0xff; 1945 } 1946 1947 1948 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) 1949 { 1950 struct sh_eth_private *mdp = netdev_priv(ndev); 1951 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1952 int i; 1953 u8 c_addr[ETH_ALEN]; 1954 1955 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 1956 sh_eth_tsu_read_entry(reg_offset, c_addr); 1957 if (memcmp(addr, c_addr, ETH_ALEN) == 0) 1958 return i; 1959 } 1960 1961 return -ENOENT; 1962 } 1963 1964 static int sh_eth_tsu_find_empty(struct net_device *ndev) 1965 { 1966 u8 blank[ETH_ALEN]; 1967 int entry; 1968 1969 memset(blank, 0, sizeof(blank)); 1970 entry = sh_eth_tsu_find_entry(ndev, blank); 1971 return (entry < 0) ? -ENOMEM : entry; 1972 } 1973 1974 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, 1975 int entry) 1976 { 1977 struct sh_eth_private *mdp = netdev_priv(ndev); 1978 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1979 int ret; 1980 u8 blank[ETH_ALEN]; 1981 1982 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & 1983 ~(1 << (31 - entry)), TSU_TEN); 1984 1985 memset(blank, 0, sizeof(blank)); 1986 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); 1987 if (ret < 0) 1988 return ret; 1989 return 0; 1990 } 1991 1992 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) 1993 { 1994 struct sh_eth_private *mdp = netdev_priv(ndev); 1995 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1996 int i, ret; 1997 1998 if (!mdp->cd->tsu) 1999 return 0; 2000 2001 i = sh_eth_tsu_find_entry(ndev, addr); 2002 if (i < 0) { 2003 /* No entry found, create one */ 2004 i = sh_eth_tsu_find_empty(ndev); 2005 if (i < 0) 2006 return -ENOMEM; 2007 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); 2008 if (ret < 0) 2009 return ret; 2010 2011 /* Enable the entry */ 2012 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | 2013 (1 << (31 - i)), TSU_TEN); 2014 } 2015 2016 /* Entry found or created, enable POST */ 2017 sh_eth_tsu_enable_cam_entry_post(ndev, i); 2018 2019 return 0; 2020 } 2021 2022 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) 2023 { 2024 struct sh_eth_private *mdp = netdev_priv(ndev); 2025 int i, ret; 2026 2027 if (!mdp->cd->tsu) 2028 return 0; 2029 2030 i = sh_eth_tsu_find_entry(ndev, addr); 2031 if (i) { 2032 /* Entry found */ 2033 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 2034 goto done; 2035 2036 /* Disable the entry if both ports was disabled */ 2037 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 2038 if (ret < 0) 2039 return ret; 2040 } 2041 done: 2042 return 0; 2043 } 2044 2045 static int sh_eth_tsu_purge_all(struct net_device *ndev) 2046 { 2047 struct sh_eth_private *mdp = netdev_priv(ndev); 2048 int i, ret; 2049 2050 if 
static int sh_eth_tsu_purge_all(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports were disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Set the multicast reception mode */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/*
	 * Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}

static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/*
	 * The controller has one VLAN tag HW filter. So, if the filter is
	 * already enabled and a second VLAN ID is added, the driver disables
	 * the filter and frames with any VLAN tag are received.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
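	/*
	 * These masks pick out the MDI, MDO, MMD and MDC bits of the PIR
	 * register (mapped at bitbang->addr above); the bb_ops callbacks
	 * toggle them to bit-bang the MII management interface.
	 */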
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;	/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = ioremap(res->start, resource_size(res));
	if (mdp->addr == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap failed.\n");
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;
	mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);

	/* set cpu data */
#if defined(SH_ETH_HAS_BOTH_MODULES)
	mdp->cd = sh_eth_get_cpu_data(mdp);
#else
	mdp->cd = &sh_eth_my_cpu_data;
#endif
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	ndev->netdev_ops = &sh_eth_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;
		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!rtsu) {
			dev_err(&pdev->dev, "TSU resource not found\n");
			ret = -ENODEV;
			goto out_release;
		}
		mdp->tsu_addr = ioremap(rtsu->start,
					resource_size(rtsu));
		if (mdp->tsu_addr == NULL) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "TSU ioremap failed.\n");
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_FILTER;
	}

	/* initialize first or needed device */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id, pd);
	if (ret)
		goto out_unregister;

	/* print device information */
	pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
		(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (mdp && mdp->addr)
		iounmap(mdp->addr);
	if (mdp && mdp->tsu_addr)
		iounmap(mdp->tsu_addr);
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->cd->tsu)
		iounmap(mdp->tsu_addr);
	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	iounmap(mdp->addr);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");