1 /* 2 * SuperH Ethernet device driver 3 * 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 5 * Copyright (C) 2008-2012 Renesas Solutions Corp. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * You should have received a copy of the GNU General Public License along with 16 * this program; if not, write to the Free Software Foundation, Inc., 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * The full GNU General Public License is included in this distribution in 20 * the file called "COPYING". 21 */ 22 23 #include <linux/init.h> 24 #include <linux/module.h> 25 #include <linux/kernel.h> 26 #include <linux/spinlock.h> 27 #include <linux/interrupt.h> 28 #include <linux/dma-mapping.h> 29 #include <linux/etherdevice.h> 30 #include <linux/delay.h> 31 #include <linux/platform_device.h> 32 #include <linux/mdio-bitbang.h> 33 #include <linux/netdevice.h> 34 #include <linux/phy.h> 35 #include <linux/cache.h> 36 #include <linux/io.h> 37 #include <linux/pm_runtime.h> 38 #include <linux/slab.h> 39 #include <linux/ethtool.h> 40 #include <linux/if_vlan.h> 41 #include <linux/clk.h> 42 #include <linux/sh_eth.h> 43 44 #include "sh_eth.h" 45 46 #define SH_ETH_DEF_MSG_ENABLE \ 47 (NETIF_MSG_LINK | \ 48 NETIF_MSG_TIMER | \ 49 NETIF_MSG_RX_ERR| \ 50 NETIF_MSG_TX_ERR) 51 52 /* There is CPU dependent code */ 53 #if defined(CONFIG_CPU_SUBTYPE_SH7724) 54 #define SH_ETH_RESET_DEFAULT 1 55 static void sh_eth_set_duplex(struct net_device *ndev) 56 { 57 struct sh_eth_private *mdp = netdev_priv(ndev); 58 59 if (mdp->duplex) /* Full */ 60 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 61 else /* Half */ 62 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 63 } 64 65 static void sh_eth_set_rate(struct net_device *ndev) 66 { 67 struct sh_eth_private *mdp = netdev_priv(ndev); 68 69 switch (mdp->speed) { 70 case 10: /* 10BASE */ 71 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); 72 break; 73 case 100:/* 100BASE */ 74 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); 75 break; 76 default: 77 break; 78 } 79 } 80 81 /* SH7724 */ 82 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 83 .set_duplex = sh_eth_set_duplex, 84 .set_rate = sh_eth_set_rate, 85 86 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 87 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 88 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 89 90 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 91 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 92 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 93 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 94 95 .apr = 1, 96 .mpr = 1, 97 .tpauser = 1, 98 .hw_swap = 1, 99 .rpadir = 1, 100 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 101 }; 102 #elif defined(CONFIG_CPU_SUBTYPE_SH7757) 103 #define SH_ETH_HAS_BOTH_MODULES 1 104 #define SH_ETH_HAS_TSU 1 105 static void sh_eth_set_duplex(struct net_device *ndev) 106 { 107 struct sh_eth_private *mdp = netdev_priv(ndev); 108 109 if (mdp->duplex) /* Full */ 110 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 111 else /* Half */ 112 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 113 } 114 115 static void sh_eth_set_rate(struct net_device *ndev) 116 { 117 struct sh_eth_private *mdp = netdev_priv(ndev); 118 119 switch (mdp->speed) { 120 case 10: /* 10BASE */ 121 sh_eth_write(ndev, 0, RTRATE); 122 break; 123 case 100:/* 100BASE */ 124 sh_eth_write(ndev, 1, RTRATE); 125 break; 126 default: 127 break; 128 } 129 } 130 131 /* SH7757 */ 132 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 133 .set_duplex = sh_eth_set_duplex, 134 .set_rate = sh_eth_set_rate, 135 136 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 137 .rmcr_value = 0x00000001, 138 139 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 140 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 141 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 142 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 143 144 .apr = 1, 145 .mpr = 1, 146 .tpauser = 1, 147 .hw_swap = 1, 148 .no_ade = 1, 149 .rpadir = 1, 150 .rpadir_value = 2 << 16, 151 }; 152 153 #define SH_GIGA_ETH_BASE 0xfee00000 154 #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) 155 #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) 156 static void sh_eth_chip_reset_giga(struct net_device *ndev) 157 { 158 int i; 159 unsigned long mahr[2], malr[2]; 160 161 /* save MAHR and MALR */ 162 for (i = 0; i < 2; i++) { 163 malr[i] = ioread32((void *)GIGA_MALR(i)); 164 mahr[i] = ioread32((void *)GIGA_MAHR(i)); 165 } 166 167 /* reset device */ 168 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); 169 mdelay(1); 170 171 /* restore MAHR and MALR */ 172 for (i = 0; i < 2; i++) { 173 iowrite32(malr[i], (void *)GIGA_MALR(i)); 174 iowrite32(mahr[i], (void *)GIGA_MAHR(i)); 175 } 176 } 177 178 static int sh_eth_is_gether(struct sh_eth_private *mdp); 179 static void sh_eth_reset(struct net_device *ndev) 180 { 181 struct sh_eth_private *mdp = netdev_priv(ndev); 182 int cnt = 100; 183 184 if (sh_eth_is_gether(mdp)) { 185 sh_eth_write(ndev, 0x03, EDSR); 186 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 187 EDMR); 188 while (cnt > 0) { 189 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 190 break; 191 mdelay(1); 192 cnt--; 193 } 194 if (cnt < 0) 195 printk(KERN_ERR "Device reset fail\n"); 196 197 /* Table Init */ 198 sh_eth_write(ndev, 0x0, TDLAR); 199 sh_eth_write(ndev, 0x0, TDFAR); 200 sh_eth_write(ndev, 0x0, TDFXR); 201 sh_eth_write(ndev, 0x0, TDFFR); 202 sh_eth_write(ndev, 0x0, RDLAR); 203 sh_eth_write(ndev, 0x0, RDFAR); 204 sh_eth_write(ndev, 0x0, RDFXR); 205 sh_eth_write(ndev, 0x0, RDFFR); 206 } else { 207 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, 208 EDMR); 209 mdelay(3); 210 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 211 EDMR); 212 } 213 } 214 215 static void sh_eth_set_duplex_giga(struct net_device *ndev) 216 { 217 struct sh_eth_private *mdp = netdev_priv(ndev); 218 219 if (mdp->duplex) /* Full */ 220 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 221 else /* Half */ 222 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 223 } 224 225 static void sh_eth_set_rate_giga(struct net_device *ndev) 226 { 227 struct sh_eth_private *mdp = netdev_priv(ndev); 228 229 switch (mdp->speed) { 230 case 10: /* 10BASE */ 231 sh_eth_write(ndev, 0x00000000, GECMR); 232 break; 233 case 100:/* 100BASE */ 234 sh_eth_write(ndev, 0x00000010, GECMR); 235 break; 236 case 1000: /* 1000BASE */ 237 sh_eth_write(ndev, 0x00000020, GECMR); 238 break; 239 default: 240 break; 241 } 242 } 243 244 /* SH7757(GETHERC) */ 245 static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { 246 .chip_reset = sh_eth_chip_reset_giga, 247 .set_duplex = sh_eth_set_duplex_giga, 248 .set_rate = sh_eth_set_rate_giga, 249 250 .ecsr_value = ECSR_ICD | ECSR_MPD, 251 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 252 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 253 254 .tx_check = EESR_TC1 | EESR_FTC, 255 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 256 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 257 EESR_ECI, 258 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 259 EESR_TFE, 260 .fdr_value = 0x0000072f, 261 .rmcr_value = 0x00000001, 262 263 .apr = 1, 264 .mpr = 1, 265 .tpauser = 1, 266 .bculr = 1, 267 .hw_swap = 1, 268 .rpadir = 1, 269 .rpadir_value = 2 << 16, 270 .no_trimd = 1, 271 .no_ade = 1, 272 .tsu = 1, 273 }; 274 275 static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) 276 { 277 if (sh_eth_is_gether(mdp)) 278 return &sh_eth_my_cpu_data_giga; 279 else 280 return &sh_eth_my_cpu_data; 281 } 282 283 #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 284 #define SH_ETH_HAS_TSU 1 285 static void sh_eth_reset_hw_crc(struct net_device *ndev); 286 static void sh_eth_chip_reset(struct net_device *ndev) 287 { 288 struct sh_eth_private *mdp = netdev_priv(ndev); 289 290 /* reset device */ 291 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 292 mdelay(1); 293 } 294 295 static void sh_eth_reset(struct net_device *ndev) 296 { 297 int cnt = 100; 298 299 sh_eth_write(ndev, EDSR_ENALL, EDSR); 300 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 301 while (cnt > 0) { 302 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 303 break; 304 mdelay(1); 305 cnt--; 306 } 307 if (cnt == 0) 308 printk(KERN_ERR "Device reset fail\n"); 309 310 /* Table Init */ 311 sh_eth_write(ndev, 0x0, TDLAR); 312 sh_eth_write(ndev, 0x0, TDFAR); 313 sh_eth_write(ndev, 0x0, TDFXR); 314 sh_eth_write(ndev, 0x0, TDFFR); 315 sh_eth_write(ndev, 0x0, RDLAR); 316 sh_eth_write(ndev, 0x0, RDFAR); 317 sh_eth_write(ndev, 0x0, RDFXR); 318 sh_eth_write(ndev, 0x0, RDFFR); 319 320 /* Reset HW CRC register */ 321 sh_eth_reset_hw_crc(ndev); 322 } 323 324 static void sh_eth_set_duplex(struct net_device *ndev) 325 { 326 struct sh_eth_private *mdp = netdev_priv(ndev); 327 328 if (mdp->duplex) /* Full */ 329 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 330 else /* Half */ 331 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 332 } 333 334 static void sh_eth_set_rate(struct net_device *ndev) 335 { 336 struct sh_eth_private *mdp = netdev_priv(ndev); 337 338 switch (mdp->speed) { 339 case 10: /* 10BASE */ 340 sh_eth_write(ndev, GECMR_10, GECMR); 341 break; 342 case 100:/* 100BASE */ 343 sh_eth_write(ndev, GECMR_100, GECMR); 344 break; 345 case 1000: /* 1000BASE */ 346 sh_eth_write(ndev, GECMR_1000, GECMR); 347 break; 348 default: 349 break; 350 } 351 } 352 353 /* sh7763 */ 354 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 355 .chip_reset = sh_eth_chip_reset, 356 .set_duplex = sh_eth_set_duplex, 357 .set_rate = sh_eth_set_rate, 358 359 .ecsr_value = ECSR_ICD | ECSR_MPD, 360 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 361 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 362 363 .tx_check = EESR_TC1 | EESR_FTC, 364 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 365 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 366 EESR_ECI, 367 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 368 EESR_TFE, 369 370 .apr = 1, 371 .mpr = 1, 372 .tpauser = 1, 373 .bculr = 1, 374 .hw_swap = 1, 375 .no_trimd = 1, 376 .no_ade = 1, 377 .tsu = 1, 378 #if defined(CONFIG_CPU_SUBTYPE_SH7734) 379 .hw_crc = 1, 380 #endif 381 }; 382 383 static void sh_eth_reset_hw_crc(struct net_device *ndev) 384 { 385 if (sh_eth_my_cpu_data.hw_crc) 386 sh_eth_write(ndev, 0x0, CSMR); 387 } 388 389 #elif defined(CONFIG_ARCH_R8A7740) 390 #define SH_ETH_HAS_TSU 1 391 static void sh_eth_chip_reset(struct net_device *ndev) 392 { 393 struct sh_eth_private *mdp = netdev_priv(ndev); 394 unsigned long mii; 395 396 /* reset device */ 397 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 398 mdelay(1); 399 400 switch (mdp->phy_interface) { 401 case PHY_INTERFACE_MODE_GMII: 402 mii = 2; 403 break; 404 case PHY_INTERFACE_MODE_MII: 405 mii = 1; 406 break; 407 case PHY_INTERFACE_MODE_RMII: 408 default: 409 mii = 0; 410 break; 411 } 412 sh_eth_write(ndev, mii, RMII_MII); 413 } 414 415 static void sh_eth_reset(struct net_device *ndev) 416 { 417 int cnt = 100; 418 419 sh_eth_write(ndev, EDSR_ENALL, EDSR); 420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 421 while (cnt > 0) { 422 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 423 break; 424 mdelay(1); 425 cnt--; 426 } 427 if (cnt == 0) 428 printk(KERN_ERR "Device reset fail\n"); 429 430 /* Table Init */ 431 sh_eth_write(ndev, 0x0, TDLAR); 432 sh_eth_write(ndev, 0x0, TDFAR); 433 sh_eth_write(ndev, 0x0, TDFXR); 434 sh_eth_write(ndev, 0x0, TDFFR); 435 sh_eth_write(ndev, 0x0, RDLAR); 436 sh_eth_write(ndev, 0x0, RDFAR); 437 sh_eth_write(ndev, 0x0, RDFXR); 438 sh_eth_write(ndev, 0x0, RDFFR); 439 } 440 441 static void sh_eth_set_duplex(struct net_device *ndev) 442 { 443 struct sh_eth_private *mdp = netdev_priv(ndev); 444 445 if (mdp->duplex) /* Full */ 446 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 447 else /* Half */ 448 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 449 } 450 451 static void sh_eth_set_rate(struct net_device *ndev) 452 { 453 struct sh_eth_private *mdp = netdev_priv(ndev); 454 455 switch (mdp->speed) { 456 case 10: /* 10BASE */ 457 sh_eth_write(ndev, GECMR_10, GECMR); 458 break; 459 case 100:/* 100BASE */ 460 sh_eth_write(ndev, GECMR_100, GECMR); 461 break; 462 case 1000: /* 1000BASE */ 463 sh_eth_write(ndev, GECMR_1000, GECMR); 464 break; 465 default: 466 break; 467 } 468 } 469 470 /* R8A7740 */ 471 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 472 .chip_reset = sh_eth_chip_reset, 473 .set_duplex = sh_eth_set_duplex, 474 .set_rate = sh_eth_set_rate, 475 476 .ecsr_value = ECSR_ICD | ECSR_MPD, 477 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 478 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 479 480 .tx_check = EESR_TC1 | EESR_FTC, 481 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 482 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 483 EESR_ECI, 484 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 485 EESR_TFE, 486 487 .apr = 1, 488 .mpr = 1, 489 .tpauser = 1, 490 .bculr = 1, 491 .hw_swap = 1, 492 .no_trimd = 1, 493 .no_ade = 1, 494 .tsu = 1, 495 }; 496 497 #elif defined(CONFIG_CPU_SUBTYPE_SH7619) 498 #define SH_ETH_RESET_DEFAULT 1 499 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 500 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 501 502 .apr = 1, 503 .mpr = 1, 504 .tpauser = 1, 505 .hw_swap = 1, 506 }; 507 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 508 #define SH_ETH_RESET_DEFAULT 1 509 #define SH_ETH_HAS_TSU 1 510 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 511 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 512 .tsu = 1, 513 }; 514 #endif 515 516 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) 517 { 518 if (!cd->ecsr_value) 519 cd->ecsr_value = DEFAULT_ECSR_INIT; 520 521 if (!cd->ecsipr_value) 522 cd->ecsipr_value = DEFAULT_ECSIPR_INIT; 523 524 if (!cd->fcftr_value) 525 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \ 526 DEFAULT_FIFO_F_D_RFD; 527 528 if (!cd->fdr_value) 529 cd->fdr_value = DEFAULT_FDR_INIT; 530 531 if (!cd->rmcr_value) 532 cd->rmcr_value = DEFAULT_RMCR_VALUE; 533 534 if (!cd->tx_check) 535 cd->tx_check = DEFAULT_TX_CHECK; 536 537 if (!cd->eesr_err_check) 538 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 539 540 if (!cd->tx_error_check) 541 cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; 542 } 543 544 #if defined(SH_ETH_RESET_DEFAULT) 545 /* Chip Reset */ 546 static void sh_eth_reset(struct net_device *ndev) 547 { 548 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); 549 mdelay(3); 550 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); 551 } 552 #endif 553 554 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 555 static void sh_eth_set_receive_align(struct sk_buff *skb) 556 { 557 int reserve; 558 559 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); 560 if (reserve) 561 skb_reserve(skb, reserve); 562 } 563 #else 564 static void sh_eth_set_receive_align(struct sk_buff *skb) 565 { 566 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); 567 } 568 #endif 569 570 571 /* CPU <-> EDMAC endian convert */ 572 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 573 { 574 switch (mdp->edmac_endian) { 575 case EDMAC_LITTLE_ENDIAN: 576 return cpu_to_le32(x); 577 case EDMAC_BIG_ENDIAN: 578 return cpu_to_be32(x); 579 } 580 return x; 581 } 582 583 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 584 { 585 switch (mdp->edmac_endian) { 586 case EDMAC_LITTLE_ENDIAN: 587 return le32_to_cpu(x); 588 case EDMAC_BIG_ENDIAN: 589 return be32_to_cpu(x); 590 } 591 return x; 592 } 593 594 /* 595 * Program the hardware MAC address from dev->dev_addr. 596 */ 597 static void update_mac_address(struct net_device *ndev) 598 { 599 sh_eth_write(ndev, 600 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 601 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); 602 sh_eth_write(ndev, 603 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 604 } 605 606 /* 607 * Get MAC address from SuperH MAC address register 608 * 609 * SuperH's Ethernet device doesn't have 'ROM' to MAC address. 610 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). 611 * When you want use this device, you must set MAC address in bootloader. 612 * 613 */ 614 static void read_mac_address(struct net_device *ndev, unsigned char *mac) 615 { 616 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 617 memcpy(ndev->dev_addr, mac, 6); 618 } else { 619 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); 620 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; 621 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; 622 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); 623 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; 624 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); 625 } 626 } 627 628 static int sh_eth_is_gether(struct sh_eth_private *mdp) 629 { 630 if (mdp->reg_offset == sh_eth_offset_gigabit) 631 return 1; 632 else 633 return 0; 634 } 635 636 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 637 { 638 if (sh_eth_is_gether(mdp)) 639 return EDTRR_TRNS_GETHER; 640 else 641 return EDTRR_TRNS_ETHER; 642 } 643 644 struct bb_info { 645 void (*set_gate)(void *addr); 646 struct mdiobb_ctrl ctrl; 647 void *addr; 648 u32 mmd_msk;/* MMD */ 649 u32 mdo_msk; 650 u32 mdi_msk; 651 u32 mdc_msk; 652 }; 653 654 /* PHY bit set */ 655 static void bb_set(void *addr, u32 msk) 656 { 657 iowrite32(ioread32(addr) | msk, addr); 658 } 659 660 /* PHY bit clear */ 661 static void bb_clr(void *addr, u32 msk) 662 { 663 iowrite32((ioread32(addr) & ~msk), addr); 664 } 665 666 /* PHY bit read */ 667 static int bb_read(void *addr, u32 msk) 668 { 669 return (ioread32(addr) & msk) != 0; 670 } 671 672 /* Data I/O pin control */ 673 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 674 { 675 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 676 677 if (bitbang->set_gate) 678 bitbang->set_gate(bitbang->addr); 679 680 if (bit) 681 bb_set(bitbang->addr, bitbang->mmd_msk); 682 else 683 bb_clr(bitbang->addr, bitbang->mmd_msk); 684 } 685 686 /* Set bit data*/ 687 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) 688 { 689 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 690 691 if (bitbang->set_gate) 692 bitbang->set_gate(bitbang->addr); 693 694 if (bit) 695 bb_set(bitbang->addr, bitbang->mdo_msk); 696 else 697 bb_clr(bitbang->addr, bitbang->mdo_msk); 698 } 699 700 /* Get bit data*/ 701 static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 702 { 703 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 704 705 if (bitbang->set_gate) 706 bitbang->set_gate(bitbang->addr); 707 708 return bb_read(bitbang->addr, bitbang->mdi_msk); 709 } 710 711 /* MDC pin control */ 712 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) 713 { 714 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 715 716 if (bitbang->set_gate) 717 bitbang->set_gate(bitbang->addr); 718 719 if (bit) 720 bb_set(bitbang->addr, bitbang->mdc_msk); 721 else 722 bb_clr(bitbang->addr, bitbang->mdc_msk); 723 } 724 725 /* mdio bus control struct */ 726 static struct mdiobb_ops bb_ops = { 727 .owner = THIS_MODULE, 728 .set_mdc = sh_mdc_ctrl, 729 .set_mdio_dir = sh_mmd_ctrl, 730 .set_mdio_data = sh_set_mdio, 731 .get_mdio_data = sh_get_mdio, 732 }; 733 734 /* free skb and descriptor buffer */ 735 static void sh_eth_ring_free(struct net_device *ndev) 736 { 737 struct sh_eth_private *mdp = netdev_priv(ndev); 738 int i; 739 740 /* Free Rx skb ringbuffer */ 741 if (mdp->rx_skbuff) { 742 for (i = 0; i < RX_RING_SIZE; i++) { 743 if (mdp->rx_skbuff[i]) 744 dev_kfree_skb(mdp->rx_skbuff[i]); 745 } 746 } 747 kfree(mdp->rx_skbuff); 748 749 /* Free Tx skb ringbuffer */ 750 if (mdp->tx_skbuff) { 751 for (i = 0; i < TX_RING_SIZE; i++) { 752 if (mdp->tx_skbuff[i]) 753 dev_kfree_skb(mdp->tx_skbuff[i]); 754 } 755 } 756 kfree(mdp->tx_skbuff); 757 } 758 759 /* format skb and descriptor buffer */ 760 static void sh_eth_ring_format(struct net_device *ndev) 761 { 762 struct sh_eth_private *mdp = netdev_priv(ndev); 763 int i; 764 struct sk_buff *skb; 765 struct sh_eth_rxdesc *rxdesc = NULL; 766 struct sh_eth_txdesc *txdesc = NULL; 767 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE; 768 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE; 769 770 mdp->cur_rx = mdp->cur_tx = 0; 771 mdp->dirty_rx = mdp->dirty_tx = 0; 772 773 memset(mdp->rx_ring, 0, rx_ringsize); 774 775 /* build Rx ring buffer */ 776 for (i = 0; i < RX_RING_SIZE; i++) { 777 /* skb */ 778 mdp->rx_skbuff[i] = NULL; 779 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 780 mdp->rx_skbuff[i] = skb; 781 if (skb == NULL) 782 break; 783 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 784 DMA_FROM_DEVICE); 785 sh_eth_set_receive_align(skb); 786 787 /* RX descriptor */ 788 rxdesc = &mdp->rx_ring[i]; 789 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 790 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 791 792 /* The size of the buffer is 16 byte boundary. */ 793 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 794 /* Rx descriptor address set */ 795 if (i == 0) { 796 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 797 if (sh_eth_is_gether(mdp)) 798 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); 799 } 800 } 801 802 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 803 804 /* Mark the last entry as wrapping the ring. */ 805 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 806 807 memset(mdp->tx_ring, 0, tx_ringsize); 808 809 /* build Tx ring buffer */ 810 for (i = 0; i < TX_RING_SIZE; i++) { 811 mdp->tx_skbuff[i] = NULL; 812 txdesc = &mdp->tx_ring[i]; 813 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 814 txdesc->buffer_length = 0; 815 if (i == 0) { 816 /* Tx descriptor address set */ 817 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); 818 if (sh_eth_is_gether(mdp)) 819 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); 820 } 821 } 822 823 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 824 } 825 826 /* Get skb and descriptor buffer */ 827 static int sh_eth_ring_init(struct net_device *ndev) 828 { 829 struct sh_eth_private *mdp = netdev_priv(ndev); 830 int rx_ringsize, tx_ringsize, ret = 0; 831 832 /* 833 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 834 * card needs room to do 8 byte alignment, +2 so we can reserve 835 * the first 2 bytes, and +16 gets room for the status word from the 836 * card. 837 */ 838 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : 839 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); 840 if (mdp->cd->rpadir) 841 mdp->rx_buf_sz += NET_IP_ALIGN; 842 843 /* Allocate RX and TX skb rings */ 844 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, 845 GFP_KERNEL); 846 if (!mdp->rx_skbuff) { 847 dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); 848 ret = -ENOMEM; 849 return ret; 850 } 851 852 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE, 853 GFP_KERNEL); 854 if (!mdp->tx_skbuff) { 855 dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); 856 ret = -ENOMEM; 857 goto skb_ring_free; 858 } 859 860 /* Allocate all Rx descriptors. */ 861 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 862 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 863 GFP_KERNEL); 864 865 if (!mdp->rx_ring) { 866 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n", 867 rx_ringsize); 868 ret = -ENOMEM; 869 goto desc_ring_free; 870 } 871 872 mdp->dirty_rx = 0; 873 874 /* Allocate all Tx descriptors. */ 875 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 876 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 877 GFP_KERNEL); 878 if (!mdp->tx_ring) { 879 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n", 880 tx_ringsize); 881 ret = -ENOMEM; 882 goto desc_ring_free; 883 } 884 return ret; 885 886 desc_ring_free: 887 /* free DMA buffer */ 888 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); 889 890 skb_ring_free: 891 /* Free Rx and Tx skb ring buffer */ 892 sh_eth_ring_free(ndev); 893 894 return ret; 895 } 896 897 static int sh_eth_dev_init(struct net_device *ndev) 898 { 899 int ret = 0; 900 struct sh_eth_private *mdp = netdev_priv(ndev); 901 u_int32_t rx_int_var, tx_int_var; 902 u32 val; 903 904 /* Soft Reset */ 905 sh_eth_reset(ndev); 906 907 /* Descriptor format */ 908 sh_eth_ring_format(ndev); 909 if (mdp->cd->rpadir) 910 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); 911 912 /* all sh_eth int mask */ 913 sh_eth_write(ndev, 0, EESIPR); 914 915 #if defined(__LITTLE_ENDIAN) 916 if (mdp->cd->hw_swap) 917 sh_eth_write(ndev, EDMR_EL, EDMR); 918 else 919 #endif 920 sh_eth_write(ndev, 0, EDMR); 921 922 /* FIFO size set */ 923 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 924 sh_eth_write(ndev, 0, TFTR); 925 926 /* Frame recv control */ 927 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 928 929 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 930 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 931 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER); 932 933 if (mdp->cd->bculr) 934 sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ 935 936 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); 937 938 if (!mdp->cd->no_trimd) 939 sh_eth_write(ndev, 0, TRIMD); 940 941 /* Recv frame limit set register */ 942 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, 943 RFLR); 944 945 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 946 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 947 948 /* PAUSE Prohibition */ 949 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 950 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 951 952 sh_eth_write(ndev, val, ECMR); 953 954 if (mdp->cd->set_rate) 955 mdp->cd->set_rate(ndev); 956 957 /* E-MAC Status Register clear */ 958 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 959 960 /* E-MAC Interrupt Enable register */ 961 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 962 963 /* Set MAC address */ 964 update_mac_address(ndev); 965 966 /* mask reset */ 967 if (mdp->cd->apr) 968 sh_eth_write(ndev, APR_AP, APR); 969 if (mdp->cd->mpr) 970 sh_eth_write(ndev, MPR_MP, MPR); 971 if (mdp->cd->tpauser) 972 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 973 974 /* Setting the Rx mode will start the Rx process. */ 975 sh_eth_write(ndev, EDRRR_R, EDRRR); 976 977 netif_start_queue(ndev); 978 979 return ret; 980 } 981 982 /* free Tx skb function */ 983 static int sh_eth_txfree(struct net_device *ndev) 984 { 985 struct sh_eth_private *mdp = netdev_priv(ndev); 986 struct sh_eth_txdesc *txdesc; 987 int freeNum = 0; 988 int entry = 0; 989 990 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 991 entry = mdp->dirty_tx % TX_RING_SIZE; 992 txdesc = &mdp->tx_ring[entry]; 993 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 994 break; 995 /* Free the original skb. */ 996 if (mdp->tx_skbuff[entry]) { 997 dma_unmap_single(&ndev->dev, txdesc->addr, 998 txdesc->buffer_length, DMA_TO_DEVICE); 999 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1000 mdp->tx_skbuff[entry] = NULL; 1001 freeNum++; 1002 } 1003 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1004 if (entry >= TX_RING_SIZE - 1) 1005 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1006 1007 ndev->stats.tx_packets++; 1008 ndev->stats.tx_bytes += txdesc->buffer_length; 1009 } 1010 return freeNum; 1011 } 1012 1013 /* Packet receive function */ 1014 static int sh_eth_rx(struct net_device *ndev) 1015 { 1016 struct sh_eth_private *mdp = netdev_priv(ndev); 1017 struct sh_eth_rxdesc *rxdesc; 1018 1019 int entry = mdp->cur_rx % RX_RING_SIZE; 1020 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 1021 struct sk_buff *skb; 1022 u16 pkt_len = 0; 1023 u32 desc_status; 1024 1025 rxdesc = &mdp->rx_ring[entry]; 1026 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1027 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1028 pkt_len = rxdesc->frame_length; 1029 1030 #if defined(CONFIG_ARCH_R8A7740) 1031 desc_status >>= 16; 1032 #endif 1033 1034 if (--boguscnt < 0) 1035 break; 1036 1037 if (!(desc_status & RDFEND)) 1038 ndev->stats.rx_length_errors++; 1039 1040 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1041 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1042 ndev->stats.rx_errors++; 1043 if (desc_status & RD_RFS1) 1044 ndev->stats.rx_crc_errors++; 1045 if (desc_status & RD_RFS2) 1046 ndev->stats.rx_frame_errors++; 1047 if (desc_status & RD_RFS3) 1048 ndev->stats.rx_length_errors++; 1049 if (desc_status & RD_RFS4) 1050 ndev->stats.rx_length_errors++; 1051 if (desc_status & RD_RFS6) 1052 ndev->stats.rx_missed_errors++; 1053 if (desc_status & RD_RFS10) 1054 ndev->stats.rx_over_errors++; 1055 } else { 1056 if (!mdp->cd->hw_swap) 1057 sh_eth_soft_swap( 1058 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1059 pkt_len + 2); 1060 skb = mdp->rx_skbuff[entry]; 1061 mdp->rx_skbuff[entry] = NULL; 1062 if (mdp->cd->rpadir) 1063 skb_reserve(skb, NET_IP_ALIGN); 1064 skb_put(skb, pkt_len); 1065 skb->protocol = eth_type_trans(skb, ndev); 1066 netif_rx(skb); 1067 ndev->stats.rx_packets++; 1068 ndev->stats.rx_bytes += pkt_len; 1069 } 1070 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 1071 entry = (++mdp->cur_rx) % RX_RING_SIZE; 1072 rxdesc = &mdp->rx_ring[entry]; 1073 } 1074 1075 /* Refill the Rx ring buffers. */ 1076 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1077 entry = mdp->dirty_rx % RX_RING_SIZE; 1078 rxdesc = &mdp->rx_ring[entry]; 1079 /* The size of the buffer is 16 byte boundary. */ 1080 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1081 1082 if (mdp->rx_skbuff[entry] == NULL) { 1083 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1084 mdp->rx_skbuff[entry] = skb; 1085 if (skb == NULL) 1086 break; /* Better luck next round. */ 1087 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 1088 DMA_FROM_DEVICE); 1089 sh_eth_set_receive_align(skb); 1090 1091 skb_checksum_none_assert(skb); 1092 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1093 } 1094 if (entry >= RX_RING_SIZE - 1) 1095 rxdesc->status |= 1096 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1097 else 1098 rxdesc->status |= 1099 cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1100 } 1101 1102 /* Restart Rx engine if stopped. */ 1103 /* If we don't need to check status, don't. -KDU */ 1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) 1105 sh_eth_write(ndev, EDRRR_R, EDRRR); 1106 1107 return 0; 1108 } 1109 1110 static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1111 { 1112 /* disable tx and rx */ 1113 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & 1114 ~(ECMR_RE | ECMR_TE), ECMR); 1115 } 1116 1117 static void sh_eth_rcv_snd_enable(struct net_device *ndev) 1118 { 1119 /* enable tx and rx */ 1120 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | 1121 (ECMR_RE | ECMR_TE), ECMR); 1122 } 1123 1124 /* error control function */ 1125 static void sh_eth_error(struct net_device *ndev, int intr_status) 1126 { 1127 struct sh_eth_private *mdp = netdev_priv(ndev); 1128 u32 felic_stat; 1129 u32 link_stat; 1130 u32 mask; 1131 1132 if (intr_status & EESR_ECI) { 1133 felic_stat = sh_eth_read(ndev, ECSR); 1134 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1135 if (felic_stat & ECSR_ICD) 1136 ndev->stats.tx_carrier_errors++; 1137 if (felic_stat & ECSR_LCHNG) { 1138 /* Link Changed */ 1139 if (mdp->cd->no_psr || mdp->no_ether_link) { 1140 if (mdp->link == PHY_DOWN) 1141 link_stat = 0; 1142 else 1143 link_stat = PHY_ST_LINK; 1144 } else { 1145 link_stat = (sh_eth_read(ndev, PSR)); 1146 if (mdp->ether_link_active_low) 1147 link_stat = ~link_stat; 1148 } 1149 if (!(link_stat & PHY_ST_LINK)) 1150 sh_eth_rcv_snd_disable(ndev); 1151 else { 1152 /* Link Up */ 1153 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & 1154 ~DMAC_M_ECI, EESIPR); 1155 /*clear int */ 1156 sh_eth_write(ndev, sh_eth_read(ndev, ECSR), 1157 ECSR); 1158 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | 1159 DMAC_M_ECI, EESIPR); 1160 /* enable tx and rx */ 1161 sh_eth_rcv_snd_enable(ndev); 1162 } 1163 } 1164 } 1165 1166 if (intr_status & EESR_TWB) { 1167 /* Write buck end. unused write back interrupt */ 1168 if (intr_status & EESR_TABT) /* Transmit Abort int */ 1169 ndev->stats.tx_aborted_errors++; 1170 if (netif_msg_tx_err(mdp)) 1171 dev_err(&ndev->dev, "Transmit Abort\n"); 1172 } 1173 1174 if (intr_status & EESR_RABT) { 1175 /* Receive Abort int */ 1176 if (intr_status & EESR_RFRMER) { 1177 /* Receive Frame Overflow int */ 1178 ndev->stats.rx_frame_errors++; 1179 if (netif_msg_rx_err(mdp)) 1180 dev_err(&ndev->dev, "Receive Abort\n"); 1181 } 1182 } 1183 1184 if (intr_status & EESR_TDE) { 1185 /* Transmit Descriptor Empty int */ 1186 ndev->stats.tx_fifo_errors++; 1187 if (netif_msg_tx_err(mdp)) 1188 dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); 1189 } 1190 1191 if (intr_status & EESR_TFE) { 1192 /* FIFO under flow */ 1193 ndev->stats.tx_fifo_errors++; 1194 if (netif_msg_tx_err(mdp)) 1195 dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); 1196 } 1197 1198 if (intr_status & EESR_RDE) { 1199 /* Receive Descriptor Empty int */ 1200 ndev->stats.rx_over_errors++; 1201 1202 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R) 1203 sh_eth_write(ndev, EDRRR_R, EDRRR); 1204 if (netif_msg_rx_err(mdp)) 1205 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1206 } 1207 1208 if (intr_status & EESR_RFE) { 1209 /* Receive FIFO Overflow int */ 1210 ndev->stats.rx_fifo_errors++; 1211 if (netif_msg_rx_err(mdp)) 1212 dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 1213 } 1214 1215 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1216 /* Address Error */ 1217 ndev->stats.tx_fifo_errors++; 1218 if (netif_msg_tx_err(mdp)) 1219 dev_err(&ndev->dev, "Address Error\n"); 1220 } 1221 1222 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1223 if (mdp->cd->no_ade) 1224 mask &= ~EESR_ADE; 1225 if (intr_status & mask) { 1226 /* Tx error */ 1227 u32 edtrr = sh_eth_read(ndev, EDTRR); 1228 /* dmesg */ 1229 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 1230 intr_status, mdp->cur_tx); 1231 dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1232 mdp->dirty_tx, (u32) ndev->state, edtrr); 1233 /* dirty buffer free */ 1234 sh_eth_txfree(ndev); 1235 1236 /* SH7712 BUG */ 1237 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { 1238 /* tx dma start */ 1239 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1240 } 1241 /* wakeup */ 1242 netif_wake_queue(ndev); 1243 } 1244 } 1245 1246 static irqreturn_t sh_eth_interrupt(int irq, void *netdev) 1247 { 1248 struct net_device *ndev = netdev; 1249 struct sh_eth_private *mdp = netdev_priv(ndev); 1250 struct sh_eth_cpu_data *cd = mdp->cd; 1251 irqreturn_t ret = IRQ_NONE; 1252 u32 intr_status = 0; 1253 1254 spin_lock(&mdp->lock); 1255 1256 /* Get interrpt stat */ 1257 intr_status = sh_eth_read(ndev, EESR); 1258 /* Clear interrupt */ 1259 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 1260 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 1261 cd->tx_check | cd->eesr_err_check)) { 1262 sh_eth_write(ndev, intr_status, EESR); 1263 ret = IRQ_HANDLED; 1264 } else 1265 goto other_irq; 1266 1267 if (intr_status & (EESR_FRC | /* Frame recv*/ 1268 EESR_RMAF | /* Multi cast address recv*/ 1269 EESR_RRF | /* Bit frame recv */ 1270 EESR_RTLF | /* Long frame recv*/ 1271 EESR_RTSF | /* short frame recv */ 1272 EESR_PRE | /* PHY-LSI recv error */ 1273 EESR_CERF)){ /* recv frame CRC error */ 1274 sh_eth_rx(ndev); 1275 } 1276 1277 /* Tx Check */ 1278 if (intr_status & cd->tx_check) { 1279 sh_eth_txfree(ndev); 1280 netif_wake_queue(ndev); 1281 } 1282 1283 if (intr_status & cd->eesr_err_check) 1284 sh_eth_error(ndev, intr_status); 1285 1286 other_irq: 1287 spin_unlock(&mdp->lock); 1288 1289 return ret; 1290 } 1291 1292 static void sh_eth_timer(unsigned long data) 1293 { 1294 struct net_device *ndev = (struct net_device *)data; 1295 struct sh_eth_private *mdp = netdev_priv(ndev); 1296 1297 mod_timer(&mdp->timer, jiffies + (10 * HZ)); 1298 } 1299 1300 /* PHY state control function */ 1301 static void sh_eth_adjust_link(struct net_device *ndev) 1302 { 1303 struct sh_eth_private *mdp = netdev_priv(ndev); 1304 struct phy_device *phydev = mdp->phydev; 1305 int new_state = 0; 1306 1307 if (phydev->link != PHY_DOWN) { 1308 if (phydev->duplex != mdp->duplex) { 1309 new_state = 1; 1310 mdp->duplex = phydev->duplex; 1311 if (mdp->cd->set_duplex) 1312 mdp->cd->set_duplex(ndev); 1313 } 1314 1315 if (phydev->speed != mdp->speed) { 1316 new_state = 1; 1317 mdp->speed = phydev->speed; 1318 if (mdp->cd->set_rate) 1319 mdp->cd->set_rate(ndev); 1320 } 1321 if (mdp->link == PHY_DOWN) { 1322 sh_eth_write(ndev, 1323 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); 1324 new_state = 1; 1325 mdp->link = phydev->link; 1326 } 1327 } else if (mdp->link) { 1328 new_state = 1; 1329 mdp->link = PHY_DOWN; 1330 mdp->speed = 0; 1331 mdp->duplex = -1; 1332 } 1333 1334 if (new_state && netif_msg_link(mdp)) 1335 phy_print_status(phydev); 1336 } 1337 1338 /* PHY init function */ 1339 static int sh_eth_phy_init(struct net_device *ndev) 1340 { 1341 struct sh_eth_private *mdp = netdev_priv(ndev); 1342 char phy_id[MII_BUS_ID_SIZE + 3]; 1343 struct phy_device *phydev = NULL; 1344 1345 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 1346 mdp->mii_bus->id , mdp->phy_id); 1347 1348 mdp->link = PHY_DOWN; 1349 mdp->speed = 0; 1350 mdp->duplex = -1; 1351 1352 /* Try connect to PHY */ 1353 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1354 0, mdp->phy_interface); 1355 if (IS_ERR(phydev)) { 1356 dev_err(&ndev->dev, "phy_connect failed\n"); 1357 return PTR_ERR(phydev); 1358 } 1359 1360 dev_info(&ndev->dev, "attached phy %i to driver %s\n", 1361 phydev->addr, phydev->drv->name); 1362 1363 mdp->phydev = phydev; 1364 1365 return 0; 1366 } 1367 1368 /* PHY control start function */ 1369 static int sh_eth_phy_start(struct net_device *ndev) 1370 { 1371 struct sh_eth_private *mdp = netdev_priv(ndev); 1372 int ret; 1373 1374 ret = sh_eth_phy_init(ndev); 1375 if (ret) 1376 return ret; 1377 1378 /* reset phy - this also wakes it from PDOWN */ 1379 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET); 1380 phy_start(mdp->phydev); 1381 1382 return 0; 1383 } 1384 1385 static int sh_eth_get_settings(struct net_device *ndev, 1386 struct ethtool_cmd *ecmd) 1387 { 1388 struct sh_eth_private *mdp = netdev_priv(ndev); 1389 unsigned long flags; 1390 int ret; 1391 1392 spin_lock_irqsave(&mdp->lock, flags); 1393 ret = phy_ethtool_gset(mdp->phydev, ecmd); 1394 spin_unlock_irqrestore(&mdp->lock, flags); 1395 1396 return ret; 1397 } 1398 1399 static int sh_eth_set_settings(struct net_device *ndev, 1400 struct ethtool_cmd *ecmd) 1401 { 1402 struct sh_eth_private *mdp = netdev_priv(ndev); 1403 unsigned long flags; 1404 int ret; 1405 1406 spin_lock_irqsave(&mdp->lock, flags); 1407 1408 /* disable tx and rx */ 1409 sh_eth_rcv_snd_disable(ndev); 1410 1411 ret = phy_ethtool_sset(mdp->phydev, ecmd); 1412 if (ret) 1413 goto error_exit; 1414 1415 if (ecmd->duplex == DUPLEX_FULL) 1416 mdp->duplex = 1; 1417 else 1418 mdp->duplex = 0; 1419 1420 if (mdp->cd->set_duplex) 1421 mdp->cd->set_duplex(ndev); 1422 1423 error_exit: 1424 mdelay(1); 1425 1426 /* enable tx and rx */ 1427 sh_eth_rcv_snd_enable(ndev); 1428 1429 spin_unlock_irqrestore(&mdp->lock, flags); 1430 1431 return ret; 1432 } 1433 1434 static int sh_eth_nway_reset(struct net_device *ndev) 1435 { 1436 struct sh_eth_private *mdp = netdev_priv(ndev); 1437 unsigned long flags; 1438 int ret; 1439 1440 spin_lock_irqsave(&mdp->lock, flags); 1441 ret = phy_start_aneg(mdp->phydev); 1442 spin_unlock_irqrestore(&mdp->lock, flags); 1443 1444 return ret; 1445 } 1446 1447 static u32 sh_eth_get_msglevel(struct net_device *ndev) 1448 { 1449 struct sh_eth_private *mdp = netdev_priv(ndev); 1450 return mdp->msg_enable; 1451 } 1452 1453 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) 1454 { 1455 struct sh_eth_private *mdp = netdev_priv(ndev); 1456 mdp->msg_enable = value; 1457 } 1458 1459 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1460 "rx_current", "tx_current", 1461 "rx_dirty", "tx_dirty", 1462 }; 1463 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1464 1465 static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1466 { 1467 switch (sset) { 1468 case ETH_SS_STATS: 1469 return SH_ETH_STATS_LEN; 1470 default: 1471 return -EOPNOTSUPP; 1472 } 1473 } 1474 1475 static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1476 struct ethtool_stats *stats, u64 *data) 1477 { 1478 struct sh_eth_private *mdp = netdev_priv(ndev); 1479 int i = 0; 1480 1481 /* device-specific stats */ 1482 data[i++] = mdp->cur_rx; 1483 data[i++] = mdp->cur_tx; 1484 data[i++] = mdp->dirty_rx; 1485 data[i++] = mdp->dirty_tx; 1486 } 1487 1488 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1489 { 1490 switch (stringset) { 1491 case ETH_SS_STATS: 1492 memcpy(data, *sh_eth_gstrings_stats, 1493 sizeof(sh_eth_gstrings_stats)); 1494 break; 1495 } 1496 } 1497 1498 static const struct ethtool_ops sh_eth_ethtool_ops = { 1499 .get_settings = sh_eth_get_settings, 1500 .set_settings = sh_eth_set_settings, 1501 .nway_reset = sh_eth_nway_reset, 1502 .get_msglevel = sh_eth_get_msglevel, 1503 .set_msglevel = sh_eth_set_msglevel, 1504 .get_link = ethtool_op_get_link, 1505 .get_strings = sh_eth_get_strings, 1506 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1507 .get_sset_count = sh_eth_get_sset_count, 1508 }; 1509 1510 /* network device open function */ 1511 static int sh_eth_open(struct net_device *ndev) 1512 { 1513 int ret = 0; 1514 struct sh_eth_private *mdp = netdev_priv(ndev); 1515 1516 pm_runtime_get_sync(&mdp->pdev->dev); 1517 1518 ret = request_irq(ndev->irq, sh_eth_interrupt, 1519 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1520 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1521 defined(CONFIG_CPU_SUBTYPE_SH7757) 1522 IRQF_SHARED, 1523 #else 1524 0, 1525 #endif 1526 ndev->name, ndev); 1527 if (ret) { 1528 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 1529 return ret; 1530 } 1531 1532 /* Descriptor set */ 1533 ret = sh_eth_ring_init(ndev); 1534 if (ret) 1535 goto out_free_irq; 1536 1537 /* device init */ 1538 ret = sh_eth_dev_init(ndev); 1539 if (ret) 1540 goto out_free_irq; 1541 1542 /* PHY control start*/ 1543 ret = sh_eth_phy_start(ndev); 1544 if (ret) 1545 goto out_free_irq; 1546 1547 /* Set the timer to check for link beat. */ 1548 init_timer(&mdp->timer); 1549 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 1550 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev); 1551 1552 return ret; 1553 1554 out_free_irq: 1555 free_irq(ndev->irq, ndev); 1556 pm_runtime_put_sync(&mdp->pdev->dev); 1557 return ret; 1558 } 1559 1560 /* Timeout function */ 1561 static void sh_eth_tx_timeout(struct net_device *ndev) 1562 { 1563 struct sh_eth_private *mdp = netdev_priv(ndev); 1564 struct sh_eth_rxdesc *rxdesc; 1565 int i; 1566 1567 netif_stop_queue(ndev); 1568 1569 if (netif_msg_timer(mdp)) 1570 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," 1571 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); 1572 1573 /* tx_errors count up */ 1574 ndev->stats.tx_errors++; 1575 1576 /* timer off */ 1577 del_timer_sync(&mdp->timer); 1578 1579 /* Free all the skbuffs in the Rx queue. */ 1580 for (i = 0; i < RX_RING_SIZE; i++) { 1581 rxdesc = &mdp->rx_ring[i]; 1582 rxdesc->status = 0; 1583 rxdesc->addr = 0xBADF00D0; 1584 if (mdp->rx_skbuff[i]) 1585 dev_kfree_skb(mdp->rx_skbuff[i]); 1586 mdp->rx_skbuff[i] = NULL; 1587 } 1588 for (i = 0; i < TX_RING_SIZE; i++) { 1589 if (mdp->tx_skbuff[i]) 1590 dev_kfree_skb(mdp->tx_skbuff[i]); 1591 mdp->tx_skbuff[i] = NULL; 1592 } 1593 1594 /* device init */ 1595 sh_eth_dev_init(ndev); 1596 1597 /* timer on */ 1598 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 1599 add_timer(&mdp->timer); 1600 } 1601 1602 /* Packet transmit function */ 1603 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1604 { 1605 struct sh_eth_private *mdp = netdev_priv(ndev); 1606 struct sh_eth_txdesc *txdesc; 1607 u32 entry; 1608 unsigned long flags; 1609 1610 spin_lock_irqsave(&mdp->lock, flags); 1611 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1612 if (!sh_eth_txfree(ndev)) { 1613 if (netif_msg_tx_queued(mdp)) 1614 dev_warn(&ndev->dev, "TxFD exhausted.\n"); 1615 netif_stop_queue(ndev); 1616 spin_unlock_irqrestore(&mdp->lock, flags); 1617 return NETDEV_TX_BUSY; 1618 } 1619 } 1620 spin_unlock_irqrestore(&mdp->lock, flags); 1621 1622 entry = mdp->cur_tx % TX_RING_SIZE; 1623 mdp->tx_skbuff[entry] = skb; 1624 txdesc = &mdp->tx_ring[entry]; 1625 /* soft swap. */ 1626 if (!mdp->cd->hw_swap) 1627 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 1628 skb->len + 2); 1629 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 1630 DMA_TO_DEVICE); 1631 if (skb->len < ETHERSMALL) 1632 txdesc->buffer_length = ETHERSMALL; 1633 else 1634 txdesc->buffer_length = skb->len; 1635 1636 if (entry >= TX_RING_SIZE - 1) 1637 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 1638 else 1639 txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 1640 1641 mdp->cur_tx++; 1642 1643 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) 1644 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); 1645 1646 return NETDEV_TX_OK; 1647 } 1648 1649 /* device close function */ 1650 static int sh_eth_close(struct net_device *ndev) 1651 { 1652 struct sh_eth_private *mdp = netdev_priv(ndev); 1653 int ringsize; 1654 1655 netif_stop_queue(ndev); 1656 1657 /* Disable interrupts by clearing the interrupt mask. */ 1658 sh_eth_write(ndev, 0x0000, EESIPR); 1659 1660 /* Stop the chip's Tx and Rx processes. */ 1661 sh_eth_write(ndev, 0, EDTRR); 1662 sh_eth_write(ndev, 0, EDRRR); 1663 1664 /* PHY Disconnect */ 1665 if (mdp->phydev) { 1666 phy_stop(mdp->phydev); 1667 phy_disconnect(mdp->phydev); 1668 } 1669 1670 free_irq(ndev->irq, ndev); 1671 1672 del_timer_sync(&mdp->timer); 1673 1674 /* Free all the skbuffs in the Rx queue. */ 1675 sh_eth_ring_free(ndev); 1676 1677 /* free DMA buffer */ 1678 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 1679 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma); 1680 1681 /* free DMA buffer */ 1682 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 1683 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma); 1684 1685 pm_runtime_put_sync(&mdp->pdev->dev); 1686 1687 return 0; 1688 } 1689 1690 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) 1691 { 1692 struct sh_eth_private *mdp = netdev_priv(ndev); 1693 1694 pm_runtime_get_sync(&mdp->pdev->dev); 1695 1696 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); 1697 sh_eth_write(ndev, 0, TROCR); /* (write clear) */ 1698 ndev->stats.collisions += sh_eth_read(ndev, CDCR); 1699 sh_eth_write(ndev, 0, CDCR); /* (write clear) */ 1700 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); 1701 sh_eth_write(ndev, 0, LCCR); /* (write clear) */ 1702 if (sh_eth_is_gether(mdp)) { 1703 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); 1704 sh_eth_write(ndev, 0, CERCR); /* (write clear) */ 1705 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); 1706 sh_eth_write(ndev, 0, CEECR); /* (write clear) */ 1707 } else { 1708 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); 1709 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ 1710 } 1711 pm_runtime_put_sync(&mdp->pdev->dev); 1712 1713 return &ndev->stats; 1714 } 1715 1716 /* ioctl to device function */ 1717 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, 1718 int cmd) 1719 { 1720 struct sh_eth_private *mdp = netdev_priv(ndev); 1721 struct phy_device *phydev = mdp->phydev; 1722 1723 if (!netif_running(ndev)) 1724 return -EINVAL; 1725 1726 if (!phydev) 1727 return -ENODEV; 1728 1729 return phy_mii_ioctl(phydev, rq, cmd); 1730 } 1731 1732 #if defined(SH_ETH_HAS_TSU) 1733 /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ 1734 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, 1735 int entry) 1736 { 1737 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); 1738 } 1739 1740 static u32 sh_eth_tsu_get_post_mask(int entry) 1741 { 1742 return 0x0f << (28 - ((entry % 8) * 4)); 1743 } 1744 1745 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) 1746 { 1747 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); 1748 } 1749 1750 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, 1751 int entry) 1752 { 1753 struct sh_eth_private *mdp = netdev_priv(ndev); 1754 u32 tmp; 1755 void *reg_offset; 1756 1757 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 1758 tmp = ioread32(reg_offset); 1759 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); 1760 } 1761 1762 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, 1763 int entry) 1764 { 1765 struct sh_eth_private *mdp = netdev_priv(ndev); 1766 u32 post_mask, ref_mask, tmp; 1767 void *reg_offset; 1768 1769 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); 1770 post_mask = sh_eth_tsu_get_post_mask(entry); 1771 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; 1772 1773 tmp = ioread32(reg_offset); 1774 iowrite32(tmp & ~post_mask, reg_offset); 1775 1776 /* If other port enables, the function returns "true" */ 1777 return tmp & ref_mask; 1778 } 1779 1780 static int sh_eth_tsu_busy(struct net_device *ndev) 1781 { 1782 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100; 1783 struct sh_eth_private *mdp = netdev_priv(ndev); 1784 1785 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { 1786 udelay(10); 1787 timeout--; 1788 if (timeout <= 0) { 1789 dev_err(&ndev->dev, "%s: timeout\n", __func__); 1790 return -ETIMEDOUT; 1791 } 1792 } 1793 1794 return 0; 1795 } 1796 1797 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg, 1798 const u8 *addr) 1799 { 1800 u32 val; 1801 1802 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3]; 1803 iowrite32(val, reg); 1804 if (sh_eth_tsu_busy(ndev) < 0) 1805 return -EBUSY; 1806 1807 val = addr[4] << 8 | addr[5]; 1808 iowrite32(val, reg + 4); 1809 if (sh_eth_tsu_busy(ndev) < 0) 1810 return -EBUSY; 1811 1812 return 0; 1813 } 1814 1815 static void sh_eth_tsu_read_entry(void *reg, u8 *addr) 1816 { 1817 u32 val; 1818 1819 val = ioread32(reg); 1820 addr[0] = (val >> 24) & 0xff; 1821 addr[1] = (val >> 16) & 0xff; 1822 addr[2] = (val >> 8) & 0xff; 1823 addr[3] = val & 0xff; 1824 val = ioread32(reg + 4); 1825 addr[4] = (val >> 8) & 0xff; 1826 addr[5] = val & 0xff; 1827 } 1828 1829 1830 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr) 1831 { 1832 struct sh_eth_private *mdp = netdev_priv(ndev); 1833 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1834 int i; 1835 u8 c_addr[ETH_ALEN]; 1836 1837 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 1838 sh_eth_tsu_read_entry(reg_offset, c_addr); 1839 if (memcmp(addr, c_addr, ETH_ALEN) == 0) 1840 return i; 1841 } 1842 1843 return -ENOENT; 1844 } 1845 1846 static int sh_eth_tsu_find_empty(struct net_device *ndev) 1847 { 1848 u8 blank[ETH_ALEN]; 1849 int entry; 1850 1851 memset(blank, 0, sizeof(blank)); 1852 entry = sh_eth_tsu_find_entry(ndev, blank); 1853 return (entry < 0) ? -ENOMEM : entry; 1854 } 1855 1856 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, 1857 int entry) 1858 { 1859 struct sh_eth_private *mdp = netdev_priv(ndev); 1860 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1861 int ret; 1862 u8 blank[ETH_ALEN]; 1863 1864 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & 1865 ~(1 << (31 - entry)), TSU_TEN); 1866 1867 memset(blank, 0, sizeof(blank)); 1868 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); 1869 if (ret < 0) 1870 return ret; 1871 return 0; 1872 } 1873 1874 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) 1875 { 1876 struct sh_eth_private *mdp = netdev_priv(ndev); 1877 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1878 int i, ret; 1879 1880 if (!mdp->cd->tsu) 1881 return 0; 1882 1883 i = sh_eth_tsu_find_entry(ndev, addr); 1884 if (i < 0) { 1885 /* No entry found, create one */ 1886 i = sh_eth_tsu_find_empty(ndev); 1887 if (i < 0) 1888 return -ENOMEM; 1889 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); 1890 if (ret < 0) 1891 return ret; 1892 1893 /* Enable the entry */ 1894 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | 1895 (1 << (31 - i)), TSU_TEN); 1896 } 1897 1898 /* Entry found or created, enable POST */ 1899 sh_eth_tsu_enable_cam_entry_post(ndev, i); 1900 1901 return 0; 1902 } 1903 1904 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) 1905 { 1906 struct sh_eth_private *mdp = netdev_priv(ndev); 1907 int i, ret; 1908 1909 if (!mdp->cd->tsu) 1910 return 0; 1911 1912 i = sh_eth_tsu_find_entry(ndev, addr); 1913 if (i) { 1914 /* Entry found */ 1915 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 1916 goto done; 1917 1918 /* Disable the entry if both ports was disabled */ 1919 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 1920 if (ret < 0) 1921 return ret; 1922 } 1923 done: 1924 return 0; 1925 } 1926 1927 static int sh_eth_tsu_purge_all(struct net_device *ndev) 1928 { 1929 struct sh_eth_private *mdp = netdev_priv(ndev); 1930 int i, ret; 1931 1932 if (unlikely(!mdp->cd->tsu)) 1933 return 0; 1934 1935 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { 1936 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 1937 continue; 1938 1939 /* Disable the entry if both ports was disabled */ 1940 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 1941 if (ret < 0) 1942 return ret; 1943 } 1944 1945 return 0; 1946 } 1947 1948 static void sh_eth_tsu_purge_mcast(struct net_device *ndev) 1949 { 1950 struct sh_eth_private *mdp = netdev_priv(ndev); 1951 u8 addr[ETH_ALEN]; 1952 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1953 int i; 1954 1955 if (unlikely(!mdp->cd->tsu)) 1956 return; 1957 1958 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { 1959 sh_eth_tsu_read_entry(reg_offset, addr); 1960 if (is_multicast_ether_addr(addr)) 1961 sh_eth_tsu_del_entry(ndev, addr); 1962 } 1963 } 1964 1965 /* Multicast reception directions set */ 1966 static void sh_eth_set_multicast_list(struct net_device *ndev) 1967 { 1968 struct sh_eth_private *mdp = netdev_priv(ndev); 1969 u32 ecmr_bits; 1970 int mcast_all = 0; 1971 unsigned long flags; 1972 1973 spin_lock_irqsave(&mdp->lock, flags); 1974 /* 1975 * Initial condition is MCT = 1, PRM = 0. 1976 * Depending on ndev->flags, set PRM or clear MCT 1977 */ 1978 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; 1979 1980 if (!(ndev->flags & IFF_MULTICAST)) { 1981 sh_eth_tsu_purge_mcast(ndev); 1982 mcast_all = 1; 1983 } 1984 if (ndev->flags & IFF_ALLMULTI) { 1985 sh_eth_tsu_purge_mcast(ndev); 1986 ecmr_bits &= ~ECMR_MCT; 1987 mcast_all = 1; 1988 } 1989 1990 if (ndev->flags & IFF_PROMISC) { 1991 sh_eth_tsu_purge_all(ndev); 1992 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM; 1993 } else if (mdp->cd->tsu) { 1994 struct netdev_hw_addr *ha; 1995 netdev_for_each_mc_addr(ha, ndev) { 1996 if (mcast_all && is_multicast_ether_addr(ha->addr)) 1997 continue; 1998 1999 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) { 2000 if (!mcast_all) { 2001 sh_eth_tsu_purge_mcast(ndev); 2002 ecmr_bits &= ~ECMR_MCT; 2003 mcast_all = 1; 2004 } 2005 } 2006 } 2007 } else { 2008 /* Normal, unicast/broadcast-only mode. */ 2009 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT; 2010 } 2011 2012 /* update the ethernet mode */ 2013 sh_eth_write(ndev, ecmr_bits, ECMR); 2014 2015 spin_unlock_irqrestore(&mdp->lock, flags); 2016 } 2017 2018 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) 2019 { 2020 if (!mdp->port) 2021 return TSU_VTAG0; 2022 else 2023 return TSU_VTAG1; 2024 } 2025 2026 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 2027 { 2028 struct sh_eth_private *mdp = netdev_priv(ndev); 2029 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 2030 2031 if (unlikely(!mdp->cd->tsu)) 2032 return -EPERM; 2033 2034 /* No filtering if vid = 0 */ 2035 if (!vid) 2036 return 0; 2037 2038 mdp->vlan_num_ids++; 2039 2040 /* 2041 * The controller has one VLAN tag HW filter. So, if the filter is 2042 * already enabled, the driver disables it and the filte 2043 */ 2044 if (mdp->vlan_num_ids > 1) { 2045 /* disable VLAN filter */ 2046 sh_eth_tsu_write(mdp, 0, vtag_reg_index); 2047 return 0; 2048 } 2049 2050 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), 2051 vtag_reg_index); 2052 2053 return 0; 2054 } 2055 2056 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 2057 { 2058 struct sh_eth_private *mdp = netdev_priv(ndev); 2059 int vtag_reg_index = sh_eth_get_vtag_index(mdp); 2060 2061 if (unlikely(!mdp->cd->tsu)) 2062 return -EPERM; 2063 2064 /* No filtering if vid = 0 */ 2065 if (!vid) 2066 return 0; 2067 2068 mdp->vlan_num_ids--; 2069 sh_eth_tsu_write(mdp, 0, vtag_reg_index); 2070 2071 return 0; 2072 } 2073 #endif /* SH_ETH_HAS_TSU */ 2074 2075 /* SuperH's TSU register init function */ 2076 static void sh_eth_tsu_init(struct sh_eth_private *mdp) 2077 { 2078 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ 2079 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ 2080 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ 2081 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); 2082 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); 2083 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); 2084 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); 2085 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); 2086 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); 2087 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); 2088 if (sh_eth_is_gether(mdp)) { 2089 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ 2090 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ 2091 } else { 2092 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ 2093 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ 2094 } 2095 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ 2096 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ 2097 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ 2098 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ 2099 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ 2100 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ 2101 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ 2102 } 2103 2104 /* MDIO bus release function */ 2105 static int sh_mdio_release(struct net_device *ndev) 2106 { 2107 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2108 2109 /* unregister mdio bus */ 2110 mdiobus_unregister(bus); 2111 2112 /* remove mdio bus info from net_device */ 2113 dev_set_drvdata(&ndev->dev, NULL); 2114 2115 /* free interrupts memory */ 2116 kfree(bus->irq); 2117 2118 /* free bitbang info */ 2119 free_mdio_bitbang(bus); 2120 2121 return 0; 2122 } 2123 2124 /* MDIO bus init function */ 2125 static int sh_mdio_init(struct net_device *ndev, int id, 2126 struct sh_eth_plat_data *pd) 2127 { 2128 int ret, i; 2129 struct bb_info *bitbang; 2130 struct sh_eth_private *mdp = netdev_priv(ndev); 2131 2132 /* create bit control struct for PHY */ 2133 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); 2134 if (!bitbang) { 2135 ret = -ENOMEM; 2136 goto out; 2137 } 2138 2139 /* bitbang init */ 2140 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2141 bitbang->set_gate = pd->set_mdio_gate; 2142 bitbang->mdi_msk = 0x08; 2143 bitbang->mdo_msk = 0x04; 2144 bitbang->mmd_msk = 0x02;/* MMD */ 2145 bitbang->mdc_msk = 0x01; 2146 bitbang->ctrl.ops = &bb_ops; 2147 2148 /* MII controller setting */ 2149 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2150 if (!mdp->mii_bus) { 2151 ret = -ENOMEM; 2152 goto out_free_bitbang; 2153 } 2154 2155 /* Hook up MII support for ethtool */ 2156 mdp->mii_bus->name = "sh_mii"; 2157 mdp->mii_bus->parent = &ndev->dev; 2158 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2159 mdp->pdev->name, id); 2160 2161 /* PHY IRQ */ 2162 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 2163 if (!mdp->mii_bus->irq) { 2164 ret = -ENOMEM; 2165 goto out_free_bus; 2166 } 2167 2168 for (i = 0; i < PHY_MAX_ADDR; i++) 2169 mdp->mii_bus->irq[i] = PHY_POLL; 2170 2171 /* regist mdio bus */ 2172 ret = mdiobus_register(mdp->mii_bus); 2173 if (ret) 2174 goto out_free_irq; 2175 2176 dev_set_drvdata(&ndev->dev, mdp->mii_bus); 2177 2178 return 0; 2179 2180 out_free_irq: 2181 kfree(mdp->mii_bus->irq); 2182 2183 out_free_bus: 2184 free_mdio_bitbang(mdp->mii_bus); 2185 2186 out_free_bitbang: 2187 kfree(bitbang); 2188 2189 out: 2190 return ret; 2191 } 2192 2193 static const u16 *sh_eth_get_register_offset(int register_type) 2194 { 2195 const u16 *reg_offset = NULL; 2196 2197 switch (register_type) { 2198 case SH_ETH_REG_GIGABIT: 2199 reg_offset = sh_eth_offset_gigabit; 2200 break; 2201 case SH_ETH_REG_FAST_SH4: 2202 reg_offset = sh_eth_offset_fast_sh4; 2203 break; 2204 case SH_ETH_REG_FAST_SH3_SH2: 2205 reg_offset = sh_eth_offset_fast_sh3_sh2; 2206 break; 2207 default: 2208 printk(KERN_ERR "Unknown register type (%d)\n", register_type); 2209 break; 2210 } 2211 2212 return reg_offset; 2213 } 2214 2215 static const struct net_device_ops sh_eth_netdev_ops = { 2216 .ndo_open = sh_eth_open, 2217 .ndo_stop = sh_eth_close, 2218 .ndo_start_xmit = sh_eth_start_xmit, 2219 .ndo_get_stats = sh_eth_get_stats, 2220 #if defined(SH_ETH_HAS_TSU) 2221 .ndo_set_rx_mode = sh_eth_set_multicast_list, 2222 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, 2223 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, 2224 #endif 2225 .ndo_tx_timeout = sh_eth_tx_timeout, 2226 .ndo_do_ioctl = sh_eth_do_ioctl, 2227 .ndo_validate_addr = eth_validate_addr, 2228 .ndo_set_mac_address = eth_mac_addr, 2229 .ndo_change_mtu = eth_change_mtu, 2230 }; 2231 2232 static int sh_eth_drv_probe(struct platform_device *pdev) 2233 { 2234 int ret, devno = 0; 2235 struct resource *res; 2236 struct net_device *ndev = NULL; 2237 struct sh_eth_private *mdp = NULL; 2238 struct sh_eth_plat_data *pd; 2239 2240 /* get base addr */ 2241 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2242 if (unlikely(res == NULL)) { 2243 dev_err(&pdev->dev, "invalid resource\n"); 2244 ret = -EINVAL; 2245 goto out; 2246 } 2247 2248 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 2249 if (!ndev) { 2250 ret = -ENOMEM; 2251 goto out; 2252 } 2253 2254 /* The sh Ether-specific entries in the device structure. */ 2255 ndev->base_addr = res->start; 2256 devno = pdev->id; 2257 if (devno < 0) 2258 devno = 0; 2259 2260 ndev->dma = -1; 2261 ret = platform_get_irq(pdev, 0); 2262 if (ret < 0) { 2263 ret = -ENODEV; 2264 goto out_release; 2265 } 2266 ndev->irq = ret; 2267 2268 SET_NETDEV_DEV(ndev, &pdev->dev); 2269 2270 /* Fill in the fields of the device structure with ethernet values. */ 2271 ether_setup(ndev); 2272 2273 mdp = netdev_priv(ndev); 2274 mdp->addr = ioremap(res->start, resource_size(res)); 2275 if (mdp->addr == NULL) { 2276 ret = -ENOMEM; 2277 dev_err(&pdev->dev, "ioremap failed.\n"); 2278 goto out_release; 2279 } 2280 2281 spin_lock_init(&mdp->lock); 2282 mdp->pdev = pdev; 2283 pm_runtime_enable(&pdev->dev); 2284 pm_runtime_resume(&pdev->dev); 2285 2286 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 2287 /* get PHY ID */ 2288 mdp->phy_id = pd->phy; 2289 mdp->phy_interface = pd->phy_interface; 2290 /* EDMAC endian */ 2291 mdp->edmac_endian = pd->edmac_endian; 2292 mdp->no_ether_link = pd->no_ether_link; 2293 mdp->ether_link_active_low = pd->ether_link_active_low; 2294 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); 2295 2296 /* set cpu data */ 2297 #if defined(SH_ETH_HAS_BOTH_MODULES) 2298 mdp->cd = sh_eth_get_cpu_data(mdp); 2299 #else 2300 mdp->cd = &sh_eth_my_cpu_data; 2301 #endif 2302 sh_eth_set_default_cpu_data(mdp->cd); 2303 2304 /* set function */ 2305 ndev->netdev_ops = &sh_eth_netdev_ops; 2306 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2307 ndev->watchdog_timeo = TX_TIMEOUT; 2308 2309 /* debug message level */ 2310 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 2311 mdp->post_rx = POST_RX >> (devno << 1); 2312 mdp->post_fw = POST_FW >> (devno << 1); 2313 2314 /* read and set MAC address */ 2315 read_mac_address(ndev, pd->mac_addr); 2316 2317 /* ioremap the TSU registers */ 2318 if (mdp->cd->tsu) { 2319 struct resource *rtsu; 2320 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2321 if (!rtsu) { 2322 dev_err(&pdev->dev, "Not found TSU resource\n"); 2323 goto out_release; 2324 } 2325 mdp->tsu_addr = ioremap(rtsu->start, 2326 resource_size(rtsu)); 2327 mdp->port = devno % 2; 2328 ndev->features = NETIF_F_HW_VLAN_FILTER; 2329 } 2330 2331 /* initialize first or needed device */ 2332 if (!devno || pd->needs_init) { 2333 if (mdp->cd->chip_reset) 2334 mdp->cd->chip_reset(ndev); 2335 2336 if (mdp->cd->tsu) { 2337 /* TSU init (Init only)*/ 2338 sh_eth_tsu_init(mdp); 2339 } 2340 } 2341 2342 /* network device register */ 2343 ret = register_netdev(ndev); 2344 if (ret) 2345 goto out_release; 2346 2347 /* mdio bus init */ 2348 ret = sh_mdio_init(ndev, pdev->id, pd); 2349 if (ret) 2350 goto out_unregister; 2351 2352 /* print device information */ 2353 pr_info("Base address at 0x%x, %pM, IRQ %d.\n", 2354 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2355 2356 platform_set_drvdata(pdev, ndev); 2357 2358 return ret; 2359 2360 out_unregister: 2361 unregister_netdev(ndev); 2362 2363 out_release: 2364 /* net_dev free */ 2365 if (mdp && mdp->addr) 2366 iounmap(mdp->addr); 2367 if (mdp && mdp->tsu_addr) 2368 iounmap(mdp->tsu_addr); 2369 if (ndev) 2370 free_netdev(ndev); 2371 2372 out: 2373 return ret; 2374 } 2375 2376 static int sh_eth_drv_remove(struct platform_device *pdev) 2377 { 2378 struct net_device *ndev = platform_get_drvdata(pdev); 2379 struct sh_eth_private *mdp = netdev_priv(ndev); 2380 2381 if (mdp->cd->tsu) 2382 iounmap(mdp->tsu_addr); 2383 sh_mdio_release(ndev); 2384 unregister_netdev(ndev); 2385 pm_runtime_disable(&pdev->dev); 2386 iounmap(mdp->addr); 2387 free_netdev(ndev); 2388 platform_set_drvdata(pdev, NULL); 2389 2390 return 0; 2391 } 2392 2393 static int sh_eth_runtime_nop(struct device *dev) 2394 { 2395 /* 2396 * Runtime PM callback shared between ->runtime_suspend() 2397 * and ->runtime_resume(). Simply returns success. 2398 * 2399 * This driver re-initializes all registers after 2400 * pm_runtime_get_sync() anyway so there is no need 2401 * to save and restore registers here. 2402 */ 2403 return 0; 2404 } 2405 2406 static struct dev_pm_ops sh_eth_dev_pm_ops = { 2407 .runtime_suspend = sh_eth_runtime_nop, 2408 .runtime_resume = sh_eth_runtime_nop, 2409 }; 2410 2411 static struct platform_driver sh_eth_driver = { 2412 .probe = sh_eth_drv_probe, 2413 .remove = sh_eth_drv_remove, 2414 .driver = { 2415 .name = CARDNAME, 2416 .pm = &sh_eth_dev_pm_ops, 2417 }, 2418 }; 2419 2420 module_platform_driver(sh_eth_driver); 2421 2422 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda"); 2423 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver"); 2424 MODULE_LICENSE("GPL v2"); 2425