// SPDX-License-Identifier: GPL-2.0
/* Atheros AR71xx built-in ethernet mac driver
 *
 * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
 *
 * List of authors who contributed to this driver before mainlining:
 * Alexander Couzens <lynxis@fe80.eu>
 * Christian Lamparter <chunkeey@gmail.com>
 * Chuanhong Guo <gch981213@gmail.com>
 * Daniel F. Dickinson <cshored@thecshore.com>
 * David Bauer <mail@david-bauer.net>
 * Felix Fietkau <nbd@nbd.name>
 * Gabor Juhos <juhosg@freemail.hu>
 * Hauke Mehrtens <hauke@hauke-m.de>
 * Johann Neuhauser <johann@it-neuhauser.de>
 * John Crispin <john@phrozen.org>
 * Jo-Philipp Wich <jo@mein.io>
 * Koen Vandeputte <koen.vandeputte@ncentric.com>
 * Lucian Cristian <lucian.cristian@gmail.com>
 * Matt Merhar <mattmerhar@protonmail.com>
 * Milan Krstic <milan.krstic@gmail.com>
 * Petr Štetiar <ynezz@true.cz>
 * Rosen Penev <rosenp@gmail.com>
 * Stephen Walker <stephendwalker+github@gmail.com>
 * Vittorio Gambaletta <openwrt@vittgam.net>
 * Weijie Gao <hackpascal@gmail.com>
 * Imre Kaloz <kaloz@openwrt.org>
 */

#include <linux/if_vlan.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <net/selftests.h>

/* For our NAPI weight bigger does *NOT* mean better - it means more
 * D-cache misses and lots more wasted cycles than we'll ever
 * possibly gain from saving instructions.
 */
#define AG71XX_NAPI_WEIGHT	32
#define AG71XX_OOM_REFILL	(1 + HZ / 10)

#define AG71XX_INT_ERR	(AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
#define AG71XX_INT_TX	(AG71XX_INT_TX_PS)
#define AG71XX_INT_RX	(AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)

#define AG71XX_INT_POLL	(AG71XX_INT_RX | AG71XX_INT_TX)
#define AG71XX_INT_INIT	(AG71XX_INT_ERR | AG71XX_INT_POLL)

#define AG71XX_TX_MTU_LEN	1540

#define AG71XX_TX_RING_SPLIT		512
#define AG71XX_TX_RING_DS_PER_PKT	DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
						     AG71XX_TX_RING_SPLIT)
#define AG71XX_TX_RING_SIZE_DEFAULT	128
#define AG71XX_RX_RING_SIZE_DEFAULT	256

#define AG71XX_MDIO_RETRY	1000
#define AG71XX_MDIO_DELAY	5
#define AG71XX_MDIO_MAX_CLK	5000000

/* Register offsets */
#define AG71XX_REG_MAC_CFG1	0x0000
#define MAC_CFG1_TXE		BIT(0)	/* Tx Enable */
#define MAC_CFG1_STX		BIT(1)	/* Synchronize Tx Enable */
#define MAC_CFG1_RXE		BIT(2)	/* Rx Enable */
#define MAC_CFG1_SRX		BIT(3)	/* Synchronize Rx Enable */
#define MAC_CFG1_TFC		BIT(4)	/* Tx Flow Control Enable */
#define MAC_CFG1_RFC		BIT(5)	/* Rx Flow Control Enable */
#define MAC_CFG1_SR		BIT(31)	/* Soft Reset */
#define MAC_CFG1_INIT	(MAC_CFG1_RXE | MAC_CFG1_TXE | \
			 MAC_CFG1_SRX | MAC_CFG1_STX)

#define AG71XX_REG_MAC_CFG2	0x0004
#define MAC_CFG2_FDX		BIT(0)
#define MAC_CFG2_PAD_CRC_EN	BIT(2)
#define MAC_CFG2_LEN_CHECK	BIT(4)
#define MAC_CFG2_IF_1000	BIT(9)
#define MAC_CFG2_IF_10_100	BIT(8)

#define AG71XX_REG_MAC_MFL	0x0010

#define AG71XX_REG_MII_CFG	0x0020
#define MII_CFG_CLK_DIV_4	0
#define MII_CFG_CLK_DIV_6	2
#define MII_CFG_CLK_DIV_8	3
#define MII_CFG_CLK_DIV_10	4
#define MII_CFG_CLK_DIV_14	5
#define MII_CFG_CLK_DIV_20	6
#define MII_CFG_CLK_DIV_28	7
#define MII_CFG_CLK_DIV_34	8
#define MII_CFG_CLK_DIV_42	9
#define MII_CFG_CLK_DIV_50	10
#define MII_CFG_CLK_DIV_58	11
#define MII_CFG_CLK_DIV_66	12
#define MII_CFG_CLK_DIV_74	13
#define MII_CFG_CLK_DIV_82	14
#define MII_CFG_CLK_DIV_98	15
#define MII_CFG_RESET		BIT(31)

#define AG71XX_REG_MII_CMD	0x0024
#define MII_CMD_READ		BIT(0)

#define AG71XX_REG_MII_ADDR	0x0028
#define MII_ADDR_SHIFT		8

#define AG71XX_REG_MII_CTRL	0x002c
#define AG71XX_REG_MII_STATUS	0x0030
#define AG71XX_REG_MII_IND	0x0034
#define MII_IND_BUSY		BIT(0)
#define MII_IND_INVALID		BIT(2)

#define AG71XX_REG_MAC_IFCTL	0x0038
#define MAC_IFCTL_SPEED		BIT(16)

#define AG71XX_REG_MAC_ADDR1	0x0040
#define AG71XX_REG_MAC_ADDR2	0x0044
#define AG71XX_REG_FIFO_CFG0	0x0048
#define FIFO_CFG0_WTM		BIT(0)	/* Watermark Module */
#define FIFO_CFG0_RXS		BIT(1)	/* Rx System Module */
#define FIFO_CFG0_RXF		BIT(2)	/* Rx Fabric Module */
#define FIFO_CFG0_TXS		BIT(3)	/* Tx System Module */
#define FIFO_CFG0_TXF		BIT(4)	/* Tx Fabric Module */
#define FIFO_CFG0_ALL	(FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
			 | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
#define FIFO_CFG0_INIT	(FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)

#define FIFO_CFG0_ENABLE_SHIFT	8

#define AG71XX_REG_FIFO_CFG1	0x004c
#define AG71XX_REG_FIFO_CFG2	0x0050
#define AG71XX_REG_FIFO_CFG3	0x0054
#define AG71XX_REG_FIFO_CFG4	0x0058
#define FIFO_CFG4_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG4_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG4_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG4_CE		BIT(3)	/* Code Error */
#define FIFO_CFG4_CR		BIT(4)	/* CRC error */
#define FIFO_CFG4_LM		BIT(5)	/* Length Mismatch */
#define FIFO_CFG4_LO		BIT(6)	/* Length out of range */
#define FIFO_CFG4_OK		BIT(7)	/* Packet is OK */
#define FIFO_CFG4_MC		BIT(8)	/* Multicast Packet */
#define FIFO_CFG4_BC		BIT(9)	/* Broadcast Packet */
#define FIFO_CFG4_DR		BIT(10)	/* Dribble */
#define FIFO_CFG4_LE		BIT(11)	/* Long Event */
#define FIFO_CFG4_CF		BIT(12)	/* Control Frame */
#define FIFO_CFG4_PF		BIT(13)	/* Pause Frame */
#define FIFO_CFG4_UO		BIT(14)	/* Unsupported Opcode */
#define FIFO_CFG4_VT		BIT(15)	/* VLAN tag detected */
#define FIFO_CFG4_FT		BIT(16)	/* Frame Truncated */
#define FIFO_CFG4_UC		BIT(17)	/* Unicast Packet */
#define FIFO_CFG4_INIT	(FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
			 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
			 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
			 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
			 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
			 FIFO_CFG4_VT)

#define AG71XX_REG_FIFO_CFG5	0x005c
#define FIFO_CFG5_DE		BIT(0)	/* Drop Event */
#define FIFO_CFG5_DV		BIT(1)	/* RX_DV Event */
#define FIFO_CFG5_FC		BIT(2)	/* False Carrier */
#define FIFO_CFG5_CE		BIT(3)	/* Code Error */
#define FIFO_CFG5_LM		BIT(4)	/* Length Mismatch */
#define FIFO_CFG5_LO		BIT(5)	/* Length Out of Range */
#define FIFO_CFG5_OK		BIT(6)	/* Packet is OK */
#define FIFO_CFG5_MC		BIT(7)	/* Multicast Packet */
#define FIFO_CFG5_BC		BIT(8)	/* Broadcast Packet */
#define FIFO_CFG5_DR		BIT(9)	/* Dribble */
#define FIFO_CFG5_CF		BIT(10)	/* Control Frame */
#define FIFO_CFG5_PF		BIT(11)	/* Pause Frame */
#define FIFO_CFG5_UO		BIT(12)	/* Unsupported Opcode */
#define FIFO_CFG5_VT		BIT(13)	/* VLAN tag detected */
#define FIFO_CFG5_LE		BIT(14)	/* Long Event */
#define FIFO_CFG5_FT		BIT(15)	/* Frame Truncated */
#define FIFO_CFG5_16		BIT(16)	/* unknown */
#define FIFO_CFG5_17		BIT(17)	/* unknown */
#define FIFO_CFG5_SF		BIT(18)	/* Short Frame */
#define FIFO_CFG5_BM		BIT(19)	/* Byte Mode */
#define FIFO_CFG5_INIT	(FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
			 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
			 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
			 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
			 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
			 FIFO_CFG5_17 | FIFO_CFG5_SF)

#define AG71XX_REG_TX_CTRL	0x0180
#define TX_CTRL_TXE		BIT(0)	/* Tx Enable */

#define AG71XX_REG_TX_DESC	0x0184
#define AG71XX_REG_TX_STATUS	0x0188
#define TX_STATUS_PS		BIT(0)	/* Packet Sent */
#define TX_STATUS_UR		BIT(1)	/* Tx Underrun */
#define TX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_RX_CTRL	0x018c
#define RX_CTRL_RXE		BIT(0)	/* Rx Enable */

#define AG71XX_DMA_RETRY	10
#define AG71XX_DMA_DELAY	1

#define AG71XX_REG_RX_DESC	0x0190
#define AG71XX_REG_RX_STATUS	0x0194
#define RX_STATUS_PR		BIT(0)	/* Packet Received */
#define RX_STATUS_OF		BIT(2)	/* Rx Overflow */
#define RX_STATUS_BE		BIT(3)	/* Bus Error */

#define AG71XX_REG_INT_ENABLE	0x0198
#define AG71XX_REG_INT_STATUS	0x019c
#define AG71XX_INT_TX_PS	BIT(0)
#define AG71XX_INT_TX_UR	BIT(1)
#define AG71XX_INT_TX_BE	BIT(3)
#define AG71XX_INT_RX_PR	BIT(4)
#define AG71XX_INT_RX_OF	BIT(6)
#define AG71XX_INT_RX_BE	BIT(7)

#define AG71XX_REG_FIFO_DEPTH	0x01a8
#define AG71XX_REG_RX_SM	0x01b0
#define AG71XX_REG_TX_SM	0x01b4

#define AG71XX_DEFAULT_MSG_ENABLE	\
	(NETIF_MSG_DRV			\
	 | NETIF_MSG_PROBE		\
	 | NETIF_MSG_LINK		\
	 | NETIF_MSG_TIMER		\
	 | NETIF_MSG_IFDOWN		\
	 | NETIF_MSG_IFUP		\
	 | NETIF_MSG_RX_ERR		\
	 | NETIF_MSG_TX_ERR)

struct ag71xx_statistic {
	unsigned short offset;
	u32 mask;
	const char name[ETH_GSTRING_LEN];
};

static const struct ag71xx_statistic ag71xx_statistics[] = {
	{ 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
	{ 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
	{ 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
	{ 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
	{ 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
	{ 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
	{ 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
	{ 0x009C, GENMASK(23, 0), "Rx Byte", },
	{ 0x00A0, GENMASK(17, 0), "Rx Packet", },
	{ 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
	{ 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
	{ 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
	{ 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
	{ 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
	{ 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
	{ 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
	{ 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
	{ 0x00C4, GENMASK(11, 0), "Rx Code Error", },
	{ 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
	{ 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
	{ 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
	{ 0x00D4, GENMASK(11, 0), "Rx Fragments", },
	{ 0x00D8, GENMASK(11, 0), "Rx Jabber", },
	{ 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
	{ 0x00E0, GENMASK(23, 0), "Tx Byte", },
	{ 0x00E4, GENMASK(17, 0), "Tx Packet", },
	{ 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
	{ 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
	{ 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
	{ 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
	{ 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
	{ 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
	{ 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
	{ 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
	{ 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
	{ 0x010C, GENMASK(12, 0), "Tx Total Collision", },
	{ 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
	{ 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
	{ 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
	{ 0x011C, GENMASK(11, 0), "Tx FCS Error", },
	{ 0x0120, GENMASK(11, 0), "Tx Control Frame", },
	{ 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
	{ 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
	{ 0x012C, GENMASK(11, 0), "Tx Fragment", },
};

#define DESC_EMPTY		BIT(31)
#define DESC_MORE		BIT(24)
#define DESC_PKTLEN_M		0xfff
struct ag71xx_desc {
	u32 data;
	u32 ctrl;
	u32 next;
	u32 pad;
} __aligned(4);

#define AG71XX_DESC_SIZE	roundup(sizeof(struct ag71xx_desc), \
					L1_CACHE_BYTES)

struct ag71xx_buf {
	union {
		struct {
			struct sk_buff *skb;
			unsigned int len;
		} tx;
		struct {
			dma_addr_t dma_addr;
			void *rx_buf;
		} rx;
	};
};

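/* Ring bookkeeping note: 'curr' and 'dirty' are free-running counters that
 * are never wrapped explicitly; the descriptor index is derived by masking
 * with BIT(order) - 1 and the fill level is simply 'curr - dirty'.
 */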
struct ag71xx_ring {
	/* "Hot" fields in the data path. */
	unsigned int curr;
	unsigned int dirty;

	/* "Cold" fields - not used in the data path. */
	struct ag71xx_buf *buf;
	u16 order;
	u16 desc_split;
	dma_addr_t descs_dma;
	u8 *descs_cpu;
};

enum ag71xx_type {
	AR7100,
	AR7240,
	AR9130,
	AR9330,
	AR9340,
	QCA9530,
	QCA9550,
};

struct ag71xx_dcfg {
	u32 max_frame_len;
	const u32 *fifodata;
	u16 desc_pktlen_mask;
	bool tx_hang_workaround;
	enum ag71xx_type type;
};

struct ag71xx {
	/* Critical data related to the per-packet data path are clustered
	 * early in this structure to help improve the D-cache footprint.
	 */
	struct ag71xx_ring rx_ring ____cacheline_aligned;
	struct ag71xx_ring tx_ring ____cacheline_aligned;

	u16 rx_buf_size;
	u8 rx_buf_offset;

	struct net_device *ndev;
	struct platform_device *pdev;
	struct napi_struct napi;
	u32 msg_enable;
	const struct ag71xx_dcfg *dcfg;

	/* From this point onwards we're not looking at per-packet fields. */
	void __iomem *mac_base;

	struct ag71xx_desc *stop_desc;
	dma_addr_t stop_desc_dma;

	phy_interface_t phy_if_mode;
	struct phylink *phylink;
	struct phylink_config phylink_config;

	struct delayed_work restart_work;
	struct timer_list oom_timer;

	struct reset_control *mac_reset;

	u32 fifodata[3];
	int mac_idx;

	struct reset_control *mdio_reset;
	struct clk *clk_mdio;
};

static int ag71xx_desc_empty(struct ag71xx_desc *desc)
{
	return (desc->ctrl & DESC_EMPTY) != 0;
}

static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
{
	return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
}

static int ag71xx_ring_size_order(int size)
{
	return fls(size - 1);
}

static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
{
	return ag->dcfg->type == type;
}

static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
{
	iowrite32(value, ag->mac_base + reg);
	/* flush write */
	(void)ioread32(ag->mac_base + reg);
}

static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
{
	return ioread32(ag->mac_base + reg);
}

static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) | mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
{
	void __iomem *r;

	r = ag->mac_base + reg;
	iowrite32(ioread32(r) & ~mask, r);
	/* flush write */
	(void)ioread32(r);
}

static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
{
	ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
{
	ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}

static int ag71xx_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_mii_ioctl(ag->phylink, ifr, cmd);
}

static void ag71xx_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct ag71xx *ag = netdev_priv(ndev);

	strscpy(info->driver, "ag71xx", sizeof(info->driver));
	strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
		sizeof(info->bus_info));
}

static int ag71xx_get_link_ksettings(struct net_device *ndev,
				     struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(ag->phylink, kset);
}

static int ag71xx_set_link_ksettings(struct net_device *ndev,
				     const struct ethtool_link_ksettings *kset)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(ag->phylink, kset);
}

static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_nway_reset(ag->phylink);
}

static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
					  struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(ag->phylink, pause);
}

static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
					 struct ethtool_pauseparam *pause)
{
	struct ag71xx *ag = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(ag->phylink, pause);
}

static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	int i;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       ag71xx_statistics[i].name, ETH_GSTRING_LEN);
		break;
	case ETH_SS_TEST:
		net_selftest_get_strings(data);
		break;
	}
}

static void ag71xx_ethtool_get_stats(struct net_device *ndev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct ag71xx *ag = netdev_priv(ndev);
	int i;

	for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
		*data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
				& ag71xx_statistics[i].mask;
}

static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ag71xx_statistics);
	case ETH_SS_TEST:
		return net_selftest_get_count();
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops ag71xx_ethtool_ops = {
	.get_drvinfo			= ag71xx_get_drvinfo,
	.get_link			= ethtool_op_get_link,
	.get_ts_info			= ethtool_op_get_ts_info,
	.get_link_ksettings		= ag71xx_get_link_ksettings,
	.set_link_ksettings		= ag71xx_set_link_ksettings,
	.nway_reset			= ag71xx_ethtool_nway_reset,
	.get_pauseparam			= ag71xx_ethtool_get_pauseparam,
	.set_pauseparam			= ag71xx_ethtool_set_pauseparam,
	.get_strings			= ag71xx_ethtool_get_strings,
	.get_ethtool_stats		= ag71xx_ethtool_get_stats,
	.get_sset_count			= ag71xx_ethtool_get_sset_count,
	.self_test			= net_selftest,
};

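/* MDIO access sequence: the PHY address and register are written to
 * MII_ADDR, a read is triggered through MII_CMD, and completion is detected
 * by polling the busy indication in MII_IND before the result is taken from
 * MII_STATUS. Writes go through MII_CTRL instead and likewise complete once
 * the busy indication clears.
 */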
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
		u32 busy;

		udelay(AG71XX_MDIO_DELAY);

		busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
		if (!busy)
			return 0;

		udelay(AG71XX_MDIO_DELAY);
	}

	netif_err(ag, link, ndev, "MDIO operation timed out\n");

	return -ETIMEDOUT;
}

static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
{
	struct ag71xx *ag = bus->priv;
	int err, val;

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	/* enable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);

	err = ag71xx_mdio_wait_busy(ag);
	if (err)
		return err;

	val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
	/* disable read mode */
	ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);

	netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	return val;
}

static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
				 u16 val)
{
	struct ag71xx *ag = bus->priv;

	netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
		  addr, reg, val);

	ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
		  ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
	ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);

	return ag71xx_mdio_wait_busy(ag);
}

static const u32 ar71xx_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28,
};

static const u32 ar7240_mdio_div_table[] = {
	2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
};

static const u32 ar933x_mdio_div_table[] = {
	4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
};

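/* Pick the smallest divider from the SoC-specific table that brings the MDC
 * clock derived from the "mdio" reference clock to at most
 * AG71XX_MDIO_MAX_CLK (5 MHz). The value returned through 'div' is the table
 * index, which is what the MII_CFG clock-divider field expects.
 */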
static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
{
	unsigned long ref_clock;
	const u32 *table;
	int ndivs, i;

	ref_clock = clk_get_rate(ag->clk_mdio);
	if (!ref_clock)
		return -EINVAL;

	if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
		table = ar933x_mdio_div_table;
		ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
	} else if (ag71xx_is(ag, AR7240)) {
		table = ar7240_mdio_div_table;
		ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
	} else {
		table = ar71xx_mdio_div_table;
		ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
	}

	for (i = 0; i < ndivs; i++) {
		unsigned long t;

		t = ref_clock / table[i];
		if (t <= AG71XX_MDIO_MAX_CLK) {
			*div = i;
			return 0;
		}
	}

	return -ENOENT;
}

static int ag71xx_mdio_reset(struct mii_bus *bus)
{
	struct ag71xx *ag = bus->priv;
	int err;
	u32 t;

	err = ag71xx_mdio_get_divider(ag, &t);
	if (err)
		return err;

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
	usleep_range(100, 200);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
	usleep_range(100, 200);

	return 0;
}

static int ag71xx_mdio_probe(struct ag71xx *ag)
{
	struct device *dev = &ag->pdev->dev;
	struct net_device *ndev = ag->ndev;
	static struct mii_bus *mii_bus;
	struct device_node *np, *mnp;
	int err;

	np = dev->of_node;

	ag->clk_mdio = devm_clk_get_enabled(dev, "mdio");
	if (IS_ERR(ag->clk_mdio)) {
		netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
		return PTR_ERR(ag->clk_mdio);
	}

	mii_bus = devm_mdiobus_alloc(dev);
	if (!mii_bus)
		return -ENOMEM;

	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
	if (IS_ERR(ag->mdio_reset)) {
		netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
		return PTR_ERR(ag->mdio_reset);
	}

	mii_bus->name = "ag71xx_mdio";
	mii_bus->read = ag71xx_mdio_mii_read;
	mii_bus->write = ag71xx_mdio_mii_write;
	mii_bus->reset = ag71xx_mdio_reset;
	mii_bus->priv = ag;
	mii_bus->parent = dev;
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);

	if (!IS_ERR(ag->mdio_reset)) {
		reset_control_assert(ag->mdio_reset);
		msleep(100);
		reset_control_deassert(ag->mdio_reset);
		msleep(200);
	}

	mnp = of_get_child_by_name(np, "mdio");
	err = devm_of_mdiobus_register(dev, mii_bus, mnp);
	of_node_put(mnp);
	if (err)
		return err;

	return 0;
}

static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* disable all interrupts and stop the rx/tx engine */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
}

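/* Heuristic behind the tx_hang_workaround: this is called when a TX
 * descriptor has not completed. If the last transmit was started more than
 * ~100 ms ago while the carrier is up, the (undocumented) RX/TX state
 * machine and FIFO depth registers are checked for patterns that apparently
 * correspond to a wedged DMA engine, so the caller can schedule a restart.
 */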
static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
{
	unsigned long timestamp;
	u32 rx_sm, tx_sm, rx_fd;

	timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
	if (likely(time_before(jiffies, timestamp + HZ / 10)))
		return false;

	if (!netif_carrier_ok(ag->ndev))
		return false;

	rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
	if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
		return true;

	tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
	rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
	if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
	    ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
		return true;

	return false;
}

static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int sent = 0, bytes_compl = 0, n = 0;
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size;
	bool dma_stuck = false;

	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");

	while (ring->dirty + n != ring->curr) {
		struct ag71xx_desc *desc;
		struct sk_buff *skb;
		unsigned int i;

		i = (ring->dirty + n) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);
		skb = ring->buf[i].tx.skb;

		if (!flush && !ag71xx_desc_empty(desc)) {
			if (ag->dcfg->tx_hang_workaround &&
			    ag71xx_check_dma_stuck(ag)) {
				schedule_delayed_work(&ag->restart_work,
						      HZ / 2);
				dma_stuck = true;
			}
			break;
		}

		if (flush)
			desc->ctrl |= DESC_EMPTY;

		n++;
		if (!skb)
			continue;

		napi_consume_skb(skb, budget);
		ring->buf[i].tx.skb = NULL;

		bytes_compl += ring->buf[i].tx.len;

		sent++;
		ring->dirty += n;

		while (n > 0) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
			n--;
		}
	}

	netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);

	if (!sent)
		return 0;

	ag->ndev->stats.tx_bytes += bytes_compl;
	ag->ndev->stats.tx_packets += sent;

	netdev_completed_queue(ag->ndev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->ndev);

	if (!dma_stuck)
		cancel_delayed_work(&ag->restart_work);

	return sent;
}

static void ag71xx_dma_wait_stop(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	int i;

	for (i = 0; i < AG71XX_DMA_RETRY; i++) {
		u32 rx, tx;

		mdelay(AG71XX_DMA_DELAY);

		rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
		tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
		if (!rx && !tx)
			return;
	}

	netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
}

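/* While the rings are torn down, the hardware descriptor pointers are parked
 * on 'stop_desc', a single dummy descriptor allocated at probe time whose
 * 'next' field points back at itself, so the DMA engine has a valid place to
 * sit without touching real buffers.
 */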
static void ag71xx_dma_reset(struct ag71xx *ag)
{
	struct net_device *ndev = ag->ndev;
	u32 val;
	int i;

	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* give the hardware some time to really stop all rx/tx activity
	 * clearing the descriptors too early causes random memory corruption
	 */
	ag71xx_dma_wait_stop(ag);

	/* clear descriptor addresses */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);

	/* clear pending RX/TX interrupts */
	for (i = 0; i < 256; i++) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
	}

	/* clear pending errors */
	ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
	ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);

	val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
			  val);

	val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);

	/* mask out reserved bits */
	val &= ~0xff000000;

	if (val)
		netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
			  val);
}

static void ag71xx_hw_setup(struct ag71xx *ag)
{
	u32 init = MAC_CFG1_INIT;

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length to zero */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
}

static unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
	return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}

static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
	u32 t;

	t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
	  | (((u32)mac[3]) << 8) | ((u32)mac[2]);

	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);

	t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
	ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
}

static void ag71xx_fast_reset(struct ag71xx *ag)
{
	struct net_device *dev = ag->ndev;
	u32 rx_ds;
	u32 mii_reg;

	ag71xx_hw_stop(ag);

	mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
	rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);

	ag71xx_tx_packets(ag, true, 0);

	reset_control_assert(ag->mac_reset);
	usleep_range(10, 20);
	reset_control_deassert(ag->mac_reset);
	usleep_range(10, 20);

	ag71xx_dma_reset(ag);
	ag71xx_hw_setup(ag);
	ag->tx_ring.curr = 0;
	ag->tx_ring.dirty = 0;
	netdev_reset_queue(ag->ndev);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ag->ndev->mtu));

	ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);
}

static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);

	netif_wake_queue(ag->ndev);
}

static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	if (phylink_autoneg_inband(mode))
		return;

	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag71xx_fast_reset(ag);

	if (ag->tx_ring.desc_split) {
		ag->fifodata[2] &= 0xffff;
		ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
	}

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}

static void ag71xx_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));

	ag71xx_hw_stop(ag);
}

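/* Resolved link parameters from phylink are programmed here: the speed
 * selects the MAC_CFG2 interface mode (plus the FIFO_CFG5 byte-mode bit for
 * gigabit and the MAC_IFCTL speed bit for 100 Mbit/s), duplex and pause
 * settings go into MAC_CFG2/MAC_CFG1, and only then are the RX engine and
 * the interrupt mask enabled via ag71xx_hw_start().
 */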
static void ag71xx_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
	u32 cfg1, cfg2;
	u32 ifctl;
	u32 fifo5;

	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= duplex ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (speed) {
	case SPEED_1000:
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		return;
	}

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
	cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
	if (tx_pause)
		cfg1 |= MAC_CFG1_TFC;

	if (rx_pause)
		cfg1 |= MAC_CFG1_RFC;
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);

	ag71xx_hw_start(ag);
}

static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
	.mac_config = ag71xx_mac_config,
	.mac_link_down = ag71xx_mac_link_down,
	.mac_link_up = ag71xx_mac_link_up,
};

static int ag71xx_phylink_setup(struct ag71xx *ag)
{
	struct phylink *phylink;

	ag->phylink_config.dev = &ag->ndev->dev;
	ag->phylink_config.type = PHYLINK_NETDEV;
	ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		MAC_10 | MAC_100 | MAC_1000FD;

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
	    ag71xx_is(ag, AR9340) ||
	    ag71xx_is(ag, QCA9530) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_MII,
			  ag->phylink_config.supported_interfaces);

	if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
	    (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
	    (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  ag->phylink_config.supported_interfaces);

	if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  ag->phylink_config.supported_interfaces);

	if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
		__set_bit(PHY_INTERFACE_MODE_RMII,
			  ag->phylink_config.supported_interfaces);

	if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
	    (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
		__set_bit(PHY_INTERFACE_MODE_RGMII,
			  ag->phylink_config.supported_interfaces);

	phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
				 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	ag->phylink = phylink;
	return 0;
}

static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_mask = BIT(ring->order) - 1;
	u32 bytes_compl = 0, pkts_compl = 0;
	struct net_device *ndev = ag->ndev;

	while (ring->curr != ring->dirty) {
		struct ag71xx_desc *desc;
		u32 i = ring->dirty & ring_mask;

		desc = ag71xx_ring_desc(ring, i);
		if (!ag71xx_desc_empty(desc)) {
			desc->ctrl = 0;
			ndev->stats.tx_errors++;
		}

		if (ring->buf[i].tx.skb) {
			bytes_compl += ring->buf[i].tx.len;
			pkts_compl++;
			dev_kfree_skb_any(ring->buf[i].tx.skb);
		}
		ring->buf[i].tx.skb = NULL;
		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);
}

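/* (Re)initialize the TX ring: every descriptor is linked to its successor by
 * physical address (wrapping at the end of the ring) and marked DESC_EMPTY
 * so the hardware treats the whole ring as free.
 */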
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	int ring_size = BIT(ring->order);
	int ring_mask = ring_size - 1;
	int i;

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		desc->ctrl = DESC_EMPTY;
		ring->buf[i].tx.skb = NULL;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;
	netdev_reset_queue(ag->ndev);
}

static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_size = BIT(ring->order);
	int i;

	if (!ring->buf)
		return;

	for (i = 0; i < ring_size; i++)
		if (ring->buf[i].rx.rx_buf) {
			dma_unmap_single(&ag->pdev->dev,
					 ring->buf[i].rx.dma_addr,
					 ag->rx_buf_size, DMA_FROM_DEVICE);
			skb_free_frag(ring->buf[i].rx.rx_buf);
		}
}

static int ag71xx_buffer_size(struct ag71xx *ag)
{
	return ag->rx_buf_size +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
			       int offset,
			       void *(*alloc)(unsigned int size))
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct ag71xx_desc *desc;
	void *data;

	desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);

	data = alloc(ag71xx_buffer_size(ag));
	if (!data)
		return false;

	buf->rx.rx_buf = data;
	buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
					  DMA_FROM_DEVICE);
	desc->data = (u32)buf->rx.dma_addr + offset;
	return true;
}

static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	struct net_device *ndev = ag->ndev;
	int ring_mask = BIT(ring->order) - 1;
	int ring_size = BIT(ring->order);
	unsigned int i;
	int ret;

	ret = 0;
	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		desc->next = (u32)(ring->descs_dma +
			AG71XX_DESC_SIZE * ((i + 1) & ring_mask));

		netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
			  desc, desc->next);
	}

	for (i = 0; i < ring_size; i++) {
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
					netdev_alloc_frag)) {
			ret = -ENOMEM;
			break;
		}

		desc->ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}

static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	int ring_mask = BIT(ring->order) - 1;
	int offset = ag->rx_buf_offset;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		struct ag71xx_desc *desc;
		unsigned int i;

		i = ring->dirty & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ring->buf[i].rx.rx_buf &&
		    !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
					napi_alloc_frag))
			break;

		desc->ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
		  count);

	return count;
}

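/* TX and RX rings share a single coherent DMA allocation for the descriptors
 * and a single kcalloc() for the per-slot metadata; the RX portions simply
 * start tx_size entries (and descriptors) into those blocks.
 */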
static int ag71xx_rings_init(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size, tx_size;

	ring_size = BIT(tx->order) + BIT(rx->order);
	tx_size = BIT(tx->order);

	tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
	if (!tx->buf)
		return -ENOMEM;

	tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
					   ring_size * AG71XX_DESC_SIZE,
					   &tx->descs_dma, GFP_KERNEL);
	if (!tx->descs_cpu) {
		kfree(tx->buf);
		tx->buf = NULL;
		return -ENOMEM;
	}

	rx->buf = &tx->buf[tx_size];
	rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
	rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;

	ag71xx_ring_tx_init(ag);
	return ag71xx_ring_rx_init(ag);
}

static void ag71xx_rings_free(struct ag71xx *ag)
{
	struct ag71xx_ring *tx = &ag->tx_ring;
	struct ag71xx_ring *rx = &ag->rx_ring;
	int ring_size;

	ring_size = BIT(tx->order) + BIT(rx->order);

	if (tx->descs_cpu)
		dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
				  tx->descs_cpu, tx->descs_dma);

	kfree(tx->buf);

	tx->descs_cpu = NULL;
	rx->descs_cpu = NULL;
	tx->buf = NULL;
	rx->buf = NULL;
}

static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_tx_clean(ag);
	ag71xx_rings_free(ag);

	netdev_reset_queue(ag->ndev);
}

static void ag71xx_hw_init(struct ag71xx *ag)
{
	ag71xx_hw_stop(ag);

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	usleep_range(20, 30);

	reset_control_assert(ag->mac_reset);
	msleep(100);
	reset_control_deassert(ag->mac_reset);
	msleep(200);

	ag71xx_hw_setup(ag);

	ag71xx_dma_reset(ag);
}

static int ag71xx_hw_enable(struct ag71xx *ag)
{
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		return ret;

	napi_enable(&ag->napi);
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
	netif_start_queue(ag->ndev);

	return 0;
}

static void ag71xx_hw_disable(struct ag71xx *ag)
{
	netif_stop_queue(ag->ndev);

	ag71xx_hw_stop(ag);
	ag71xx_dma_reset(ag);

	napi_disable(&ag->napi);
	del_timer_sync(&ag->oom_timer);

	ag71xx_rings_cleanup(ag);
}

static int ag71xx_open(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);
	unsigned int max_frame_len;
	int ret;

	ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
	if (ret) {
		netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
			  ret);
		return ret;
	}

	max_frame_len = ag71xx_max_frame_len(ndev->mtu);
	ag->rx_buf_size =
		SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
	ag71xx_hw_set_macaddr(ag, ndev->dev_addr);

	ret = ag71xx_hw_enable(ag);
	if (ret)
		goto err;

	phylink_start(ag->phylink);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	phylink_disconnect_phy(ag->phylink);
	return ret;
}

static int ag71xx_stop(struct net_device *ndev)
{
	struct ag71xx *ag = netdev_priv(ndev);

	phylink_stop(ag->phylink);
	phylink_disconnect_phy(ag->phylink);
	ag71xx_hw_disable(ag);

	return 0;
}

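/* Build the descriptor chain for one outgoing frame. On SoCs with desc_split
 * set (AR7100), the frame is spread over several descriptors of at most
 * 'split' bytes, chained with DESC_MORE. The first descriptor is left marked
 * DESC_EMPTY so the hardware does not start on a half-built chain; the
 * caller clears that bit once everything is in place.
 */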
static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
	int i, ring_mask, ndesc, split;
	struct ag71xx_desc *desc;

	ring_mask = BIT(ring->order) - 1;
	ndesc = 0;
	split = ring->desc_split;

	if (!split)
		split = len;

	while (len > 0) {
		unsigned int cur_len = len;

		i = (ring->curr + ndesc) & ring_mask;
		desc = ag71xx_ring_desc(ring, i);

		if (!ag71xx_desc_empty(desc))
			return -1;

		if (cur_len > split) {
			cur_len = split;

			/* TX will hang if DMA transfers <= 4 bytes,
			 * make sure next segment is more than 4 bytes long.
			 */
			if (len <= split + 4)
				cur_len -= 4;
		}

		desc->data = addr;
		addr += cur_len;
		len -= cur_len;

		if (len > 0)
			cur_len |= DESC_MORE;

		/* prevent early tx attempt of this descriptor */
		if (!ndesc)
			cur_len |= DESC_EMPTY;

		desc->ctrl = cur_len;
		ndesc++;
	}

	return ndesc;
}

static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *ndev)
{
	int i, n, ring_min, ring_mask, ring_size;
	struct ag71xx *ag = netdev_priv(ndev);
	struct ag71xx_ring *ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;

	ring = &ag->tx_ring;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	if (skb->len <= 4) {
		netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
		goto err_drop;
	}

	dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	i = ring->curr & ring_mask;
	desc = ag71xx_ring_desc(ring, i);

	/* setup descriptor fields */
	n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
				 skb->len & ag->dcfg->desc_pktlen_mask);
	if (n < 0)
		goto err_drop_unmap;

	i = (ring->curr + n - 1) & ring_mask;
	ring->buf[i].tx.len = skb->len;
	ring->buf[i].tx.skb = skb;

	netdev_sent_queue(ndev, skb->len);

	skb_tx_timestamp(skb);

	desc->ctrl &= ~DESC_EMPTY;
	ring->curr += n;

	/* flush descriptor */
	wmb();

	ring_min = 2;
	if (ring->desc_split)
		ring_min *= AG71XX_TX_RING_DS_PER_PKT;

	if (ring->curr - ring->dirty >= ring_size - ring_min) {
		netif_dbg(ag, tx_err, ndev, "tx queue full\n");
		netif_stop_queue(ndev);
	}

	netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop_unmap:
	dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

err_drop:
	ndev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ag71xx_oom_timer_handler(struct timer_list *t)
{
	struct ag71xx *ag = from_timer(ag, t, oom_timer);

	napi_schedule(&ag->napi);
}

static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct ag71xx *ag = netdev_priv(ndev);

	netif_err(ag, tx_err, ndev, "tx timeout\n");

	schedule_delayed_work(&ag->restart_work, 1);
}

static void ag71xx_restart_work_func(struct work_struct *work)
{
	struct ag71xx *ag = container_of(work, struct ag71xx,
					 restart_work.work);

	rtnl_lock();
	ag71xx_hw_disable(ag);
	ag71xx_hw_enable(ag);

	phylink_stop(ag->phylink);
	phylink_start(ag->phylink);

	rtnl_unlock();
}

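/* RX path: completed buffers are unmapped, wrapped into skbs with
 * napi_build_skb() and queued on a local list, which is handed to the stack
 * in one batch via netif_receive_skb_list() once the ring has been refilled.
 */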
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *ndev = ag->ndev;
	int ring_mask, ring_size, done = 0;
	unsigned int pktlen_mask, offset;
	struct ag71xx_ring *ring;
	struct list_head rx_list;
	struct sk_buff *skb;

	ring = &ag->rx_ring;
	pktlen_mask = ag->dcfg->desc_pktlen_mask;
	offset = ag->rx_buf_offset;
	ring_mask = BIT(ring->order) - 1;
	ring_size = BIT(ring->order);

	netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
		  limit, ring->curr, ring->dirty);

	INIT_LIST_HEAD(&rx_list);

	while (done < limit) {
		unsigned int i = ring->curr & ring_mask;
		struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
		int pktlen;
		int err = 0;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + ring_size) == ring->curr) {
			WARN_ONCE(1, "RX out of ring");
			break;
		}

		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		pktlen = desc->ctrl & pktlen_mask;
		pktlen -= ETH_FCS_LEN;

		dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
				 ag->rx_buf_size, DMA_FROM_DEVICE);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += pktlen;

		skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
		if (!skb) {
			skb_free_frag(ring->buf[i].rx.rx_buf);
			goto next;
		}

		skb_reserve(skb, offset);
		skb_put(skb, pktlen);

		if (err) {
			ndev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->dev = ndev;
			skb->ip_summed = CHECKSUM_NONE;
			list_add_tail(&skb->list, &rx_list);
		}

next:
		ring->buf[i].rx.rx_buf = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	list_for_each_entry(skb, &rx_list, list)
		skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb_list(&rx_list);

	netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
		  ring->curr, ring->dirty, done);

	return done;
}

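/* NAPI poll: reap completed TX descriptors first, then receive up to 'limit'
 * packets. If an RX buffer could not be refilled, back off and let the OOM
 * timer reschedule the poll; otherwise interrupts are only re-enabled once
 * neither direction has work pending.
 */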
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_ring *rx_ring = &ag->rx_ring;
	int rx_ring_size = BIT(rx_ring->order);
	struct net_device *ndev = ag->ndev;
	int tx_done, rx_done;
	u32 status;

	tx_done = ag71xx_tx_packets(ag, false, limit);

	netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
	rx_done = ag71xx_rx_packets(ag, limit);

	if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		ndev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (rx_done < limit) {
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d, limit=%d\n",
			  rx_done, tx_done, limit);

		napi_complete(napi);

		/* enable interrupts */
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		return rx_done;
	}

more:
	netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
		  rx_done, tx_done, limit);
	return limit;

oom:
	netif_err(ag, rx_err, ndev, "out of memory\n");

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	napi_complete(napi);
	return 0;
}

static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ag71xx *ag;
	u32 status;

	ag = netdev_priv(ndev);
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			netif_err(ag, intr, ndev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			netif_err(ag, intr, ndev, "RX BUS error\n");
		}
	}

	if (likely(status & AG71XX_INT_POLL)) {
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		netif_dbg(ag, intr, ndev, "enable polling mode\n");
		napi_schedule(&ag->napi);
	}

	return IRQ_HANDLED;
}

static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ag71xx *ag = netdev_priv(ndev);

	WRITE_ONCE(ndev->mtu, new_mtu);
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
		  ag71xx_max_frame_len(ndev->mtu));

	return 0;
}

static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,
	.ndo_stop		= ag71xx_stop,
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_eth_ioctl		= ag71xx_do_ioctl,
	.ndo_tx_timeout		= ag71xx_tx_timeout,
	.ndo_change_mtu		= ag71xx_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

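/* MMIO base addresses used to tell the first MAC from the second one;
 * probe() derives mac_idx by comparing the register resource against this
 * table, and several SoC-specific interface-mode quirks depend on that
 * index.
 */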
static const u32 ar71xx_addr_ar7100[] = {
	0x19000000, 0x1a000000,
};

static int ag71xx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct ag71xx_dcfg *dcfg;
	struct net_device *ndev;
	struct resource *res;
	struct clk *clk_eth;
	int tx_size, err, i;
	struct ag71xx *ag;

	if (!np)
		return -ENODEV;

	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
	if (!ndev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	dcfg = of_device_get_match_data(&pdev->dev);
	if (!dcfg)
		return -EINVAL;

	ag = netdev_priv(ndev);
	ag->mac_idx = -1;
	for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
		if (ar71xx_addr_ar7100[i] == res->start)
			ag->mac_idx = i;
	}

	if (ag->mac_idx < 0) {
		netif_err(ag, probe, ndev, "unknown mac idx\n");
		return -EINVAL;
	}

	clk_eth = devm_clk_get_enabled(&pdev->dev, "eth");
	if (IS_ERR(clk_eth)) {
		netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
		return PTR_ERR(clk_eth);
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	ag->pdev = pdev;
	ag->ndev = ndev;
	ag->dcfg = dcfg;
	ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
	memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));

	ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(ag->mac_reset)) {
		netif_err(ag, probe, ndev, "missing mac reset\n");
		return PTR_ERR(ag->mac_reset);
	}

	ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!ag->mac_base)
		return -ENOMEM;

	ndev->irq = platform_get_irq(pdev, 0);
	err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
			       0x0, dev_name(&pdev->dev), ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
			  ndev->irq);
		return err;
	}

	ndev->netdev_ops = &ag71xx_netdev_ops;
	ndev->ethtool_ops = &ag71xx_ethtool_ops;

	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
	timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);

	tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);

	ndev->min_mtu = 68;
	ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);

	ag->rx_buf_offset = NET_SKB_PAD;
	if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
		ag->rx_buf_offset += NET_IP_ALIGN;

	if (ag71xx_is(ag, AR7100)) {
		ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
		tx_size *= AG71XX_TX_RING_DS_PER_PKT;
	}
	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);

	ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct ag71xx_desc),
					    &ag->stop_desc_dma, GFP_KERNEL);
	if (!ag->stop_desc)
		return -ENOMEM;

	ag->stop_desc->data = 0;
	ag->stop_desc->ctrl = 0;
	ag->stop_desc->next = (u32)ag->stop_desc_dma;

	err = of_get_ethdev_address(np, ndev);
	if (err == -EPROBE_DEFER)
		return err;
	if (err) {
		netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
		eth_hw_addr_random(ndev);
	}

	err = of_get_phy_mode(np, &ag->phy_if_mode);
	if (err) {
		netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
		return err;
	}

	netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
			      AG71XX_NAPI_WEIGHT);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);

	ag71xx_hw_init(ag);

	err = ag71xx_mdio_probe(ag);
	if (err)
		return err;

	platform_set_drvdata(pdev, ndev);

	err = ag71xx_phylink_setup(ag);
	if (err) {
		netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
		return err;
	}

	err = devm_register_netdev(&pdev->dev, ndev);
	if (err) {
		netif_err(ag, probe, ndev, "unable to register net device\n");
		platform_set_drvdata(pdev, NULL);
		return err;
	}

	netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
		   (unsigned long)ag->mac_base, ndev->irq,
		   phy_modes(ag->phy_if_mode));

	return 0;
}

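/* Per-SoC-family defaults written into FIFO_CFG1..FIFO_CFG3 (fifodata[0..2]
 * in ag71xx_hw_setup() and ag71xx_mac_config()); the third word is
 * additionally patched at link-config time when TX descriptor splitting is
 * in use.
 */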
static const u32 ar71xx_fifo_ar7100[] = {
	0x0fff0000, 0x00001fff, 0x00780fff,
};

static const u32 ar71xx_fifo_ar9130[] = {
	0x0fff0000, 0x00001fff, 0x008001ff,
};

static const u32 ar71xx_fifo_ar9330[] = {
	0x0010ffff, 0x015500aa, 0x01f00140,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
	.type = AR7100,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
	.type = AR7240,
	.fifodata = ar71xx_fifo_ar7100,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
	.type = AR9130,
	.fifodata = ar71xx_fifo_ar9130,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = false,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
	.type = AR9330,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_4K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
	.type = AR9340,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
	.type = QCA9530,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = SZ_16K - 1,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
	.type = QCA9550,
	.fifodata = ar71xx_fifo_ar9330,
	.max_frame_len = 1540,
	.desc_pktlen_mask = SZ_16K - 1,
	.tx_hang_workaround = true,
};

static const struct of_device_id ag71xx_match[] = {
	{ .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
	{ .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
	{ .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
	{ .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
	{ .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
	{ .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
	{ .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
	{ .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
	{}
};
MODULE_DEVICE_TABLE(of, ag71xx_match);

static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.driver = {
		.name	= "ag71xx",
		.of_match_table = ag71xx_match,
	}
};

module_platform_driver(ag71xx_driver);
MODULE_LICENSE("GPL v2");