// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
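
/* Note (descriptive comment, added): the entries above are applied by
 * axienet_setoptions(); for every entry whose .opt bit is requested, the
 * .m_or bits are OR'ed into the register at .reg, otherwise they are cleared.
 */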

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	iowrite32(value, lp->dma_regs + reg);
}

static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
				 dma_addr_t addr)
{
	axienet_dma_out32(lp, reg, lower_32_bits(addr));

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	u32 tx_cr;

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	tx_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
		XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		tx_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
				<< XAXIDMA_DELAY_SHIFT) |
			 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	tx_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, tx_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
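
/**
 * __axienet_device_reset - Reset the Axi DMA core (and, through the shared
 *			    reset line, the Axi Ethernet core)
 * @lp: Pointer to the axienet_local structure
 *
 * Return: 0 on success, or a negative error number if the DMA reset or the
 * subsequent PHY reset does not complete in time.
 */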
static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @ndev: Pointer to the net_device structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
				 int nr_bds, u32 *sizep)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	int max_bds = nr_bds;
	unsigned int status;
	dma_addr_t phys;
	int i;

	if (max_bds == -1)
		max_bds = lp->tx_bd_num;

	for (i = 0; i < max_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If no number is given, clean up *all* descriptors that have
		 * been completed by the MAC.
		 */
		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			dev_consume_skb_irq(cur_p->skb);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX IRQ path */
	rmb();
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 packets = 0;
	u32 size = 0;

	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);

	lp->tx_bd_ci += packets;
	if (lp->tx_bd_ci >= lp->tx_bd_num)
		lp->tx_bd_ci -= lp->tx_bd_num;

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	/* Matches barrier in axienet_start_xmit */
	smp_mb();

	if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
		netif_wake_queue(ndev);
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	u32 orig_tail_ptr = lp->tx_bd_tail;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
					      NULL);
			lp->tx_bd_tail = orig_tail_ptr;

			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	if (++lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_poll - Triggered by RX ISR to complete the received BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	lp->ndev->stats.rx_packets += packets;
	lp->ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		axienet_start_xmit_done(lp->ndev);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	napi_disable(&lp->napi);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: Always returns 0 (success).
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};

static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
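		/* Descriptive comment (added): on cores built with dynamic
		 * SGMII/1000BASE-X switching, select the standard matching
		 * the requested interface before (re)configuring the PCS.
		 */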
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
					XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.validate = phylink_generic_validate,
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work: pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	napi_disable(&lp->napi);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
	napi_enable(&lp->napi);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
	napi_enable(&lp->napi);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for the Axi Ethernet driver. It is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	netif_napi_add(ndev, &lp->napi, axienet_poll, NAPI_POLL_WEIGHT);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if the named AXI clock is not
		 * present, treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

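	/* "xlnx,txcsum" and "xlnx,rxcsum" describe the checksum offload the
	 * core was built with: 1 selects partial offload (TCP/UDP over IPv4
	 * only), 2 selects full offload, and any other value, or a missing
	 * property, leaves checksum offload disabled.
	 */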
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* Jumbo frames require the Axi Ethernet hardware to be configured
	 * with sufficiently large Rx/Tx buffer memory. Read the Rx memory
	 * size from the device tree; it is used later to decide whether the
	 * jumbo option can actually be enabled.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

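	/* The DMA engine may be described either as a separate node referenced
	 * through the "axistream-connected" phandle, in which case its
	 * registers and interrupts (Tx IRQ first, Rx IRQ second) come from
	 * that node, or by placing the DMA registers and IRQs directly on the
	 * Ethernet node after the Ethernet core's own resources.
	 */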
	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;

	/* Reset core now that clocks are enabled, prior to accessing MDIO */
	ret = __axienet_device_reset(lp);
	if (ret)
		goto cleanup_clk;

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

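	/* For SGMII and 1000BASE-X, the internal PCS/PMA is managed as an
	 * MDIO device; look it up here so phylink can drive it through the
	 * axienet_pcs_ops callbacks registered above.
	 */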
	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					      MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);

	return 0;
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		 .name = "xilinx_axienet",
		 .of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");