1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Xilinx Axi Ethernet device driver 4 * 5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> 9 * Copyright (c) 2010 - 2011 PetaLogix 10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies 11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 12 * 13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 14 * and Spartan6. 15 * 16 * TODO: 17 * - Add Axi Fifo support. 18 * - Factor out Axi DMA code into separate driver. 19 * - Test and fix basic multicast filtering. 20 * - Add support for extended multicast filtering. 21 * - Test basic VLAN support. 22 * - Add support for extended VLAN support. 23 */ 24 25 #include <linux/clk.h> 26 #include <linux/delay.h> 27 #include <linux/etherdevice.h> 28 #include <linux/module.h> 29 #include <linux/netdevice.h> 30 #include <linux/of.h> 31 #include <linux/of_mdio.h> 32 #include <linux/of_net.h> 33 #include <linux/of_irq.h> 34 #include <linux/of_address.h> 35 #include <linux/platform_device.h> 36 #include <linux/skbuff.h> 37 #include <linux/math64.h> 38 #include <linux/phy.h> 39 #include <linux/mii.h> 40 #include <linux/ethtool.h> 41 #include <linux/dmaengine.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/dma/xilinx_dma.h> 44 #include <linux/circ_buf.h> 45 #include <net/netdev_queues.h> 46 47 #include "xilinx_axienet.h" 48 49 /* Descriptors defines for Tx and Rx DMA */ 50 #define TX_BD_NUM_DEFAULT 128 51 #define RX_BD_NUM_DEFAULT 1024 52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) 53 #define TX_BD_NUM_MAX 4096 54 #define RX_BD_NUM_MAX 4096 55 #define DMA_NUM_APP_WORDS 5 56 #define LEN_APP 4 57 #define RX_BUF_NUM_DEFAULT 128 58 59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ 60 #define DRIVER_NAME "xaxienet" 61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" 62 #define DRIVER_VERSION "1.00a" 63 64 #define AXIENET_REGS_N 40 65 66 static void axienet_rx_submit_desc(struct net_device *ndev); 67 68 /* Match table for of_platform binding */ 69 static const struct of_device_id axienet_of_match[] = { 70 { .compatible = "xlnx,axi-ethernet-1.00.a", }, 71 { .compatible = "xlnx,axi-ethernet-1.01.a", }, 72 { .compatible = "xlnx,axi-ethernet-2.01.a", }, 73 {}, 74 }; 75 76 MODULE_DEVICE_TABLE(of, axienet_of_match); 77 78 /* Option table for setting up Axi Ethernet hardware options */ 79 static struct axienet_option axienet_options[] = { 80 /* Turn on jumbo packet support for both Rx and Tx */ 81 { 82 .opt = XAE_OPTION_JUMBO, 83 .reg = XAE_TC_OFFSET, 84 .m_or = XAE_TC_JUM_MASK, 85 }, { 86 .opt = XAE_OPTION_JUMBO, 87 .reg = XAE_RCW1_OFFSET, 88 .m_or = XAE_RCW1_JUM_MASK, 89 }, { /* Turn on VLAN packet support for both Rx and Tx */ 90 .opt = XAE_OPTION_VLAN, 91 .reg = XAE_TC_OFFSET, 92 .m_or = XAE_TC_VLAN_MASK, 93 }, { 94 .opt = XAE_OPTION_VLAN, 95 .reg = XAE_RCW1_OFFSET, 96 .m_or = XAE_RCW1_VLAN_MASK, 97 }, { /* Turn on FCS stripping on receive packets */ 98 .opt = XAE_OPTION_FCS_STRIP, 99 .reg = XAE_RCW1_OFFSET, 100 .m_or = XAE_RCW1_FCS_MASK, 101 }, { /* Turn on FCS insertion on transmit packets */ 102 .opt = XAE_OPTION_FCS_INSERT, 103 .reg = XAE_TC_OFFSET, 104 .m_or = XAE_TC_FCS_MASK, 105 }, { /* Turn off length/type field checking on receive packets */ 106 .opt = XAE_OPTION_LENTYPE_ERR, 107 .reg = XAE_RCW1_OFFSET, 108 .m_or = XAE_RCW1_LT_DIS_MASK, 109 }, { /* Turn on Rx flow control */ 110 .opt = XAE_OPTION_FLOW_CONTROL, 111 .reg = XAE_FCC_OFFSET, 112 .m_or = XAE_FCC_FCRX_MASK, 113 }, { /* Turn on Tx flow control */ 114 .opt = XAE_OPTION_FLOW_CONTROL, 115 .reg = XAE_FCC_OFFSET, 116 .m_or = XAE_FCC_FCTX_MASK, 117 }, { /* Turn on promiscuous frame filtering */ 118 .opt = XAE_OPTION_PROMISC, 119 .reg = XAE_FMI_OFFSET, 120 .m_or = XAE_FMI_PM_MASK, 121 }, { /* Enable transmitter */ 122 .opt = XAE_OPTION_TXEN, 123 .reg = XAE_TC_OFFSET, 124 .m_or = XAE_TC_TX_MASK, 125 }, { /* Enable receiver */ 126 .opt = XAE_OPTION_RXEN, 127 .reg = XAE_RCW1_OFFSET, 128 .m_or = XAE_RCW1_RX_MASK, 129 }, 130 {} 131 }; 132 133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i) 134 { 135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)]; 136 } 137 138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i) 139 { 140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)]; 141 } 142 143 /** 144 * axienet_dma_in32 - Memory mapped Axi DMA register read 145 * @lp: Pointer to axienet local structure 146 * @reg: Address offset from the base address of the Axi DMA core 147 * 148 * Return: The contents of the Axi DMA register 149 * 150 * This function returns the contents of the corresponding Axi DMA register. 151 */ 152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) 153 { 154 return ioread32(lp->dma_regs + reg); 155 } 156 157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, 158 struct axidma_bd *desc) 159 { 160 desc->phys = lower_32_bits(addr); 161 if (lp->features & XAE_FEATURE_DMA_64BIT) 162 desc->phys_msb = upper_32_bits(addr); 163 } 164 165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, 166 struct axidma_bd *desc) 167 { 168 dma_addr_t ret = desc->phys; 169 170 if (lp->features & XAE_FEATURE_DMA_64BIT) 171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; 172 173 return ret; 174 } 175 176 /** 177 * axienet_dma_bd_release - Release buffer descriptor rings 178 * @ndev: Pointer to the net_device structure 179 * 180 * This function is used to release the descriptors allocated in 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet 182 * driver stop api is called. 183 */ 184 static void axienet_dma_bd_release(struct net_device *ndev) 185 { 186 int i; 187 struct axienet_local *lp = netdev_priv(ndev); 188 189 /* If we end up here, tx_bd_v must have been DMA allocated. */ 190 dma_free_coherent(lp->dev, 191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 192 lp->tx_bd_v, 193 lp->tx_bd_p); 194 195 if (!lp->rx_bd_v) 196 return; 197 198 for (i = 0; i < lp->rx_bd_num; i++) { 199 dma_addr_t phys; 200 201 /* A NULL skb means this descriptor has not been initialised 202 * at all. 203 */ 204 if (!lp->rx_bd_v[i].skb) 205 break; 206 207 dev_kfree_skb(lp->rx_bd_v[i].skb); 208 209 /* For each descriptor, we programmed cntrl with the (non-zero) 210 * descriptor size, after it had been successfully allocated. 211 * So a non-zero value in there means we need to unmap it. 212 */ 213 if (lp->rx_bd_v[i].cntrl) { 214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); 215 dma_unmap_single(lp->dev, phys, 216 lp->max_frm_size, DMA_FROM_DEVICE); 217 } 218 } 219 220 dma_free_coherent(lp->dev, 221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 222 lp->rx_bd_v, 223 lp->rx_bd_p); 224 } 225 226 /** 227 * axienet_usec_to_timer - Calculate IRQ delay timer value 228 * @lp: Pointer to the axienet_local structure 229 * @coalesce_usec: Microseconds to convert into timer value 230 */ 231 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) 232 { 233 u32 result; 234 u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */ 235 236 if (lp->axi_clk) 237 clk_rate = clk_get_rate(lp->axi_clk); 238 239 /* 1 Timeout Interval = 125 * (clock period of SG clock) */ 240 result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate, 241 (u64)125000000); 242 if (result > 255) 243 result = 255; 244 245 return result; 246 } 247 248 /** 249 * axienet_dma_start - Set up DMA registers and start DMA operation 250 * @lp: Pointer to the axienet_local structure 251 */ 252 static void axienet_dma_start(struct axienet_local *lp) 253 { 254 /* Start updating the Rx channel control register */ 255 lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | 256 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; 257 /* Only set interrupt delay timer if not generating an interrupt on 258 * the first RX packet. Otherwise leave at 0 to disable delay interrupt. 259 */ 260 if (lp->coalesce_count_rx > 1) 261 lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx) 262 << XAXIDMA_DELAY_SHIFT) | 263 XAXIDMA_IRQ_DELAY_MASK; 264 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 265 266 /* Start updating the Tx channel control register */ 267 lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | 268 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; 269 /* Only set interrupt delay timer if not generating an interrupt on 270 * the first TX packet. Otherwise leave at 0 to disable delay interrupt. 271 */ 272 if (lp->coalesce_count_tx > 1) 273 lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) 274 << XAXIDMA_DELAY_SHIFT) | 275 XAXIDMA_IRQ_DELAY_MASK; 276 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 277 278 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 279 * halted state. This will make the Rx side ready for reception. 280 */ 281 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 282 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; 283 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 284 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 285 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); 286 287 /* Write to the RS (Run-stop) bit in the Tx channel control register. 288 * Tx channel is now ready to run. But only after we write to the 289 * tail pointer register that the Tx channel will start transmitting. 290 */ 291 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 292 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; 293 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 294 } 295 296 /** 297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA 298 * @ndev: Pointer to the net_device structure 299 * 300 * Return: 0, on success -ENOMEM, on failure 301 * 302 * This function is called to initialize the Rx and Tx DMA descriptor 303 * rings. This initializes the descriptors with required default values 304 * and is called when Axi Ethernet driver reset is called. 305 */ 306 static int axienet_dma_bd_init(struct net_device *ndev) 307 { 308 int i; 309 struct sk_buff *skb; 310 struct axienet_local *lp = netdev_priv(ndev); 311 312 /* Reset the indexes which are used for accessing the BDs */ 313 lp->tx_bd_ci = 0; 314 lp->tx_bd_tail = 0; 315 lp->rx_bd_ci = 0; 316 317 /* Allocate the Tx and Rx buffer descriptors. */ 318 lp->tx_bd_v = dma_alloc_coherent(lp->dev, 319 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 320 &lp->tx_bd_p, GFP_KERNEL); 321 if (!lp->tx_bd_v) 322 return -ENOMEM; 323 324 lp->rx_bd_v = dma_alloc_coherent(lp->dev, 325 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 326 &lp->rx_bd_p, GFP_KERNEL); 327 if (!lp->rx_bd_v) 328 goto out; 329 330 for (i = 0; i < lp->tx_bd_num; i++) { 331 dma_addr_t addr = lp->tx_bd_p + 332 sizeof(*lp->tx_bd_v) * 333 ((i + 1) % lp->tx_bd_num); 334 335 lp->tx_bd_v[i].next = lower_32_bits(addr); 336 if (lp->features & XAE_FEATURE_DMA_64BIT) 337 lp->tx_bd_v[i].next_msb = upper_32_bits(addr); 338 } 339 340 for (i = 0; i < lp->rx_bd_num; i++) { 341 dma_addr_t addr; 342 343 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * 344 ((i + 1) % lp->rx_bd_num); 345 lp->rx_bd_v[i].next = lower_32_bits(addr); 346 if (lp->features & XAE_FEATURE_DMA_64BIT) 347 lp->rx_bd_v[i].next_msb = upper_32_bits(addr); 348 349 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 350 if (!skb) 351 goto out; 352 353 lp->rx_bd_v[i].skb = skb; 354 addr = dma_map_single(lp->dev, skb->data, 355 lp->max_frm_size, DMA_FROM_DEVICE); 356 if (dma_mapping_error(lp->dev, addr)) { 357 netdev_err(ndev, "DMA mapping error\n"); 358 goto out; 359 } 360 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); 361 362 lp->rx_bd_v[i].cntrl = lp->max_frm_size; 363 } 364 365 axienet_dma_start(lp); 366 367 return 0; 368 out: 369 axienet_dma_bd_release(ndev); 370 return -ENOMEM; 371 } 372 373 /** 374 * axienet_set_mac_address - Write the MAC address 375 * @ndev: Pointer to the net_device structure 376 * @address: 6 byte Address to be written as MAC address 377 * 378 * This function is called to initialize the MAC address of the Axi Ethernet 379 * core. It writes to the UAW0 and UAW1 registers of the core. 380 */ 381 static void axienet_set_mac_address(struct net_device *ndev, 382 const void *address) 383 { 384 struct axienet_local *lp = netdev_priv(ndev); 385 386 if (address) 387 eth_hw_addr_set(ndev, address); 388 if (!is_valid_ether_addr(ndev->dev_addr)) 389 eth_hw_addr_random(ndev); 390 391 /* Set up unicast MAC address filter set its mac address */ 392 axienet_iow(lp, XAE_UAW0_OFFSET, 393 (ndev->dev_addr[0]) | 394 (ndev->dev_addr[1] << 8) | 395 (ndev->dev_addr[2] << 16) | 396 (ndev->dev_addr[3] << 24)); 397 axienet_iow(lp, XAE_UAW1_OFFSET, 398 (((axienet_ior(lp, XAE_UAW1_OFFSET)) & 399 ~XAE_UAW1_UNICASTADDR_MASK) | 400 (ndev->dev_addr[4] | 401 (ndev->dev_addr[5] << 8)))); 402 } 403 404 /** 405 * netdev_set_mac_address - Write the MAC address (from outside the driver) 406 * @ndev: Pointer to the net_device structure 407 * @p: 6 byte Address to be written as MAC address 408 * 409 * Return: 0 for all conditions. Presently, there is no failure case. 410 * 411 * This function is called to initialize the MAC address of the Axi Ethernet 412 * core. It calls the core specific axienet_set_mac_address. This is the 413 * function that goes into net_device_ops structure entry ndo_set_mac_address. 414 */ 415 static int netdev_set_mac_address(struct net_device *ndev, void *p) 416 { 417 struct sockaddr *addr = p; 418 419 axienet_set_mac_address(ndev, addr->sa_data); 420 return 0; 421 } 422 423 /** 424 * axienet_set_multicast_list - Prepare the multicast table 425 * @ndev: Pointer to the net_device structure 426 * 427 * This function is called to initialize the multicast table during 428 * initialization. The Axi Ethernet basic multicast support has a four-entry 429 * multicast table which is initialized here. Additionally this function 430 * goes into the net_device_ops structure entry ndo_set_multicast_list. This 431 * means whenever the multicast table entries need to be updated this 432 * function gets called. 433 */ 434 static void axienet_set_multicast_list(struct net_device *ndev) 435 { 436 int i; 437 u32 reg, af0reg, af1reg; 438 struct axienet_local *lp = netdev_priv(ndev); 439 440 if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || 441 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { 442 /* We must make the kernel realize we had to move into 443 * promiscuous mode. If it was a promiscuous mode request 444 * the flag is already set. If not we set it. 445 */ 446 ndev->flags |= IFF_PROMISC; 447 reg = axienet_ior(lp, XAE_FMI_OFFSET); 448 reg |= XAE_FMI_PM_MASK; 449 axienet_iow(lp, XAE_FMI_OFFSET, reg); 450 dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); 451 } else if (!netdev_mc_empty(ndev)) { 452 struct netdev_hw_addr *ha; 453 454 i = 0; 455 netdev_for_each_mc_addr(ha, ndev) { 456 if (i >= XAE_MULTICAST_CAM_TABLE_NUM) 457 break; 458 459 af0reg = (ha->addr[0]); 460 af0reg |= (ha->addr[1] << 8); 461 af0reg |= (ha->addr[2] << 16); 462 af0reg |= (ha->addr[3] << 24); 463 464 af1reg = (ha->addr[4]); 465 af1reg |= (ha->addr[5] << 8); 466 467 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; 468 reg |= i; 469 470 axienet_iow(lp, XAE_FMI_OFFSET, reg); 471 axienet_iow(lp, XAE_AF0_OFFSET, af0reg); 472 axienet_iow(lp, XAE_AF1_OFFSET, af1reg); 473 i++; 474 } 475 } else { 476 reg = axienet_ior(lp, XAE_FMI_OFFSET); 477 reg &= ~XAE_FMI_PM_MASK; 478 479 axienet_iow(lp, XAE_FMI_OFFSET, reg); 480 481 for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { 482 reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00; 483 reg |= i; 484 485 axienet_iow(lp, XAE_FMI_OFFSET, reg); 486 axienet_iow(lp, XAE_AF0_OFFSET, 0); 487 axienet_iow(lp, XAE_AF1_OFFSET, 0); 488 } 489 490 dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); 491 } 492 } 493 494 /** 495 * axienet_setoptions - Set an Axi Ethernet option 496 * @ndev: Pointer to the net_device structure 497 * @options: Option to be enabled/disabled 498 * 499 * The Axi Ethernet core has multiple features which can be selectively turned 500 * on or off. The typical options could be jumbo frame option, basic VLAN 501 * option, promiscuous mode option etc. This function is used to set or clear 502 * these options in the Axi Ethernet hardware. This is done through 503 * axienet_option structure . 504 */ 505 static void axienet_setoptions(struct net_device *ndev, u32 options) 506 { 507 int reg; 508 struct axienet_local *lp = netdev_priv(ndev); 509 struct axienet_option *tp = &axienet_options[0]; 510 511 while (tp->opt) { 512 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); 513 if (options & tp->opt) 514 reg |= tp->m_or; 515 axienet_iow(lp, tp->reg, reg); 516 tp++; 517 } 518 519 lp->options |= options; 520 } 521 522 static int __axienet_device_reset(struct axienet_local *lp) 523 { 524 u32 value; 525 int ret; 526 527 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset 528 * process of Axi DMA takes a while to complete as all pending 529 * commands/transfers will be flushed or completed during this 530 * reset process. 531 * Note that even though both TX and RX have their own reset register, 532 * they both reset the entire DMA core, so only one needs to be used. 533 */ 534 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); 535 ret = read_poll_timeout(axienet_dma_in32, value, 536 !(value & XAXIDMA_CR_RESET_MASK), 537 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 538 XAXIDMA_TX_CR_OFFSET); 539 if (ret) { 540 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); 541 return ret; 542 } 543 544 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ 545 ret = read_poll_timeout(axienet_ior, value, 546 value & XAE_INT_PHYRSTCMPLT_MASK, 547 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 548 XAE_IS_OFFSET); 549 if (ret) { 550 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); 551 return ret; 552 } 553 554 return 0; 555 } 556 557 /** 558 * axienet_dma_stop - Stop DMA operation 559 * @lp: Pointer to the axienet_local structure 560 */ 561 static void axienet_dma_stop(struct axienet_local *lp) 562 { 563 int count; 564 u32 cr, sr; 565 566 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 567 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 568 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 569 synchronize_irq(lp->rx_irq); 570 571 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 572 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 573 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 574 synchronize_irq(lp->tx_irq); 575 576 /* Give DMAs a chance to halt gracefully */ 577 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 578 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 579 msleep(20); 580 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 581 } 582 583 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 584 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 585 msleep(20); 586 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 587 } 588 589 /* Do a reset to ensure DMA is really stopped */ 590 axienet_lock_mii(lp); 591 __axienet_device_reset(lp); 592 axienet_unlock_mii(lp); 593 } 594 595 /** 596 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. 597 * @ndev: Pointer to the net_device structure 598 * 599 * This function is called to reset and initialize the Axi Ethernet core. This 600 * is typically called during initialization. It does a reset of the Axi DMA 601 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines 602 * are connected to Axi Ethernet reset lines, this in turn resets the Axi 603 * Ethernet core. No separate hardware reset is done for the Axi Ethernet 604 * core. 605 * Returns 0 on success or a negative error number otherwise. 606 */ 607 static int axienet_device_reset(struct net_device *ndev) 608 { 609 u32 axienet_status; 610 struct axienet_local *lp = netdev_priv(ndev); 611 int ret; 612 613 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; 614 lp->options |= XAE_OPTION_VLAN; 615 lp->options &= (~XAE_OPTION_JUMBO); 616 617 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) { 618 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + 619 XAE_TRL_SIZE; 620 621 if (lp->max_frm_size <= lp->rxmem) 622 lp->options |= XAE_OPTION_JUMBO; 623 } 624 625 if (!lp->use_dmaengine) { 626 ret = __axienet_device_reset(lp); 627 if (ret) 628 return ret; 629 630 ret = axienet_dma_bd_init(ndev); 631 if (ret) { 632 netdev_err(ndev, "%s: descriptor allocation failed\n", 633 __func__); 634 return ret; 635 } 636 } 637 638 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 639 axienet_status &= ~XAE_RCW1_RX_MASK; 640 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 641 642 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 643 if (axienet_status & XAE_INT_RXRJECT_MASK) 644 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 645 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 646 XAE_INT_RECV_ERROR_MASK : 0); 647 648 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 649 650 /* Sync default options with HW but leave receiver and 651 * transmitter disabled. 652 */ 653 axienet_setoptions(ndev, lp->options & 654 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 655 axienet_set_mac_address(ndev, NULL); 656 axienet_set_multicast_list(ndev); 657 axienet_setoptions(ndev, lp->options); 658 659 netif_trans_update(ndev); 660 661 return 0; 662 } 663 664 /** 665 * axienet_free_tx_chain - Clean up a series of linked TX descriptors. 666 * @lp: Pointer to the axienet_local structure 667 * @first_bd: Index of first descriptor to clean up 668 * @nr_bds: Max number of descriptors to clean up 669 * @force: Whether to clean descriptors even if not complete 670 * @sizep: Pointer to a u32 filled with the total sum of all bytes 671 * in all cleaned-up descriptors. Ignored if NULL. 672 * @budget: NAPI budget (use 0 when not called from NAPI poll) 673 * 674 * Would either be called after a successful transmit operation, or after 675 * there was an error when setting up the chain. 676 * Returns the number of descriptors handled. 677 */ 678 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, 679 int nr_bds, bool force, u32 *sizep, int budget) 680 { 681 struct axidma_bd *cur_p; 682 unsigned int status; 683 dma_addr_t phys; 684 int i; 685 686 for (i = 0; i < nr_bds; i++) { 687 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; 688 status = cur_p->status; 689 690 /* If force is not specified, clean up only descriptors 691 * that have been completed by the MAC. 692 */ 693 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) 694 break; 695 696 /* Ensure we see complete descriptor update */ 697 dma_rmb(); 698 phys = desc_get_phys_addr(lp, cur_p); 699 dma_unmap_single(lp->dev, phys, 700 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), 701 DMA_TO_DEVICE); 702 703 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) 704 napi_consume_skb(cur_p->skb, budget); 705 706 cur_p->app0 = 0; 707 cur_p->app1 = 0; 708 cur_p->app2 = 0; 709 cur_p->app4 = 0; 710 cur_p->skb = NULL; 711 /* ensure our transmit path and device don't prematurely see status cleared */ 712 wmb(); 713 cur_p->cntrl = 0; 714 cur_p->status = 0; 715 716 if (sizep) 717 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 718 } 719 720 return i; 721 } 722 723 /** 724 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy 725 * @lp: Pointer to the axienet_local structure 726 * @num_frag: The number of BDs to check for 727 * 728 * Return: 0, on success 729 * NETDEV_TX_BUSY, if any of the descriptors are not free 730 * 731 * This function is invoked before BDs are allocated and transmission starts. 732 * This function returns 0 if a BD or group of BDs can be allocated for 733 * transmission. If the BD or any of the BDs are not free the function 734 * returns a busy status. 735 */ 736 static inline int axienet_check_tx_bd_space(struct axienet_local *lp, 737 int num_frag) 738 { 739 struct axidma_bd *cur_p; 740 741 /* Ensure we see all descriptor updates from device or TX polling */ 742 rmb(); 743 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % 744 lp->tx_bd_num]; 745 if (cur_p->cntrl) 746 return NETDEV_TX_BUSY; 747 return 0; 748 } 749 750 /** 751 * axienet_dma_tx_cb - DMA engine callback for TX channel. 752 * @data: Pointer to the axienet_local structure. 753 * @result: error reporting through dmaengine_result. 754 * This function is called by dmaengine driver for TX channel to notify 755 * that the transmit is done. 756 */ 757 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result) 758 { 759 struct skbuf_dma_descriptor *skbuf_dma; 760 struct axienet_local *lp = data; 761 struct netdev_queue *txq; 762 int len; 763 764 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++); 765 len = skbuf_dma->skb->len; 766 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb); 767 u64_stats_update_begin(&lp->tx_stat_sync); 768 u64_stats_add(&lp->tx_bytes, len); 769 u64_stats_add(&lp->tx_packets, 1); 770 u64_stats_update_end(&lp->tx_stat_sync); 771 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE); 772 dev_consume_skb_any(skbuf_dma->skb); 773 netif_txq_completed_wake(txq, 1, len, 774 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), 775 2 * MAX_SKB_FRAGS); 776 } 777 778 /** 779 * axienet_start_xmit_dmaengine - Starts the transmission. 780 * @skb: sk_buff pointer that contains data to be Txed. 781 * @ndev: Pointer to net_device structure. 782 * 783 * Return: NETDEV_TX_OK on success or any non space errors. 784 * NETDEV_TX_BUSY when free element in TX skb ring buffer 785 * is not available. 786 * 787 * This function is invoked to initiate transmission. The 788 * function sets the skbs, register dma callback API and submit 789 * the dma transaction. 790 * Additionally if checksum offloading is supported, 791 * it populates AXI Stream Control fields with appropriate values. 792 */ 793 static netdev_tx_t 794 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) 795 { 796 struct dma_async_tx_descriptor *dma_tx_desc = NULL; 797 struct axienet_local *lp = netdev_priv(ndev); 798 u32 app_metadata[DMA_NUM_APP_WORDS] = {0}; 799 struct skbuf_dma_descriptor *skbuf_dma; 800 struct dma_device *dma_dev; 801 struct netdev_queue *txq; 802 u32 csum_start_off; 803 u32 csum_index_off; 804 int sg_len; 805 int ret; 806 807 dma_dev = lp->tx_chan->device; 808 sg_len = skb_shinfo(skb)->nr_frags + 1; 809 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) { 810 netif_stop_queue(ndev); 811 if (net_ratelimit()) 812 netdev_warn(ndev, "TX ring unexpectedly full\n"); 813 return NETDEV_TX_BUSY; 814 } 815 816 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head); 817 if (!skbuf_dma) 818 goto xmit_error_drop_skb; 819 820 lp->tx_ring_head++; 821 sg_init_table(skbuf_dma->sgl, sg_len); 822 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len); 823 if (ret < 0) 824 goto xmit_error_drop_skb; 825 826 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); 827 if (!ret) 828 goto xmit_error_drop_skb; 829 830 /* Fill up app fields for checksum */ 831 if (skb->ip_summed == CHECKSUM_PARTIAL) { 832 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 833 /* Tx Full Checksum Offload Enabled */ 834 app_metadata[0] |= 2; 835 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { 836 csum_start_off = skb_transport_offset(skb); 837 csum_index_off = csum_start_off + skb->csum_offset; 838 /* Tx Partial Checksum Offload Enabled */ 839 app_metadata[0] |= 1; 840 app_metadata[1] = (csum_start_off << 16) | csum_index_off; 841 } 842 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 843 app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */ 844 } 845 846 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl, 847 sg_len, DMA_MEM_TO_DEV, 848 DMA_PREP_INTERRUPT, (void *)app_metadata); 849 if (!dma_tx_desc) 850 goto xmit_error_unmap_sg; 851 852 skbuf_dma->skb = skb; 853 skbuf_dma->sg_len = sg_len; 854 dma_tx_desc->callback_param = lp; 855 dma_tx_desc->callback_result = axienet_dma_tx_cb; 856 dmaengine_submit(dma_tx_desc); 857 dma_async_issue_pending(lp->tx_chan); 858 txq = skb_get_tx_queue(lp->ndev, skb); 859 netdev_tx_sent_queue(txq, skb->len); 860 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), 861 MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); 862 863 return NETDEV_TX_OK; 864 865 xmit_error_unmap_sg: 866 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); 867 xmit_error_drop_skb: 868 dev_kfree_skb_any(skb); 869 return NETDEV_TX_OK; 870 } 871 872 /** 873 * axienet_tx_poll - Invoked once a transmit is completed by the 874 * Axi DMA Tx channel. 875 * @napi: Pointer to NAPI structure. 876 * @budget: Max number of TX packets to process. 877 * 878 * Return: Number of TX packets processed. 879 * 880 * This function is invoked from the NAPI processing to notify the completion 881 * of transmit operation. It clears fields in the corresponding Tx BDs and 882 * unmaps the corresponding buffer so that CPU can regain ownership of the 883 * buffer. It finally invokes "netif_wake_queue" to restart transmission if 884 * required. 885 */ 886 static int axienet_tx_poll(struct napi_struct *napi, int budget) 887 { 888 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); 889 struct net_device *ndev = lp->ndev; 890 u32 size = 0; 891 int packets; 892 893 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget); 894 895 if (packets) { 896 lp->tx_bd_ci += packets; 897 if (lp->tx_bd_ci >= lp->tx_bd_num) 898 lp->tx_bd_ci %= lp->tx_bd_num; 899 900 u64_stats_update_begin(&lp->tx_stat_sync); 901 u64_stats_add(&lp->tx_packets, packets); 902 u64_stats_add(&lp->tx_bytes, size); 903 u64_stats_update_end(&lp->tx_stat_sync); 904 905 /* Matches barrier in axienet_start_xmit */ 906 smp_mb(); 907 908 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 909 netif_wake_queue(ndev); 910 } 911 912 if (packets < budget && napi_complete_done(napi, packets)) { 913 /* Re-enable TX completion interrupts. This should 914 * cause an immediate interrupt if any TX packets are 915 * already pending. 916 */ 917 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 918 } 919 return packets; 920 } 921 922 /** 923 * axienet_start_xmit - Starts the transmission. 924 * @skb: sk_buff pointer that contains data to be Txed. 925 * @ndev: Pointer to net_device structure. 926 * 927 * Return: NETDEV_TX_OK, on success 928 * NETDEV_TX_BUSY, if any of the descriptors are not free 929 * 930 * This function is invoked from upper layers to initiate transmission. The 931 * function uses the next available free BDs and populates their fields to 932 * start the transmission. Additionally if checksum offloading is supported, 933 * it populates AXI Stream Control fields with appropriate values. 934 */ 935 static netdev_tx_t 936 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 937 { 938 u32 ii; 939 u32 num_frag; 940 u32 csum_start_off; 941 u32 csum_index_off; 942 skb_frag_t *frag; 943 dma_addr_t tail_p, phys; 944 u32 orig_tail_ptr, new_tail_ptr; 945 struct axienet_local *lp = netdev_priv(ndev); 946 struct axidma_bd *cur_p; 947 948 orig_tail_ptr = lp->tx_bd_tail; 949 new_tail_ptr = orig_tail_ptr; 950 951 num_frag = skb_shinfo(skb)->nr_frags; 952 cur_p = &lp->tx_bd_v[orig_tail_ptr]; 953 954 if (axienet_check_tx_bd_space(lp, num_frag + 1)) { 955 /* Should not happen as last start_xmit call should have 956 * checked for sufficient space and queue should only be 957 * woken when sufficient space is available. 958 */ 959 netif_stop_queue(ndev); 960 if (net_ratelimit()) 961 netdev_warn(ndev, "TX ring unexpectedly full\n"); 962 return NETDEV_TX_BUSY; 963 } 964 965 if (skb->ip_summed == CHECKSUM_PARTIAL) { 966 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 967 /* Tx Full Checksum Offload Enabled */ 968 cur_p->app0 |= 2; 969 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { 970 csum_start_off = skb_transport_offset(skb); 971 csum_index_off = csum_start_off + skb->csum_offset; 972 /* Tx Partial Checksum Offload Enabled */ 973 cur_p->app0 |= 1; 974 cur_p->app1 = (csum_start_off << 16) | csum_index_off; 975 } 976 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 977 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ 978 } 979 980 phys = dma_map_single(lp->dev, skb->data, 981 skb_headlen(skb), DMA_TO_DEVICE); 982 if (unlikely(dma_mapping_error(lp->dev, phys))) { 983 if (net_ratelimit()) 984 netdev_err(ndev, "TX DMA mapping error\n"); 985 ndev->stats.tx_dropped++; 986 return NETDEV_TX_OK; 987 } 988 desc_set_phys_addr(lp, phys, cur_p); 989 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; 990 991 for (ii = 0; ii < num_frag; ii++) { 992 if (++new_tail_ptr >= lp->tx_bd_num) 993 new_tail_ptr = 0; 994 cur_p = &lp->tx_bd_v[new_tail_ptr]; 995 frag = &skb_shinfo(skb)->frags[ii]; 996 phys = dma_map_single(lp->dev, 997 skb_frag_address(frag), 998 skb_frag_size(frag), 999 DMA_TO_DEVICE); 1000 if (unlikely(dma_mapping_error(lp->dev, phys))) { 1001 if (net_ratelimit()) 1002 netdev_err(ndev, "TX DMA mapping error\n"); 1003 ndev->stats.tx_dropped++; 1004 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, 1005 true, NULL, 0); 1006 return NETDEV_TX_OK; 1007 } 1008 desc_set_phys_addr(lp, phys, cur_p); 1009 cur_p->cntrl = skb_frag_size(frag); 1010 } 1011 1012 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; 1013 cur_p->skb = skb; 1014 1015 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; 1016 if (++new_tail_ptr >= lp->tx_bd_num) 1017 new_tail_ptr = 0; 1018 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); 1019 1020 /* Start the transfer */ 1021 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 1022 1023 /* Stop queue if next transmit may not have space */ 1024 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { 1025 netif_stop_queue(ndev); 1026 1027 /* Matches barrier in axienet_tx_poll */ 1028 smp_mb(); 1029 1030 /* Space might have just been freed - check again */ 1031 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 1032 netif_wake_queue(ndev); 1033 } 1034 1035 return NETDEV_TX_OK; 1036 } 1037 1038 /** 1039 * axienet_dma_rx_cb - DMA engine callback for RX channel. 1040 * @data: Pointer to the skbuf_dma_descriptor structure. 1041 * @result: error reporting through dmaengine_result. 1042 * This function is called by dmaengine driver for RX channel to notify 1043 * that the packet is received. 1044 */ 1045 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result) 1046 { 1047 struct skbuf_dma_descriptor *skbuf_dma; 1048 size_t meta_len, meta_max_len, rx_len; 1049 struct axienet_local *lp = data; 1050 struct sk_buff *skb; 1051 u32 *app_metadata; 1052 1053 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++); 1054 skb = skbuf_dma->skb; 1055 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len, 1056 &meta_max_len); 1057 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size, 1058 DMA_FROM_DEVICE); 1059 /* TODO: Derive app word index programmatically */ 1060 rx_len = (app_metadata[LEN_APP] & 0xFFFF); 1061 skb_put(skb, rx_len); 1062 skb->protocol = eth_type_trans(skb, lp->ndev); 1063 skb->ip_summed = CHECKSUM_NONE; 1064 1065 __netif_rx(skb); 1066 u64_stats_update_begin(&lp->rx_stat_sync); 1067 u64_stats_add(&lp->rx_packets, 1); 1068 u64_stats_add(&lp->rx_bytes, rx_len); 1069 u64_stats_update_end(&lp->rx_stat_sync); 1070 axienet_rx_submit_desc(lp->ndev); 1071 dma_async_issue_pending(lp->rx_chan); 1072 } 1073 1074 /** 1075 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. 1076 * @napi: Pointer to NAPI structure. 1077 * @budget: Max number of RX packets to process. 1078 * 1079 * Return: Number of RX packets processed. 1080 */ 1081 static int axienet_rx_poll(struct napi_struct *napi, int budget) 1082 { 1083 u32 length; 1084 u32 csumstatus; 1085 u32 size = 0; 1086 int packets = 0; 1087 dma_addr_t tail_p = 0; 1088 struct axidma_bd *cur_p; 1089 struct sk_buff *skb, *new_skb; 1090 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); 1091 1092 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 1093 1094 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { 1095 dma_addr_t phys; 1096 1097 /* Ensure we see complete descriptor update */ 1098 dma_rmb(); 1099 1100 skb = cur_p->skb; 1101 cur_p->skb = NULL; 1102 1103 /* skb could be NULL if a previous pass already received the 1104 * packet for this slot in the ring, but failed to refill it 1105 * with a newly allocated buffer. In this case, don't try to 1106 * receive it again. 1107 */ 1108 if (likely(skb)) { 1109 length = cur_p->app4 & 0x0000FFFF; 1110 1111 phys = desc_get_phys_addr(lp, cur_p); 1112 dma_unmap_single(lp->dev, phys, lp->max_frm_size, 1113 DMA_FROM_DEVICE); 1114 1115 skb_put(skb, length); 1116 skb->protocol = eth_type_trans(skb, lp->ndev); 1117 /*skb_checksum_none_assert(skb);*/ 1118 skb->ip_summed = CHECKSUM_NONE; 1119 1120 /* if we're doing Rx csum offload, set it up */ 1121 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { 1122 csumstatus = (cur_p->app2 & 1123 XAE_FULL_CSUM_STATUS_MASK) >> 3; 1124 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || 1125 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { 1126 skb->ip_summed = CHECKSUM_UNNECESSARY; 1127 } 1128 } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && 1129 skb->protocol == htons(ETH_P_IP) && 1130 skb->len > 64) { 1131 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); 1132 skb->ip_summed = CHECKSUM_COMPLETE; 1133 } 1134 1135 napi_gro_receive(napi, skb); 1136 1137 size += length; 1138 packets++; 1139 } 1140 1141 new_skb = napi_alloc_skb(napi, lp->max_frm_size); 1142 if (!new_skb) 1143 break; 1144 1145 phys = dma_map_single(lp->dev, new_skb->data, 1146 lp->max_frm_size, 1147 DMA_FROM_DEVICE); 1148 if (unlikely(dma_mapping_error(lp->dev, phys))) { 1149 if (net_ratelimit()) 1150 netdev_err(lp->ndev, "RX DMA mapping error\n"); 1151 dev_kfree_skb(new_skb); 1152 break; 1153 } 1154 desc_set_phys_addr(lp, phys, cur_p); 1155 1156 cur_p->cntrl = lp->max_frm_size; 1157 cur_p->status = 0; 1158 cur_p->skb = new_skb; 1159 1160 /* Only update tail_p to mark this slot as usable after it has 1161 * been successfully refilled. 1162 */ 1163 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; 1164 1165 if (++lp->rx_bd_ci >= lp->rx_bd_num) 1166 lp->rx_bd_ci = 0; 1167 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 1168 } 1169 1170 u64_stats_update_begin(&lp->rx_stat_sync); 1171 u64_stats_add(&lp->rx_packets, packets); 1172 u64_stats_add(&lp->rx_bytes, size); 1173 u64_stats_update_end(&lp->rx_stat_sync); 1174 1175 if (tail_p) 1176 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); 1177 1178 if (packets < budget && napi_complete_done(napi, packets)) { 1179 /* Re-enable RX completion interrupts. This should 1180 * cause an immediate interrupt if any RX packets are 1181 * already pending. 1182 */ 1183 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 1184 } 1185 return packets; 1186 } 1187 1188 /** 1189 * axienet_tx_irq - Tx Done Isr. 1190 * @irq: irq number 1191 * @_ndev: net_device pointer 1192 * 1193 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. 1194 * 1195 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the 1196 * TX BD processing. 1197 */ 1198 static irqreturn_t axienet_tx_irq(int irq, void *_ndev) 1199 { 1200 unsigned int status; 1201 struct net_device *ndev = _ndev; 1202 struct axienet_local *lp = netdev_priv(ndev); 1203 1204 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1205 1206 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1207 return IRQ_NONE; 1208 1209 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); 1210 1211 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { 1212 netdev_err(ndev, "DMA Tx error 0x%x\n", status); 1213 netdev_err(ndev, "Current BD is at: 0x%x%08x\n", 1214 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, 1215 (lp->tx_bd_v[lp->tx_bd_ci]).phys); 1216 schedule_work(&lp->dma_err_task); 1217 } else { 1218 /* Disable further TX completion interrupts and schedule 1219 * NAPI to handle the completions. 1220 */ 1221 u32 cr = lp->tx_dma_cr; 1222 1223 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 1224 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1225 1226 napi_schedule(&lp->napi_tx); 1227 } 1228 1229 return IRQ_HANDLED; 1230 } 1231 1232 /** 1233 * axienet_rx_irq - Rx Isr. 1234 * @irq: irq number 1235 * @_ndev: net_device pointer 1236 * 1237 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. 1238 * 1239 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD 1240 * processing. 1241 */ 1242 static irqreturn_t axienet_rx_irq(int irq, void *_ndev) 1243 { 1244 unsigned int status; 1245 struct net_device *ndev = _ndev; 1246 struct axienet_local *lp = netdev_priv(ndev); 1247 1248 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1249 1250 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1251 return IRQ_NONE; 1252 1253 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); 1254 1255 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { 1256 netdev_err(ndev, "DMA Rx error 0x%x\n", status); 1257 netdev_err(ndev, "Current BD is at: 0x%x%08x\n", 1258 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, 1259 (lp->rx_bd_v[lp->rx_bd_ci]).phys); 1260 schedule_work(&lp->dma_err_task); 1261 } else { 1262 /* Disable further RX completion interrupts and schedule 1263 * NAPI receive. 1264 */ 1265 u32 cr = lp->rx_dma_cr; 1266 1267 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 1268 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1269 1270 napi_schedule(&lp->napi_rx); 1271 } 1272 1273 return IRQ_HANDLED; 1274 } 1275 1276 /** 1277 * axienet_eth_irq - Ethernet core Isr. 1278 * @irq: irq number 1279 * @_ndev: net_device pointer 1280 * 1281 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. 1282 * 1283 * Handle miscellaneous conditions indicated by Ethernet core IRQ. 1284 */ 1285 static irqreturn_t axienet_eth_irq(int irq, void *_ndev) 1286 { 1287 struct net_device *ndev = _ndev; 1288 struct axienet_local *lp = netdev_priv(ndev); 1289 unsigned int pending; 1290 1291 pending = axienet_ior(lp, XAE_IP_OFFSET); 1292 if (!pending) 1293 return IRQ_NONE; 1294 1295 if (pending & XAE_INT_RXFIFOOVR_MASK) 1296 ndev->stats.rx_missed_errors++; 1297 1298 if (pending & XAE_INT_RXRJECT_MASK) 1299 ndev->stats.rx_frame_errors++; 1300 1301 axienet_iow(lp, XAE_IS_OFFSET, pending); 1302 return IRQ_HANDLED; 1303 } 1304 1305 static void axienet_dma_err_handler(struct work_struct *work); 1306 1307 /** 1308 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine. 1309 * allocate skbuff, map the scatterlist and obtain a descriptor 1310 * and then add the callback information and submit descriptor. 1311 * 1312 * @ndev: net_device pointer 1313 * 1314 */ 1315 static void axienet_rx_submit_desc(struct net_device *ndev) 1316 { 1317 struct dma_async_tx_descriptor *dma_rx_desc = NULL; 1318 struct axienet_local *lp = netdev_priv(ndev); 1319 struct skbuf_dma_descriptor *skbuf_dma; 1320 struct sk_buff *skb; 1321 dma_addr_t addr; 1322 1323 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head); 1324 if (!skbuf_dma) 1325 return; 1326 1327 lp->rx_ring_head++; 1328 skb = netdev_alloc_skb(ndev, lp->max_frm_size); 1329 if (!skb) 1330 return; 1331 1332 sg_init_table(skbuf_dma->sgl, 1); 1333 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); 1334 if (unlikely(dma_mapping_error(lp->dev, addr))) { 1335 if (net_ratelimit()) 1336 netdev_err(ndev, "DMA mapping error\n"); 1337 goto rx_submit_err_free_skb; 1338 } 1339 sg_dma_address(skbuf_dma->sgl) = addr; 1340 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size; 1341 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl, 1342 1, DMA_DEV_TO_MEM, 1343 DMA_PREP_INTERRUPT); 1344 if (!dma_rx_desc) 1345 goto rx_submit_err_unmap_skb; 1346 1347 skbuf_dma->skb = skb; 1348 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl); 1349 skbuf_dma->desc = dma_rx_desc; 1350 dma_rx_desc->callback_param = lp; 1351 dma_rx_desc->callback_result = axienet_dma_rx_cb; 1352 dmaengine_submit(dma_rx_desc); 1353 1354 return; 1355 1356 rx_submit_err_unmap_skb: 1357 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE); 1358 rx_submit_err_free_skb: 1359 dev_kfree_skb(skb); 1360 } 1361 1362 /** 1363 * axienet_init_dmaengine - init the dmaengine code. 1364 * @ndev: Pointer to net_device structure 1365 * 1366 * Return: 0, on success. 1367 * non-zero error value on failure 1368 * 1369 * This is the dmaengine initialization code. 1370 */ 1371 static int axienet_init_dmaengine(struct net_device *ndev) 1372 { 1373 struct axienet_local *lp = netdev_priv(ndev); 1374 struct skbuf_dma_descriptor *skbuf_dma; 1375 int i, ret; 1376 1377 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 1378 if (IS_ERR(lp->tx_chan)) { 1379 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n"); 1380 return PTR_ERR(lp->tx_chan); 1381 } 1382 1383 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0"); 1384 if (IS_ERR(lp->rx_chan)) { 1385 ret = PTR_ERR(lp->rx_chan); 1386 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n"); 1387 goto err_dma_release_tx; 1388 } 1389 1390 lp->tx_ring_tail = 0; 1391 lp->tx_ring_head = 0; 1392 lp->rx_ring_tail = 0; 1393 lp->rx_ring_head = 0; 1394 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring), 1395 GFP_KERNEL); 1396 if (!lp->tx_skb_ring) { 1397 ret = -ENOMEM; 1398 goto err_dma_release_rx; 1399 } 1400 for (i = 0; i < TX_BD_NUM_MAX; i++) { 1401 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); 1402 if (!skbuf_dma) { 1403 ret = -ENOMEM; 1404 goto err_free_tx_skb_ring; 1405 } 1406 lp->tx_skb_ring[i] = skbuf_dma; 1407 } 1408 1409 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring), 1410 GFP_KERNEL); 1411 if (!lp->rx_skb_ring) { 1412 ret = -ENOMEM; 1413 goto err_free_tx_skb_ring; 1414 } 1415 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) { 1416 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); 1417 if (!skbuf_dma) { 1418 ret = -ENOMEM; 1419 goto err_free_rx_skb_ring; 1420 } 1421 lp->rx_skb_ring[i] = skbuf_dma; 1422 } 1423 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */ 1424 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) 1425 axienet_rx_submit_desc(ndev); 1426 dma_async_issue_pending(lp->rx_chan); 1427 1428 return 0; 1429 1430 err_free_rx_skb_ring: 1431 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) 1432 kfree(lp->rx_skb_ring[i]); 1433 kfree(lp->rx_skb_ring); 1434 err_free_tx_skb_ring: 1435 for (i = 0; i < TX_BD_NUM_MAX; i++) 1436 kfree(lp->tx_skb_ring[i]); 1437 kfree(lp->tx_skb_ring); 1438 err_dma_release_rx: 1439 dma_release_channel(lp->rx_chan); 1440 err_dma_release_tx: 1441 dma_release_channel(lp->tx_chan); 1442 return ret; 1443 } 1444 1445 /** 1446 * axienet_init_legacy_dma - init the dma legacy code. 1447 * @ndev: Pointer to net_device structure 1448 * 1449 * Return: 0, on success. 1450 * non-zero error value on failure 1451 * 1452 * This is the dma initialization code. It also allocates interrupt 1453 * service routines, enables the interrupt lines and ISR handling. 1454 * 1455 */ 1456 static int axienet_init_legacy_dma(struct net_device *ndev) 1457 { 1458 int ret; 1459 struct axienet_local *lp = netdev_priv(ndev); 1460 1461 /* Enable worker thread for Axi DMA error handling */ 1462 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); 1463 1464 napi_enable(&lp->napi_rx); 1465 napi_enable(&lp->napi_tx); 1466 1467 /* Enable interrupts for Axi DMA Tx */ 1468 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, 1469 ndev->name, ndev); 1470 if (ret) 1471 goto err_tx_irq; 1472 /* Enable interrupts for Axi DMA Rx */ 1473 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, 1474 ndev->name, ndev); 1475 if (ret) 1476 goto err_rx_irq; 1477 /* Enable interrupts for Axi Ethernet core (if defined) */ 1478 if (lp->eth_irq > 0) { 1479 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1480 ndev->name, ndev); 1481 if (ret) 1482 goto err_eth_irq; 1483 } 1484 1485 return 0; 1486 1487 err_eth_irq: 1488 free_irq(lp->rx_irq, ndev); 1489 err_rx_irq: 1490 free_irq(lp->tx_irq, ndev); 1491 err_tx_irq: 1492 napi_disable(&lp->napi_tx); 1493 napi_disable(&lp->napi_rx); 1494 cancel_work_sync(&lp->dma_err_task); 1495 dev_err(lp->dev, "request_irq() failed\n"); 1496 return ret; 1497 } 1498 1499 /** 1500 * axienet_open - Driver open routine. 1501 * @ndev: Pointer to net_device structure 1502 * 1503 * Return: 0, on success. 1504 * non-zero error value on failure 1505 * 1506 * This is the driver open routine. It calls phylink_start to start the 1507 * PHY device. 1508 * It also allocates interrupt service routines, enables the interrupt lines 1509 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer 1510 * descriptors are initialized. 1511 */ 1512 static int axienet_open(struct net_device *ndev) 1513 { 1514 int ret; 1515 struct axienet_local *lp = netdev_priv(ndev); 1516 1517 /* When we do an Axi Ethernet reset, it resets the complete core 1518 * including the MDIO. MDIO must be disabled before resetting. 1519 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 1520 */ 1521 axienet_lock_mii(lp); 1522 ret = axienet_device_reset(ndev); 1523 axienet_unlock_mii(lp); 1524 1525 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); 1526 if (ret) { 1527 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); 1528 return ret; 1529 } 1530 1531 phylink_start(lp->phylink); 1532 1533 if (lp->use_dmaengine) { 1534 /* Enable interrupts for Axi Ethernet core (if defined) */ 1535 if (lp->eth_irq > 0) { 1536 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1537 ndev->name, ndev); 1538 if (ret) 1539 goto err_phy; 1540 } 1541 1542 ret = axienet_init_dmaengine(ndev); 1543 if (ret < 0) 1544 goto err_free_eth_irq; 1545 } else { 1546 ret = axienet_init_legacy_dma(ndev); 1547 if (ret) 1548 goto err_phy; 1549 } 1550 1551 return 0; 1552 1553 err_free_eth_irq: 1554 if (lp->eth_irq > 0) 1555 free_irq(lp->eth_irq, ndev); 1556 err_phy: 1557 phylink_stop(lp->phylink); 1558 phylink_disconnect_phy(lp->phylink); 1559 return ret; 1560 } 1561 1562 /** 1563 * axienet_stop - Driver stop routine. 1564 * @ndev: Pointer to net_device structure 1565 * 1566 * Return: 0, on success. 1567 * 1568 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY 1569 * device. It also removes the interrupt handlers and disables the interrupts. 1570 * The Axi DMA Tx/Rx BDs are released. 1571 */ 1572 static int axienet_stop(struct net_device *ndev) 1573 { 1574 struct axienet_local *lp = netdev_priv(ndev); 1575 int i; 1576 1577 if (!lp->use_dmaengine) { 1578 napi_disable(&lp->napi_tx); 1579 napi_disable(&lp->napi_rx); 1580 } 1581 1582 phylink_stop(lp->phylink); 1583 phylink_disconnect_phy(lp->phylink); 1584 1585 axienet_setoptions(ndev, lp->options & 1586 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1587 1588 if (!lp->use_dmaengine) { 1589 axienet_dma_stop(lp); 1590 cancel_work_sync(&lp->dma_err_task); 1591 free_irq(lp->tx_irq, ndev); 1592 free_irq(lp->rx_irq, ndev); 1593 axienet_dma_bd_release(ndev); 1594 } else { 1595 dmaengine_terminate_sync(lp->tx_chan); 1596 dmaengine_synchronize(lp->tx_chan); 1597 dmaengine_terminate_sync(lp->rx_chan); 1598 dmaengine_synchronize(lp->rx_chan); 1599 1600 for (i = 0; i < TX_BD_NUM_MAX; i++) 1601 kfree(lp->tx_skb_ring[i]); 1602 kfree(lp->tx_skb_ring); 1603 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) 1604 kfree(lp->rx_skb_ring[i]); 1605 kfree(lp->rx_skb_ring); 1606 1607 dma_release_channel(lp->rx_chan); 1608 dma_release_channel(lp->tx_chan); 1609 } 1610 1611 axienet_iow(lp, XAE_IE_OFFSET, 0); 1612 1613 if (lp->eth_irq > 0) 1614 free_irq(lp->eth_irq, ndev); 1615 return 0; 1616 } 1617 1618 /** 1619 * axienet_change_mtu - Driver change mtu routine. 1620 * @ndev: Pointer to net_device structure 1621 * @new_mtu: New mtu value to be applied 1622 * 1623 * Return: Always returns 0 (success). 1624 * 1625 * This is the change mtu driver routine. It checks if the Axi Ethernet 1626 * hardware supports jumbo frames before changing the mtu. This can be 1627 * called only when the device is not up. 1628 */ 1629 static int axienet_change_mtu(struct net_device *ndev, int new_mtu) 1630 { 1631 struct axienet_local *lp = netdev_priv(ndev); 1632 1633 if (netif_running(ndev)) 1634 return -EBUSY; 1635 1636 if ((new_mtu + VLAN_ETH_HLEN + 1637 XAE_TRL_SIZE) > lp->rxmem) 1638 return -EINVAL; 1639 1640 WRITE_ONCE(ndev->mtu, new_mtu); 1641 1642 return 0; 1643 } 1644 1645 #ifdef CONFIG_NET_POLL_CONTROLLER 1646 /** 1647 * axienet_poll_controller - Axi Ethernet poll mechanism. 1648 * @ndev: Pointer to net_device structure 1649 * 1650 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior 1651 * to polling the ISRs and are enabled back after the polling is done. 1652 */ 1653 static void axienet_poll_controller(struct net_device *ndev) 1654 { 1655 struct axienet_local *lp = netdev_priv(ndev); 1656 1657 disable_irq(lp->tx_irq); 1658 disable_irq(lp->rx_irq); 1659 axienet_rx_irq(lp->tx_irq, ndev); 1660 axienet_tx_irq(lp->rx_irq, ndev); 1661 enable_irq(lp->tx_irq); 1662 enable_irq(lp->rx_irq); 1663 } 1664 #endif 1665 1666 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1667 { 1668 struct axienet_local *lp = netdev_priv(dev); 1669 1670 if (!netif_running(dev)) 1671 return -EINVAL; 1672 1673 return phylink_mii_ioctl(lp->phylink, rq, cmd); 1674 } 1675 1676 static void 1677 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1678 { 1679 struct axienet_local *lp = netdev_priv(dev); 1680 unsigned int start; 1681 1682 netdev_stats_to_stats64(stats, &dev->stats); 1683 1684 do { 1685 start = u64_stats_fetch_begin(&lp->rx_stat_sync); 1686 stats->rx_packets = u64_stats_read(&lp->rx_packets); 1687 stats->rx_bytes = u64_stats_read(&lp->rx_bytes); 1688 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); 1689 1690 do { 1691 start = u64_stats_fetch_begin(&lp->tx_stat_sync); 1692 stats->tx_packets = u64_stats_read(&lp->tx_packets); 1693 stats->tx_bytes = u64_stats_read(&lp->tx_bytes); 1694 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); 1695 } 1696 1697 static const struct net_device_ops axienet_netdev_ops = { 1698 .ndo_open = axienet_open, 1699 .ndo_stop = axienet_stop, 1700 .ndo_start_xmit = axienet_start_xmit, 1701 .ndo_get_stats64 = axienet_get_stats64, 1702 .ndo_change_mtu = axienet_change_mtu, 1703 .ndo_set_mac_address = netdev_set_mac_address, 1704 .ndo_validate_addr = eth_validate_addr, 1705 .ndo_eth_ioctl = axienet_ioctl, 1706 .ndo_set_rx_mode = axienet_set_multicast_list, 1707 #ifdef CONFIG_NET_POLL_CONTROLLER 1708 .ndo_poll_controller = axienet_poll_controller, 1709 #endif 1710 }; 1711 1712 static const struct net_device_ops axienet_netdev_dmaengine_ops = { 1713 .ndo_open = axienet_open, 1714 .ndo_stop = axienet_stop, 1715 .ndo_start_xmit = axienet_start_xmit_dmaengine, 1716 .ndo_get_stats64 = axienet_get_stats64, 1717 .ndo_change_mtu = axienet_change_mtu, 1718 .ndo_set_mac_address = netdev_set_mac_address, 1719 .ndo_validate_addr = eth_validate_addr, 1720 .ndo_eth_ioctl = axienet_ioctl, 1721 .ndo_set_rx_mode = axienet_set_multicast_list, 1722 }; 1723 1724 /** 1725 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. 1726 * @ndev: Pointer to net_device structure 1727 * @ed: Pointer to ethtool_drvinfo structure 1728 * 1729 * This implements ethtool command for getting the driver information. 1730 * Issue "ethtool -i ethX" under linux prompt to execute this function. 1731 */ 1732 static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1733 struct ethtool_drvinfo *ed) 1734 { 1735 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); 1736 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); 1737 } 1738 1739 /** 1740 * axienet_ethtools_get_regs_len - Get the total regs length present in the 1741 * AxiEthernet core. 1742 * @ndev: Pointer to net_device structure 1743 * 1744 * This implements ethtool command for getting the total register length 1745 * information. 1746 * 1747 * Return: the total regs length 1748 */ 1749 static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1750 { 1751 return sizeof(u32) * AXIENET_REGS_N; 1752 } 1753 1754 /** 1755 * axienet_ethtools_get_regs - Dump the contents of all registers present 1756 * in AxiEthernet core. 1757 * @ndev: Pointer to net_device structure 1758 * @regs: Pointer to ethtool_regs structure 1759 * @ret: Void pointer used to return the contents of the registers. 1760 * 1761 * This implements ethtool command for getting the Axi Ethernet register dump. 1762 * Issue "ethtool -d ethX" to execute this function. 1763 */ 1764 static void axienet_ethtools_get_regs(struct net_device *ndev, 1765 struct ethtool_regs *regs, void *ret) 1766 { 1767 u32 *data = (u32 *)ret; 1768 size_t len = sizeof(u32) * AXIENET_REGS_N; 1769 struct axienet_local *lp = netdev_priv(ndev); 1770 1771 regs->version = 0; 1772 regs->len = len; 1773 1774 memset(data, 0, len); 1775 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1776 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1777 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1778 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1779 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1780 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1781 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1782 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1783 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1784 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1785 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1786 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1787 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1788 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1789 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1790 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1791 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1792 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1793 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1794 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1795 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1796 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1797 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1798 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1799 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1800 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1801 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1802 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1803 if (!lp->use_dmaengine) { 1804 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1805 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1806 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1807 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1808 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1809 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1810 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1811 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1812 } 1813 } 1814 1815 static void 1816 axienet_ethtools_get_ringparam(struct net_device *ndev, 1817 struct ethtool_ringparam *ering, 1818 struct kernel_ethtool_ringparam *kernel_ering, 1819 struct netlink_ext_ack *extack) 1820 { 1821 struct axienet_local *lp = netdev_priv(ndev); 1822 1823 ering->rx_max_pending = RX_BD_NUM_MAX; 1824 ering->rx_mini_max_pending = 0; 1825 ering->rx_jumbo_max_pending = 0; 1826 ering->tx_max_pending = TX_BD_NUM_MAX; 1827 ering->rx_pending = lp->rx_bd_num; 1828 ering->rx_mini_pending = 0; 1829 ering->rx_jumbo_pending = 0; 1830 ering->tx_pending = lp->tx_bd_num; 1831 } 1832 1833 static int 1834 axienet_ethtools_set_ringparam(struct net_device *ndev, 1835 struct ethtool_ringparam *ering, 1836 struct kernel_ethtool_ringparam *kernel_ering, 1837 struct netlink_ext_ack *extack) 1838 { 1839 struct axienet_local *lp = netdev_priv(ndev); 1840 1841 if (ering->rx_pending > RX_BD_NUM_MAX || 1842 ering->rx_mini_pending || 1843 ering->rx_jumbo_pending || 1844 ering->tx_pending < TX_BD_NUM_MIN || 1845 ering->tx_pending > TX_BD_NUM_MAX) 1846 return -EINVAL; 1847 1848 if (netif_running(ndev)) 1849 return -EBUSY; 1850 1851 lp->rx_bd_num = ering->rx_pending; 1852 lp->tx_bd_num = ering->tx_pending; 1853 return 0; 1854 } 1855 1856 /** 1857 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1858 * Tx and Rx paths. 1859 * @ndev: Pointer to net_device structure 1860 * @epauseparm: Pointer to ethtool_pauseparam structure. 1861 * 1862 * This implements ethtool command for getting axi ethernet pause frame 1863 * setting. Issue "ethtool -a ethX" to execute this function. 1864 */ 1865 static void 1866 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1867 struct ethtool_pauseparam *epauseparm) 1868 { 1869 struct axienet_local *lp = netdev_priv(ndev); 1870 1871 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1872 } 1873 1874 /** 1875 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1876 * settings. 1877 * @ndev: Pointer to net_device structure 1878 * @epauseparm:Pointer to ethtool_pauseparam structure 1879 * 1880 * This implements ethtool command for enabling flow control on Rx and Tx 1881 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1882 * function. 1883 * 1884 * Return: 0 on success, -EFAULT if device is running 1885 */ 1886 static int 1887 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1888 struct ethtool_pauseparam *epauseparm) 1889 { 1890 struct axienet_local *lp = netdev_priv(ndev); 1891 1892 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1893 } 1894 1895 /** 1896 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1897 * @ndev: Pointer to net_device structure 1898 * @ecoalesce: Pointer to ethtool_coalesce structure 1899 * @kernel_coal: ethtool CQE mode setting structure 1900 * @extack: extack for reporting error messages 1901 * 1902 * This implements ethtool command for getting the DMA interrupt coalescing 1903 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1904 * execute this function. 1905 * 1906 * Return: 0 always 1907 */ 1908 static int 1909 axienet_ethtools_get_coalesce(struct net_device *ndev, 1910 struct ethtool_coalesce *ecoalesce, 1911 struct kernel_ethtool_coalesce *kernel_coal, 1912 struct netlink_ext_ack *extack) 1913 { 1914 struct axienet_local *lp = netdev_priv(ndev); 1915 1916 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; 1917 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; 1918 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; 1919 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; 1920 return 0; 1921 } 1922 1923 /** 1924 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1925 * @ndev: Pointer to net_device structure 1926 * @ecoalesce: Pointer to ethtool_coalesce structure 1927 * @kernel_coal: ethtool CQE mode setting structure 1928 * @extack: extack for reporting error messages 1929 * 1930 * This implements ethtool command for setting the DMA interrupt coalescing 1931 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1932 * prompt to execute this function. 1933 * 1934 * Return: 0, on success, Non-zero error value on failure. 1935 */ 1936 static int 1937 axienet_ethtools_set_coalesce(struct net_device *ndev, 1938 struct ethtool_coalesce *ecoalesce, 1939 struct kernel_ethtool_coalesce *kernel_coal, 1940 struct netlink_ext_ack *extack) 1941 { 1942 struct axienet_local *lp = netdev_priv(ndev); 1943 1944 if (netif_running(ndev)) { 1945 NL_SET_ERR_MSG(extack, 1946 "Please stop netif before applying configuration"); 1947 return -EBUSY; 1948 } 1949 1950 if (ecoalesce->rx_max_coalesced_frames) 1951 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1952 if (ecoalesce->rx_coalesce_usecs) 1953 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; 1954 if (ecoalesce->tx_max_coalesced_frames) 1955 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1956 if (ecoalesce->tx_coalesce_usecs) 1957 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; 1958 1959 return 0; 1960 } 1961 1962 static int 1963 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1964 struct ethtool_link_ksettings *cmd) 1965 { 1966 struct axienet_local *lp = netdev_priv(ndev); 1967 1968 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1969 } 1970 1971 static int 1972 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1973 const struct ethtool_link_ksettings *cmd) 1974 { 1975 struct axienet_local *lp = netdev_priv(ndev); 1976 1977 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1978 } 1979 1980 static int axienet_ethtools_nway_reset(struct net_device *dev) 1981 { 1982 struct axienet_local *lp = netdev_priv(dev); 1983 1984 return phylink_ethtool_nway_reset(lp->phylink); 1985 } 1986 1987 static const struct ethtool_ops axienet_ethtool_ops = { 1988 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 1989 ETHTOOL_COALESCE_USECS, 1990 .get_drvinfo = axienet_ethtools_get_drvinfo, 1991 .get_regs_len = axienet_ethtools_get_regs_len, 1992 .get_regs = axienet_ethtools_get_regs, 1993 .get_link = ethtool_op_get_link, 1994 .get_ringparam = axienet_ethtools_get_ringparam, 1995 .set_ringparam = axienet_ethtools_set_ringparam, 1996 .get_pauseparam = axienet_ethtools_get_pauseparam, 1997 .set_pauseparam = axienet_ethtools_set_pauseparam, 1998 .get_coalesce = axienet_ethtools_get_coalesce, 1999 .set_coalesce = axienet_ethtools_set_coalesce, 2000 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 2001 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 2002 .nway_reset = axienet_ethtools_nway_reset, 2003 }; 2004 2005 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 2006 { 2007 return container_of(pcs, struct axienet_local, pcs); 2008 } 2009 2010 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 2011 struct phylink_link_state *state) 2012 { 2013 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2014 2015 phylink_mii_c22_pcs_get_state(pcs_phy, state); 2016 } 2017 2018 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 2019 { 2020 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2021 2022 phylink_mii_c22_pcs_an_restart(pcs_phy); 2023 } 2024 2025 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2026 phy_interface_t interface, 2027 const unsigned long *advertising, 2028 bool permit_pause_to_mac) 2029 { 2030 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2031 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 2032 struct axienet_local *lp = netdev_priv(ndev); 2033 int ret; 2034 2035 if (lp->switch_x_sgmii) { 2036 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 2037 interface == PHY_INTERFACE_MODE_SGMII ? 2038 XLNX_MII_STD_SELECT_SGMII : 0); 2039 if (ret < 0) { 2040 netdev_warn(ndev, 2041 "Failed to switch PHY interface: %d\n", 2042 ret); 2043 return ret; 2044 } 2045 } 2046 2047 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 2048 neg_mode); 2049 if (ret < 0) 2050 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 2051 2052 return ret; 2053 } 2054 2055 static const struct phylink_pcs_ops axienet_pcs_ops = { 2056 .pcs_get_state = axienet_pcs_get_state, 2057 .pcs_config = axienet_pcs_config, 2058 .pcs_an_restart = axienet_pcs_an_restart, 2059 }; 2060 2061 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 2062 phy_interface_t interface) 2063 { 2064 struct net_device *ndev = to_net_dev(config->dev); 2065 struct axienet_local *lp = netdev_priv(ndev); 2066 2067 if (interface == PHY_INTERFACE_MODE_1000BASEX || 2068 interface == PHY_INTERFACE_MODE_SGMII) 2069 return &lp->pcs; 2070 2071 return NULL; 2072 } 2073 2074 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 2075 const struct phylink_link_state *state) 2076 { 2077 /* nothing meaningful to do */ 2078 } 2079 2080 static void axienet_mac_link_down(struct phylink_config *config, 2081 unsigned int mode, 2082 phy_interface_t interface) 2083 { 2084 /* nothing meaningful to do */ 2085 } 2086 2087 static void axienet_mac_link_up(struct phylink_config *config, 2088 struct phy_device *phy, 2089 unsigned int mode, phy_interface_t interface, 2090 int speed, int duplex, 2091 bool tx_pause, bool rx_pause) 2092 { 2093 struct net_device *ndev = to_net_dev(config->dev); 2094 struct axienet_local *lp = netdev_priv(ndev); 2095 u32 emmc_reg, fcc_reg; 2096 2097 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 2098 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 2099 2100 switch (speed) { 2101 case SPEED_1000: 2102 emmc_reg |= XAE_EMMC_LINKSPD_1000; 2103 break; 2104 case SPEED_100: 2105 emmc_reg |= XAE_EMMC_LINKSPD_100; 2106 break; 2107 case SPEED_10: 2108 emmc_reg |= XAE_EMMC_LINKSPD_10; 2109 break; 2110 default: 2111 dev_err(&ndev->dev, 2112 "Speed other than 10, 100 or 1Gbps is not supported\n"); 2113 break; 2114 } 2115 2116 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 2117 2118 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 2119 if (tx_pause) 2120 fcc_reg |= XAE_FCC_FCTX_MASK; 2121 else 2122 fcc_reg &= ~XAE_FCC_FCTX_MASK; 2123 if (rx_pause) 2124 fcc_reg |= XAE_FCC_FCRX_MASK; 2125 else 2126 fcc_reg &= ~XAE_FCC_FCRX_MASK; 2127 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 2128 } 2129 2130 static const struct phylink_mac_ops axienet_phylink_ops = { 2131 .mac_select_pcs = axienet_mac_select_pcs, 2132 .mac_config = axienet_mac_config, 2133 .mac_link_down = axienet_mac_link_down, 2134 .mac_link_up = axienet_mac_link_up, 2135 }; 2136 2137 /** 2138 * axienet_dma_err_handler - Work queue task for Axi DMA Error 2139 * @work: pointer to work_struct 2140 * 2141 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 2142 * Tx/Rx BDs. 2143 */ 2144 static void axienet_dma_err_handler(struct work_struct *work) 2145 { 2146 u32 i; 2147 u32 axienet_status; 2148 struct axidma_bd *cur_p; 2149 struct axienet_local *lp = container_of(work, struct axienet_local, 2150 dma_err_task); 2151 struct net_device *ndev = lp->ndev; 2152 2153 napi_disable(&lp->napi_tx); 2154 napi_disable(&lp->napi_rx); 2155 2156 axienet_setoptions(ndev, lp->options & 2157 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2158 2159 axienet_dma_stop(lp); 2160 2161 for (i = 0; i < lp->tx_bd_num; i++) { 2162 cur_p = &lp->tx_bd_v[i]; 2163 if (cur_p->cntrl) { 2164 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 2165 2166 dma_unmap_single(lp->dev, addr, 2167 (cur_p->cntrl & 2168 XAXIDMA_BD_CTRL_LENGTH_MASK), 2169 DMA_TO_DEVICE); 2170 } 2171 if (cur_p->skb) 2172 dev_kfree_skb_irq(cur_p->skb); 2173 cur_p->phys = 0; 2174 cur_p->phys_msb = 0; 2175 cur_p->cntrl = 0; 2176 cur_p->status = 0; 2177 cur_p->app0 = 0; 2178 cur_p->app1 = 0; 2179 cur_p->app2 = 0; 2180 cur_p->app3 = 0; 2181 cur_p->app4 = 0; 2182 cur_p->skb = NULL; 2183 } 2184 2185 for (i = 0; i < lp->rx_bd_num; i++) { 2186 cur_p = &lp->rx_bd_v[i]; 2187 cur_p->status = 0; 2188 cur_p->app0 = 0; 2189 cur_p->app1 = 0; 2190 cur_p->app2 = 0; 2191 cur_p->app3 = 0; 2192 cur_p->app4 = 0; 2193 } 2194 2195 lp->tx_bd_ci = 0; 2196 lp->tx_bd_tail = 0; 2197 lp->rx_bd_ci = 0; 2198 2199 axienet_dma_start(lp); 2200 2201 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 2202 axienet_status &= ~XAE_RCW1_RX_MASK; 2203 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 2204 2205 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 2206 if (axienet_status & XAE_INT_RXRJECT_MASK) 2207 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 2208 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 2209 XAE_INT_RECV_ERROR_MASK : 0); 2210 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 2211 2212 /* Sync default options with HW but leave receiver and 2213 * transmitter disabled. 2214 */ 2215 axienet_setoptions(ndev, lp->options & 2216 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2217 axienet_set_mac_address(ndev, NULL); 2218 axienet_set_multicast_list(ndev); 2219 napi_enable(&lp->napi_rx); 2220 napi_enable(&lp->napi_tx); 2221 axienet_setoptions(ndev, lp->options); 2222 } 2223 2224 /** 2225 * axienet_probe - Axi Ethernet probe function. 2226 * @pdev: Pointer to platform device structure. 2227 * 2228 * Return: 0, on success 2229 * Non-zero error value on failure. 2230 * 2231 * This is the probe routine for Axi Ethernet driver. This is called before 2232 * any other driver routines are invoked. It allocates and sets up the Ethernet 2233 * device. Parses through device tree and populates fields of 2234 * axienet_local. It registers the Ethernet device. 2235 */ 2236 static int axienet_probe(struct platform_device *pdev) 2237 { 2238 int ret; 2239 struct device_node *np; 2240 struct axienet_local *lp; 2241 struct net_device *ndev; 2242 struct resource *ethres; 2243 u8 mac_addr[ETH_ALEN]; 2244 int addr_width = 32; 2245 u32 value; 2246 2247 ndev = alloc_etherdev(sizeof(*lp)); 2248 if (!ndev) 2249 return -ENOMEM; 2250 2251 platform_set_drvdata(pdev, ndev); 2252 2253 SET_NETDEV_DEV(ndev, &pdev->dev); 2254 ndev->features = NETIF_F_SG; 2255 ndev->ethtool_ops = &axienet_ethtool_ops; 2256 2257 /* MTU range: 64 - 9000 */ 2258 ndev->min_mtu = 64; 2259 ndev->max_mtu = XAE_JUMBO_MTU; 2260 2261 lp = netdev_priv(ndev); 2262 lp->ndev = ndev; 2263 lp->dev = &pdev->dev; 2264 lp->options = XAE_OPTION_DEFAULTS; 2265 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 2266 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 2267 2268 u64_stats_init(&lp->rx_stat_sync); 2269 u64_stats_init(&lp->tx_stat_sync); 2270 2271 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 2272 if (!lp->axi_clk) { 2273 /* For backward compatibility, if named AXI clock is not present, 2274 * treat the first clock specified as the AXI clock. 2275 */ 2276 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 2277 } 2278 if (IS_ERR(lp->axi_clk)) { 2279 ret = PTR_ERR(lp->axi_clk); 2280 goto free_netdev; 2281 } 2282 ret = clk_prepare_enable(lp->axi_clk); 2283 if (ret) { 2284 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 2285 goto free_netdev; 2286 } 2287 2288 lp->misc_clks[0].id = "axis_clk"; 2289 lp->misc_clks[1].id = "ref_clk"; 2290 lp->misc_clks[2].id = "mgt_clk"; 2291 2292 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2293 if (ret) 2294 goto cleanup_clk; 2295 2296 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2297 if (ret) 2298 goto cleanup_clk; 2299 2300 /* Map device registers */ 2301 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ðres); 2302 if (IS_ERR(lp->regs)) { 2303 ret = PTR_ERR(lp->regs); 2304 goto cleanup_clk; 2305 } 2306 lp->regs_start = ethres->start; 2307 2308 /* Setup checksum offload, but default to off if not specified */ 2309 lp->features = 0; 2310 2311 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 2312 if (!ret) { 2313 switch (value) { 2314 case 1: 2315 lp->csum_offload_on_tx_path = 2316 XAE_FEATURE_PARTIAL_TX_CSUM; 2317 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 2318 /* Can checksum TCP/UDP over IPv4. */ 2319 ndev->features |= NETIF_F_IP_CSUM; 2320 break; 2321 case 2: 2322 lp->csum_offload_on_tx_path = 2323 XAE_FEATURE_FULL_TX_CSUM; 2324 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 2325 /* Can checksum TCP/UDP over IPv4. */ 2326 ndev->features |= NETIF_F_IP_CSUM; 2327 break; 2328 default: 2329 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 2330 } 2331 } 2332 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 2333 if (!ret) { 2334 switch (value) { 2335 case 1: 2336 lp->csum_offload_on_rx_path = 2337 XAE_FEATURE_PARTIAL_RX_CSUM; 2338 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 2339 break; 2340 case 2: 2341 lp->csum_offload_on_rx_path = 2342 XAE_FEATURE_FULL_RX_CSUM; 2343 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 2344 break; 2345 default: 2346 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 2347 } 2348 } 2349 /* For supporting jumbo frames, the Axi Ethernet hardware must have 2350 * a larger Rx/Tx Memory. Typically, the size must be large so that 2351 * we can enable jumbo option and start supporting jumbo frames. 2352 * Here we check for memory allocated for Rx/Tx in the hardware from 2353 * the device-tree and accordingly set flags. 2354 */ 2355 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 2356 2357 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 2358 "xlnx,switch-x-sgmii"); 2359 2360 /* Start with the proprietary, and broken phy_type */ 2361 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 2362 if (!ret) { 2363 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 2364 switch (value) { 2365 case XAE_PHY_TYPE_MII: 2366 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2367 break; 2368 case XAE_PHY_TYPE_GMII: 2369 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2370 break; 2371 case XAE_PHY_TYPE_RGMII_2_0: 2372 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2373 break; 2374 case XAE_PHY_TYPE_SGMII: 2375 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2376 break; 2377 case XAE_PHY_TYPE_1000BASE_X: 2378 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2379 break; 2380 default: 2381 ret = -EINVAL; 2382 goto cleanup_clk; 2383 } 2384 } else { 2385 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2386 if (ret) 2387 goto cleanup_clk; 2388 } 2389 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2390 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2391 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2392 ret = -EINVAL; 2393 goto cleanup_clk; 2394 } 2395 2396 if (!of_property_present(pdev->dev.of_node, "dmas")) { 2397 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2398 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2399 2400 if (np) { 2401 struct resource dmares; 2402 2403 ret = of_address_to_resource(np, 0, &dmares); 2404 if (ret) { 2405 dev_err(&pdev->dev, 2406 "unable to get DMA resource\n"); 2407 of_node_put(np); 2408 goto cleanup_clk; 2409 } 2410 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2411 &dmares); 2412 lp->rx_irq = irq_of_parse_and_map(np, 1); 2413 lp->tx_irq = irq_of_parse_and_map(np, 0); 2414 of_node_put(np); 2415 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2416 } else { 2417 /* Check for these resources directly on the Ethernet node. */ 2418 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2419 lp->rx_irq = platform_get_irq(pdev, 1); 2420 lp->tx_irq = platform_get_irq(pdev, 0); 2421 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2422 } 2423 if (IS_ERR(lp->dma_regs)) { 2424 dev_err(&pdev->dev, "could not map DMA regs\n"); 2425 ret = PTR_ERR(lp->dma_regs); 2426 goto cleanup_clk; 2427 } 2428 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { 2429 dev_err(&pdev->dev, "could not determine irqs\n"); 2430 ret = -ENOMEM; 2431 goto cleanup_clk; 2432 } 2433 2434 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2435 ret = __axienet_device_reset(lp); 2436 if (ret) 2437 goto cleanup_clk; 2438 2439 /* Autodetect the need for 64-bit DMA pointers. 2440 * When the IP is configured for a bus width bigger than 32 bits, 2441 * writing the MSB registers is mandatory, even if they are all 0. 2442 * We can detect this case by writing all 1's to one such register 2443 * and see if that sticks: when the IP is configured for 32 bits 2444 * only, those registers are RES0. 2445 * Those MSB registers were introduced in IP v7.1, which we check first. 2446 */ 2447 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2448 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2449 2450 iowrite32(0x0, desc); 2451 if (ioread32(desc) == 0) { /* sanity check */ 2452 iowrite32(0xffffffff, desc); 2453 if (ioread32(desc) > 0) { 2454 lp->features |= XAE_FEATURE_DMA_64BIT; 2455 addr_width = 64; 2456 dev_info(&pdev->dev, 2457 "autodetected 64-bit DMA range\n"); 2458 } 2459 iowrite32(0x0, desc); 2460 } 2461 } 2462 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2463 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n"); 2464 ret = -EINVAL; 2465 goto cleanup_clk; 2466 } 2467 2468 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2469 if (ret) { 2470 dev_err(&pdev->dev, "No suitable DMA available\n"); 2471 goto cleanup_clk; 2472 } 2473 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 2474 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 2475 } else { 2476 struct xilinx_vdma_config cfg; 2477 struct dma_chan *tx_chan; 2478 2479 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2480 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { 2481 ret = lp->eth_irq; 2482 goto cleanup_clk; 2483 } 2484 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 2485 if (IS_ERR(tx_chan)) { 2486 ret = PTR_ERR(tx_chan); 2487 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); 2488 goto cleanup_clk; 2489 } 2490 2491 cfg.reset = 1; 2492 /* As name says VDMA but it has support for DMA channel reset */ 2493 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg); 2494 if (ret < 0) { 2495 dev_err(&pdev->dev, "Reset channel failed\n"); 2496 dma_release_channel(tx_chan); 2497 goto cleanup_clk; 2498 } 2499 2500 dma_release_channel(tx_chan); 2501 lp->use_dmaengine = 1; 2502 } 2503 2504 if (lp->use_dmaengine) 2505 ndev->netdev_ops = &axienet_netdev_dmaengine_ops; 2506 else 2507 ndev->netdev_ops = &axienet_netdev_ops; 2508 /* Check for Ethernet core IRQ (optional) */ 2509 if (lp->eth_irq <= 0) 2510 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2511 2512 /* Retrieve the MAC address */ 2513 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2514 if (!ret) { 2515 axienet_set_mac_address(ndev, mac_addr); 2516 } else { 2517 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2518 ret); 2519 axienet_set_mac_address(ndev, NULL); 2520 } 2521 2522 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2523 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2524 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2525 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2526 2527 ret = axienet_mdio_setup(lp); 2528 if (ret) 2529 dev_warn(&pdev->dev, 2530 "error registering MDIO bus: %d\n", ret); 2531 2532 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2533 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2534 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2535 if (!np) { 2536 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2537 * Falling back to "phy-handle" here is only for 2538 * backward compatibility with old device trees. 2539 */ 2540 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2541 } 2542 if (!np) { 2543 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2544 ret = -EINVAL; 2545 goto cleanup_mdio; 2546 } 2547 lp->pcs_phy = of_mdio_find_device(np); 2548 if (!lp->pcs_phy) { 2549 ret = -EPROBE_DEFER; 2550 of_node_put(np); 2551 goto cleanup_mdio; 2552 } 2553 of_node_put(np); 2554 lp->pcs.ops = &axienet_pcs_ops; 2555 lp->pcs.neg_mode = true; 2556 lp->pcs.poll = true; 2557 } 2558 2559 lp->phylink_config.dev = &ndev->dev; 2560 lp->phylink_config.type = PHYLINK_NETDEV; 2561 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2562 MAC_10FD | MAC_100FD | MAC_1000FD; 2563 2564 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2565 if (lp->switch_x_sgmii) { 2566 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2567 lp->phylink_config.supported_interfaces); 2568 __set_bit(PHY_INTERFACE_MODE_SGMII, 2569 lp->phylink_config.supported_interfaces); 2570 } 2571 2572 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2573 lp->phy_mode, 2574 &axienet_phylink_ops); 2575 if (IS_ERR(lp->phylink)) { 2576 ret = PTR_ERR(lp->phylink); 2577 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2578 goto cleanup_mdio; 2579 } 2580 2581 ret = register_netdev(lp->ndev); 2582 if (ret) { 2583 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2584 goto cleanup_phylink; 2585 } 2586 2587 return 0; 2588 2589 cleanup_phylink: 2590 phylink_destroy(lp->phylink); 2591 2592 cleanup_mdio: 2593 if (lp->pcs_phy) 2594 put_device(&lp->pcs_phy->dev); 2595 if (lp->mii_bus) 2596 axienet_mdio_teardown(lp); 2597 cleanup_clk: 2598 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2599 clk_disable_unprepare(lp->axi_clk); 2600 2601 free_netdev: 2602 free_netdev(ndev); 2603 2604 return ret; 2605 } 2606 2607 static void axienet_remove(struct platform_device *pdev) 2608 { 2609 struct net_device *ndev = platform_get_drvdata(pdev); 2610 struct axienet_local *lp = netdev_priv(ndev); 2611 2612 unregister_netdev(ndev); 2613 2614 if (lp->phylink) 2615 phylink_destroy(lp->phylink); 2616 2617 if (lp->pcs_phy) 2618 put_device(&lp->pcs_phy->dev); 2619 2620 axienet_mdio_teardown(lp); 2621 2622 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2623 clk_disable_unprepare(lp->axi_clk); 2624 2625 free_netdev(ndev); 2626 } 2627 2628 static void axienet_shutdown(struct platform_device *pdev) 2629 { 2630 struct net_device *ndev = platform_get_drvdata(pdev); 2631 2632 rtnl_lock(); 2633 netif_device_detach(ndev); 2634 2635 if (netif_running(ndev)) 2636 dev_close(ndev); 2637 2638 rtnl_unlock(); 2639 } 2640 2641 static int axienet_suspend(struct device *dev) 2642 { 2643 struct net_device *ndev = dev_get_drvdata(dev); 2644 2645 if (!netif_running(ndev)) 2646 return 0; 2647 2648 netif_device_detach(ndev); 2649 2650 rtnl_lock(); 2651 axienet_stop(ndev); 2652 rtnl_unlock(); 2653 2654 return 0; 2655 } 2656 2657 static int axienet_resume(struct device *dev) 2658 { 2659 struct net_device *ndev = dev_get_drvdata(dev); 2660 2661 if (!netif_running(ndev)) 2662 return 0; 2663 2664 rtnl_lock(); 2665 axienet_open(ndev); 2666 rtnl_unlock(); 2667 2668 netif_device_attach(ndev); 2669 2670 return 0; 2671 } 2672 2673 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2674 axienet_suspend, axienet_resume); 2675 2676 static struct platform_driver axienet_driver = { 2677 .probe = axienet_probe, 2678 .remove_new = axienet_remove, 2679 .shutdown = axienet_shutdown, 2680 .driver = { 2681 .name = "xilinx_axienet", 2682 .pm = &axienet_pm_ops, 2683 .of_match_table = axienet_of_match, 2684 }, 2685 }; 2686 2687 module_platform_driver(axienet_driver); 2688 2689 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2690 MODULE_AUTHOR("Xilinx"); 2691 MODULE_LICENSE("GPL"); 2692