// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 * - Add Axi Fifo support.
 * - Factor out Axi DMA code into separate driver.
 * - Test and fix basic multicast filtering.
 * - Add support for extended multicast filtering.
 * - Test basic VLAN support.
 * - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
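		 * (A descriptor whose DMA mapping failed in
		 * axienet_dma_bd_init still has cntrl == 0, since the rings
		 * come from dma_alloc_coherent() and are zeroed, so it is
		 * simply skipped here.)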
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success, -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
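 *
 * For example, the driver itself calls
 * axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN))
 * to keep the transmitter and receiver disabled while reconfiguring the core,
 * and then axienet_setoptions(ndev, lp->options) to re-apply the full set.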
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 *
 * Return: the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	dma_addr_t phys;
	int i;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
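		 * (i.e. only those whose status has
		 * XAXIDMA_BD_STS_COMPLETE_MASK set).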
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			napi_consume_skb(cur_p->skb, budget);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 *
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or for non-space errors (the skb is
 *	   dropped), NETDEV_TX_BUSY when no free element is available in
 *	   the TX skb ring buffer.
 *
 * This function is invoked to initiate transmission. It maps the skb,
 * registers the DMA callback and submits the DMA transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);

	if (packets) {
		lp->tx_bd_ci += packets;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;

		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the skbuf_dma_descriptor structure.
 * @result: error reporting through dmaengine_result.
 *
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
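 * After handing the packet to the stack it submits a fresh receive
 * descriptor via axienet_rx_submit_desc() and re-issues pending work on
 * the RX channel.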
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_tx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_rx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
 * then add the callback information and submit the descriptor.
1312 * 1313 * @ndev: net_device pointer 1314 * 1315 */ 1316 static void axienet_rx_submit_desc(struct net_device *ndev) 1317 { 1318 struct dma_async_tx_descriptor *dma_rx_desc = NULL; 1319 struct axienet_local *lp = netdev_priv(ndev); 1320 struct skbuf_dma_descriptor *skbuf_dma; 1321 struct sk_buff *skb; 1322 dma_addr_t addr; 1323 1324 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head); 1325 if (!skbuf_dma) 1326 return; 1327 1328 lp->rx_ring_head++; 1329 skb = netdev_alloc_skb(ndev, lp->max_frm_size); 1330 if (!skb) 1331 return; 1332 1333 sg_init_table(skbuf_dma->sgl, 1); 1334 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); 1335 if (unlikely(dma_mapping_error(lp->dev, addr))) { 1336 if (net_ratelimit()) 1337 netdev_err(ndev, "DMA mapping error\n"); 1338 goto rx_submit_err_free_skb; 1339 } 1340 sg_dma_address(skbuf_dma->sgl) = addr; 1341 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size; 1342 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl, 1343 1, DMA_DEV_TO_MEM, 1344 DMA_PREP_INTERRUPT); 1345 if (!dma_rx_desc) 1346 goto rx_submit_err_unmap_skb; 1347 1348 skbuf_dma->skb = skb; 1349 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl); 1350 skbuf_dma->desc = dma_rx_desc; 1351 dma_rx_desc->callback_param = lp; 1352 dma_rx_desc->callback_result = axienet_dma_rx_cb; 1353 dmaengine_submit(dma_rx_desc); 1354 1355 return; 1356 1357 rx_submit_err_unmap_skb: 1358 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE); 1359 rx_submit_err_free_skb: 1360 dev_kfree_skb(skb); 1361 } 1362 1363 /** 1364 * axienet_init_dmaengine - init the dmaengine code. 1365 * @ndev: Pointer to net_device structure 1366 * 1367 * Return: 0, on success. 1368 * non-zero error value on failure 1369 * 1370 * This is the dmaengine initialization code. 
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dma initialization code. It also registers the interrupt
 * service routines and enables the interrupt lines and ISR handling.
1455 * 1456 */ 1457 static int axienet_init_legacy_dma(struct net_device *ndev) 1458 { 1459 int ret; 1460 struct axienet_local *lp = netdev_priv(ndev); 1461 1462 /* Enable worker thread for Axi DMA error handling */ 1463 lp->stopping = false; 1464 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); 1465 1466 napi_enable(&lp->napi_rx); 1467 napi_enable(&lp->napi_tx); 1468 1469 /* Enable interrupts for Axi DMA Tx */ 1470 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, 1471 ndev->name, ndev); 1472 if (ret) 1473 goto err_tx_irq; 1474 /* Enable interrupts for Axi DMA Rx */ 1475 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, 1476 ndev->name, ndev); 1477 if (ret) 1478 goto err_rx_irq; 1479 /* Enable interrupts for Axi Ethernet core (if defined) */ 1480 if (lp->eth_irq > 0) { 1481 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1482 ndev->name, ndev); 1483 if (ret) 1484 goto err_eth_irq; 1485 } 1486 1487 return 0; 1488 1489 err_eth_irq: 1490 free_irq(lp->rx_irq, ndev); 1491 err_rx_irq: 1492 free_irq(lp->tx_irq, ndev); 1493 err_tx_irq: 1494 napi_disable(&lp->napi_tx); 1495 napi_disable(&lp->napi_rx); 1496 cancel_work_sync(&lp->dma_err_task); 1497 dev_err(lp->dev, "request_irq() failed\n"); 1498 return ret; 1499 } 1500 1501 /** 1502 * axienet_open - Driver open routine. 1503 * @ndev: Pointer to net_device structure 1504 * 1505 * Return: 0, on success. 1506 * non-zero error value on failure 1507 * 1508 * This is the driver open routine. It calls phylink_start to start the 1509 * PHY device. 1510 * It also allocates interrupt service routines, enables the interrupt lines 1511 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer 1512 * descriptors are initialized. 1513 */ 1514 static int axienet_open(struct net_device *ndev) 1515 { 1516 int ret; 1517 struct axienet_local *lp = netdev_priv(ndev); 1518 1519 dev_dbg(&ndev->dev, "%s\n", __func__); 1520 1521 /* When we do an Axi Ethernet reset, it resets the complete core 1522 * including the MDIO. MDIO must be disabled before resetting. 1523 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 1524 */ 1525 axienet_lock_mii(lp); 1526 ret = axienet_device_reset(ndev); 1527 axienet_unlock_mii(lp); 1528 1529 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); 1530 if (ret) { 1531 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); 1532 return ret; 1533 } 1534 1535 phylink_start(lp->phylink); 1536 1537 if (lp->use_dmaengine) { 1538 /* Enable interrupts for Axi Ethernet core (if defined) */ 1539 if (lp->eth_irq > 0) { 1540 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1541 ndev->name, ndev); 1542 if (ret) 1543 goto err_phy; 1544 } 1545 1546 ret = axienet_init_dmaengine(ndev); 1547 if (ret < 0) 1548 goto err_free_eth_irq; 1549 } else { 1550 ret = axienet_init_legacy_dma(ndev); 1551 if (ret) 1552 goto err_phy; 1553 } 1554 1555 return 0; 1556 1557 err_free_eth_irq: 1558 if (lp->eth_irq > 0) 1559 free_irq(lp->eth_irq, ndev); 1560 err_phy: 1561 phylink_stop(lp->phylink); 1562 phylink_disconnect_phy(lp->phylink); 1563 return ret; 1564 } 1565 1566 /** 1567 * axienet_stop - Driver stop routine. 1568 * @ndev: Pointer to net_device structure 1569 * 1570 * Return: 0, on success. 1571 * 1572 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY 1573 * device. It also removes the interrupt handlers and disables the interrupts. 1574 * The Axi DMA Tx/Rx BDs are released. 
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	dev_dbg(&ndev->dev, "axienet_close()\n");

	if (!lp->use_dmaengine) {
		WRITE_ONCE(lp->stopping, true);
		flush_work(&lp->dma_err_task);

		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, -EBUSY if the interface is running, or -EINVAL if
 * the new MTU does not fit in the receive buffer memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu	= axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
1765 * @ndev: Pointer to net_device structure 1766 * @regs: Pointer to ethtool_regs structure 1767 * @ret: Void pointer used to return the contents of the registers. 1768 * 1769 * This implements ethtool command for getting the Axi Ethernet register dump. 1770 * Issue "ethtool -d ethX" to execute this function. 1771 */ 1772 static void axienet_ethtools_get_regs(struct net_device *ndev, 1773 struct ethtool_regs *regs, void *ret) 1774 { 1775 u32 *data = (u32 *)ret; 1776 size_t len = sizeof(u32) * AXIENET_REGS_N; 1777 struct axienet_local *lp = netdev_priv(ndev); 1778 1779 regs->version = 0; 1780 regs->len = len; 1781 1782 memset(data, 0, len); 1783 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1784 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1785 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1786 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1787 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1788 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1789 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1790 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1791 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1792 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1793 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1794 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1795 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1796 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1797 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1798 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1799 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1800 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1801 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1802 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1803 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1804 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1805 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1806 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1807 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1808 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1809 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1810 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1811 if (!lp->use_dmaengine) { 1812 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1813 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1814 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1815 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1816 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1817 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1818 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1819 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1820 } 1821 } 1822 1823 static void 1824 axienet_ethtools_get_ringparam(struct net_device *ndev, 1825 struct ethtool_ringparam *ering, 1826 struct kernel_ethtool_ringparam *kernel_ering, 1827 struct netlink_ext_ack *extack) 1828 { 1829 struct axienet_local *lp = netdev_priv(ndev); 1830 1831 ering->rx_max_pending = RX_BD_NUM_MAX; 1832 ering->rx_mini_max_pending = 0; 1833 ering->rx_jumbo_max_pending = 0; 1834 ering->tx_max_pending = TX_BD_NUM_MAX; 1835 ering->rx_pending = lp->rx_bd_num; 1836 ering->rx_mini_pending = 0; 1837 ering->rx_jumbo_pending = 0; 1838 ering->tx_pending = lp->tx_bd_num; 1839 } 1840 1841 static int 1842 axienet_ethtools_set_ringparam(struct net_device *ndev, 1843 struct ethtool_ringparam *ering, 1844 struct kernel_ethtool_ringparam *kernel_ering, 1845 struct netlink_ext_ack *extack) 1846 { 1847 struct axienet_local *lp = netdev_priv(ndev); 1848 1849 if (ering->rx_pending > RX_BD_NUM_MAX || 1850 ering->rx_mini_pending || 1851 ering->rx_jumbo_pending || 
1852 ering->tx_pending < TX_BD_NUM_MIN || 1853 ering->tx_pending > TX_BD_NUM_MAX) 1854 return -EINVAL; 1855 1856 if (netif_running(ndev)) 1857 return -EBUSY; 1858 1859 lp->rx_bd_num = ering->rx_pending; 1860 lp->tx_bd_num = ering->tx_pending; 1861 return 0; 1862 } 1863 1864 /** 1865 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1866 * Tx and Rx paths. 1867 * @ndev: Pointer to net_device structure 1868 * @epauseparm: Pointer to ethtool_pauseparam structure. 1869 * 1870 * This implements ethtool command for getting Axi Ethernet pause frame 1871 * setting. Issue "ethtool -a ethX" to execute this function. 1872 */ 1873 static void 1874 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1875 struct ethtool_pauseparam *epauseparm) 1876 { 1877 struct axienet_local *lp = netdev_priv(ndev); 1878 1879 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1880 } 1881 1882 /** 1883 * axienet_ethtools_set_pauseparam - Set device pause parameter (flow control) 1884 * settings. 1885 * @ndev: Pointer to net_device structure 1886 * @epauseparm: Pointer to ethtool_pauseparam structure 1887 * 1888 * This implements ethtool command for enabling flow control on Rx and Tx 1889 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1890 * function. 1891 * 1892 * Return: 0 on success, or a negative error code returned by phylink 1893 */ 1894 static int 1895 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1896 struct ethtool_pauseparam *epauseparm) 1897 { 1898 struct axienet_local *lp = netdev_priv(ndev); 1899 1900 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1901 } 1902 1903 /** 1904 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1905 * @ndev: Pointer to net_device structure 1906 * @ecoalesce: Pointer to ethtool_coalesce structure 1907 * @kernel_coal: ethtool CQE mode setting structure 1908 * @extack: extack for reporting error messages 1909 * 1910 * This implements ethtool command for getting the DMA interrupt coalescing 1911 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1912 * execute this function. 1913 * 1914 * Return: 0 always 1915 */ 1916 static int 1917 axienet_ethtools_get_coalesce(struct net_device *ndev, 1918 struct ethtool_coalesce *ecoalesce, 1919 struct kernel_ethtool_coalesce *kernel_coal, 1920 struct netlink_ext_ack *extack) 1921 { 1922 struct axienet_local *lp = netdev_priv(ndev); 1923 1924 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; 1925 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; 1926 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; 1927 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; 1928 return 0; 1929 } 1930 1931 /** 1932 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1933 * @ndev: Pointer to net_device structure 1934 * @ecoalesce: Pointer to ethtool_coalesce structure 1935 * @kernel_coal: ethtool CQE mode setting structure 1936 * @extack: extack for reporting error messages 1937 * 1938 * This implements ethtool command for setting the DMA interrupt coalescing 1939 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1940 * prompt to execute this function. 1941 * 1942 * Return: 0 on success, non-zero error value on failure.
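 *
 * Worked example (illustrative values only, assuming the interface is eth0):
 *   ip link set eth0 down
 *   ethtool -C eth0 rx-frames 16 rx-usecs 50 tx-frames 32 tx-usecs 100
 *   ip link set eth0 up
 * The interface has to be down first because this handler returns -EBUSY
 * (with an extack message) while netif is running. Only parameters given
 * with non-zero values are applied; omitted ones keep their current setting.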
1943 */ 1944 static int 1945 axienet_ethtools_set_coalesce(struct net_device *ndev, 1946 struct ethtool_coalesce *ecoalesce, 1947 struct kernel_ethtool_coalesce *kernel_coal, 1948 struct netlink_ext_ack *extack) 1949 { 1950 struct axienet_local *lp = netdev_priv(ndev); 1951 1952 if (netif_running(ndev)) { 1953 NL_SET_ERR_MSG(extack, 1954 "Please stop netif before applying configuration"); 1955 return -EBUSY; 1956 } 1957 1958 if (ecoalesce->rx_max_coalesced_frames) 1959 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1960 if (ecoalesce->rx_coalesce_usecs) 1961 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; 1962 if (ecoalesce->tx_max_coalesced_frames) 1963 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1964 if (ecoalesce->tx_coalesce_usecs) 1965 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; 1966 1967 return 0; 1968 } 1969 1970 static int 1971 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1972 struct ethtool_link_ksettings *cmd) 1973 { 1974 struct axienet_local *lp = netdev_priv(ndev); 1975 1976 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1977 } 1978 1979 static int 1980 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1981 const struct ethtool_link_ksettings *cmd) 1982 { 1983 struct axienet_local *lp = netdev_priv(ndev); 1984 1985 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1986 } 1987 1988 static int axienet_ethtools_nway_reset(struct net_device *dev) 1989 { 1990 struct axienet_local *lp = netdev_priv(dev); 1991 1992 return phylink_ethtool_nway_reset(lp->phylink); 1993 } 1994 1995 static const struct ethtool_ops axienet_ethtool_ops = { 1996 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 1997 ETHTOOL_COALESCE_USECS, 1998 .get_drvinfo = axienet_ethtools_get_drvinfo, 1999 .get_regs_len = axienet_ethtools_get_regs_len, 2000 .get_regs = axienet_ethtools_get_regs, 2001 .get_link = ethtool_op_get_link, 2002 .get_ringparam = axienet_ethtools_get_ringparam, 2003 .set_ringparam = axienet_ethtools_set_ringparam, 2004 .get_pauseparam = axienet_ethtools_get_pauseparam, 2005 .set_pauseparam = axienet_ethtools_set_pauseparam, 2006 .get_coalesce = axienet_ethtools_get_coalesce, 2007 .set_coalesce = axienet_ethtools_set_coalesce, 2008 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 2009 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 2010 .nway_reset = axienet_ethtools_nway_reset, 2011 }; 2012 2013 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 2014 { 2015 return container_of(pcs, struct axienet_local, pcs); 2016 } 2017 2018 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 2019 struct phylink_link_state *state) 2020 { 2021 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2022 2023 phylink_mii_c22_pcs_get_state(pcs_phy, state); 2024 } 2025 2026 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 2027 { 2028 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2029 2030 phylink_mii_c22_pcs_an_restart(pcs_phy); 2031 } 2032 2033 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2034 phy_interface_t interface, 2035 const unsigned long *advertising, 2036 bool permit_pause_to_mac) 2037 { 2038 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2039 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 2040 struct axienet_local *lp = netdev_priv(ndev); 2041 int ret; 2042 2043 if (lp->switch_x_sgmii) { 2044 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 2045 interface 
== PHY_INTERFACE_MODE_SGMII ? 2046 XLNX_MII_STD_SELECT_SGMII : 0); 2047 if (ret < 0) { 2048 netdev_warn(ndev, 2049 "Failed to switch PHY interface: %d\n", 2050 ret); 2051 return ret; 2052 } 2053 } 2054 2055 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 2056 neg_mode); 2057 if (ret < 0) 2058 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 2059 2060 return ret; 2061 } 2062 2063 static const struct phylink_pcs_ops axienet_pcs_ops = { 2064 .pcs_get_state = axienet_pcs_get_state, 2065 .pcs_config = axienet_pcs_config, 2066 .pcs_an_restart = axienet_pcs_an_restart, 2067 }; 2068 2069 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 2070 phy_interface_t interface) 2071 { 2072 struct net_device *ndev = to_net_dev(config->dev); 2073 struct axienet_local *lp = netdev_priv(ndev); 2074 2075 if (interface == PHY_INTERFACE_MODE_1000BASEX || 2076 interface == PHY_INTERFACE_MODE_SGMII) 2077 return &lp->pcs; 2078 2079 return NULL; 2080 } 2081 2082 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 2083 const struct phylink_link_state *state) 2084 { 2085 /* nothing meaningful to do */ 2086 } 2087 2088 static void axienet_mac_link_down(struct phylink_config *config, 2089 unsigned int mode, 2090 phy_interface_t interface) 2091 { 2092 /* nothing meaningful to do */ 2093 } 2094 2095 static void axienet_mac_link_up(struct phylink_config *config, 2096 struct phy_device *phy, 2097 unsigned int mode, phy_interface_t interface, 2098 int speed, int duplex, 2099 bool tx_pause, bool rx_pause) 2100 { 2101 struct net_device *ndev = to_net_dev(config->dev); 2102 struct axienet_local *lp = netdev_priv(ndev); 2103 u32 emmc_reg, fcc_reg; 2104 2105 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 2106 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 2107 2108 switch (speed) { 2109 case SPEED_1000: 2110 emmc_reg |= XAE_EMMC_LINKSPD_1000; 2111 break; 2112 case SPEED_100: 2113 emmc_reg |= XAE_EMMC_LINKSPD_100; 2114 break; 2115 case SPEED_10: 2116 emmc_reg |= XAE_EMMC_LINKSPD_10; 2117 break; 2118 default: 2119 dev_err(&ndev->dev, 2120 "Speed other than 10, 100 or 1Gbps is not supported\n"); 2121 break; 2122 } 2123 2124 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 2125 2126 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 2127 if (tx_pause) 2128 fcc_reg |= XAE_FCC_FCTX_MASK; 2129 else 2130 fcc_reg &= ~XAE_FCC_FCTX_MASK; 2131 if (rx_pause) 2132 fcc_reg |= XAE_FCC_FCRX_MASK; 2133 else 2134 fcc_reg &= ~XAE_FCC_FCRX_MASK; 2135 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 2136 } 2137 2138 static const struct phylink_mac_ops axienet_phylink_ops = { 2139 .mac_select_pcs = axienet_mac_select_pcs, 2140 .mac_config = axienet_mac_config, 2141 .mac_link_down = axienet_mac_link_down, 2142 .mac_link_up = axienet_mac_link_up, 2143 }; 2144 2145 /** 2146 * axienet_dma_err_handler - Work queue task for Axi DMA Error 2147 * @work: pointer to work_struct 2148 * 2149 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 2150 * Tx/Rx BDs. 
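 *
 * Recovery sequence, summarising the body below: return early if the
 * interface is already stopping, disable NAPI and the MAC Tx/Rx options,
 * stop both DMA channels, unmap and clear every Tx/Rx buffer descriptor,
 * restart the DMA, then restore the MAC address, multicast filters, NAPI
 * and the previously configured options.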
2151 */ 2152 static void axienet_dma_err_handler(struct work_struct *work) 2153 { 2154 u32 i; 2155 u32 axienet_status; 2156 struct axidma_bd *cur_p; 2157 struct axienet_local *lp = container_of(work, struct axienet_local, 2158 dma_err_task); 2159 struct net_device *ndev = lp->ndev; 2160 2161 /* Don't bother if we are going to stop anyway */ 2162 if (READ_ONCE(lp->stopping)) 2163 return; 2164 2165 napi_disable(&lp->napi_tx); 2166 napi_disable(&lp->napi_rx); 2167 2168 axienet_setoptions(ndev, lp->options & 2169 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2170 2171 axienet_dma_stop(lp); 2172 2173 for (i = 0; i < lp->tx_bd_num; i++) { 2174 cur_p = &lp->tx_bd_v[i]; 2175 if (cur_p->cntrl) { 2176 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 2177 2178 dma_unmap_single(lp->dev, addr, 2179 (cur_p->cntrl & 2180 XAXIDMA_BD_CTRL_LENGTH_MASK), 2181 DMA_TO_DEVICE); 2182 } 2183 if (cur_p->skb) 2184 dev_kfree_skb_irq(cur_p->skb); 2185 cur_p->phys = 0; 2186 cur_p->phys_msb = 0; 2187 cur_p->cntrl = 0; 2188 cur_p->status = 0; 2189 cur_p->app0 = 0; 2190 cur_p->app1 = 0; 2191 cur_p->app2 = 0; 2192 cur_p->app3 = 0; 2193 cur_p->app4 = 0; 2194 cur_p->skb = NULL; 2195 } 2196 2197 for (i = 0; i < lp->rx_bd_num; i++) { 2198 cur_p = &lp->rx_bd_v[i]; 2199 cur_p->status = 0; 2200 cur_p->app0 = 0; 2201 cur_p->app1 = 0; 2202 cur_p->app2 = 0; 2203 cur_p->app3 = 0; 2204 cur_p->app4 = 0; 2205 } 2206 2207 lp->tx_bd_ci = 0; 2208 lp->tx_bd_tail = 0; 2209 lp->rx_bd_ci = 0; 2210 2211 axienet_dma_start(lp); 2212 2213 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 2214 axienet_status &= ~XAE_RCW1_RX_MASK; 2215 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 2216 2217 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 2218 if (axienet_status & XAE_INT_RXRJECT_MASK) 2219 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 2220 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 2221 XAE_INT_RECV_ERROR_MASK : 0); 2222 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 2223 2224 /* Sync default options with HW but leave receiver and 2225 * transmitter disabled. 2226 */ 2227 axienet_setoptions(ndev, lp->options & 2228 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2229 axienet_set_mac_address(ndev, NULL); 2230 axienet_set_multicast_list(ndev); 2231 napi_enable(&lp->napi_rx); 2232 napi_enable(&lp->napi_tx); 2233 axienet_setoptions(ndev, lp->options); 2234 } 2235 2236 /** 2237 * axienet_probe - Axi Ethernet probe function. 2238 * @pdev: Pointer to platform device structure. 2239 * 2240 * Return: 0, on success 2241 * Non-zero error value on failure. 2242 * 2243 * This is the probe routine for Axi Ethernet driver. This is called before 2244 * any other driver routines are invoked. It allocates and sets up the Ethernet 2245 * device. Parses through device tree and populates fields of 2246 * axienet_local. It registers the Ethernet device. 
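 *
 * Illustrative device-tree fragment only. The property names below are the
 * ones parsed by this probe; every value, unit address and node label is a
 * made-up example, not a reference binding:
 *   ethernet@40c00000 {
 *           phy-mode = "sgmii";
 *           xlnx,rxmem = <0x4000>;
 *           xlnx,txcsum = <2>;
 *           xlnx,rxcsum = <2>;
 *           clock-names = "s_axi_lite_clk", "axis_clk", "ref_clk", "mgt_clk";
 *           axistream-connected = <&axi_dma>;
 *           pcs-handle = <&sgmii_pcs>;
 *   };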
2247 */ 2248 static int axienet_probe(struct platform_device *pdev) 2249 { 2250 int ret; 2251 struct device_node *np; 2252 struct axienet_local *lp; 2253 struct net_device *ndev; 2254 struct resource *ethres; 2255 u8 mac_addr[ETH_ALEN]; 2256 int addr_width = 32; 2257 u32 value; 2258 2259 ndev = alloc_etherdev(sizeof(*lp)); 2260 if (!ndev) 2261 return -ENOMEM; 2262 2263 platform_set_drvdata(pdev, ndev); 2264 2265 SET_NETDEV_DEV(ndev, &pdev->dev); 2266 ndev->features = NETIF_F_SG; 2267 ndev->ethtool_ops = &axienet_ethtool_ops; 2268 2269 /* MTU range: 64 - 9000 */ 2270 ndev->min_mtu = 64; 2271 ndev->max_mtu = XAE_JUMBO_MTU; 2272 2273 lp = netdev_priv(ndev); 2274 lp->ndev = ndev; 2275 lp->dev = &pdev->dev; 2276 lp->options = XAE_OPTION_DEFAULTS; 2277 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 2278 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 2279 2280 u64_stats_init(&lp->rx_stat_sync); 2281 u64_stats_init(&lp->tx_stat_sync); 2282 2283 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 2284 if (!lp->axi_clk) { 2285 /* For backward compatibility, if named AXI clock is not present, 2286 * treat the first clock specified as the AXI clock. 2287 */ 2288 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 2289 } 2290 if (IS_ERR(lp->axi_clk)) { 2291 ret = PTR_ERR(lp->axi_clk); 2292 goto free_netdev; 2293 } 2294 ret = clk_prepare_enable(lp->axi_clk); 2295 if (ret) { 2296 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 2297 goto free_netdev; 2298 } 2299 2300 lp->misc_clks[0].id = "axis_clk"; 2301 lp->misc_clks[1].id = "ref_clk"; 2302 lp->misc_clks[2].id = "mgt_clk"; 2303 2304 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2305 if (ret) 2306 goto cleanup_clk; 2307 2308 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2309 if (ret) 2310 goto cleanup_clk; 2311 2312 /* Map device registers */ 2313 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); 2314 if (IS_ERR(lp->regs)) { 2315 ret = PTR_ERR(lp->regs); 2316 goto cleanup_clk; 2317 } 2318 lp->regs_start = ethres->start; 2319 2320 /* Setup checksum offload, but default to off if not specified */ 2321 lp->features = 0; 2322 2323 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 2324 if (!ret) { 2325 switch (value) { 2326 case 1: 2327 lp->csum_offload_on_tx_path = 2328 XAE_FEATURE_PARTIAL_TX_CSUM; 2329 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 2330 /* Can checksum TCP/UDP over IPv4. */ 2331 ndev->features |= NETIF_F_IP_CSUM; 2332 break; 2333 case 2: 2334 lp->csum_offload_on_tx_path = 2335 XAE_FEATURE_FULL_TX_CSUM; 2336 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 2337 /* Can checksum TCP/UDP over IPv4. */ 2338 ndev->features |= NETIF_F_IP_CSUM; 2339 break; 2340 default: 2341 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 2342 } 2343 } 2344 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 2345 if (!ret) { 2346 switch (value) { 2347 case 1: 2348 lp->csum_offload_on_rx_path = 2349 XAE_FEATURE_PARTIAL_RX_CSUM; 2350 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 2351 break; 2352 case 2: 2353 lp->csum_offload_on_rx_path = 2354 XAE_FEATURE_FULL_RX_CSUM; 2355 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 2356 break; 2357 default: 2358 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 2359 } 2360 } 2361 /* For supporting jumbo frames, the Axi Ethernet hardware must have 2362 * a larger Rx/Tx Memory. Typically, the size must be large so that 2363 * we can enable jumbo option and start supporting jumbo frames.
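 * (Illustrative numbers: with xlnx,rxmem = <0x4000>, i.e. 16 KiB of Rx
 * memory, axienet_change_mtu() accepts a new MTU only while new_mtu +
 * VLAN_ETH_HLEN + XAE_TRL_SIZE stays at or below 0x4000, and returns
 * -EINVAL otherwise.)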
2364 * Here we check for memory allocated for Rx/Tx in the hardware from 2365 * the device-tree and accordingly set flags. 2366 */ 2367 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 2368 2369 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 2370 "xlnx,switch-x-sgmii"); 2371 2372 /* Start with the proprietary, and broken phy_type */ 2373 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 2374 if (!ret) { 2375 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 2376 switch (value) { 2377 case XAE_PHY_TYPE_MII: 2378 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2379 break; 2380 case XAE_PHY_TYPE_GMII: 2381 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2382 break; 2383 case XAE_PHY_TYPE_RGMII_2_0: 2384 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2385 break; 2386 case XAE_PHY_TYPE_SGMII: 2387 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2388 break; 2389 case XAE_PHY_TYPE_1000BASE_X: 2390 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2391 break; 2392 default: 2393 ret = -EINVAL; 2394 goto cleanup_clk; 2395 } 2396 } else { 2397 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2398 if (ret) 2399 goto cleanup_clk; 2400 } 2401 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2402 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2403 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2404 ret = -EINVAL; 2405 goto cleanup_clk; 2406 } 2407 2408 if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) { 2409 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2410 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2411 2412 if (np) { 2413 struct resource dmares; 2414 2415 ret = of_address_to_resource(np, 0, &dmares); 2416 if (ret) { 2417 dev_err(&pdev->dev, 2418 "unable to get DMA resource\n"); 2419 of_node_put(np); 2420 goto cleanup_clk; 2421 } 2422 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2423 &dmares); 2424 lp->rx_irq = irq_of_parse_and_map(np, 1); 2425 lp->tx_irq = irq_of_parse_and_map(np, 0); 2426 of_node_put(np); 2427 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2428 } else { 2429 /* Check for these resources directly on the Ethernet node. */ 2430 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2431 lp->rx_irq = platform_get_irq(pdev, 1); 2432 lp->tx_irq = platform_get_irq(pdev, 0); 2433 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2434 } 2435 if (IS_ERR(lp->dma_regs)) { 2436 dev_err(&pdev->dev, "could not map DMA regs\n"); 2437 ret = PTR_ERR(lp->dma_regs); 2438 goto cleanup_clk; 2439 } 2440 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { 2441 dev_err(&pdev->dev, "could not determine irqs\n"); 2442 ret = -ENOMEM; 2443 goto cleanup_clk; 2444 } 2445 2446 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2447 ret = __axienet_device_reset(lp); 2448 if (ret) 2449 goto cleanup_clk; 2450 2451 /* Autodetect the need for 64-bit DMA pointers. 2452 * When the IP is configured for a bus width bigger than 32 bits, 2453 * writing the MSB registers is mandatory, even if they are all 0. 2454 * We can detect this case by writing all 1's to one such register 2455 * and see if that sticks: when the IP is configured for 32 bits 2456 * only, those registers are RES0. 2457 * Those MSB registers were introduced in IP v7.1, which we check first. 
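 *
 * Sketch of the probe sequence implemented below (MSB denotes the upper
 * half of the Tx current-descriptor register):
 *   iowrite32(0x0, MSB);        read back 0        - register looks sane
 *   iowrite32(0xffffffff, MSB); read back non-zero - bits stick, 64-bit DMA
 *   iowrite32(0x0, MSB);        leave the register cleared again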
2458 */ 2459 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2460 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2461 2462 iowrite32(0x0, desc); 2463 if (ioread32(desc) == 0) { /* sanity check */ 2464 iowrite32(0xffffffff, desc); 2465 if (ioread32(desc) > 0) { 2466 lp->features |= XAE_FEATURE_DMA_64BIT; 2467 addr_width = 64; 2468 dev_info(&pdev->dev, 2469 "autodetected 64-bit DMA range\n"); 2470 } 2471 iowrite32(0x0, desc); 2472 } 2473 } 2474 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2475 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 2476 ret = -EINVAL; 2477 goto cleanup_clk; 2478 } 2479 2480 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2481 if (ret) { 2482 dev_err(&pdev->dev, "No suitable DMA available\n"); 2483 goto cleanup_clk; 2484 } 2485 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 2486 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 2487 } else { 2488 struct xilinx_vdma_config cfg; 2489 struct dma_chan *tx_chan; 2490 2491 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2492 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { 2493 ret = lp->eth_irq; 2494 goto cleanup_clk; 2495 } 2496 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 2497 if (IS_ERR(tx_chan)) { 2498 ret = PTR_ERR(tx_chan); 2499 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); 2500 goto cleanup_clk; 2501 } 2502 2503 cfg.reset = 1; 2504 /* Despite the VDMA name, this call also supports plain DMA channel reset */ 2505 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg); 2506 if (ret < 0) { 2507 dev_err(&pdev->dev, "Reset channel failed\n"); 2508 dma_release_channel(tx_chan); 2509 goto cleanup_clk; 2510 } 2511 2512 dma_release_channel(tx_chan); 2513 lp->use_dmaengine = 1; 2514 } 2515 2516 if (lp->use_dmaengine) 2517 ndev->netdev_ops = &axienet_netdev_dmaengine_ops; 2518 else 2519 ndev->netdev_ops = &axienet_netdev_ops; 2520 /* Check for Ethernet core IRQ (optional) */ 2521 if (lp->eth_irq <= 0) 2522 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2523 2524 /* Retrieve the MAC address */ 2525 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2526 if (!ret) { 2527 axienet_set_mac_address(ndev, mac_addr); 2528 } else { 2529 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2530 ret); 2531 axienet_set_mac_address(ndev, NULL); 2532 } 2533 2534 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2535 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2536 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2537 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2538 2539 ret = axienet_mdio_setup(lp); 2540 if (ret) 2541 dev_warn(&pdev->dev, 2542 "error registering MDIO bus: %d\n", ret); 2543 2544 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2545 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2546 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2547 if (!np) { 2548 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2549 * Falling back to "phy-handle" here is only for 2550 * backward compatibility with old device trees.
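 *
 * Illustrative property usage (node labels are made up):
 *   pcs-handle = <&sgmii_pcs>;   preferred on current device trees
 *   phy-handle = <&phy0>;        legacy fallback handled just below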
2551 */ 2552 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2553 } 2554 if (!np) { 2555 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2556 ret = -EINVAL; 2557 goto cleanup_mdio; 2558 } 2559 lp->pcs_phy = of_mdio_find_device(np); 2560 if (!lp->pcs_phy) { 2561 ret = -EPROBE_DEFER; 2562 of_node_put(np); 2563 goto cleanup_mdio; 2564 } 2565 of_node_put(np); 2566 lp->pcs.ops = &axienet_pcs_ops; 2567 lp->pcs.neg_mode = true; 2568 lp->pcs.poll = true; 2569 } 2570 2571 lp->phylink_config.dev = &ndev->dev; 2572 lp->phylink_config.type = PHYLINK_NETDEV; 2573 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2574 MAC_10FD | MAC_100FD | MAC_1000FD; 2575 2576 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2577 if (lp->switch_x_sgmii) { 2578 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2579 lp->phylink_config.supported_interfaces); 2580 __set_bit(PHY_INTERFACE_MODE_SGMII, 2581 lp->phylink_config.supported_interfaces); 2582 } 2583 2584 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2585 lp->phy_mode, 2586 &axienet_phylink_ops); 2587 if (IS_ERR(lp->phylink)) { 2588 ret = PTR_ERR(lp->phylink); 2589 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2590 goto cleanup_mdio; 2591 } 2592 2593 ret = register_netdev(lp->ndev); 2594 if (ret) { 2595 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2596 goto cleanup_phylink; 2597 } 2598 2599 return 0; 2600 2601 cleanup_phylink: 2602 phylink_destroy(lp->phylink); 2603 2604 cleanup_mdio: 2605 if (lp->pcs_phy) 2606 put_device(&lp->pcs_phy->dev); 2607 if (lp->mii_bus) 2608 axienet_mdio_teardown(lp); 2609 cleanup_clk: 2610 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2611 clk_disable_unprepare(lp->axi_clk); 2612 2613 free_netdev: 2614 free_netdev(ndev); 2615 2616 return ret; 2617 } 2618 2619 static void axienet_remove(struct platform_device *pdev) 2620 { 2621 struct net_device *ndev = platform_get_drvdata(pdev); 2622 struct axienet_local *lp = netdev_priv(ndev); 2623 2624 unregister_netdev(ndev); 2625 2626 if (lp->phylink) 2627 phylink_destroy(lp->phylink); 2628 2629 if (lp->pcs_phy) 2630 put_device(&lp->pcs_phy->dev); 2631 2632 axienet_mdio_teardown(lp); 2633 2634 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2635 clk_disable_unprepare(lp->axi_clk); 2636 2637 free_netdev(ndev); 2638 } 2639 2640 static void axienet_shutdown(struct platform_device *pdev) 2641 { 2642 struct net_device *ndev = platform_get_drvdata(pdev); 2643 2644 rtnl_lock(); 2645 netif_device_detach(ndev); 2646 2647 if (netif_running(ndev)) 2648 dev_close(ndev); 2649 2650 rtnl_unlock(); 2651 } 2652 2653 static int axienet_suspend(struct device *dev) 2654 { 2655 struct net_device *ndev = dev_get_drvdata(dev); 2656 2657 if (!netif_running(ndev)) 2658 return 0; 2659 2660 netif_device_detach(ndev); 2661 2662 rtnl_lock(); 2663 axienet_stop(ndev); 2664 rtnl_unlock(); 2665 2666 return 0; 2667 } 2668 2669 static int axienet_resume(struct device *dev) 2670 { 2671 struct net_device *ndev = dev_get_drvdata(dev); 2672 2673 if (!netif_running(ndev)) 2674 return 0; 2675 2676 rtnl_lock(); 2677 axienet_open(ndev); 2678 rtnl_unlock(); 2679 2680 netif_device_attach(ndev); 2681 2682 return 0; 2683 } 2684 2685 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2686 axienet_suspend, axienet_resume); 2687 2688 static struct platform_driver axienet_driver = { 2689 .probe = axienet_probe, 2690 .remove_new = axienet_remove, 2691 .shutdown = axienet_shutdown, 2692 .driver 
= { 2693 .name = "xilinx_axienet", 2694 .pm = &axienet_pm_ops, 2695 .of_match_table = axienet_of_match, 2696 }, 2697 }; 2698 2699 module_platform_driver(axienet_driver); 2700 2701 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2702 MODULE_AUTHOR("Xilinx"); 2703 MODULE_LICENSE("GPL"); 2704