// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	dma_addr_t phys;
	int i;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			napi_consume_skb(cur_p->skb, budget);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or any non space errors.
 *	   NETDEV_TX_BUSY when free element in TX skb ring buffer
 *	   is not available.
 *
 * This function is invoked to initiate transmission. It sets up the skb,
 * registers the DMA callback, and submits the DMA transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
						    sg_len, DMA_MEM_TO_DEV,
						    DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);

	if (packets) {
		lp->tx_bd_ci += packets;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;

		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the skbuf_dma_descriptor structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_tx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_rx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
 * then add the callback information and submit the descriptor.
 *
 * @ndev: net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dmaengine initialization code.
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dma initialization code. It also allocates interrupt
 * service routines, enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "%s\n", __func__);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	dev_dbg(&ndev->dev, "axienet_close()\n");

	if (!lp->use_dmaengine) {
		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, or a negative error code on failure.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	if (!lp->use_dmaengine) {
		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
	}
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
1848 ering->tx_pending < TX_BD_NUM_MIN || 1849 ering->tx_pending > TX_BD_NUM_MAX) 1850 return -EINVAL; 1851 1852 if (netif_running(ndev)) 1853 return -EBUSY; 1854 1855 lp->rx_bd_num = ering->rx_pending; 1856 lp->tx_bd_num = ering->tx_pending; 1857 return 0; 1858 } 1859 1860 /** 1861 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1862 * Tx and Rx paths. 1863 * @ndev: Pointer to net_device structure 1864 * @epauseparm: Pointer to ethtool_pauseparam structure. 1865 * 1866 * This implements ethtool command for getting the Axi Ethernet pause frame 1867 * setting. Issue "ethtool -a ethX" to execute this function. 1868 */ 1869 static void 1870 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1871 struct ethtool_pauseparam *epauseparm) 1872 { 1873 struct axienet_local *lp = netdev_priv(ndev); 1874 1875 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1876 } 1877 1878 /** 1879 * axienet_ethtools_set_pauseparam - Set device pause parameter (flow control) 1880 * settings. 1881 * @ndev: Pointer to net_device structure 1882 * @epauseparm: Pointer to ethtool_pauseparam structure 1883 * 1884 * This implements ethtool command for enabling flow control on Rx and Tx 1885 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1886 * function. 1887 * 1888 * Return: 0 on success, or a negative error code returned by phylink on failure 1889 */ 1890 static int 1891 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1892 struct ethtool_pauseparam *epauseparm) 1893 { 1894 struct axienet_local *lp = netdev_priv(ndev); 1895 1896 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1897 } 1898 1899 /** 1900 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1901 * @ndev: Pointer to net_device structure 1902 * @ecoalesce: Pointer to ethtool_coalesce structure 1903 * @kernel_coal: ethtool CQE mode setting structure 1904 * @extack: extack for reporting error messages 1905 * 1906 * This implements ethtool command for getting the DMA interrupt coalescing 1907 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1908 * execute this function. 1909 * 1910 * Return: 0 always 1911 */ 1912 static int 1913 axienet_ethtools_get_coalesce(struct net_device *ndev, 1914 struct ethtool_coalesce *ecoalesce, 1915 struct kernel_ethtool_coalesce *kernel_coal, 1916 struct netlink_ext_ack *extack) 1917 { 1918 struct axienet_local *lp = netdev_priv(ndev); 1919 1920 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; 1921 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; 1922 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; 1923 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; 1924 return 0; 1925 } 1926 1927 /** 1928 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1929 * @ndev: Pointer to net_device structure 1930 * @ecoalesce: Pointer to ethtool_coalesce structure 1931 * @kernel_coal: ethtool CQE mode setting structure 1932 * @extack: extack for reporting error messages 1933 * 1934 * This implements ethtool command for setting the DMA interrupt coalescing 1935 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1936 * prompt to execute this function. 1937 * 1938 * Return: 0 on success, non-zero error value on failure.
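* Only non-zero fields update the stored settings; a zero field leaves the corresponding value unchanged. The interface must be down while reconfiguring, otherwise -EBUSY is returned.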
1939 */ 1940 static int 1941 axienet_ethtools_set_coalesce(struct net_device *ndev, 1942 struct ethtool_coalesce *ecoalesce, 1943 struct kernel_ethtool_coalesce *kernel_coal, 1944 struct netlink_ext_ack *extack) 1945 { 1946 struct axienet_local *lp = netdev_priv(ndev); 1947 1948 if (netif_running(ndev)) { 1949 NL_SET_ERR_MSG(extack, 1950 "Please stop netif before applying configuration"); 1951 return -EBUSY; 1952 } 1953 1954 if (ecoalesce->rx_max_coalesced_frames) 1955 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1956 if (ecoalesce->rx_coalesce_usecs) 1957 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; 1958 if (ecoalesce->tx_max_coalesced_frames) 1959 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1960 if (ecoalesce->tx_coalesce_usecs) 1961 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; 1962 1963 return 0; 1964 } 1965 1966 static int 1967 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1968 struct ethtool_link_ksettings *cmd) 1969 { 1970 struct axienet_local *lp = netdev_priv(ndev); 1971 1972 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1973 } 1974 1975 static int 1976 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1977 const struct ethtool_link_ksettings *cmd) 1978 { 1979 struct axienet_local *lp = netdev_priv(ndev); 1980 1981 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1982 } 1983 1984 static int axienet_ethtools_nway_reset(struct net_device *dev) 1985 { 1986 struct axienet_local *lp = netdev_priv(dev); 1987 1988 return phylink_ethtool_nway_reset(lp->phylink); 1989 } 1990 1991 static const struct ethtool_ops axienet_ethtool_ops = { 1992 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 1993 ETHTOOL_COALESCE_USECS, 1994 .get_drvinfo = axienet_ethtools_get_drvinfo, 1995 .get_regs_len = axienet_ethtools_get_regs_len, 1996 .get_regs = axienet_ethtools_get_regs, 1997 .get_link = ethtool_op_get_link, 1998 .get_ringparam = axienet_ethtools_get_ringparam, 1999 .set_ringparam = axienet_ethtools_set_ringparam, 2000 .get_pauseparam = axienet_ethtools_get_pauseparam, 2001 .set_pauseparam = axienet_ethtools_set_pauseparam, 2002 .get_coalesce = axienet_ethtools_get_coalesce, 2003 .set_coalesce = axienet_ethtools_set_coalesce, 2004 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 2005 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 2006 .nway_reset = axienet_ethtools_nway_reset, 2007 }; 2008 2009 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 2010 { 2011 return container_of(pcs, struct axienet_local, pcs); 2012 } 2013 2014 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 2015 struct phylink_link_state *state) 2016 { 2017 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2018 2019 phylink_mii_c22_pcs_get_state(pcs_phy, state); 2020 } 2021 2022 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 2023 { 2024 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2025 2026 phylink_mii_c22_pcs_an_restart(pcs_phy); 2027 } 2028 2029 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2030 phy_interface_t interface, 2031 const unsigned long *advertising, 2032 bool permit_pause_to_mac) 2033 { 2034 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2035 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 2036 struct axienet_local *lp = netdev_priv(ndev); 2037 int ret; 2038 2039 if (lp->switch_x_sgmii) { 2040 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 2041 interface 
== PHY_INTERFACE_MODE_SGMII ? 2042 XLNX_MII_STD_SELECT_SGMII : 0); 2043 if (ret < 0) { 2044 netdev_warn(ndev, 2045 "Failed to switch PHY interface: %d\n", 2046 ret); 2047 return ret; 2048 } 2049 } 2050 2051 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 2052 neg_mode); 2053 if (ret < 0) 2054 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 2055 2056 return ret; 2057 } 2058 2059 static const struct phylink_pcs_ops axienet_pcs_ops = { 2060 .pcs_get_state = axienet_pcs_get_state, 2061 .pcs_config = axienet_pcs_config, 2062 .pcs_an_restart = axienet_pcs_an_restart, 2063 }; 2064 2065 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 2066 phy_interface_t interface) 2067 { 2068 struct net_device *ndev = to_net_dev(config->dev); 2069 struct axienet_local *lp = netdev_priv(ndev); 2070 2071 if (interface == PHY_INTERFACE_MODE_1000BASEX || 2072 interface == PHY_INTERFACE_MODE_SGMII) 2073 return &lp->pcs; 2074 2075 return NULL; 2076 } 2077 2078 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 2079 const struct phylink_link_state *state) 2080 { 2081 /* nothing meaningful to do */ 2082 } 2083 2084 static void axienet_mac_link_down(struct phylink_config *config, 2085 unsigned int mode, 2086 phy_interface_t interface) 2087 { 2088 /* nothing meaningful to do */ 2089 } 2090 2091 static void axienet_mac_link_up(struct phylink_config *config, 2092 struct phy_device *phy, 2093 unsigned int mode, phy_interface_t interface, 2094 int speed, int duplex, 2095 bool tx_pause, bool rx_pause) 2096 { 2097 struct net_device *ndev = to_net_dev(config->dev); 2098 struct axienet_local *lp = netdev_priv(ndev); 2099 u32 emmc_reg, fcc_reg; 2100 2101 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 2102 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 2103 2104 switch (speed) { 2105 case SPEED_1000: 2106 emmc_reg |= XAE_EMMC_LINKSPD_1000; 2107 break; 2108 case SPEED_100: 2109 emmc_reg |= XAE_EMMC_LINKSPD_100; 2110 break; 2111 case SPEED_10: 2112 emmc_reg |= XAE_EMMC_LINKSPD_10; 2113 break; 2114 default: 2115 dev_err(&ndev->dev, 2116 "Speed other than 10, 100 or 1Gbps is not supported\n"); 2117 break; 2118 } 2119 2120 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 2121 2122 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 2123 if (tx_pause) 2124 fcc_reg |= XAE_FCC_FCTX_MASK; 2125 else 2126 fcc_reg &= ~XAE_FCC_FCTX_MASK; 2127 if (rx_pause) 2128 fcc_reg |= XAE_FCC_FCRX_MASK; 2129 else 2130 fcc_reg &= ~XAE_FCC_FCRX_MASK; 2131 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 2132 } 2133 2134 static const struct phylink_mac_ops axienet_phylink_ops = { 2135 .mac_select_pcs = axienet_mac_select_pcs, 2136 .mac_config = axienet_mac_config, 2137 .mac_link_down = axienet_mac_link_down, 2138 .mac_link_up = axienet_mac_link_up, 2139 }; 2140 2141 /** 2142 * axienet_dma_err_handler - Work queue task for Axi DMA Error 2143 * @work: pointer to work_struct 2144 * 2145 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 2146 * Tx/Rx BDs. 
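* NAPI and the MAC Tx/Rx paths are disabled while the DMA is stopped and both descriptor rings are cleaned; the DMA is then restarted and the MAC options, MAC address and multicast filter are restored before NAPI is re-enabled.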
2147 */ 2148 static void axienet_dma_err_handler(struct work_struct *work) 2149 { 2150 u32 i; 2151 u32 axienet_status; 2152 struct axidma_bd *cur_p; 2153 struct axienet_local *lp = container_of(work, struct axienet_local, 2154 dma_err_task); 2155 struct net_device *ndev = lp->ndev; 2156 2157 napi_disable(&lp->napi_tx); 2158 napi_disable(&lp->napi_rx); 2159 2160 axienet_setoptions(ndev, lp->options & 2161 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2162 2163 axienet_dma_stop(lp); 2164 2165 for (i = 0; i < lp->tx_bd_num; i++) { 2166 cur_p = &lp->tx_bd_v[i]; 2167 if (cur_p->cntrl) { 2168 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 2169 2170 dma_unmap_single(lp->dev, addr, 2171 (cur_p->cntrl & 2172 XAXIDMA_BD_CTRL_LENGTH_MASK), 2173 DMA_TO_DEVICE); 2174 } 2175 if (cur_p->skb) 2176 dev_kfree_skb_irq(cur_p->skb); 2177 cur_p->phys = 0; 2178 cur_p->phys_msb = 0; 2179 cur_p->cntrl = 0; 2180 cur_p->status = 0; 2181 cur_p->app0 = 0; 2182 cur_p->app1 = 0; 2183 cur_p->app2 = 0; 2184 cur_p->app3 = 0; 2185 cur_p->app4 = 0; 2186 cur_p->skb = NULL; 2187 } 2188 2189 for (i = 0; i < lp->rx_bd_num; i++) { 2190 cur_p = &lp->rx_bd_v[i]; 2191 cur_p->status = 0; 2192 cur_p->app0 = 0; 2193 cur_p->app1 = 0; 2194 cur_p->app2 = 0; 2195 cur_p->app3 = 0; 2196 cur_p->app4 = 0; 2197 } 2198 2199 lp->tx_bd_ci = 0; 2200 lp->tx_bd_tail = 0; 2201 lp->rx_bd_ci = 0; 2202 2203 axienet_dma_start(lp); 2204 2205 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 2206 axienet_status &= ~XAE_RCW1_RX_MASK; 2207 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 2208 2209 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 2210 if (axienet_status & XAE_INT_RXRJECT_MASK) 2211 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 2212 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 2213 XAE_INT_RECV_ERROR_MASK : 0); 2214 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 2215 2216 /* Sync default options with HW but leave receiver and 2217 * transmitter disabled. 2218 */ 2219 axienet_setoptions(ndev, lp->options & 2220 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2221 axienet_set_mac_address(ndev, NULL); 2222 axienet_set_multicast_list(ndev); 2223 napi_enable(&lp->napi_rx); 2224 napi_enable(&lp->napi_tx); 2225 axienet_setoptions(ndev, lp->options); 2226 } 2227 2228 /** 2229 * axienet_probe - Axi Ethernet probe function. 2230 * @pdev: Pointer to platform device structure. 2231 * 2232 * Return: 0, on success 2233 * Non-zero error value on failure. 2234 * 2235 * This is the probe routine for Axi Ethernet driver. This is called before 2236 * any other driver routines are invoked. It allocates and sets up the Ethernet 2237 * device. Parses through device tree and populates fields of 2238 * axienet_local. It registers the Ethernet device. 
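* Depending on whether the device tree carries a "dmas" property, the driver either programs the AXI DMA block directly (registering its own NAPI handlers) or hands the data path to the dmaengine framework and uses the corresponding netdev_ops.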
2239 */ 2240 static int axienet_probe(struct platform_device *pdev) 2241 { 2242 int ret; 2243 struct device_node *np; 2244 struct axienet_local *lp; 2245 struct net_device *ndev; 2246 struct resource *ethres; 2247 u8 mac_addr[ETH_ALEN]; 2248 int addr_width = 32; 2249 u32 value; 2250 2251 ndev = alloc_etherdev(sizeof(*lp)); 2252 if (!ndev) 2253 return -ENOMEM; 2254 2255 platform_set_drvdata(pdev, ndev); 2256 2257 SET_NETDEV_DEV(ndev, &pdev->dev); 2258 ndev->features = NETIF_F_SG; 2259 ndev->ethtool_ops = &axienet_ethtool_ops; 2260 2261 /* MTU range: 64 - 9000 */ 2262 ndev->min_mtu = 64; 2263 ndev->max_mtu = XAE_JUMBO_MTU; 2264 2265 lp = netdev_priv(ndev); 2266 lp->ndev = ndev; 2267 lp->dev = &pdev->dev; 2268 lp->options = XAE_OPTION_DEFAULTS; 2269 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 2270 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 2271 2272 u64_stats_init(&lp->rx_stat_sync); 2273 u64_stats_init(&lp->tx_stat_sync); 2274 2275 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 2276 if (!lp->axi_clk) { 2277 /* For backward compatibility, if named AXI clock is not present, 2278 * treat the first clock specified as the AXI clock. 2279 */ 2280 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 2281 } 2282 if (IS_ERR(lp->axi_clk)) { 2283 ret = PTR_ERR(lp->axi_clk); 2284 goto free_netdev; 2285 } 2286 ret = clk_prepare_enable(lp->axi_clk); 2287 if (ret) { 2288 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 2289 goto free_netdev; 2290 } 2291 2292 lp->misc_clks[0].id = "axis_clk"; 2293 lp->misc_clks[1].id = "ref_clk"; 2294 lp->misc_clks[2].id = "mgt_clk"; 2295 2296 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2297 if (ret) 2298 goto cleanup_clk; 2299 2300 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2301 if (ret) 2302 goto cleanup_clk; 2303 2304 /* Map device registers */ 2305 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); 2306 if (IS_ERR(lp->regs)) { 2307 ret = PTR_ERR(lp->regs); 2308 goto cleanup_clk; 2309 } 2310 lp->regs_start = ethres->start; 2311 2312 /* Setup checksum offload, but default to off if not specified */ 2313 lp->features = 0; 2314 2315 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 2316 if (!ret) { 2317 switch (value) { 2318 case 1: 2319 lp->csum_offload_on_tx_path = 2320 XAE_FEATURE_PARTIAL_TX_CSUM; 2321 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 2322 /* Can checksum TCP/UDP over IPv4. */ 2323 ndev->features |= NETIF_F_IP_CSUM; 2324 break; 2325 case 2: 2326 lp->csum_offload_on_tx_path = 2327 XAE_FEATURE_FULL_TX_CSUM; 2328 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 2329 /* Can checksum TCP/UDP over IPv4. */ 2330 ndev->features |= NETIF_F_IP_CSUM; 2331 break; 2332 default: 2333 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 2334 } 2335 } 2336 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 2337 if (!ret) { 2338 switch (value) { 2339 case 1: 2340 lp->csum_offload_on_rx_path = 2341 XAE_FEATURE_PARTIAL_RX_CSUM; 2342 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 2343 break; 2344 case 2: 2345 lp->csum_offload_on_rx_path = 2346 XAE_FEATURE_FULL_RX_CSUM; 2347 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 2348 break; 2349 default: 2350 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 2351 } 2352 } 2353 /* For supporting jumbo frames, the Axi Ethernet hardware must have 2354 * a larger Rx/Tx Memory. Typically, the size must be large enough so that 2355 * we can enable the jumbo option and start supporting jumbo frames.
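* The provisioned Rx buffer size is described by the "xlnx,rxmem" device-tree property, read into lp->rxmem below and used elsewhere in the driver to sanity-check MTU changes.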
2356 * Here we check for memory allocated for Rx/Tx in the hardware from 2357 * the device-tree and accordingly set flags. 2358 */ 2359 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 2360 2361 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 2362 "xlnx,switch-x-sgmii"); 2363 2364 /* Start with the proprietary, and broken phy_type */ 2365 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 2366 if (!ret) { 2367 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 2368 switch (value) { 2369 case XAE_PHY_TYPE_MII: 2370 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2371 break; 2372 case XAE_PHY_TYPE_GMII: 2373 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2374 break; 2375 case XAE_PHY_TYPE_RGMII_2_0: 2376 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2377 break; 2378 case XAE_PHY_TYPE_SGMII: 2379 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2380 break; 2381 case XAE_PHY_TYPE_1000BASE_X: 2382 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2383 break; 2384 default: 2385 ret = -EINVAL; 2386 goto cleanup_clk; 2387 } 2388 } else { 2389 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2390 if (ret) 2391 goto cleanup_clk; 2392 } 2393 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2394 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2395 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2396 ret = -EINVAL; 2397 goto cleanup_clk; 2398 } 2399 2400 if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) { 2401 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2402 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2403 2404 if (np) { 2405 struct resource dmares; 2406 2407 ret = of_address_to_resource(np, 0, &dmares); 2408 if (ret) { 2409 dev_err(&pdev->dev, 2410 "unable to get DMA resource\n"); 2411 of_node_put(np); 2412 goto cleanup_clk; 2413 } 2414 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2415 &dmares); 2416 lp->rx_irq = irq_of_parse_and_map(np, 1); 2417 lp->tx_irq = irq_of_parse_and_map(np, 0); 2418 of_node_put(np); 2419 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2420 } else { 2421 /* Check for these resources directly on the Ethernet node. */ 2422 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2423 lp->rx_irq = platform_get_irq(pdev, 1); 2424 lp->tx_irq = platform_get_irq(pdev, 0); 2425 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2426 } 2427 if (IS_ERR(lp->dma_regs)) { 2428 dev_err(&pdev->dev, "could not map DMA regs\n"); 2429 ret = PTR_ERR(lp->dma_regs); 2430 goto cleanup_clk; 2431 } 2432 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { 2433 dev_err(&pdev->dev, "could not determine irqs\n"); 2434 ret = -ENOMEM; 2435 goto cleanup_clk; 2436 } 2437 2438 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2439 ret = __axienet_device_reset(lp); 2440 if (ret) 2441 goto cleanup_clk; 2442 2443 /* Autodetect the need for 64-bit DMA pointers. 2444 * When the IP is configured for a bus width bigger than 32 bits, 2445 * writing the MSB registers is mandatory, even if they are all 0. 2446 * We can detect this case by writing all 1's to one such register 2447 * and see if that sticks: when the IP is configured for 32 bits 2448 * only, those registers are RES0. 2449 * Those MSB registers were introduced in IP v7.1, which we check first. 
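* (The major ID is taken from the top byte of XAE_ID_OFFSET; a value of 0x9 or above indicates a core recent enough to implement the MSB registers.)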
2450 */ 2451 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2452 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2453 2454 iowrite32(0x0, desc); 2455 if (ioread32(desc) == 0) { /* sanity check */ 2456 iowrite32(0xffffffff, desc); 2457 if (ioread32(desc) > 0) { 2458 lp->features |= XAE_FEATURE_DMA_64BIT; 2459 addr_width = 64; 2460 dev_info(&pdev->dev, 2461 "autodetected 64-bit DMA range\n"); 2462 } 2463 iowrite32(0x0, desc); 2464 } 2465 } 2466 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2467 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 2468 ret = -EINVAL; 2469 goto cleanup_clk; 2470 } 2471 2472 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2473 if (ret) { 2474 dev_err(&pdev->dev, "No suitable DMA available\n"); 2475 goto cleanup_clk; 2476 } 2477 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 2478 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 2479 } else { 2480 struct xilinx_vdma_config cfg; 2481 struct dma_chan *tx_chan; 2482 2483 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2484 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { 2485 ret = lp->eth_irq; 2486 goto cleanup_clk; 2487 } 2488 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 2489 if (IS_ERR(tx_chan)) { 2490 ret = PTR_ERR(tx_chan); 2491 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); 2492 goto cleanup_clk; 2493 } 2494 2495 cfg.reset = 1; 2496 /* Despite the VDMA name, this config call also supports DMA channel reset */ 2497 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg); 2498 if (ret < 0) { 2499 dev_err(&pdev->dev, "Reset channel failed\n"); 2500 dma_release_channel(tx_chan); 2501 goto cleanup_clk; 2502 } 2503 2504 dma_release_channel(tx_chan); 2505 lp->use_dmaengine = 1; 2506 } 2507 2508 if (lp->use_dmaengine) 2509 ndev->netdev_ops = &axienet_netdev_dmaengine_ops; 2510 else 2511 ndev->netdev_ops = &axienet_netdev_ops; 2512 /* Check for Ethernet core IRQ (optional) */ 2513 if (lp->eth_irq <= 0) 2514 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2515 2516 /* Retrieve the MAC address */ 2517 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2518 if (!ret) { 2519 axienet_set_mac_address(ndev, mac_addr); 2520 } else { 2521 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2522 ret); 2523 axienet_set_mac_address(ndev, NULL); 2524 } 2525 2526 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2527 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2528 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2529 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2530 2531 ret = axienet_mdio_setup(lp); 2532 if (ret) 2533 dev_warn(&pdev->dev, 2534 "error registering MDIO bus: %d\n", ret); 2535 2536 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2537 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2538 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2539 if (!np) { 2540 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2541 * Falling back to "phy-handle" here is only for 2542 * backward compatibility with old device trees.
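* New device trees should point "pcs-handle" at the PCS/PMA PHY node directly, e.g. pcs-handle = <&pcs_phy>; (the pcs_phy label here is purely illustrative).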
2543 */ 2544 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2545 } 2546 if (!np) { 2547 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2548 ret = -EINVAL; 2549 goto cleanup_mdio; 2550 } 2551 lp->pcs_phy = of_mdio_find_device(np); 2552 if (!lp->pcs_phy) { 2553 ret = -EPROBE_DEFER; 2554 of_node_put(np); 2555 goto cleanup_mdio; 2556 } 2557 of_node_put(np); 2558 lp->pcs.ops = &axienet_pcs_ops; 2559 lp->pcs.neg_mode = true; 2560 lp->pcs.poll = true; 2561 } 2562 2563 lp->phylink_config.dev = &ndev->dev; 2564 lp->phylink_config.type = PHYLINK_NETDEV; 2565 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2566 MAC_10FD | MAC_100FD | MAC_1000FD; 2567 2568 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2569 if (lp->switch_x_sgmii) { 2570 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2571 lp->phylink_config.supported_interfaces); 2572 __set_bit(PHY_INTERFACE_MODE_SGMII, 2573 lp->phylink_config.supported_interfaces); 2574 } 2575 2576 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2577 lp->phy_mode, 2578 &axienet_phylink_ops); 2579 if (IS_ERR(lp->phylink)) { 2580 ret = PTR_ERR(lp->phylink); 2581 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2582 goto cleanup_mdio; 2583 } 2584 2585 ret = register_netdev(lp->ndev); 2586 if (ret) { 2587 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2588 goto cleanup_phylink; 2589 } 2590 2591 return 0; 2592 2593 cleanup_phylink: 2594 phylink_destroy(lp->phylink); 2595 2596 cleanup_mdio: 2597 if (lp->pcs_phy) 2598 put_device(&lp->pcs_phy->dev); 2599 if (lp->mii_bus) 2600 axienet_mdio_teardown(lp); 2601 cleanup_clk: 2602 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2603 clk_disable_unprepare(lp->axi_clk); 2604 2605 free_netdev: 2606 free_netdev(ndev); 2607 2608 return ret; 2609 } 2610 2611 static void axienet_remove(struct platform_device *pdev) 2612 { 2613 struct net_device *ndev = platform_get_drvdata(pdev); 2614 struct axienet_local *lp = netdev_priv(ndev); 2615 2616 unregister_netdev(ndev); 2617 2618 if (lp->phylink) 2619 phylink_destroy(lp->phylink); 2620 2621 if (lp->pcs_phy) 2622 put_device(&lp->pcs_phy->dev); 2623 2624 axienet_mdio_teardown(lp); 2625 2626 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2627 clk_disable_unprepare(lp->axi_clk); 2628 2629 free_netdev(ndev); 2630 } 2631 2632 static void axienet_shutdown(struct platform_device *pdev) 2633 { 2634 struct net_device *ndev = platform_get_drvdata(pdev); 2635 2636 rtnl_lock(); 2637 netif_device_detach(ndev); 2638 2639 if (netif_running(ndev)) 2640 dev_close(ndev); 2641 2642 rtnl_unlock(); 2643 } 2644 2645 static int axienet_suspend(struct device *dev) 2646 { 2647 struct net_device *ndev = dev_get_drvdata(dev); 2648 2649 if (!netif_running(ndev)) 2650 return 0; 2651 2652 netif_device_detach(ndev); 2653 2654 rtnl_lock(); 2655 axienet_stop(ndev); 2656 rtnl_unlock(); 2657 2658 return 0; 2659 } 2660 2661 static int axienet_resume(struct device *dev) 2662 { 2663 struct net_device *ndev = dev_get_drvdata(dev); 2664 2665 if (!netif_running(ndev)) 2666 return 0; 2667 2668 rtnl_lock(); 2669 axienet_open(ndev); 2670 rtnl_unlock(); 2671 2672 netif_device_attach(ndev); 2673 2674 return 0; 2675 } 2676 2677 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2678 axienet_suspend, axienet_resume); 2679 2680 static struct platform_driver axienet_driver = { 2681 .probe = axienet_probe, 2682 .remove_new = axienet_remove, 2683 .shutdown = axienet_shutdown, 2684 .driver 
= { 2685 .name = "xilinx_axienet", 2686 .pm = &axienet_pm_ops, 2687 .of_match_table = axienet_of_match, 2688 }, 2689 }; 2690 2691 module_platform_driver(axienet_driver); 2692 2693 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2694 MODULE_AUTHOR("Xilinx"); 2695 MODULE_LICENSE("GPL"); 2696