// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp:		Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0, on success
 *	    -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp:		Pointer to the axienet_local structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Max number of descriptors to clean up
 * @force:	Whether to clean descriptors even if not complete
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of descriptors handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	dma_addr_t phys;
	int i;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			napi_consume_skb(cur_p->skb, budget);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data:	Pointer to the axienet_local structure.
 * @result:	error reporting through dmaengine_result.
 *
 * This function is called by the dmaengine driver for the TX channel to
 * notify that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success or for any non-space related errors.
 *	   NETDEV_TX_BUSY when a free element in the TX skb ring buffer
 *	   is not available.
 *
 * This function is invoked to initiate transmission. The function sets up
 * the skb, registers the dma callback API and submits the dma transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
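 *
 * A note on the checksum handling below: when offloading is used, app word 0
 * carries the offload mode (1 = partial, 2 = full) and app word 1 carries the
 * checksum start and insert offsets.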
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
			sg_len, DMA_MEM_TO_DEV,
			DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);

	if (packets) {
		lp->tx_bd_ci += packets;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;

		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data:	Pointer to the axienet_local structure.
 * @result:	error reporting through dmaengine_result.
 *
 * This function is called by the dmaengine driver for the RX channel to
 * notify that the packet is received.
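 * The received frame length is read from app word LEN_APP of the transfer
 * metadata before the skb is passed up the stack.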
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_tx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_rx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
 * Allocate an skbuff, map the scatterlist and obtain a descriptor,
 * then add the callback information and submit the descriptor.
 *
 * @ndev:	net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dmaengine initialization code.
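 * It requests the "tx_chan0" and "rx_chan0" dmaengine channels, allocates
 * the TX/RX skb rings and pre-submits RX descriptors so reception can start
 * as soon as the channel is issued.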
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dma initialization code. It also allocates interrupt
 * service routines, enables the interrupt lines and ISR handling.
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "%s\n", __func__);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
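 * In dmaengine mode the DMA channels are terminated, the skb rings are freed
 * and the channels are released instead.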
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	dev_dbg(&ndev->dev, "axienet_close()\n");

	if (!lp->use_dmaengine) {
		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success, or a negative error code on failure.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	if (!lp->use_dmaengine) {
		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
	}
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
1847 ering->tx_pending < TX_BD_NUM_MIN || 1848 ering->tx_pending > TX_BD_NUM_MAX) 1849 return -EINVAL; 1850 1851 if (netif_running(ndev)) 1852 return -EBUSY; 1853 1854 lp->rx_bd_num = ering->rx_pending; 1855 lp->tx_bd_num = ering->tx_pending; 1856 return 0; 1857 } 1858 1859 /** 1860 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1861 * Tx and Rx paths. 1862 * @ndev: Pointer to net_device structure 1863 * @epauseparm: Pointer to ethtool_pauseparam structure. 1864 * 1865 * This implements ethtool command for getting axi ethernet pause frame 1866 * setting. Issue "ethtool -a ethX" to execute this function. 1867 */ 1868 static void 1869 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1870 struct ethtool_pauseparam *epauseparm) 1871 { 1872 struct axienet_local *lp = netdev_priv(ndev); 1873 1874 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1875 } 1876 1877 /** 1878 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1879 * settings. 1880 * @ndev: Pointer to net_device structure 1881 * @epauseparm:Pointer to ethtool_pauseparam structure 1882 * 1883 * This implements ethtool command for enabling flow control on Rx and Tx 1884 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1885 * function. 1886 * 1887 * Return: 0 on success, -EFAULT if device is running 1888 */ 1889 static int 1890 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1891 struct ethtool_pauseparam *epauseparm) 1892 { 1893 struct axienet_local *lp = netdev_priv(ndev); 1894 1895 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 1896 } 1897 1898 /** 1899 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1900 * @ndev: Pointer to net_device structure 1901 * @ecoalesce: Pointer to ethtool_coalesce structure 1902 * @kernel_coal: ethtool CQE mode setting structure 1903 * @extack: extack for reporting error messages 1904 * 1905 * This implements ethtool command for getting the DMA interrupt coalescing 1906 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1907 * execute this function. 1908 * 1909 * Return: 0 always 1910 */ 1911 static int 1912 axienet_ethtools_get_coalesce(struct net_device *ndev, 1913 struct ethtool_coalesce *ecoalesce, 1914 struct kernel_ethtool_coalesce *kernel_coal, 1915 struct netlink_ext_ack *extack) 1916 { 1917 struct axienet_local *lp = netdev_priv(ndev); 1918 1919 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; 1920 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; 1921 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; 1922 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; 1923 return 0; 1924 } 1925 1926 /** 1927 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 1928 * @ndev: Pointer to net_device structure 1929 * @ecoalesce: Pointer to ethtool_coalesce structure 1930 * @kernel_coal: ethtool CQE mode setting structure 1931 * @extack: extack for reporting error messages 1932 * 1933 * This implements ethtool command for setting the DMA interrupt coalescing 1934 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1935 * prompt to execute this function. 1936 * 1937 * Return: 0, on success, Non-zero error value on failure. 
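 *
 * A zero value in any of the supported fields is treated as "no change" and
 * leaves the current setting untouched; the interface must be down before
 * new coalesce values are accepted.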
1938 */ 1939 static int 1940 axienet_ethtools_set_coalesce(struct net_device *ndev, 1941 struct ethtool_coalesce *ecoalesce, 1942 struct kernel_ethtool_coalesce *kernel_coal, 1943 struct netlink_ext_ack *extack) 1944 { 1945 struct axienet_local *lp = netdev_priv(ndev); 1946 1947 if (netif_running(ndev)) { 1948 netdev_err(ndev, 1949 "Please stop netif before applying configuration\n"); 1950 return -EFAULT; 1951 } 1952 1953 if (ecoalesce->rx_max_coalesced_frames) 1954 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1955 if (ecoalesce->rx_coalesce_usecs) 1956 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; 1957 if (ecoalesce->tx_max_coalesced_frames) 1958 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1959 if (ecoalesce->tx_coalesce_usecs) 1960 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; 1961 1962 return 0; 1963 } 1964 1965 static int 1966 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 1967 struct ethtool_link_ksettings *cmd) 1968 { 1969 struct axienet_local *lp = netdev_priv(ndev); 1970 1971 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 1972 } 1973 1974 static int 1975 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 1976 const struct ethtool_link_ksettings *cmd) 1977 { 1978 struct axienet_local *lp = netdev_priv(ndev); 1979 1980 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 1981 } 1982 1983 static int axienet_ethtools_nway_reset(struct net_device *dev) 1984 { 1985 struct axienet_local *lp = netdev_priv(dev); 1986 1987 return phylink_ethtool_nway_reset(lp->phylink); 1988 } 1989 1990 static const struct ethtool_ops axienet_ethtool_ops = { 1991 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 1992 ETHTOOL_COALESCE_USECS, 1993 .get_drvinfo = axienet_ethtools_get_drvinfo, 1994 .get_regs_len = axienet_ethtools_get_regs_len, 1995 .get_regs = axienet_ethtools_get_regs, 1996 .get_link = ethtool_op_get_link, 1997 .get_ringparam = axienet_ethtools_get_ringparam, 1998 .set_ringparam = axienet_ethtools_set_ringparam, 1999 .get_pauseparam = axienet_ethtools_get_pauseparam, 2000 .set_pauseparam = axienet_ethtools_set_pauseparam, 2001 .get_coalesce = axienet_ethtools_get_coalesce, 2002 .set_coalesce = axienet_ethtools_set_coalesce, 2003 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 2004 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 2005 .nway_reset = axienet_ethtools_nway_reset, 2006 }; 2007 2008 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 2009 { 2010 return container_of(pcs, struct axienet_local, pcs); 2011 } 2012 2013 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 2014 struct phylink_link_state *state) 2015 { 2016 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2017 2018 phylink_mii_c22_pcs_get_state(pcs_phy, state); 2019 } 2020 2021 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 2022 { 2023 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2024 2025 phylink_mii_c22_pcs_an_restart(pcs_phy); 2026 } 2027 2028 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2029 phy_interface_t interface, 2030 const unsigned long *advertising, 2031 bool permit_pause_to_mac) 2032 { 2033 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2034 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 2035 struct axienet_local *lp = netdev_priv(ndev); 2036 int ret; 2037 2038 if (lp->switch_x_sgmii) { 2039 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 2040 interface == 
PHY_INTERFACE_MODE_SGMII ? 2041 XLNX_MII_STD_SELECT_SGMII : 0); 2042 if (ret < 0) { 2043 netdev_warn(ndev, 2044 "Failed to switch PHY interface: %d\n", 2045 ret); 2046 return ret; 2047 } 2048 } 2049 2050 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 2051 neg_mode); 2052 if (ret < 0) 2053 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 2054 2055 return ret; 2056 } 2057 2058 static const struct phylink_pcs_ops axienet_pcs_ops = { 2059 .pcs_get_state = axienet_pcs_get_state, 2060 .pcs_config = axienet_pcs_config, 2061 .pcs_an_restart = axienet_pcs_an_restart, 2062 }; 2063 2064 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 2065 phy_interface_t interface) 2066 { 2067 struct net_device *ndev = to_net_dev(config->dev); 2068 struct axienet_local *lp = netdev_priv(ndev); 2069 2070 if (interface == PHY_INTERFACE_MODE_1000BASEX || 2071 interface == PHY_INTERFACE_MODE_SGMII) 2072 return &lp->pcs; 2073 2074 return NULL; 2075 } 2076 2077 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 2078 const struct phylink_link_state *state) 2079 { 2080 /* nothing meaningful to do */ 2081 } 2082 2083 static void axienet_mac_link_down(struct phylink_config *config, 2084 unsigned int mode, 2085 phy_interface_t interface) 2086 { 2087 /* nothing meaningful to do */ 2088 } 2089 2090 static void axienet_mac_link_up(struct phylink_config *config, 2091 struct phy_device *phy, 2092 unsigned int mode, phy_interface_t interface, 2093 int speed, int duplex, 2094 bool tx_pause, bool rx_pause) 2095 { 2096 struct net_device *ndev = to_net_dev(config->dev); 2097 struct axienet_local *lp = netdev_priv(ndev); 2098 u32 emmc_reg, fcc_reg; 2099 2100 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 2101 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 2102 2103 switch (speed) { 2104 case SPEED_1000: 2105 emmc_reg |= XAE_EMMC_LINKSPD_1000; 2106 break; 2107 case SPEED_100: 2108 emmc_reg |= XAE_EMMC_LINKSPD_100; 2109 break; 2110 case SPEED_10: 2111 emmc_reg |= XAE_EMMC_LINKSPD_10; 2112 break; 2113 default: 2114 dev_err(&ndev->dev, 2115 "Speed other than 10, 100 or 1Gbps is not supported\n"); 2116 break; 2117 } 2118 2119 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 2120 2121 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 2122 if (tx_pause) 2123 fcc_reg |= XAE_FCC_FCTX_MASK; 2124 else 2125 fcc_reg &= ~XAE_FCC_FCTX_MASK; 2126 if (rx_pause) 2127 fcc_reg |= XAE_FCC_FCRX_MASK; 2128 else 2129 fcc_reg &= ~XAE_FCC_FCRX_MASK; 2130 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 2131 } 2132 2133 static const struct phylink_mac_ops axienet_phylink_ops = { 2134 .mac_select_pcs = axienet_mac_select_pcs, 2135 .mac_config = axienet_mac_config, 2136 .mac_link_down = axienet_mac_link_down, 2137 .mac_link_up = axienet_mac_link_up, 2138 }; 2139 2140 /** 2141 * axienet_dma_err_handler - Work queue task for Axi DMA Error 2142 * @work: pointer to work_struct 2143 * 2144 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 2145 * Tx/Rx BDs. 
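 *
 * Runs from the dma_err_task work queue: NAPI is disabled and the MAC
 * transmitter/receiver are turned off while the DMA engines are stopped,
 * the Tx ring is unmapped and cleared, the Rx ring status words are reset,
 * and the previous MAC options are restored before NAPI is re-enabled.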
2146 */ 2147 static void axienet_dma_err_handler(struct work_struct *work) 2148 { 2149 u32 i; 2150 u32 axienet_status; 2151 struct axidma_bd *cur_p; 2152 struct axienet_local *lp = container_of(work, struct axienet_local, 2153 dma_err_task); 2154 struct net_device *ndev = lp->ndev; 2155 2156 napi_disable(&lp->napi_tx); 2157 napi_disable(&lp->napi_rx); 2158 2159 axienet_setoptions(ndev, lp->options & 2160 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2161 2162 axienet_dma_stop(lp); 2163 2164 for (i = 0; i < lp->tx_bd_num; i++) { 2165 cur_p = &lp->tx_bd_v[i]; 2166 if (cur_p->cntrl) { 2167 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 2168 2169 dma_unmap_single(lp->dev, addr, 2170 (cur_p->cntrl & 2171 XAXIDMA_BD_CTRL_LENGTH_MASK), 2172 DMA_TO_DEVICE); 2173 } 2174 if (cur_p->skb) 2175 dev_kfree_skb_irq(cur_p->skb); 2176 cur_p->phys = 0; 2177 cur_p->phys_msb = 0; 2178 cur_p->cntrl = 0; 2179 cur_p->status = 0; 2180 cur_p->app0 = 0; 2181 cur_p->app1 = 0; 2182 cur_p->app2 = 0; 2183 cur_p->app3 = 0; 2184 cur_p->app4 = 0; 2185 cur_p->skb = NULL; 2186 } 2187 2188 for (i = 0; i < lp->rx_bd_num; i++) { 2189 cur_p = &lp->rx_bd_v[i]; 2190 cur_p->status = 0; 2191 cur_p->app0 = 0; 2192 cur_p->app1 = 0; 2193 cur_p->app2 = 0; 2194 cur_p->app3 = 0; 2195 cur_p->app4 = 0; 2196 } 2197 2198 lp->tx_bd_ci = 0; 2199 lp->tx_bd_tail = 0; 2200 lp->rx_bd_ci = 0; 2201 2202 axienet_dma_start(lp); 2203 2204 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 2205 axienet_status &= ~XAE_RCW1_RX_MASK; 2206 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 2207 2208 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 2209 if (axienet_status & XAE_INT_RXRJECT_MASK) 2210 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 2211 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 2212 XAE_INT_RECV_ERROR_MASK : 0); 2213 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 2214 2215 /* Sync default options with HW but leave receiver and 2216 * transmitter disabled. 2217 */ 2218 axienet_setoptions(ndev, lp->options & 2219 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2220 axienet_set_mac_address(ndev, NULL); 2221 axienet_set_multicast_list(ndev); 2222 axienet_setoptions(ndev, lp->options); 2223 napi_enable(&lp->napi_rx); 2224 napi_enable(&lp->napi_tx); 2225 } 2226 2227 /** 2228 * axienet_probe - Axi Ethernet probe function. 2229 * @pdev: Pointer to platform device structure. 2230 * 2231 * Return: 0, on success 2232 * Non-zero error value on failure. 2233 * 2234 * This is the probe routine for Axi Ethernet driver. This is called before 2235 * any other driver routines are invoked. It allocates and sets up the Ethernet 2236 * device. Parses through device tree and populates fields of 2237 * axienet_local. It registers the Ethernet device. 
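 *
 * Probe order: enable the AXI and miscellaneous clocks, map the MAC
 * registers, parse the checksum/rxmem/phy-mode properties, then either set
 * up the built-in Axi DMA (register mapping, IRQs, 64-bit address
 * autodetection) or validate the dmaengine Tx channel, and finally program
 * the MAC address, register the MDIO bus, create the optional PCS and the
 * phylink instance, and register the net_device.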
2238 */ 2239 static int axienet_probe(struct platform_device *pdev) 2240 { 2241 int ret; 2242 struct device_node *np; 2243 struct axienet_local *lp; 2244 struct net_device *ndev; 2245 struct resource *ethres; 2246 u8 mac_addr[ETH_ALEN]; 2247 int addr_width = 32; 2248 u32 value; 2249 2250 ndev = alloc_etherdev(sizeof(*lp)); 2251 if (!ndev) 2252 return -ENOMEM; 2253 2254 platform_set_drvdata(pdev, ndev); 2255 2256 SET_NETDEV_DEV(ndev, &pdev->dev); 2257 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 2258 ndev->features = NETIF_F_SG; 2259 ndev->ethtool_ops = &axienet_ethtool_ops; 2260 2261 /* MTU range: 64 - 9000 */ 2262 ndev->min_mtu = 64; 2263 ndev->max_mtu = XAE_JUMBO_MTU; 2264 2265 lp = netdev_priv(ndev); 2266 lp->ndev = ndev; 2267 lp->dev = &pdev->dev; 2268 lp->options = XAE_OPTION_DEFAULTS; 2269 lp->rx_bd_num = RX_BD_NUM_DEFAULT; 2270 lp->tx_bd_num = TX_BD_NUM_DEFAULT; 2271 2272 u64_stats_init(&lp->rx_stat_sync); 2273 u64_stats_init(&lp->tx_stat_sync); 2274 2275 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); 2276 if (!lp->axi_clk) { 2277 /* For backward compatibility, if named AXI clock is not present, 2278 * treat the first clock specified as the AXI clock. 2279 */ 2280 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); 2281 } 2282 if (IS_ERR(lp->axi_clk)) { 2283 ret = PTR_ERR(lp->axi_clk); 2284 goto free_netdev; 2285 } 2286 ret = clk_prepare_enable(lp->axi_clk); 2287 if (ret) { 2288 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); 2289 goto free_netdev; 2290 } 2291 2292 lp->misc_clks[0].id = "axis_clk"; 2293 lp->misc_clks[1].id = "ref_clk"; 2294 lp->misc_clks[2].id = "mgt_clk"; 2295 2296 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2297 if (ret) 2298 goto cleanup_clk; 2299 2300 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2301 if (ret) 2302 goto cleanup_clk; 2303 2304 /* Map device registers */ 2305 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); 2306 if (IS_ERR(lp->regs)) { 2307 ret = PTR_ERR(lp->regs); 2308 goto cleanup_clk; 2309 } 2310 lp->regs_start = ethres->start; 2311 2312 /* Setup checksum offload, but default to off if not specified */ 2313 lp->features = 0; 2314 2315 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); 2316 if (!ret) { 2317 switch (value) { 2318 case 1: 2319 lp->csum_offload_on_tx_path = 2320 XAE_FEATURE_PARTIAL_TX_CSUM; 2321 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 2322 /* Can checksum TCP/UDP over IPv4. */ 2323 ndev->features |= NETIF_F_IP_CSUM; 2324 break; 2325 case 2: 2326 lp->csum_offload_on_tx_path = 2327 XAE_FEATURE_FULL_TX_CSUM; 2328 lp->features |= XAE_FEATURE_FULL_TX_CSUM; 2329 /* Can checksum TCP/UDP over IPv4. */ 2330 ndev->features |= NETIF_F_IP_CSUM; 2331 break; 2332 default: 2333 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 2334 } 2335 } 2336 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 2337 if (!ret) { 2338 switch (value) { 2339 case 1: 2340 lp->csum_offload_on_rx_path = 2341 XAE_FEATURE_PARTIAL_RX_CSUM; 2342 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 2343 break; 2344 case 2: 2345 lp->csum_offload_on_rx_path = 2346 XAE_FEATURE_FULL_RX_CSUM; 2347 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 2348 break; 2349 default: 2350 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 2351 } 2352 } 2353 /* For supporting jumbo frames, the Axi Ethernet hardware must have 2354 * a larger Rx/Tx Memory.
Typically, the size must be large so that 2355 * we can enable jumbo option and start supporting jumbo frames. 2356 * Here we check for memory allocated for Rx/Tx in the hardware from 2357 * the device-tree and accordingly set flags. 2358 */ 2359 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 2360 2361 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 2362 "xlnx,switch-x-sgmii"); 2363 2364 /* Start with the proprietary, and broken phy_type */ 2365 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 2366 if (!ret) { 2367 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 2368 switch (value) { 2369 case XAE_PHY_TYPE_MII: 2370 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2371 break; 2372 case XAE_PHY_TYPE_GMII: 2373 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2374 break; 2375 case XAE_PHY_TYPE_RGMII_2_0: 2376 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2377 break; 2378 case XAE_PHY_TYPE_SGMII: 2379 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2380 break; 2381 case XAE_PHY_TYPE_1000BASE_X: 2382 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2383 break; 2384 default: 2385 ret = -EINVAL; 2386 goto cleanup_clk; 2387 } 2388 } else { 2389 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2390 if (ret) 2391 goto cleanup_clk; 2392 } 2393 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2394 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2395 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2396 ret = -EINVAL; 2397 goto cleanup_clk; 2398 } 2399 2400 if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) { 2401 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2402 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2403 2404 if (np) { 2405 struct resource dmares; 2406 2407 ret = of_address_to_resource(np, 0, &dmares); 2408 if (ret) { 2409 dev_err(&pdev->dev, 2410 "unable to get DMA resource\n"); 2411 of_node_put(np); 2412 goto cleanup_clk; 2413 } 2414 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2415 &dmares); 2416 lp->rx_irq = irq_of_parse_and_map(np, 1); 2417 lp->tx_irq = irq_of_parse_and_map(np, 0); 2418 of_node_put(np); 2419 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2420 } else { 2421 /* Check for these resources directly on the Ethernet node. */ 2422 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2423 lp->rx_irq = platform_get_irq(pdev, 1); 2424 lp->tx_irq = platform_get_irq(pdev, 0); 2425 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2426 } 2427 if (IS_ERR(lp->dma_regs)) { 2428 dev_err(&pdev->dev, "could not map DMA regs\n"); 2429 ret = PTR_ERR(lp->dma_regs); 2430 goto cleanup_clk; 2431 } 2432 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { 2433 dev_err(&pdev->dev, "could not determine irqs\n"); 2434 ret = -ENOMEM; 2435 goto cleanup_clk; 2436 } 2437 2438 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2439 ret = __axienet_device_reset(lp); 2440 if (ret) 2441 goto cleanup_clk; 2442 2443 /* Autodetect the need for 64-bit DMA pointers. 2444 * When the IP is configured for a bus width bigger than 32 bits, 2445 * writing the MSB registers is mandatory, even if they are all 0. 2446 * We can detect this case by writing all 1's to one such register 2447 * and see if that sticks: when the IP is configured for 32 bits 2448 * only, those registers are RES0. 2449 * Those MSB registers were introduced in IP v7.1, which we check first. 
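 * Concretely: write all-ones to the upper half of the Tx current-descriptor
 * register; if a non-zero value reads back, the MSB half is implemented and
 * 64-bit descriptor addresses must be programmed.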
2450 */ 2451 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2452 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2453 2454 iowrite32(0x0, desc); 2455 if (ioread32(desc) == 0) { /* sanity check */ 2456 iowrite32(0xffffffff, desc); 2457 if (ioread32(desc) > 0) { 2458 lp->features |= XAE_FEATURE_DMA_64BIT; 2459 addr_width = 64; 2460 dev_info(&pdev->dev, 2461 "autodetected 64-bit DMA range\n"); 2462 } 2463 iowrite32(0x0, desc); 2464 } 2465 } 2466 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2467 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 2468 ret = -EINVAL; 2469 goto cleanup_clk; 2470 } 2471 2472 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2473 if (ret) { 2474 dev_err(&pdev->dev, "No suitable DMA available\n"); 2475 goto cleanup_clk; 2476 } 2477 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 2478 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 2479 } else { 2480 struct xilinx_vdma_config cfg; 2481 struct dma_chan *tx_chan; 2482 2483 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2484 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { 2485 ret = lp->eth_irq; 2486 goto cleanup_clk; 2487 } 2488 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 2489 if (IS_ERR(tx_chan)) { 2490 ret = PTR_ERR(tx_chan); 2491 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); 2492 goto cleanup_clk; 2493 } 2494 2495 cfg.reset = 1; 2496 /* Although named for VDMA, this call also supports DMA channel reset */ 2497 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg); 2498 if (ret < 0) { 2499 dev_err(&pdev->dev, "Reset channel failed\n"); 2500 dma_release_channel(tx_chan); 2501 goto cleanup_clk; 2502 } 2503 2504 dma_release_channel(tx_chan); 2505 lp->use_dmaengine = 1; 2506 } 2507 2508 if (lp->use_dmaengine) 2509 ndev->netdev_ops = &axienet_netdev_dmaengine_ops; 2510 else 2511 ndev->netdev_ops = &axienet_netdev_ops; 2512 /* Check for Ethernet core IRQ (optional) */ 2513 if (lp->eth_irq <= 0) 2514 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 2515 2516 /* Retrieve the MAC address */ 2517 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 2518 if (!ret) { 2519 axienet_set_mac_address(ndev, mac_addr); 2520 } else { 2521 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 2522 ret); 2523 axienet_set_mac_address(ndev, NULL); 2524 } 2525 2526 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2527 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2528 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2529 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2530 2531 ret = axienet_mdio_setup(lp); 2532 if (ret) 2533 dev_warn(&pdev->dev, 2534 "error registering MDIO bus: %d\n", ret); 2535 2536 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2537 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2538 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2539 if (!np) { 2540 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2541 * Falling back to "phy-handle" here is only for 2542 * backward compatibility with old device trees.
2543 */ 2544 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2545 } 2546 if (!np) { 2547 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2548 ret = -EINVAL; 2549 goto cleanup_mdio; 2550 } 2551 lp->pcs_phy = of_mdio_find_device(np); 2552 if (!lp->pcs_phy) { 2553 ret = -EPROBE_DEFER; 2554 of_node_put(np); 2555 goto cleanup_mdio; 2556 } 2557 of_node_put(np); 2558 lp->pcs.ops = &axienet_pcs_ops; 2559 lp->pcs.neg_mode = true; 2560 lp->pcs.poll = true; 2561 } 2562 2563 lp->phylink_config.dev = &ndev->dev; 2564 lp->phylink_config.type = PHYLINK_NETDEV; 2565 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2566 MAC_10FD | MAC_100FD | MAC_1000FD; 2567 2568 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2569 if (lp->switch_x_sgmii) { 2570 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2571 lp->phylink_config.supported_interfaces); 2572 __set_bit(PHY_INTERFACE_MODE_SGMII, 2573 lp->phylink_config.supported_interfaces); 2574 } 2575 2576 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2577 lp->phy_mode, 2578 &axienet_phylink_ops); 2579 if (IS_ERR(lp->phylink)) { 2580 ret = PTR_ERR(lp->phylink); 2581 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2582 goto cleanup_mdio; 2583 } 2584 2585 ret = register_netdev(lp->ndev); 2586 if (ret) { 2587 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2588 goto cleanup_phylink; 2589 } 2590 2591 return 0; 2592 2593 cleanup_phylink: 2594 phylink_destroy(lp->phylink); 2595 2596 cleanup_mdio: 2597 if (lp->pcs_phy) 2598 put_device(&lp->pcs_phy->dev); 2599 if (lp->mii_bus) 2600 axienet_mdio_teardown(lp); 2601 cleanup_clk: 2602 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2603 clk_disable_unprepare(lp->axi_clk); 2604 2605 free_netdev: 2606 free_netdev(ndev); 2607 2608 return ret; 2609 } 2610 2611 static void axienet_remove(struct platform_device *pdev) 2612 { 2613 struct net_device *ndev = platform_get_drvdata(pdev); 2614 struct axienet_local *lp = netdev_priv(ndev); 2615 2616 unregister_netdev(ndev); 2617 2618 if (lp->phylink) 2619 phylink_destroy(lp->phylink); 2620 2621 if (lp->pcs_phy) 2622 put_device(&lp->pcs_phy->dev); 2623 2624 axienet_mdio_teardown(lp); 2625 2626 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2627 clk_disable_unprepare(lp->axi_clk); 2628 2629 free_netdev(ndev); 2630 } 2631 2632 static void axienet_shutdown(struct platform_device *pdev) 2633 { 2634 struct net_device *ndev = platform_get_drvdata(pdev); 2635 2636 rtnl_lock(); 2637 netif_device_detach(ndev); 2638 2639 if (netif_running(ndev)) 2640 dev_close(ndev); 2641 2642 rtnl_unlock(); 2643 } 2644 2645 static int axienet_suspend(struct device *dev) 2646 { 2647 struct net_device *ndev = dev_get_drvdata(dev); 2648 2649 if (!netif_running(ndev)) 2650 return 0; 2651 2652 netif_device_detach(ndev); 2653 2654 rtnl_lock(); 2655 axienet_stop(ndev); 2656 rtnl_unlock(); 2657 2658 return 0; 2659 } 2660 2661 static int axienet_resume(struct device *dev) 2662 { 2663 struct net_device *ndev = dev_get_drvdata(dev); 2664 2665 if (!netif_running(ndev)) 2666 return 0; 2667 2668 rtnl_lock(); 2669 axienet_open(ndev); 2670 rtnl_unlock(); 2671 2672 netif_device_attach(ndev); 2673 2674 return 0; 2675 } 2676 2677 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2678 axienet_suspend, axienet_resume); 2679 2680 static struct platform_driver axienet_driver = { 2681 .probe = axienet_probe, 2682 .remove_new = axienet_remove, 2683 .shutdown = axienet_shutdown, 2684 .driver 
= { 2685 .name = "xilinx_axienet", 2686 .pm = &axienet_pm_ops, 2687 .of_match_table = axienet_of_match, 2688 }, 2689 }; 2690 2691 module_platform_driver(axienet_driver); 2692 2693 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 2694 MODULE_AUTHOR("Xilinx"); 2695 MODULE_LICENSE("GPL"); 2696