1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Xilinx Axi Ethernet device driver 4 * 5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu> 9 * Copyright (c) 2010 - 2011 PetaLogix 10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies 11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 12 * 13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 14 * and Spartan6. 15 * 16 * TODO: 17 * - Add Axi Fifo support. 18 * - Factor out Axi DMA code into separate driver. 19 * - Test and fix basic multicast filtering. 20 * - Add support for extended multicast filtering. 21 * - Test basic VLAN support. 22 * - Add support for extended VLAN support. 23 */ 24 25 #include <linux/clk.h> 26 #include <linux/delay.h> 27 #include <linux/etherdevice.h> 28 #include <linux/module.h> 29 #include <linux/netdevice.h> 30 #include <linux/of.h> 31 #include <linux/of_mdio.h> 32 #include <linux/of_net.h> 33 #include <linux/of_irq.h> 34 #include <linux/of_address.h> 35 #include <linux/platform_device.h> 36 #include <linux/skbuff.h> 37 #include <linux/math64.h> 38 #include <linux/phy.h> 39 #include <linux/mii.h> 40 #include <linux/ethtool.h> 41 #include <linux/dmaengine.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/dma/xilinx_dma.h> 44 #include <linux/circ_buf.h> 45 #include <net/netdev_queues.h> 46 47 #include "xilinx_axienet.h" 48 49 /* Descriptors defines for Tx and Rx DMA */ 50 #define TX_BD_NUM_DEFAULT 128 51 #define RX_BD_NUM_DEFAULT 1024 52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) 53 #define TX_BD_NUM_MAX 4096 54 #define RX_BD_NUM_MAX 4096 55 #define DMA_NUM_APP_WORDS 5 56 #define LEN_APP 4 57 #define RX_BUF_NUM_DEFAULT 128 58 59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ 60 #define DRIVER_NAME "xaxienet" 61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" 62 #define DRIVER_VERSION "1.00a" 63 64 #define AXIENET_REGS_N 40 65 66 static void axienet_rx_submit_desc(struct net_device *ndev); 67 68 /* Match table for of_platform binding */ 69 static const struct of_device_id axienet_of_match[] = { 70 { .compatible = "xlnx,axi-ethernet-1.00.a", }, 71 { .compatible = "xlnx,axi-ethernet-1.01.a", }, 72 { .compatible = "xlnx,axi-ethernet-2.01.a", }, 73 {}, 74 }; 75 76 MODULE_DEVICE_TABLE(of, axienet_of_match); 77 78 /* Option table for setting up Axi Ethernet hardware options */ 79 static struct axienet_option axienet_options[] = { 80 /* Turn on jumbo packet support for both Rx and Tx */ 81 { 82 .opt = XAE_OPTION_JUMBO, 83 .reg = XAE_TC_OFFSET, 84 .m_or = XAE_TC_JUM_MASK, 85 }, { 86 .opt = XAE_OPTION_JUMBO, 87 .reg = XAE_RCW1_OFFSET, 88 .m_or = XAE_RCW1_JUM_MASK, 89 }, { /* Turn on VLAN packet support for both Rx and Tx */ 90 .opt = XAE_OPTION_VLAN, 91 .reg = XAE_TC_OFFSET, 92 .m_or = XAE_TC_VLAN_MASK, 93 }, { 94 .opt = XAE_OPTION_VLAN, 95 .reg = XAE_RCW1_OFFSET, 96 .m_or = XAE_RCW1_VLAN_MASK, 97 }, { /* Turn on FCS stripping on receive packets */ 98 .opt = XAE_OPTION_FCS_STRIP, 99 .reg = XAE_RCW1_OFFSET, 100 .m_or = XAE_RCW1_FCS_MASK, 101 }, { /* Turn on FCS insertion on transmit packets */ 102 .opt = XAE_OPTION_FCS_INSERT, 103 .reg = XAE_TC_OFFSET, 104 .m_or = XAE_TC_FCS_MASK, 105 }, { /* Turn off length/type field checking on receive packets */ 106 .opt = XAE_OPTION_LENTYPE_ERR, 107 .reg = XAE_RCW1_OFFSET, 108 .m_or = 
XAE_RCW1_LT_DIS_MASK, 109 }, { /* Turn on Rx flow control */ 110 .opt = XAE_OPTION_FLOW_CONTROL, 111 .reg = XAE_FCC_OFFSET, 112 .m_or = XAE_FCC_FCRX_MASK, 113 }, { /* Turn on Tx flow control */ 114 .opt = XAE_OPTION_FLOW_CONTROL, 115 .reg = XAE_FCC_OFFSET, 116 .m_or = XAE_FCC_FCTX_MASK, 117 }, { /* Turn on promiscuous frame filtering */ 118 .opt = XAE_OPTION_PROMISC, 119 .reg = XAE_FMI_OFFSET, 120 .m_or = XAE_FMI_PM_MASK, 121 }, { /* Enable transmitter */ 122 .opt = XAE_OPTION_TXEN, 123 .reg = XAE_TC_OFFSET, 124 .m_or = XAE_TC_TX_MASK, 125 }, { /* Enable receiver */ 126 .opt = XAE_OPTION_RXEN, 127 .reg = XAE_RCW1_OFFSET, 128 .m_or = XAE_RCW1_RX_MASK, 129 }, 130 {} 131 }; 132 133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i) 134 { 135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)]; 136 } 137 138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i) 139 { 140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)]; 141 } 142 143 /** 144 * axienet_dma_in32 - Memory mapped Axi DMA register read 145 * @lp: Pointer to axienet local structure 146 * @reg: Address offset from the base address of the Axi DMA core 147 * 148 * Return: The contents of the Axi DMA register 149 * 150 * This function returns the contents of the corresponding Axi DMA register. 151 */ 152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) 153 { 154 return ioread32(lp->dma_regs + reg); 155 } 156 157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, 158 struct axidma_bd *desc) 159 { 160 desc->phys = lower_32_bits(addr); 161 if (lp->features & XAE_FEATURE_DMA_64BIT) 162 desc->phys_msb = upper_32_bits(addr); 163 } 164 165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp, 166 struct axidma_bd *desc) 167 { 168 dma_addr_t ret = desc->phys; 169 170 if (lp->features & XAE_FEATURE_DMA_64BIT) 171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; 172 173 return ret; 174 } 175 176 /** 177 * axienet_dma_bd_release - Release buffer descriptor rings 178 * @ndev: Pointer to the net_device structure 179 * 180 * This function is used to release the descriptors allocated in 181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet 182 * driver stop api is called. 183 */ 184 static void axienet_dma_bd_release(struct net_device *ndev) 185 { 186 int i; 187 struct axienet_local *lp = netdev_priv(ndev); 188 189 /* If we end up here, tx_bd_v must have been DMA allocated. */ 190 dma_free_coherent(lp->dev, 191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 192 lp->tx_bd_v, 193 lp->tx_bd_p); 194 195 if (!lp->rx_bd_v) 196 return; 197 198 for (i = 0; i < lp->rx_bd_num; i++) { 199 dma_addr_t phys; 200 201 /* A NULL skb means this descriptor has not been initialised 202 * at all. 203 */ 204 if (!lp->rx_bd_v[i].skb) 205 break; 206 207 dev_kfree_skb(lp->rx_bd_v[i].skb); 208 209 /* For each descriptor, we programmed cntrl with the (non-zero) 210 * descriptor size, after it had been successfully allocated. 211 * So a non-zero value in there means we need to unmap it. 
212 */ 213 if (lp->rx_bd_v[i].cntrl) { 214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); 215 dma_unmap_single(lp->dev, phys, 216 lp->max_frm_size, DMA_FROM_DEVICE); 217 } 218 } 219 220 dma_free_coherent(lp->dev, 221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 222 lp->rx_bd_v, 223 lp->rx_bd_p); 224 } 225 226 static u64 axienet_dma_rate(struct axienet_local *lp) 227 { 228 if (lp->axi_clk) 229 return clk_get_rate(lp->axi_clk); 230 return 125000000; /* arbitrary guess if no clock rate set */ 231 } 232 233 /** 234 * axienet_calc_cr() - Calculate control register value 235 * @lp: Device private data 236 * @count: Number of completions before an interrupt 237 * @usec: Microseconds after the last completion before an interrupt 238 * 239 * Calculate a control register value based on the coalescing settings. The 240 * run/stop bit is not set. 241 */ 242 static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec) 243 { 244 u32 cr; 245 246 cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK | 247 XAXIDMA_IRQ_ERROR_MASK; 248 /* Only set interrupt delay timer if not generating an interrupt on 249 * the first packet. Otherwise leave at 0 to disable delay interrupt. 250 */ 251 if (count > 1) { 252 u64 clk_rate = axienet_dma_rate(lp); 253 u32 timer; 254 255 /* 1 Timeout Interval = 125 * (clock period of SG clock) */ 256 timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate, 257 XAXIDMA_DELAY_SCALE); 258 259 timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK)); 260 cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) | 261 XAXIDMA_IRQ_DELAY_MASK; 262 } 263 264 return cr; 265 } 266 267 /** 268 * axienet_coalesce_params() - Extract coalesce parameters from the CR 269 * @lp: Device private data 270 * @cr: The control register to parse 271 * @count: Number of packets before an interrupt 272 * @usec: Idle time (in usec) before an interrupt 273 */ 274 static void axienet_coalesce_params(struct axienet_local *lp, u32 cr, 275 u32 *count, u32 *usec) 276 { 277 u64 clk_rate = axienet_dma_rate(lp); 278 u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr); 279 280 *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr); 281 *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate); 282 } 283 284 /** 285 * axienet_dma_start - Set up DMA registers and start DMA operation 286 * @lp: Pointer to the axienet_local structure 287 */ 288 static void axienet_dma_start(struct axienet_local *lp) 289 { 290 spin_lock_irq(&lp->rx_cr_lock); 291 292 /* Start updating the Rx channel control register */ 293 lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; 294 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 295 296 /* Populate the tail pointer and bring the Rx Axi DMA engine out of 297 * halted state. This will make the Rx side ready for reception. 298 */ 299 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 300 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; 301 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 302 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 303 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); 304 lp->rx_dma_started = true; 305 306 spin_unlock_irq(&lp->rx_cr_lock); 307 spin_lock_irq(&lp->tx_cr_lock); 308 309 /* Start updating the Tx channel control register */ 310 lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; 311 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 312 313 /* Write to the RS (Run-stop) bit in the Tx channel control register. 314 * Tx channel is now ready to run. 
But only after we write to the 315 * tail pointer register that the Tx channel will start transmitting. 316 */ 317 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 318 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; 319 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 320 lp->tx_dma_started = true; 321 322 spin_unlock_irq(&lp->tx_cr_lock); 323 } 324 325 /** 326 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA 327 * @ndev: Pointer to the net_device structure 328 * 329 * Return: 0, on success -ENOMEM, on failure 330 * 331 * This function is called to initialize the Rx and Tx DMA descriptor 332 * rings. This initializes the descriptors with required default values 333 * and is called when Axi Ethernet driver reset is called. 334 */ 335 static int axienet_dma_bd_init(struct net_device *ndev) 336 { 337 int i; 338 struct sk_buff *skb; 339 struct axienet_local *lp = netdev_priv(ndev); 340 341 /* Reset the indexes which are used for accessing the BDs */ 342 lp->tx_bd_ci = 0; 343 lp->tx_bd_tail = 0; 344 lp->rx_bd_ci = 0; 345 346 /* Allocate the Tx and Rx buffer descriptors. */ 347 lp->tx_bd_v = dma_alloc_coherent(lp->dev, 348 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, 349 &lp->tx_bd_p, GFP_KERNEL); 350 if (!lp->tx_bd_v) 351 return -ENOMEM; 352 353 lp->rx_bd_v = dma_alloc_coherent(lp->dev, 354 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, 355 &lp->rx_bd_p, GFP_KERNEL); 356 if (!lp->rx_bd_v) 357 goto out; 358 359 for (i = 0; i < lp->tx_bd_num; i++) { 360 dma_addr_t addr = lp->tx_bd_p + 361 sizeof(*lp->tx_bd_v) * 362 ((i + 1) % lp->tx_bd_num); 363 364 lp->tx_bd_v[i].next = lower_32_bits(addr); 365 if (lp->features & XAE_FEATURE_DMA_64BIT) 366 lp->tx_bd_v[i].next_msb = upper_32_bits(addr); 367 } 368 369 for (i = 0; i < lp->rx_bd_num; i++) { 370 dma_addr_t addr; 371 372 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * 373 ((i + 1) % lp->rx_bd_num); 374 lp->rx_bd_v[i].next = lower_32_bits(addr); 375 if (lp->features & XAE_FEATURE_DMA_64BIT) 376 lp->rx_bd_v[i].next_msb = upper_32_bits(addr); 377 378 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 379 if (!skb) 380 goto out; 381 382 lp->rx_bd_v[i].skb = skb; 383 addr = dma_map_single(lp->dev, skb->data, 384 lp->max_frm_size, DMA_FROM_DEVICE); 385 if (dma_mapping_error(lp->dev, addr)) { 386 netdev_err(ndev, "DMA mapping error\n"); 387 goto out; 388 } 389 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); 390 391 lp->rx_bd_v[i].cntrl = lp->max_frm_size; 392 } 393 394 axienet_dma_start(lp); 395 396 return 0; 397 out: 398 axienet_dma_bd_release(ndev); 399 return -ENOMEM; 400 } 401 402 /** 403 * axienet_set_mac_address - Write the MAC address 404 * @ndev: Pointer to the net_device structure 405 * @address: 6 byte Address to be written as MAC address 406 * 407 * This function is called to initialize the MAC address of the Axi Ethernet 408 * core. It writes to the UAW0 and UAW1 registers of the core. 
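 *
 * As an illustration of the register layout used below: for a MAC address
 * of 00:0a:35:01:02:03 (an arbitrary example), UAW0 is written with
 * 0x01350a00 (address bytes 0-3, least significant byte first) and the low
 * 16 bits of UAW1 with 0x0302 (bytes 4-5); the upper UAW1 bits are preserved.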
 */
static void axienet_set_mac_address(struct net_device *ndev,
                                    const void *address)
{
        struct axienet_local *lp = netdev_priv(ndev);

        if (address)
                eth_hw_addr_set(ndev, address);
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_hw_addr_random(ndev);

        /* Set up the unicast MAC address filter with the current MAC address */
        axienet_iow(lp, XAE_UAW0_OFFSET,
                    (ndev->dev_addr[0]) |
                    (ndev->dev_addr[1] << 8) |
                    (ndev->dev_addr[2] << 16) |
                    (ndev->dev_addr[3] << 24));
        axienet_iow(lp, XAE_UAW1_OFFSET,
                    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
                      ~XAE_UAW1_UNICASTADDR_MASK) |
                     (ndev->dev_addr[4] |
                      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        axienet_set_mac_address(ndev, addr->sa_data);
        return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
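 *
 * Each of the four CAM entries is selected by writing its index into the
 * low byte of the FMI register; the entry itself is then loaded through
 * AF0 (address bytes 0-3) and AF1 (bytes 4-5) and enabled via FFE, which is
 * what the loops below do for every address in the multicast list. Unused
 * entries are disabled by writing 0 to FFE.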
462 */ 463 static void axienet_set_multicast_list(struct net_device *ndev) 464 { 465 int i = 0; 466 u32 reg, af0reg, af1reg; 467 struct axienet_local *lp = netdev_priv(ndev); 468 469 reg = axienet_ior(lp, XAE_FMI_OFFSET); 470 reg &= ~XAE_FMI_PM_MASK; 471 if (ndev->flags & IFF_PROMISC) 472 reg |= XAE_FMI_PM_MASK; 473 else 474 reg &= ~XAE_FMI_PM_MASK; 475 axienet_iow(lp, XAE_FMI_OFFSET, reg); 476 477 if (ndev->flags & IFF_ALLMULTI || 478 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) { 479 reg &= 0xFFFFFF00; 480 axienet_iow(lp, XAE_FMI_OFFSET, reg); 481 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */ 482 axienet_iow(lp, XAE_AF1_OFFSET, 0); 483 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */ 484 axienet_iow(lp, XAE_AM1_OFFSET, 0); 485 axienet_iow(lp, XAE_FFE_OFFSET, 1); 486 i = 1; 487 } else if (!netdev_mc_empty(ndev)) { 488 struct netdev_hw_addr *ha; 489 490 netdev_for_each_mc_addr(ha, ndev) { 491 if (i >= XAE_MULTICAST_CAM_TABLE_NUM) 492 break; 493 494 af0reg = (ha->addr[0]); 495 af0reg |= (ha->addr[1] << 8); 496 af0reg |= (ha->addr[2] << 16); 497 af0reg |= (ha->addr[3] << 24); 498 499 af1reg = (ha->addr[4]); 500 af1reg |= (ha->addr[5] << 8); 501 502 reg &= 0xFFFFFF00; 503 reg |= i; 504 505 axienet_iow(lp, XAE_FMI_OFFSET, reg); 506 axienet_iow(lp, XAE_AF0_OFFSET, af0reg); 507 axienet_iow(lp, XAE_AF1_OFFSET, af1reg); 508 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff); 509 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff); 510 axienet_iow(lp, XAE_FFE_OFFSET, 1); 511 i++; 512 } 513 } 514 515 for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) { 516 reg &= 0xFFFFFF00; 517 reg |= i; 518 axienet_iow(lp, XAE_FMI_OFFSET, reg); 519 axienet_iow(lp, XAE_FFE_OFFSET, 0); 520 } 521 } 522 523 /** 524 * axienet_setoptions - Set an Axi Ethernet option 525 * @ndev: Pointer to the net_device structure 526 * @options: Option to be enabled/disabled 527 * 528 * The Axi Ethernet core has multiple features which can be selectively turned 529 * on or off. The typical options could be jumbo frame option, basic VLAN 530 * option, promiscuous mode option etc. This function is used to set or clear 531 * these options in the Axi Ethernet hardware. This is done through 532 * axienet_option structure . 
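 *
 * For example, the driver itself quiesces the MAC around a reset with
 * axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN))
 * and restores the full option set afterwards with
 * axienet_setoptions(ndev, lp->options).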
533 */ 534 static void axienet_setoptions(struct net_device *ndev, u32 options) 535 { 536 int reg; 537 struct axienet_local *lp = netdev_priv(ndev); 538 struct axienet_option *tp = &axienet_options[0]; 539 540 while (tp->opt) { 541 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); 542 if (options & tp->opt) 543 reg |= tp->m_or; 544 axienet_iow(lp, tp->reg, reg); 545 tp++; 546 } 547 548 lp->options |= options; 549 } 550 551 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat) 552 { 553 u32 counter; 554 555 if (lp->reset_in_progress) 556 return lp->hw_stat_base[stat]; 557 558 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); 559 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]); 560 } 561 562 static void axienet_stats_update(struct axienet_local *lp, bool reset) 563 { 564 enum temac_stat stat; 565 566 write_seqcount_begin(&lp->hw_stats_seqcount); 567 lp->reset_in_progress = reset; 568 for (stat = 0; stat < STAT_COUNT; stat++) { 569 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); 570 571 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat]; 572 lp->hw_last_counter[stat] = counter; 573 } 574 write_seqcount_end(&lp->hw_stats_seqcount); 575 } 576 577 static void axienet_refresh_stats(struct work_struct *work) 578 { 579 struct axienet_local *lp = container_of(work, struct axienet_local, 580 stats_work.work); 581 582 mutex_lock(&lp->stats_lock); 583 axienet_stats_update(lp, false); 584 mutex_unlock(&lp->stats_lock); 585 586 /* Just less than 2^32 bytes at 2.5 GBit/s */ 587 schedule_delayed_work(&lp->stats_work, 13 * HZ); 588 } 589 590 static int __axienet_device_reset(struct axienet_local *lp) 591 { 592 u32 value; 593 int ret; 594 595 /* Save statistics counters in case they will be reset */ 596 mutex_lock(&lp->stats_lock); 597 if (lp->features & XAE_FEATURE_STATS) 598 axienet_stats_update(lp, true); 599 600 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset 601 * process of Axi DMA takes a while to complete as all pending 602 * commands/transfers will be flushed or completed during this 603 * reset process. 604 * Note that even though both TX and RX have their own reset register, 605 * they both reset the entire DMA core, so only one needs to be used. 
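 * The reset bit is self-clearing: the read_poll_timeout() below simply
 * waits (up to 50 ms) for the hardware to drop XAXIDMA_CR_RESET_MASK.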
606 */ 607 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); 608 ret = read_poll_timeout(axienet_dma_in32, value, 609 !(value & XAXIDMA_CR_RESET_MASK), 610 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 611 XAXIDMA_TX_CR_OFFSET); 612 if (ret) { 613 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); 614 goto out; 615 } 616 617 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ 618 ret = read_poll_timeout(axienet_ior, value, 619 value & XAE_INT_PHYRSTCMPLT_MASK, 620 DELAY_OF_ONE_MILLISEC, 50000, false, lp, 621 XAE_IS_OFFSET); 622 if (ret) { 623 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); 624 goto out; 625 } 626 627 /* Update statistics counters with new values */ 628 if (lp->features & XAE_FEATURE_STATS) { 629 enum temac_stat stat; 630 631 write_seqcount_begin(&lp->hw_stats_seqcount); 632 lp->reset_in_progress = false; 633 for (stat = 0; stat < STAT_COUNT; stat++) { 634 u32 counter = 635 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8); 636 637 lp->hw_stat_base[stat] += 638 lp->hw_last_counter[stat] - counter; 639 lp->hw_last_counter[stat] = counter; 640 } 641 write_seqcount_end(&lp->hw_stats_seqcount); 642 } 643 644 out: 645 mutex_unlock(&lp->stats_lock); 646 return ret; 647 } 648 649 /** 650 * axienet_dma_stop - Stop DMA operation 651 * @lp: Pointer to the axienet_local structure 652 */ 653 static void axienet_dma_stop(struct axienet_local *lp) 654 { 655 int count; 656 u32 cr, sr; 657 658 spin_lock_irq(&lp->rx_cr_lock); 659 660 cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 661 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 662 lp->rx_dma_started = false; 663 664 spin_unlock_irq(&lp->rx_cr_lock); 665 synchronize_irq(lp->rx_irq); 666 667 spin_lock_irq(&lp->tx_cr_lock); 668 669 cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); 670 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 671 lp->tx_dma_started = false; 672 673 spin_unlock_irq(&lp->tx_cr_lock); 674 synchronize_irq(lp->tx_irq); 675 676 /* Give DMAs a chance to halt gracefully */ 677 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 678 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 679 msleep(20); 680 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 681 } 682 683 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 684 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) { 685 msleep(20); 686 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 687 } 688 689 /* Do a reset to ensure DMA is really stopped */ 690 axienet_lock_mii(lp); 691 __axienet_device_reset(lp); 692 axienet_unlock_mii(lp); 693 } 694 695 /** 696 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. 697 * @ndev: Pointer to the net_device structure 698 * 699 * This function is called to reset and initialize the Axi Ethernet core. This 700 * is typically called during initialization. It does a reset of the Axi DMA 701 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines 702 * are connected to Axi Ethernet reset lines, this in turn resets the Axi 703 * Ethernet core. No separate hardware reset is done for the Axi Ethernet 704 * core. 705 * Returns 0 on success or a negative error number otherwise. 
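 *
 * As a rough illustration of the sizing below: with the conventional
 * VLAN_ETH_HLEN of 18 bytes and a 4-byte XAE_TRL_SIZE trailer, an MTU of
 * 9000 needs a 9022-byte buffer, and XAE_OPTION_JUMBO is only enabled if
 * that still fits within the rxmem configured for the core.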
706 */ 707 static int axienet_device_reset(struct net_device *ndev) 708 { 709 u32 axienet_status; 710 struct axienet_local *lp = netdev_priv(ndev); 711 int ret; 712 713 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; 714 lp->options |= XAE_OPTION_VLAN; 715 lp->options &= (~XAE_OPTION_JUMBO); 716 717 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) { 718 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + 719 XAE_TRL_SIZE; 720 721 if (lp->max_frm_size <= lp->rxmem) 722 lp->options |= XAE_OPTION_JUMBO; 723 } 724 725 if (!lp->use_dmaengine) { 726 ret = __axienet_device_reset(lp); 727 if (ret) 728 return ret; 729 730 ret = axienet_dma_bd_init(ndev); 731 if (ret) { 732 netdev_err(ndev, "%s: descriptor allocation failed\n", 733 __func__); 734 return ret; 735 } 736 } 737 738 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 739 axienet_status &= ~XAE_RCW1_RX_MASK; 740 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 741 742 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 743 if (axienet_status & XAE_INT_RXRJECT_MASK) 744 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 745 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 746 XAE_INT_RECV_ERROR_MASK : 0); 747 748 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 749 750 /* Sync default options with HW but leave receiver and 751 * transmitter disabled. 752 */ 753 axienet_setoptions(ndev, lp->options & 754 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 755 axienet_set_mac_address(ndev, NULL); 756 axienet_set_multicast_list(ndev); 757 axienet_setoptions(ndev, lp->options); 758 759 netif_trans_update(ndev); 760 761 return 0; 762 } 763 764 /** 765 * axienet_free_tx_chain - Clean up a series of linked TX descriptors. 766 * @lp: Pointer to the axienet_local structure 767 * @first_bd: Index of first descriptor to clean up 768 * @nr_bds: Max number of descriptors to clean up 769 * @force: Whether to clean descriptors even if not complete 770 * @sizep: Pointer to a u32 filled with the total sum of all bytes 771 * in all cleaned-up descriptors. Ignored if NULL. 772 * @budget: NAPI budget (use 0 when not called from NAPI poll) 773 * 774 * Would either be called after a successful transmit operation, or after 775 * there was an error when setting up the chain. 776 * Returns the number of packets handled. 777 */ 778 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd, 779 int nr_bds, bool force, u32 *sizep, int budget) 780 { 781 struct axidma_bd *cur_p; 782 unsigned int status; 783 int i, packets = 0; 784 dma_addr_t phys; 785 786 for (i = 0; i < nr_bds; i++) { 787 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; 788 status = cur_p->status; 789 790 /* If force is not specified, clean up only descriptors 791 * that have been completed by the MAC. 
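 * The force path is used from error handling (for example when
 * axienet_start_xmit() fails to map a fragment), where descriptors must be
 * reclaimed even though the DMA engine never completed them.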
792 */ 793 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) 794 break; 795 796 /* Ensure we see complete descriptor update */ 797 dma_rmb(); 798 phys = desc_get_phys_addr(lp, cur_p); 799 dma_unmap_single(lp->dev, phys, 800 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), 801 DMA_TO_DEVICE); 802 803 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) { 804 napi_consume_skb(cur_p->skb, budget); 805 packets++; 806 } 807 808 cur_p->app0 = 0; 809 cur_p->app1 = 0; 810 cur_p->app2 = 0; 811 cur_p->app4 = 0; 812 cur_p->skb = NULL; 813 /* ensure our transmit path and device don't prematurely see status cleared */ 814 wmb(); 815 cur_p->cntrl = 0; 816 cur_p->status = 0; 817 818 if (sizep) 819 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 820 } 821 822 if (!force) { 823 lp->tx_bd_ci += i; 824 if (lp->tx_bd_ci >= lp->tx_bd_num) 825 lp->tx_bd_ci %= lp->tx_bd_num; 826 } 827 828 return packets; 829 } 830 831 /** 832 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy 833 * @lp: Pointer to the axienet_local structure 834 * @num_frag: The number of BDs to check for 835 * 836 * Return: 0, on success 837 * NETDEV_TX_BUSY, if any of the descriptors are not free 838 * 839 * This function is invoked before BDs are allocated and transmission starts. 840 * This function returns 0 if a BD or group of BDs can be allocated for 841 * transmission. If the BD or any of the BDs are not free the function 842 * returns a busy status. 843 */ 844 static inline int axienet_check_tx_bd_space(struct axienet_local *lp, 845 int num_frag) 846 { 847 struct axidma_bd *cur_p; 848 849 /* Ensure we see all descriptor updates from device or TX polling */ 850 rmb(); 851 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % 852 lp->tx_bd_num]; 853 if (cur_p->cntrl) 854 return NETDEV_TX_BUSY; 855 return 0; 856 } 857 858 /** 859 * axienet_dma_tx_cb - DMA engine callback for TX channel. 860 * @data: Pointer to the axienet_local structure. 861 * @result: error reporting through dmaengine_result. 862 * This function is called by dmaengine driver for TX channel to notify 863 * that the transmit is done. 864 */ 865 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result) 866 { 867 struct skbuf_dma_descriptor *skbuf_dma; 868 struct axienet_local *lp = data; 869 struct netdev_queue *txq; 870 int len; 871 872 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++); 873 len = skbuf_dma->skb->len; 874 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb); 875 u64_stats_update_begin(&lp->tx_stat_sync); 876 u64_stats_add(&lp->tx_bytes, len); 877 u64_stats_add(&lp->tx_packets, 1); 878 u64_stats_update_end(&lp->tx_stat_sync); 879 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE); 880 dev_consume_skb_any(skbuf_dma->skb); 881 netif_txq_completed_wake(txq, 1, len, 882 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), 883 2); 884 } 885 886 /** 887 * axienet_start_xmit_dmaengine - Starts the transmission. 888 * @skb: sk_buff pointer that contains data to be Txed. 889 * @ndev: Pointer to net_device structure. 890 * 891 * Return: NETDEV_TX_OK on success or any non space errors. 892 * NETDEV_TX_BUSY when free element in TX skb ring buffer 893 * is not available. 894 * 895 * This function is invoked to initiate transmission. The 896 * function sets the skbs, register dma callback API and submit 897 * the dma transaction. 898 * Additionally if checksum offloading is supported, 899 * it populates AXI Stream Control fields with appropriate values. 
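 *
 * As an illustration of the app-word encoding used below: for a partial
 * checksum offload on a packet whose transport header starts 34 bytes into
 * the frame and whose TCP checksum field sits 16 bytes into that header,
 * app_metadata[1] becomes (34 << 16) | 50, i.e. 0x00220032.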
900 */ 901 static netdev_tx_t 902 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev) 903 { 904 struct dma_async_tx_descriptor *dma_tx_desc = NULL; 905 struct axienet_local *lp = netdev_priv(ndev); 906 u32 app_metadata[DMA_NUM_APP_WORDS] = {0}; 907 struct skbuf_dma_descriptor *skbuf_dma; 908 struct dma_device *dma_dev; 909 struct netdev_queue *txq; 910 u32 csum_start_off; 911 u32 csum_index_off; 912 int sg_len; 913 int ret; 914 915 dma_dev = lp->tx_chan->device; 916 sg_len = skb_shinfo(skb)->nr_frags + 1; 917 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) { 918 netif_stop_queue(ndev); 919 if (net_ratelimit()) 920 netdev_warn(ndev, "TX ring unexpectedly full\n"); 921 return NETDEV_TX_BUSY; 922 } 923 924 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head); 925 if (!skbuf_dma) 926 goto xmit_error_drop_skb; 927 928 lp->tx_ring_head++; 929 sg_init_table(skbuf_dma->sgl, sg_len); 930 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len); 931 if (ret < 0) 932 goto xmit_error_drop_skb; 933 934 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); 935 if (!ret) 936 goto xmit_error_drop_skb; 937 938 /* Fill up app fields for checksum */ 939 if (skb->ip_summed == CHECKSUM_PARTIAL) { 940 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 941 /* Tx Full Checksum Offload Enabled */ 942 app_metadata[0] |= 2; 943 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { 944 csum_start_off = skb_transport_offset(skb); 945 csum_index_off = csum_start_off + skb->csum_offset; 946 /* Tx Partial Checksum Offload Enabled */ 947 app_metadata[0] |= 1; 948 app_metadata[1] = (csum_start_off << 16) | csum_index_off; 949 } 950 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 951 app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */ 952 } 953 954 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl, 955 sg_len, DMA_MEM_TO_DEV, 956 DMA_PREP_INTERRUPT, (void *)app_metadata); 957 if (!dma_tx_desc) 958 goto xmit_error_unmap_sg; 959 960 skbuf_dma->skb = skb; 961 skbuf_dma->sg_len = sg_len; 962 dma_tx_desc->callback_param = lp; 963 dma_tx_desc->callback_result = axienet_dma_tx_cb; 964 txq = skb_get_tx_queue(lp->ndev, skb); 965 netdev_tx_sent_queue(txq, skb->len); 966 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), 967 1, 2); 968 969 dmaengine_submit(dma_tx_desc); 970 dma_async_issue_pending(lp->tx_chan); 971 return NETDEV_TX_OK; 972 973 xmit_error_unmap_sg: 974 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); 975 xmit_error_drop_skb: 976 dev_kfree_skb_any(skb); 977 return NETDEV_TX_OK; 978 } 979 980 /** 981 * axienet_tx_poll - Invoked once a transmit is completed by the 982 * Axi DMA Tx channel. 983 * @napi: Pointer to NAPI structure. 984 * @budget: Max number of TX packets to process. 985 * 986 * Return: Number of TX packets processed. 987 * 988 * This function is invoked from the NAPI processing to notify the completion 989 * of transmit operation. It clears fields in the corresponding Tx BDs and 990 * unmaps the corresponding buffer so that CPU can regain ownership of the 991 * buffer. It finally invokes "netif_wake_queue" to restart transmission if 992 * required. 
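 *
 * Completed packets and bytes are also reported to the stack via
 * netdev_completed_queue(), and the queue is only woken once at least
 * MAX_SKB_FRAGS + 1 descriptors are free, i.e. enough for a worst-case skb.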
993 */ 994 static int axienet_tx_poll(struct napi_struct *napi, int budget) 995 { 996 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx); 997 struct net_device *ndev = lp->ndev; 998 u32 size = 0; 999 int packets; 1000 1001 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false, 1002 &size, budget); 1003 1004 if (packets) { 1005 netdev_completed_queue(ndev, packets, size); 1006 u64_stats_update_begin(&lp->tx_stat_sync); 1007 u64_stats_add(&lp->tx_packets, packets); 1008 u64_stats_add(&lp->tx_bytes, size); 1009 u64_stats_update_end(&lp->tx_stat_sync); 1010 1011 /* Matches barrier in axienet_start_xmit */ 1012 smp_mb(); 1013 1014 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 1015 netif_wake_queue(ndev); 1016 } 1017 1018 if (packets < budget && napi_complete_done(napi, packets)) { 1019 /* Re-enable TX completion interrupts. This should 1020 * cause an immediate interrupt if any TX packets are 1021 * already pending. 1022 */ 1023 spin_lock_irq(&lp->tx_cr_lock); 1024 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); 1025 spin_unlock_irq(&lp->tx_cr_lock); 1026 } 1027 return packets; 1028 } 1029 1030 /** 1031 * axienet_start_xmit - Starts the transmission. 1032 * @skb: sk_buff pointer that contains data to be Txed. 1033 * @ndev: Pointer to net_device structure. 1034 * 1035 * Return: NETDEV_TX_OK, on success 1036 * NETDEV_TX_BUSY, if any of the descriptors are not free 1037 * 1038 * This function is invoked from upper layers to initiate transmission. The 1039 * function uses the next available free BDs and populates their fields to 1040 * start the transmission. Additionally if checksum offloading is supported, 1041 * it populates AXI Stream Control fields with appropriate values. 1042 */ 1043 static netdev_tx_t 1044 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1045 { 1046 u32 ii; 1047 u32 num_frag; 1048 u32 csum_start_off; 1049 u32 csum_index_off; 1050 skb_frag_t *frag; 1051 dma_addr_t tail_p, phys; 1052 u32 orig_tail_ptr, new_tail_ptr; 1053 struct axienet_local *lp = netdev_priv(ndev); 1054 struct axidma_bd *cur_p; 1055 1056 orig_tail_ptr = lp->tx_bd_tail; 1057 new_tail_ptr = orig_tail_ptr; 1058 1059 num_frag = skb_shinfo(skb)->nr_frags; 1060 cur_p = &lp->tx_bd_v[orig_tail_ptr]; 1061 1062 if (axienet_check_tx_bd_space(lp, num_frag + 1)) { 1063 /* Should not happen as last start_xmit call should have 1064 * checked for sufficient space and queue should only be 1065 * woken when sufficient space is available. 
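 * (The check above asks for num_frag + 1 descriptors: one for the linear
 * part of the skb plus one per paged fragment.)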
1066 */ 1067 netif_stop_queue(ndev); 1068 if (net_ratelimit()) 1069 netdev_warn(ndev, "TX ring unexpectedly full\n"); 1070 return NETDEV_TX_BUSY; 1071 } 1072 1073 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1074 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 1075 /* Tx Full Checksum Offload Enabled */ 1076 cur_p->app0 |= 2; 1077 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { 1078 csum_start_off = skb_transport_offset(skb); 1079 csum_index_off = csum_start_off + skb->csum_offset; 1080 /* Tx Partial Checksum Offload Enabled */ 1081 cur_p->app0 |= 1; 1082 cur_p->app1 = (csum_start_off << 16) | csum_index_off; 1083 } 1084 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1085 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ 1086 } 1087 1088 phys = dma_map_single(lp->dev, skb->data, 1089 skb_headlen(skb), DMA_TO_DEVICE); 1090 if (unlikely(dma_mapping_error(lp->dev, phys))) { 1091 if (net_ratelimit()) 1092 netdev_err(ndev, "TX DMA mapping error\n"); 1093 ndev->stats.tx_dropped++; 1094 dev_kfree_skb_any(skb); 1095 return NETDEV_TX_OK; 1096 } 1097 desc_set_phys_addr(lp, phys, cur_p); 1098 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; 1099 1100 for (ii = 0; ii < num_frag; ii++) { 1101 if (++new_tail_ptr >= lp->tx_bd_num) 1102 new_tail_ptr = 0; 1103 cur_p = &lp->tx_bd_v[new_tail_ptr]; 1104 frag = &skb_shinfo(skb)->frags[ii]; 1105 phys = dma_map_single(lp->dev, 1106 skb_frag_address(frag), 1107 skb_frag_size(frag), 1108 DMA_TO_DEVICE); 1109 if (unlikely(dma_mapping_error(lp->dev, phys))) { 1110 if (net_ratelimit()) 1111 netdev_err(ndev, "TX DMA mapping error\n"); 1112 ndev->stats.tx_dropped++; 1113 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, 1114 true, NULL, 0); 1115 dev_kfree_skb_any(skb); 1116 return NETDEV_TX_OK; 1117 } 1118 desc_set_phys_addr(lp, phys, cur_p); 1119 cur_p->cntrl = skb_frag_size(frag); 1120 } 1121 1122 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; 1123 cur_p->skb = skb; 1124 1125 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; 1126 if (++new_tail_ptr >= lp->tx_bd_num) 1127 new_tail_ptr = 0; 1128 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); 1129 netdev_sent_queue(ndev, skb->len); 1130 1131 /* Start the transfer */ 1132 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 1133 1134 /* Stop queue if next transmit may not have space */ 1135 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { 1136 netif_stop_queue(ndev); 1137 1138 /* Matches barrier in axienet_tx_poll */ 1139 smp_mb(); 1140 1141 /* Space might have just been freed - check again */ 1142 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) 1143 netif_wake_queue(ndev); 1144 } 1145 1146 return NETDEV_TX_OK; 1147 } 1148 1149 /** 1150 * axienet_dma_rx_cb - DMA engine callback for RX channel. 1151 * @data: Pointer to the skbuf_dma_descriptor structure. 1152 * @result: error reporting through dmaengine_result. 1153 * This function is called by dmaengine driver for RX channel to notify 1154 * that the packet is received. 
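 *
 * The frame length is not available from the skb itself here; it is
 * recovered from the low 16 bits of app word LEN_APP in the status
 * metadata returned by the DMA engine, as done below.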
1155 */ 1156 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result) 1157 { 1158 struct skbuf_dma_descriptor *skbuf_dma; 1159 size_t meta_len, meta_max_len, rx_len; 1160 struct axienet_local *lp = data; 1161 struct sk_buff *skb; 1162 u32 *app_metadata; 1163 int i; 1164 1165 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++); 1166 skb = skbuf_dma->skb; 1167 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len, 1168 &meta_max_len); 1169 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size, 1170 DMA_FROM_DEVICE); 1171 /* TODO: Derive app word index programmatically */ 1172 rx_len = (app_metadata[LEN_APP] & 0xFFFF); 1173 skb_put(skb, rx_len); 1174 skb->protocol = eth_type_trans(skb, lp->ndev); 1175 skb->ip_summed = CHECKSUM_NONE; 1176 1177 __netif_rx(skb); 1178 u64_stats_update_begin(&lp->rx_stat_sync); 1179 u64_stats_add(&lp->rx_packets, 1); 1180 u64_stats_add(&lp->rx_bytes, rx_len); 1181 u64_stats_update_end(&lp->rx_stat_sync); 1182 1183 for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail, 1184 RX_BUF_NUM_DEFAULT); i++) 1185 axienet_rx_submit_desc(lp->ndev); 1186 dma_async_issue_pending(lp->rx_chan); 1187 } 1188 1189 /** 1190 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing. 1191 * @napi: Pointer to NAPI structure. 1192 * @budget: Max number of RX packets to process. 1193 * 1194 * Return: Number of RX packets processed. 1195 */ 1196 static int axienet_rx_poll(struct napi_struct *napi, int budget) 1197 { 1198 u32 length; 1199 u32 csumstatus; 1200 u32 size = 0; 1201 int packets = 0; 1202 dma_addr_t tail_p = 0; 1203 struct axidma_bd *cur_p; 1204 struct sk_buff *skb, *new_skb; 1205 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx); 1206 1207 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 1208 1209 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { 1210 dma_addr_t phys; 1211 1212 /* Ensure we see complete descriptor update */ 1213 dma_rmb(); 1214 1215 skb = cur_p->skb; 1216 cur_p->skb = NULL; 1217 1218 /* skb could be NULL if a previous pass already received the 1219 * packet for this slot in the ring, but failed to refill it 1220 * with a newly allocated buffer. In this case, don't try to 1221 * receive it again. 
1222 */ 1223 if (likely(skb)) { 1224 length = cur_p->app4 & 0x0000FFFF; 1225 1226 phys = desc_get_phys_addr(lp, cur_p); 1227 dma_unmap_single(lp->dev, phys, lp->max_frm_size, 1228 DMA_FROM_DEVICE); 1229 1230 skb_put(skb, length); 1231 skb->protocol = eth_type_trans(skb, lp->ndev); 1232 /*skb_checksum_none_assert(skb);*/ 1233 skb->ip_summed = CHECKSUM_NONE; 1234 1235 /* if we're doing Rx csum offload, set it up */ 1236 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { 1237 csumstatus = (cur_p->app2 & 1238 XAE_FULL_CSUM_STATUS_MASK) >> 3; 1239 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || 1240 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { 1241 skb->ip_summed = CHECKSUM_UNNECESSARY; 1242 } 1243 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { 1244 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); 1245 skb->ip_summed = CHECKSUM_COMPLETE; 1246 } 1247 1248 napi_gro_receive(napi, skb); 1249 1250 size += length; 1251 packets++; 1252 } 1253 1254 new_skb = napi_alloc_skb(napi, lp->max_frm_size); 1255 if (!new_skb) 1256 break; 1257 1258 phys = dma_map_single(lp->dev, new_skb->data, 1259 lp->max_frm_size, 1260 DMA_FROM_DEVICE); 1261 if (unlikely(dma_mapping_error(lp->dev, phys))) { 1262 if (net_ratelimit()) 1263 netdev_err(lp->ndev, "RX DMA mapping error\n"); 1264 dev_kfree_skb(new_skb); 1265 break; 1266 } 1267 desc_set_phys_addr(lp, phys, cur_p); 1268 1269 cur_p->cntrl = lp->max_frm_size; 1270 cur_p->status = 0; 1271 cur_p->skb = new_skb; 1272 1273 /* Only update tail_p to mark this slot as usable after it has 1274 * been successfully refilled. 1275 */ 1276 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; 1277 1278 if (++lp->rx_bd_ci >= lp->rx_bd_num) 1279 lp->rx_bd_ci = 0; 1280 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 1281 } 1282 1283 u64_stats_update_begin(&lp->rx_stat_sync); 1284 u64_stats_add(&lp->rx_packets, packets); 1285 u64_stats_add(&lp->rx_bytes, size); 1286 u64_stats_update_end(&lp->rx_stat_sync); 1287 1288 if (tail_p) 1289 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); 1290 1291 if (packets < budget && napi_complete_done(napi, packets)) { 1292 if (READ_ONCE(lp->rx_dim_enabled)) { 1293 struct dim_sample sample = { 1294 .time = ktime_get(), 1295 /* Safe because we are the only writer */ 1296 .pkt_ctr = u64_stats_read(&lp->rx_packets), 1297 .byte_ctr = u64_stats_read(&lp->rx_bytes), 1298 .event_ctr = READ_ONCE(lp->rx_irqs), 1299 }; 1300 1301 net_dim(&lp->rx_dim, &sample); 1302 } 1303 1304 /* Re-enable RX completion interrupts. This should 1305 * cause an immediate interrupt if any RX packets are 1306 * already pending. 1307 */ 1308 spin_lock_irq(&lp->rx_cr_lock); 1309 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); 1310 spin_unlock_irq(&lp->rx_cr_lock); 1311 } 1312 return packets; 1313 } 1314 1315 /** 1316 * axienet_tx_irq - Tx Done Isr. 1317 * @irq: irq number 1318 * @_ndev: net_device pointer 1319 * 1320 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise. 1321 * 1322 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the 1323 * TX BD processing. 
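 *
 * Completion interrupts are masked here and re-enabled from
 * axienet_tx_poll() once the poll consumes less than its budget, which is
 * the usual NAPI interrupt-mitigation pattern.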
1324 */ 1325 static irqreturn_t axienet_tx_irq(int irq, void *_ndev) 1326 { 1327 unsigned int status; 1328 struct net_device *ndev = _ndev; 1329 struct axienet_local *lp = netdev_priv(ndev); 1330 1331 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1332 1333 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1334 return IRQ_NONE; 1335 1336 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); 1337 1338 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { 1339 netdev_err(ndev, "DMA Tx error 0x%x\n", status); 1340 netdev_err(ndev, "Current BD is at: 0x%x%08x\n", 1341 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, 1342 (lp->tx_bd_v[lp->tx_bd_ci]).phys); 1343 schedule_work(&lp->dma_err_task); 1344 } else { 1345 /* Disable further TX completion interrupts and schedule 1346 * NAPI to handle the completions. 1347 */ 1348 if (napi_schedule_prep(&lp->napi_tx)) { 1349 u32 cr; 1350 1351 spin_lock(&lp->tx_cr_lock); 1352 cr = lp->tx_dma_cr; 1353 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 1354 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1355 spin_unlock(&lp->tx_cr_lock); 1356 __napi_schedule(&lp->napi_tx); 1357 } 1358 } 1359 1360 return IRQ_HANDLED; 1361 } 1362 1363 /** 1364 * axienet_rx_irq - Rx Isr. 1365 * @irq: irq number 1366 * @_ndev: net_device pointer 1367 * 1368 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise. 1369 * 1370 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD 1371 * processing. 1372 */ 1373 static irqreturn_t axienet_rx_irq(int irq, void *_ndev) 1374 { 1375 unsigned int status; 1376 struct net_device *ndev = _ndev; 1377 struct axienet_local *lp = netdev_priv(ndev); 1378 1379 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1380 1381 if (!(status & XAXIDMA_IRQ_ALL_MASK)) 1382 return IRQ_NONE; 1383 1384 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status); 1385 1386 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) { 1387 netdev_err(ndev, "DMA Rx error 0x%x\n", status); 1388 netdev_err(ndev, "Current BD is at: 0x%x%08x\n", 1389 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, 1390 (lp->rx_bd_v[lp->rx_bd_ci]).phys); 1391 schedule_work(&lp->dma_err_task); 1392 } else { 1393 /* Disable further RX completion interrupts and schedule 1394 * NAPI receive. 1395 */ 1396 WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1); 1397 if (napi_schedule_prep(&lp->napi_rx)) { 1398 u32 cr; 1399 1400 spin_lock(&lp->rx_cr_lock); 1401 cr = lp->rx_dma_cr; 1402 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); 1403 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1404 spin_unlock(&lp->rx_cr_lock); 1405 1406 __napi_schedule(&lp->napi_rx); 1407 } 1408 } 1409 1410 return IRQ_HANDLED; 1411 } 1412 1413 /** 1414 * axienet_eth_irq - Ethernet core Isr. 1415 * @irq: irq number 1416 * @_ndev: net_device pointer 1417 * 1418 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise. 1419 * 1420 * Handle miscellaneous conditions indicated by Ethernet core IRQ. 
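 *
 * Only receive FIFO overruns (counted as rx_missed_errors) and rejected
 * frames (counted as rx_dropped) are acted on; all pending bits are then
 * acknowledged by writing them back to the interrupt status register.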
1421 */ 1422 static irqreturn_t axienet_eth_irq(int irq, void *_ndev) 1423 { 1424 struct net_device *ndev = _ndev; 1425 struct axienet_local *lp = netdev_priv(ndev); 1426 unsigned int pending; 1427 1428 pending = axienet_ior(lp, XAE_IP_OFFSET); 1429 if (!pending) 1430 return IRQ_NONE; 1431 1432 if (pending & XAE_INT_RXFIFOOVR_MASK) 1433 ndev->stats.rx_missed_errors++; 1434 1435 if (pending & XAE_INT_RXRJECT_MASK) 1436 ndev->stats.rx_dropped++; 1437 1438 axienet_iow(lp, XAE_IS_OFFSET, pending); 1439 return IRQ_HANDLED; 1440 } 1441 1442 static void axienet_dma_err_handler(struct work_struct *work); 1443 1444 /** 1445 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine. 1446 * allocate skbuff, map the scatterlist and obtain a descriptor 1447 * and then add the callback information and submit descriptor. 1448 * 1449 * @ndev: net_device pointer 1450 * 1451 */ 1452 static void axienet_rx_submit_desc(struct net_device *ndev) 1453 { 1454 struct dma_async_tx_descriptor *dma_rx_desc = NULL; 1455 struct axienet_local *lp = netdev_priv(ndev); 1456 struct skbuf_dma_descriptor *skbuf_dma; 1457 struct sk_buff *skb; 1458 dma_addr_t addr; 1459 1460 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head); 1461 if (!skbuf_dma) 1462 return; 1463 1464 skb = netdev_alloc_skb(ndev, lp->max_frm_size); 1465 if (!skb) 1466 return; 1467 1468 sg_init_table(skbuf_dma->sgl, 1); 1469 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); 1470 if (unlikely(dma_mapping_error(lp->dev, addr))) { 1471 if (net_ratelimit()) 1472 netdev_err(ndev, "DMA mapping error\n"); 1473 goto rx_submit_err_free_skb; 1474 } 1475 sg_dma_address(skbuf_dma->sgl) = addr; 1476 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size; 1477 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl, 1478 1, DMA_DEV_TO_MEM, 1479 DMA_PREP_INTERRUPT); 1480 if (!dma_rx_desc) 1481 goto rx_submit_err_unmap_skb; 1482 1483 skbuf_dma->skb = skb; 1484 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl); 1485 skbuf_dma->desc = dma_rx_desc; 1486 dma_rx_desc->callback_param = lp; 1487 dma_rx_desc->callback_result = axienet_dma_rx_cb; 1488 lp->rx_ring_head++; 1489 dmaengine_submit(dma_rx_desc); 1490 1491 return; 1492 1493 rx_submit_err_unmap_skb: 1494 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE); 1495 rx_submit_err_free_skb: 1496 dev_kfree_skb(skb); 1497 } 1498 1499 /** 1500 * axienet_init_dmaengine - init the dmaengine code. 1501 * @ndev: Pointer to net_device structure 1502 * 1503 * Return: 0, on success. 1504 * non-zero error value on failure 1505 * 1506 * This is the dmaengine initialization code. 
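 *
 * It requests the "tx_chan0" and "rx_chan0" dmaengine channels, allocates
 * the TX/RX skb bookkeeping rings, and pre-posts RX_BUF_NUM_DEFAULT receive
 * buffers before kicking the RX channel with dma_async_issue_pending().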
1507 */ 1508 static int axienet_init_dmaengine(struct net_device *ndev) 1509 { 1510 struct axienet_local *lp = netdev_priv(ndev); 1511 struct skbuf_dma_descriptor *skbuf_dma; 1512 int i, ret; 1513 1514 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 1515 if (IS_ERR(lp->tx_chan)) { 1516 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n"); 1517 return PTR_ERR(lp->tx_chan); 1518 } 1519 1520 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0"); 1521 if (IS_ERR(lp->rx_chan)) { 1522 ret = PTR_ERR(lp->rx_chan); 1523 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n"); 1524 goto err_dma_release_tx; 1525 } 1526 1527 lp->tx_ring_tail = 0; 1528 lp->tx_ring_head = 0; 1529 lp->rx_ring_tail = 0; 1530 lp->rx_ring_head = 0; 1531 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring), 1532 GFP_KERNEL); 1533 if (!lp->tx_skb_ring) { 1534 ret = -ENOMEM; 1535 goto err_dma_release_rx; 1536 } 1537 for (i = 0; i < TX_BD_NUM_MAX; i++) { 1538 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); 1539 if (!skbuf_dma) { 1540 ret = -ENOMEM; 1541 goto err_free_tx_skb_ring; 1542 } 1543 lp->tx_skb_ring[i] = skbuf_dma; 1544 } 1545 1546 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring), 1547 GFP_KERNEL); 1548 if (!lp->rx_skb_ring) { 1549 ret = -ENOMEM; 1550 goto err_free_tx_skb_ring; 1551 } 1552 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) { 1553 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL); 1554 if (!skbuf_dma) { 1555 ret = -ENOMEM; 1556 goto err_free_rx_skb_ring; 1557 } 1558 lp->rx_skb_ring[i] = skbuf_dma; 1559 } 1560 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */ 1561 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) 1562 axienet_rx_submit_desc(ndev); 1563 dma_async_issue_pending(lp->rx_chan); 1564 1565 return 0; 1566 1567 err_free_rx_skb_ring: 1568 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) 1569 kfree(lp->rx_skb_ring[i]); 1570 kfree(lp->rx_skb_ring); 1571 err_free_tx_skb_ring: 1572 for (i = 0; i < TX_BD_NUM_MAX; i++) 1573 kfree(lp->tx_skb_ring[i]); 1574 kfree(lp->tx_skb_ring); 1575 err_dma_release_rx: 1576 dma_release_channel(lp->rx_chan); 1577 err_dma_release_tx: 1578 dma_release_channel(lp->tx_chan); 1579 return ret; 1580 } 1581 1582 /** 1583 * axienet_init_legacy_dma - init the dma legacy code. 1584 * @ndev: Pointer to net_device structure 1585 * 1586 * Return: 0, on success. 1587 * non-zero error value on failure 1588 * 1589 * This is the dma initialization code. It also allocates interrupt 1590 * service routines, enables the interrupt lines and ISR handling. 
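 * The Tx and Rx DMA interrupts are requested as shared IRQs; the optional
 * Ethernet core interrupt is only requested when a valid eth_irq (> 0)
 * was provided.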
1591 * 1592 */ 1593 static int axienet_init_legacy_dma(struct net_device *ndev) 1594 { 1595 int ret; 1596 struct axienet_local *lp = netdev_priv(ndev); 1597 1598 /* Enable worker thread for Axi DMA error handling */ 1599 lp->stopping = false; 1600 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); 1601 1602 napi_enable(&lp->napi_rx); 1603 napi_enable(&lp->napi_tx); 1604 1605 /* Enable interrupts for Axi DMA Tx */ 1606 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, 1607 ndev->name, ndev); 1608 if (ret) 1609 goto err_tx_irq; 1610 /* Enable interrupts for Axi DMA Rx */ 1611 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, 1612 ndev->name, ndev); 1613 if (ret) 1614 goto err_rx_irq; 1615 /* Enable interrupts for Axi Ethernet core (if defined) */ 1616 if (lp->eth_irq > 0) { 1617 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1618 ndev->name, ndev); 1619 if (ret) 1620 goto err_eth_irq; 1621 } 1622 1623 return 0; 1624 1625 err_eth_irq: 1626 free_irq(lp->rx_irq, ndev); 1627 err_rx_irq: 1628 free_irq(lp->tx_irq, ndev); 1629 err_tx_irq: 1630 napi_disable(&lp->napi_tx); 1631 napi_disable(&lp->napi_rx); 1632 cancel_work_sync(&lp->dma_err_task); 1633 dev_err(lp->dev, "request_irq() failed\n"); 1634 return ret; 1635 } 1636 1637 /** 1638 * axienet_open - Driver open routine. 1639 * @ndev: Pointer to net_device structure 1640 * 1641 * Return: 0, on success. 1642 * non-zero error value on failure 1643 * 1644 * This is the driver open routine. It calls phylink_start to start the 1645 * PHY device. 1646 * It also allocates interrupt service routines, enables the interrupt lines 1647 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer 1648 * descriptors are initialized. 1649 */ 1650 static int axienet_open(struct net_device *ndev) 1651 { 1652 int ret; 1653 struct axienet_local *lp = netdev_priv(ndev); 1654 1655 /* When we do an Axi Ethernet reset, it resets the complete core 1656 * including the MDIO. MDIO must be disabled before resetting. 1657 * Hold MDIO bus lock to avoid MDIO accesses during the reset. 1658 */ 1659 axienet_lock_mii(lp); 1660 ret = axienet_device_reset(ndev); 1661 axienet_unlock_mii(lp); 1662 1663 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); 1664 if (ret) { 1665 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); 1666 return ret; 1667 } 1668 1669 phylink_start(lp->phylink); 1670 1671 /* Start the statistics refresh work */ 1672 schedule_delayed_work(&lp->stats_work, 0); 1673 1674 if (lp->use_dmaengine) { 1675 /* Enable interrupts for Axi Ethernet core (if defined) */ 1676 if (lp->eth_irq > 0) { 1677 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, 1678 ndev->name, ndev); 1679 if (ret) 1680 goto err_phy; 1681 } 1682 1683 ret = axienet_init_dmaengine(ndev); 1684 if (ret < 0) 1685 goto err_free_eth_irq; 1686 } else { 1687 ret = axienet_init_legacy_dma(ndev); 1688 if (ret) 1689 goto err_phy; 1690 } 1691 1692 return 0; 1693 1694 err_free_eth_irq: 1695 if (lp->eth_irq > 0) 1696 free_irq(lp->eth_irq, ndev); 1697 err_phy: 1698 cancel_work_sync(&lp->rx_dim.work); 1699 cancel_delayed_work_sync(&lp->stats_work); 1700 phylink_stop(lp->phylink); 1701 phylink_disconnect_phy(lp->phylink); 1702 return ret; 1703 } 1704 1705 /** 1706 * axienet_stop - Driver stop routine. 1707 * @ndev: Pointer to net_device structure 1708 * 1709 * Return: 0, on success. 1710 * 1711 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY 1712 * device. 
It also removes the interrupt handlers and disables the interrupts. 1713 * The Axi DMA Tx/Rx BDs are released. 1714 */ 1715 static int axienet_stop(struct net_device *ndev) 1716 { 1717 struct axienet_local *lp = netdev_priv(ndev); 1718 int i; 1719 1720 if (!lp->use_dmaengine) { 1721 WRITE_ONCE(lp->stopping, true); 1722 flush_work(&lp->dma_err_task); 1723 1724 napi_disable(&lp->napi_tx); 1725 napi_disable(&lp->napi_rx); 1726 } 1727 1728 cancel_work_sync(&lp->rx_dim.work); 1729 cancel_delayed_work_sync(&lp->stats_work); 1730 1731 phylink_stop(lp->phylink); 1732 phylink_disconnect_phy(lp->phylink); 1733 1734 axienet_setoptions(ndev, lp->options & 1735 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1736 1737 if (!lp->use_dmaengine) { 1738 axienet_dma_stop(lp); 1739 cancel_work_sync(&lp->dma_err_task); 1740 free_irq(lp->tx_irq, ndev); 1741 free_irq(lp->rx_irq, ndev); 1742 axienet_dma_bd_release(ndev); 1743 } else { 1744 dmaengine_terminate_sync(lp->tx_chan); 1745 dmaengine_synchronize(lp->tx_chan); 1746 dmaengine_terminate_sync(lp->rx_chan); 1747 dmaengine_synchronize(lp->rx_chan); 1748 1749 for (i = 0; i < TX_BD_NUM_MAX; i++) 1750 kfree(lp->tx_skb_ring[i]); 1751 kfree(lp->tx_skb_ring); 1752 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) 1753 kfree(lp->rx_skb_ring[i]); 1754 kfree(lp->rx_skb_ring); 1755 1756 dma_release_channel(lp->rx_chan); 1757 dma_release_channel(lp->tx_chan); 1758 } 1759 1760 netdev_reset_queue(ndev); 1761 axienet_iow(lp, XAE_IE_OFFSET, 0); 1762 1763 if (lp->eth_irq > 0) 1764 free_irq(lp->eth_irq, ndev); 1765 return 0; 1766 } 1767 1768 /** 1769 * axienet_change_mtu - Driver change mtu routine. 1770 * @ndev: Pointer to net_device structure 1771 * @new_mtu: New mtu value to be applied 1772 * 1773 * Return: Always returns 0 (success). 1774 * 1775 * This is the change mtu driver routine. It checks if the Axi Ethernet 1776 * hardware supports jumbo frames before changing the mtu. This can be 1777 * called only when the device is not up. 1778 */ 1779 static int axienet_change_mtu(struct net_device *ndev, int new_mtu) 1780 { 1781 struct axienet_local *lp = netdev_priv(ndev); 1782 1783 if (netif_running(ndev)) 1784 return -EBUSY; 1785 1786 if ((new_mtu + VLAN_ETH_HLEN + 1787 XAE_TRL_SIZE) > lp->rxmem) 1788 return -EINVAL; 1789 1790 WRITE_ONCE(ndev->mtu, new_mtu); 1791 1792 return 0; 1793 } 1794 1795 #ifdef CONFIG_NET_POLL_CONTROLLER 1796 /** 1797 * axienet_poll_controller - Axi Ethernet poll mechanism. 1798 * @ndev: Pointer to net_device structure 1799 * 1800 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior 1801 * to polling the ISRs and are enabled back after the polling is done. 
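 * This path is only exercised by netpoll users such as netconsole when
 * CONFIG_NET_POLL_CONTROLLER is enabled.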
1802 */ 1803 static void axienet_poll_controller(struct net_device *ndev) 1804 { 1805 struct axienet_local *lp = netdev_priv(ndev); 1806 1807 disable_irq(lp->tx_irq); 1808 disable_irq(lp->rx_irq); 1809 axienet_rx_irq(lp->tx_irq, ndev); 1810 axienet_tx_irq(lp->rx_irq, ndev); 1811 enable_irq(lp->tx_irq); 1812 enable_irq(lp->rx_irq); 1813 } 1814 #endif 1815 1816 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1817 { 1818 struct axienet_local *lp = netdev_priv(dev); 1819 1820 if (!netif_running(dev)) 1821 return -EINVAL; 1822 1823 return phylink_mii_ioctl(lp->phylink, rq, cmd); 1824 } 1825 1826 static void 1827 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1828 { 1829 struct axienet_local *lp = netdev_priv(dev); 1830 unsigned int start; 1831 1832 netdev_stats_to_stats64(stats, &dev->stats); 1833 1834 do { 1835 start = u64_stats_fetch_begin(&lp->rx_stat_sync); 1836 stats->rx_packets = u64_stats_read(&lp->rx_packets); 1837 stats->rx_bytes = u64_stats_read(&lp->rx_bytes); 1838 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); 1839 1840 do { 1841 start = u64_stats_fetch_begin(&lp->tx_stat_sync); 1842 stats->tx_packets = u64_stats_read(&lp->tx_packets); 1843 stats->tx_bytes = u64_stats_read(&lp->tx_bytes); 1844 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); 1845 1846 if (!(lp->features & XAE_FEATURE_STATS)) 1847 return; 1848 1849 do { 1850 start = read_seqcount_begin(&lp->hw_stats_seqcount); 1851 stats->rx_length_errors = 1852 axienet_stat(lp, STAT_RX_LENGTH_ERRORS); 1853 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS); 1854 stats->rx_frame_errors = 1855 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); 1856 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) + 1857 axienet_stat(lp, STAT_FRAGMENT_FRAMES) + 1858 stats->rx_length_errors + 1859 stats->rx_crc_errors + 1860 stats->rx_frame_errors; 1861 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); 1862 1863 stats->tx_aborted_errors = 1864 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); 1865 stats->tx_fifo_errors = 1866 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS); 1867 stats->tx_window_errors = 1868 axienet_stat(lp, STAT_TX_LATE_COLLISIONS); 1869 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) + 1870 stats->tx_aborted_errors + 1871 stats->tx_fifo_errors + 1872 stats->tx_window_errors; 1873 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 1874 } 1875 1876 static const struct net_device_ops axienet_netdev_ops = { 1877 .ndo_open = axienet_open, 1878 .ndo_stop = axienet_stop, 1879 .ndo_start_xmit = axienet_start_xmit, 1880 .ndo_get_stats64 = axienet_get_stats64, 1881 .ndo_change_mtu = axienet_change_mtu, 1882 .ndo_set_mac_address = netdev_set_mac_address, 1883 .ndo_validate_addr = eth_validate_addr, 1884 .ndo_eth_ioctl = axienet_ioctl, 1885 .ndo_set_rx_mode = axienet_set_multicast_list, 1886 #ifdef CONFIG_NET_POLL_CONTROLLER 1887 .ndo_poll_controller = axienet_poll_controller, 1888 #endif 1889 }; 1890 1891 static const struct net_device_ops axienet_netdev_dmaengine_ops = { 1892 .ndo_open = axienet_open, 1893 .ndo_stop = axienet_stop, 1894 .ndo_start_xmit = axienet_start_xmit_dmaengine, 1895 .ndo_get_stats64 = axienet_get_stats64, 1896 .ndo_change_mtu = axienet_change_mtu, 1897 .ndo_set_mac_address = netdev_set_mac_address, 1898 .ndo_validate_addr = eth_validate_addr, 1899 .ndo_eth_ioctl = axienet_ioctl, 1900 .ndo_set_rx_mode = axienet_set_multicast_list, 1901 }; 1902 1903 /** 1904 * axienet_ethtools_get_drvinfo - Get various Axi 
Ethernet driver information. 1905 * @ndev: Pointer to net_device structure 1906 * @ed: Pointer to ethtool_drvinfo structure 1907 * 1908 * This implements ethtool command for getting the driver information. 1909 * Issue "ethtool -i ethX" under linux prompt to execute this function. 1910 */ 1911 static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1912 struct ethtool_drvinfo *ed) 1913 { 1914 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); 1915 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); 1916 } 1917 1918 /** 1919 * axienet_ethtools_get_regs_len - Get the total regs length present in the 1920 * AxiEthernet core. 1921 * @ndev: Pointer to net_device structure 1922 * 1923 * This implements ethtool command for getting the total register length 1924 * information. 1925 * 1926 * Return: the total regs length 1927 */ 1928 static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1929 { 1930 return sizeof(u32) * AXIENET_REGS_N; 1931 } 1932 1933 /** 1934 * axienet_ethtools_get_regs - Dump the contents of all registers present 1935 * in AxiEthernet core. 1936 * @ndev: Pointer to net_device structure 1937 * @regs: Pointer to ethtool_regs structure 1938 * @ret: Void pointer used to return the contents of the registers. 1939 * 1940 * This implements ethtool command for getting the Axi Ethernet register dump. 1941 * Issue "ethtool -d ethX" to execute this function. 1942 */ 1943 static void axienet_ethtools_get_regs(struct net_device *ndev, 1944 struct ethtool_regs *regs, void *ret) 1945 { 1946 u32 *data = (u32 *)ret; 1947 size_t len = sizeof(u32) * AXIENET_REGS_N; 1948 struct axienet_local *lp = netdev_priv(ndev); 1949 1950 regs->version = 0; 1951 regs->len = len; 1952 1953 memset(data, 0, len); 1954 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1955 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1956 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1957 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1958 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1959 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1960 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1961 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1962 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1963 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1964 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1965 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1966 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1967 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1968 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1969 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1970 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1971 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1972 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1973 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1974 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1975 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1976 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1977 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1978 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1979 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1980 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1981 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1982 if (!lp->use_dmaengine) { 1983 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1984 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1985 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1986 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1987 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1988 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1989 data[38] = 
axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1990 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1991 } 1992 } 1993 1994 static void 1995 axienet_ethtools_get_ringparam(struct net_device *ndev, 1996 struct ethtool_ringparam *ering, 1997 struct kernel_ethtool_ringparam *kernel_ering, 1998 struct netlink_ext_ack *extack) 1999 { 2000 struct axienet_local *lp = netdev_priv(ndev); 2001 2002 ering->rx_max_pending = RX_BD_NUM_MAX; 2003 ering->rx_mini_max_pending = 0; 2004 ering->rx_jumbo_max_pending = 0; 2005 ering->tx_max_pending = TX_BD_NUM_MAX; 2006 ering->rx_pending = lp->rx_bd_num; 2007 ering->rx_mini_pending = 0; 2008 ering->rx_jumbo_pending = 0; 2009 ering->tx_pending = lp->tx_bd_num; 2010 } 2011 2012 static int 2013 axienet_ethtools_set_ringparam(struct net_device *ndev, 2014 struct ethtool_ringparam *ering, 2015 struct kernel_ethtool_ringparam *kernel_ering, 2016 struct netlink_ext_ack *extack) 2017 { 2018 struct axienet_local *lp = netdev_priv(ndev); 2019 2020 if (ering->rx_pending > RX_BD_NUM_MAX || 2021 ering->rx_mini_pending || 2022 ering->rx_jumbo_pending || 2023 ering->tx_pending < TX_BD_NUM_MIN || 2024 ering->tx_pending > TX_BD_NUM_MAX) 2025 return -EINVAL; 2026 2027 if (netif_running(ndev)) 2028 return -EBUSY; 2029 2030 lp->rx_bd_num = ering->rx_pending; 2031 lp->tx_bd_num = ering->tx_pending; 2032 return 0; 2033 } 2034 2035 /** 2036 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 2037 * Tx and Rx paths. 2038 * @ndev: Pointer to net_device structure 2039 * @epauseparm: Pointer to ethtool_pauseparam structure. 2040 * 2041 * This implements ethtool command for getting axi ethernet pause frame 2042 * setting. Issue "ethtool -a ethX" to execute this function. 2043 */ 2044 static void 2045 axienet_ethtools_get_pauseparam(struct net_device *ndev, 2046 struct ethtool_pauseparam *epauseparm) 2047 { 2048 struct axienet_local *lp = netdev_priv(ndev); 2049 2050 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 2051 } 2052 2053 /** 2054 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 2055 * settings. 2056 * @ndev: Pointer to net_device structure 2057 * @epauseparm:Pointer to ethtool_pauseparam structure 2058 * 2059 * This implements ethtool command for enabling flow control on Rx and Tx 2060 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 2061 * function. 2062 * 2063 * Return: 0 on success, -EFAULT if device is running 2064 */ 2065 static int 2066 axienet_ethtools_set_pauseparam(struct net_device *ndev, 2067 struct ethtool_pauseparam *epauseparm) 2068 { 2069 struct axienet_local *lp = netdev_priv(ndev); 2070 2071 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 2072 } 2073 2074 /** 2075 * axienet_update_coalesce_rx() - Set RX CR 2076 * @lp: Device private data 2077 * @cr: Value to write to the RX CR 2078 * @mask: Bits to set from @cr 2079 */ 2080 static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr, 2081 u32 mask) 2082 { 2083 spin_lock_irq(&lp->rx_cr_lock); 2084 lp->rx_dma_cr &= ~mask; 2085 lp->rx_dma_cr |= cr; 2086 /* If DMA isn't started, then the settings will be applied the next 2087 * time dma_start() is called. 
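	 * The lp->rx_dma_cr shadow always holds the full intended control
	 * value; the register write below may leave the interrupt enable
	 * bits cleared while NAPI has them masked.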
2088 */ 2089 if (lp->rx_dma_started) { 2090 u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 2091 2092 /* Don't enable IRQs if they are disabled by NAPI */ 2093 if (reg & XAXIDMA_IRQ_ALL_MASK) 2094 cr = lp->rx_dma_cr; 2095 else 2096 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; 2097 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 2098 } 2099 spin_unlock_irq(&lp->rx_cr_lock); 2100 } 2101 2102 /** 2103 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM 2104 * @lp: Device private data 2105 */ 2106 static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp) 2107 { 2108 return min(1 << (lp->rx_dim.profile_ix << 1), 255); 2109 } 2110 2111 /** 2112 * axienet_rx_dim_work() - Adjust RX DIM settings 2113 * @work: The work struct 2114 */ 2115 static void axienet_rx_dim_work(struct work_struct *work) 2116 { 2117 struct axienet_local *lp = 2118 container_of(work, struct axienet_local, rx_dim.work); 2119 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0); 2120 u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK | 2121 XAXIDMA_IRQ_ERROR_MASK; 2122 2123 axienet_update_coalesce_rx(lp, cr, mask); 2124 lp->rx_dim.state = DIM_START_MEASURE; 2125 } 2126 2127 /** 2128 * axienet_update_coalesce_tx() - Set TX CR 2129 * @lp: Device private data 2130 * @cr: Value to write to the TX CR 2131 * @mask: Bits to set from @cr 2132 */ 2133 static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr, 2134 u32 mask) 2135 { 2136 spin_lock_irq(&lp->tx_cr_lock); 2137 lp->tx_dma_cr &= ~mask; 2138 lp->tx_dma_cr |= cr; 2139 /* If DMA isn't started, then the settings will be applied the next 2140 * time dma_start() is called. 2141 */ 2142 if (lp->tx_dma_started) { 2143 u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 2144 2145 /* Don't enable IRQs if they are disabled by NAPI */ 2146 if (reg & XAXIDMA_IRQ_ALL_MASK) 2147 cr = lp->tx_dma_cr; 2148 else 2149 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; 2150 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 2151 } 2152 spin_unlock_irq(&lp->tx_cr_lock); 2153 } 2154 2155 /** 2156 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 2157 * @ndev: Pointer to net_device structure 2158 * @ecoalesce: Pointer to ethtool_coalesce structure 2159 * @kernel_coal: ethtool CQE mode setting structure 2160 * @extack: extack for reporting error messages 2161 * 2162 * This implements ethtool command for getting the DMA interrupt coalescing 2163 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 2164 * execute this function. 2165 * 2166 * Return: 0 always 2167 */ 2168 static int 2169 axienet_ethtools_get_coalesce(struct net_device *ndev, 2170 struct ethtool_coalesce *ecoalesce, 2171 struct kernel_ethtool_coalesce *kernel_coal, 2172 struct netlink_ext_ack *extack) 2173 { 2174 struct axienet_local *lp = netdev_priv(ndev); 2175 u32 cr; 2176 2177 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled; 2178 2179 spin_lock_irq(&lp->rx_cr_lock); 2180 cr = lp->rx_dma_cr; 2181 spin_unlock_irq(&lp->rx_cr_lock); 2182 axienet_coalesce_params(lp, cr, 2183 &ecoalesce->rx_max_coalesced_frames, 2184 &ecoalesce->rx_coalesce_usecs); 2185 2186 spin_lock_irq(&lp->tx_cr_lock); 2187 cr = lp->tx_dma_cr; 2188 spin_unlock_irq(&lp->tx_cr_lock); 2189 axienet_coalesce_params(lp, cr, 2190 &ecoalesce->tx_max_coalesced_frames, 2191 &ecoalesce->tx_coalesce_usecs); 2192 return 0; 2193 } 2194 2195 /** 2196 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 
2197 * @ndev: Pointer to net_device structure 2198 * @ecoalesce: Pointer to ethtool_coalesce structure 2199 * @kernel_coal: ethtool CQE mode setting structure 2200 * @extack: extack for reporting error messages 2201 * 2202 * This implements ethtool command for setting the DMA interrupt coalescing 2203 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 2204 * prompt to execute this function. 2205 * 2206 * Return: 0, on success, Non-zero error value on failure. 2207 */ 2208 static int 2209 axienet_ethtools_set_coalesce(struct net_device *ndev, 2210 struct ethtool_coalesce *ecoalesce, 2211 struct kernel_ethtool_coalesce *kernel_coal, 2212 struct netlink_ext_ack *extack) 2213 { 2214 struct axienet_local *lp = netdev_priv(ndev); 2215 bool new_dim = ecoalesce->use_adaptive_rx_coalesce; 2216 bool old_dim = lp->rx_dim_enabled; 2217 u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK; 2218 2219 if (ecoalesce->rx_max_coalesced_frames > 255 || 2220 ecoalesce->tx_max_coalesced_frames > 255) { 2221 NL_SET_ERR_MSG(extack, "frames must be less than 256"); 2222 return -EINVAL; 2223 } 2224 2225 if (!ecoalesce->rx_max_coalesced_frames || 2226 !ecoalesce->tx_max_coalesced_frames) { 2227 NL_SET_ERR_MSG(extack, "frames must be non-zero"); 2228 return -EINVAL; 2229 } 2230 2231 if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) && 2232 !ecoalesce->rx_coalesce_usecs) || 2233 (ecoalesce->tx_max_coalesced_frames > 1 && 2234 !ecoalesce->tx_coalesce_usecs)) { 2235 NL_SET_ERR_MSG(extack, 2236 "usecs must be non-zero when frames is greater than one"); 2237 return -EINVAL; 2238 } 2239 2240 if (new_dim && !old_dim) { 2241 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 2242 ecoalesce->rx_coalesce_usecs); 2243 } else if (!new_dim) { 2244 if (old_dim) { 2245 WRITE_ONCE(lp->rx_dim_enabled, false); 2246 napi_synchronize(&lp->napi_rx); 2247 flush_work(&lp->rx_dim.work); 2248 } 2249 2250 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames, 2251 ecoalesce->rx_coalesce_usecs); 2252 } else { 2253 /* Dummy value for count just to calculate timer */ 2254 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs); 2255 mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK; 2256 } 2257 2258 axienet_update_coalesce_rx(lp, cr, mask); 2259 if (new_dim && !old_dim) 2260 WRITE_ONCE(lp->rx_dim_enabled, true); 2261 2262 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames, 2263 ecoalesce->tx_coalesce_usecs); 2264 axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK); 2265 return 0; 2266 } 2267 2268 static int 2269 axienet_ethtools_get_link_ksettings(struct net_device *ndev, 2270 struct ethtool_link_ksettings *cmd) 2271 { 2272 struct axienet_local *lp = netdev_priv(ndev); 2273 2274 return phylink_ethtool_ksettings_get(lp->phylink, cmd); 2275 } 2276 2277 static int 2278 axienet_ethtools_set_link_ksettings(struct net_device *ndev, 2279 const struct ethtool_link_ksettings *cmd) 2280 { 2281 struct axienet_local *lp = netdev_priv(ndev); 2282 2283 return phylink_ethtool_ksettings_set(lp->phylink, cmd); 2284 } 2285 2286 static int axienet_ethtools_nway_reset(struct net_device *dev) 2287 { 2288 struct axienet_local *lp = netdev_priv(dev); 2289 2290 return phylink_ethtool_nway_reset(lp->phylink); 2291 } 2292 2293 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev, 2294 struct ethtool_stats *stats, 2295 u64 *data) 2296 { 2297 struct axienet_local *lp = netdev_priv(dev); 2298 unsigned int start; 2299 2300 do { 2301 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2302 data[0] = 
axienet_stat(lp, STAT_RX_BYTES);
		data[1] = axienet_stat(lp, STAT_TX_BYTES);
		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
	"Received bytes",
	"Transmitted bytes",
	"RX Good VLAN Tagged Frames",
	"TX Good VLAN Tagged Frames",
	"TX Good PFC Frames",
	"RX Good PFC Frames",
	"User Defined Counter 0",
	"User Defined Counter 1",
	"User Defined Counter 2",
};

static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, axienet_ethtool_stats_strings,
		       sizeof(axienet_ethtool_stats_strings));
		break;
	}
}

static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
{
	struct axienet_local *lp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (lp->features & XAE_FEATURE_STATS)
			return ARRAY_SIZE(axienet_ethtool_stats_strings);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

static void
axienet_ethtools_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		pause_stats->tx_pause_frames =
			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
		pause_stats->rx_pause_frames =
			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static void
axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		mac_stats->FramesTransmittedOK =
			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
		mac_stats->SingleCollisionFrames =
			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
		mac_stats->MultipleCollisionFrames =
			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
		mac_stats->FramesReceivedOK =
			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
		mac_stats->FrameCheckSequenceErrors =
			axienet_stat(lp, STAT_RX_FCS_ERRORS);
		mac_stats->AlignmentErrors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		mac_stats->FramesWithDeferredXmissions =
			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
		mac_stats->LateCollisions =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		mac_stats->FramesAbortedDueToXSColls =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		mac_stats->MulticastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
		mac_stats->FramesWithExcessiveDeferral =
			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
mac_stats->MulticastFramesReceivedOK = 2406 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); 2407 mac_stats->BroadcastFramesReceivedOK = 2408 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES); 2409 mac_stats->InRangeLengthErrors = 2410 axienet_stat(lp, STAT_RX_LENGTH_ERRORS); 2411 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2412 } 2413 2414 static void 2415 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev, 2416 struct ethtool_eth_ctrl_stats *ctrl_stats) 2417 { 2418 struct axienet_local *lp = netdev_priv(dev); 2419 unsigned int start; 2420 2421 if (!(lp->features & XAE_FEATURE_STATS)) 2422 return; 2423 2424 do { 2425 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2426 ctrl_stats->MACControlFramesTransmitted = 2427 axienet_stat(lp, STAT_TX_CONTROL_FRAMES); 2428 ctrl_stats->MACControlFramesReceived = 2429 axienet_stat(lp, STAT_RX_CONTROL_FRAMES); 2430 ctrl_stats->UnsupportedOpcodesReceived = 2431 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS); 2432 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2433 } 2434 2435 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = { 2436 { 64, 64 }, 2437 { 65, 127 }, 2438 { 128, 255 }, 2439 { 256, 511 }, 2440 { 512, 1023 }, 2441 { 1024, 1518 }, 2442 { 1519, 16384 }, 2443 { }, 2444 }; 2445 2446 static void 2447 axienet_ethtool_get_rmon_stats(struct net_device *dev, 2448 struct ethtool_rmon_stats *rmon_stats, 2449 const struct ethtool_rmon_hist_range **ranges) 2450 { 2451 struct axienet_local *lp = netdev_priv(dev); 2452 unsigned int start; 2453 2454 if (!(lp->features & XAE_FEATURE_STATS)) 2455 return; 2456 2457 do { 2458 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2459 rmon_stats->undersize_pkts = 2460 axienet_stat(lp, STAT_UNDERSIZE_FRAMES); 2461 rmon_stats->oversize_pkts = 2462 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES); 2463 rmon_stats->fragments = 2464 axienet_stat(lp, STAT_FRAGMENT_FRAMES); 2465 2466 rmon_stats->hist[0] = 2467 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES); 2468 rmon_stats->hist[1] = 2469 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES); 2470 rmon_stats->hist[2] = 2471 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES); 2472 rmon_stats->hist[3] = 2473 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES); 2474 rmon_stats->hist[4] = 2475 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES); 2476 rmon_stats->hist[5] = 2477 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES); 2478 rmon_stats->hist[6] = 2479 rmon_stats->oversize_pkts; 2480 2481 rmon_stats->hist_tx[0] = 2482 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES); 2483 rmon_stats->hist_tx[1] = 2484 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES); 2485 rmon_stats->hist_tx[2] = 2486 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES); 2487 rmon_stats->hist_tx[3] = 2488 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES); 2489 rmon_stats->hist_tx[4] = 2490 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES); 2491 rmon_stats->hist_tx[5] = 2492 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES); 2493 rmon_stats->hist_tx[6] = 2494 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES); 2495 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2496 2497 *ranges = axienet_rmon_ranges; 2498 } 2499 2500 static const struct ethtool_ops axienet_ethtool_ops = { 2501 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 2502 ETHTOOL_COALESCE_USECS | 2503 ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 2504 .get_drvinfo = axienet_ethtools_get_drvinfo, 2505 .get_regs_len = axienet_ethtools_get_regs_len, 2506 .get_regs = axienet_ethtools_get_regs, 2507 .get_link = ethtool_op_get_link, 2508 .get_ringparam = 
axienet_ethtools_get_ringparam, 2509 .set_ringparam = axienet_ethtools_set_ringparam, 2510 .get_pauseparam = axienet_ethtools_get_pauseparam, 2511 .set_pauseparam = axienet_ethtools_set_pauseparam, 2512 .get_coalesce = axienet_ethtools_get_coalesce, 2513 .set_coalesce = axienet_ethtools_set_coalesce, 2514 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 2515 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 2516 .nway_reset = axienet_ethtools_nway_reset, 2517 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats, 2518 .get_strings = axienet_ethtools_get_strings, 2519 .get_sset_count = axienet_ethtools_get_sset_count, 2520 .get_pause_stats = axienet_ethtools_get_pause_stats, 2521 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats, 2522 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats, 2523 .get_rmon_stats = axienet_ethtool_get_rmon_stats, 2524 }; 2525 2526 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 2527 { 2528 return container_of(pcs, struct axienet_local, pcs); 2529 } 2530 2531 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 2532 unsigned int neg_mode, 2533 struct phylink_link_state *state) 2534 { 2535 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2536 2537 phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state); 2538 } 2539 2540 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 2541 { 2542 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2543 2544 phylink_mii_c22_pcs_an_restart(pcs_phy); 2545 } 2546 2547 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2548 phy_interface_t interface, 2549 const unsigned long *advertising, 2550 bool permit_pause_to_mac) 2551 { 2552 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2553 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 2554 struct axienet_local *lp = netdev_priv(ndev); 2555 int ret; 2556 2557 if (lp->switch_x_sgmii) { 2558 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 2559 interface == PHY_INTERFACE_MODE_SGMII ? 
2560 XLNX_MII_STD_SELECT_SGMII : 0); 2561 if (ret < 0) { 2562 netdev_warn(ndev, 2563 "Failed to switch PHY interface: %d\n", 2564 ret); 2565 return ret; 2566 } 2567 } 2568 2569 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 2570 neg_mode); 2571 if (ret < 0) 2572 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 2573 2574 return ret; 2575 } 2576 2577 static const struct phylink_pcs_ops axienet_pcs_ops = { 2578 .pcs_get_state = axienet_pcs_get_state, 2579 .pcs_config = axienet_pcs_config, 2580 .pcs_an_restart = axienet_pcs_an_restart, 2581 }; 2582 2583 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 2584 phy_interface_t interface) 2585 { 2586 struct net_device *ndev = to_net_dev(config->dev); 2587 struct axienet_local *lp = netdev_priv(ndev); 2588 2589 if (interface == PHY_INTERFACE_MODE_1000BASEX || 2590 interface == PHY_INTERFACE_MODE_SGMII) 2591 return &lp->pcs; 2592 2593 return NULL; 2594 } 2595 2596 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 2597 const struct phylink_link_state *state) 2598 { 2599 /* nothing meaningful to do */ 2600 } 2601 2602 static void axienet_mac_link_down(struct phylink_config *config, 2603 unsigned int mode, 2604 phy_interface_t interface) 2605 { 2606 /* nothing meaningful to do */ 2607 } 2608 2609 static void axienet_mac_link_up(struct phylink_config *config, 2610 struct phy_device *phy, 2611 unsigned int mode, phy_interface_t interface, 2612 int speed, int duplex, 2613 bool tx_pause, bool rx_pause) 2614 { 2615 struct net_device *ndev = to_net_dev(config->dev); 2616 struct axienet_local *lp = netdev_priv(ndev); 2617 u32 emmc_reg, fcc_reg; 2618 2619 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 2620 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 2621 2622 switch (speed) { 2623 case SPEED_1000: 2624 emmc_reg |= XAE_EMMC_LINKSPD_1000; 2625 break; 2626 case SPEED_100: 2627 emmc_reg |= XAE_EMMC_LINKSPD_100; 2628 break; 2629 case SPEED_10: 2630 emmc_reg |= XAE_EMMC_LINKSPD_10; 2631 break; 2632 default: 2633 dev_err(&ndev->dev, 2634 "Speed other than 10, 100 or 1Gbps is not supported\n"); 2635 break; 2636 } 2637 2638 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 2639 2640 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 2641 if (tx_pause) 2642 fcc_reg |= XAE_FCC_FCTX_MASK; 2643 else 2644 fcc_reg &= ~XAE_FCC_FCTX_MASK; 2645 if (rx_pause) 2646 fcc_reg |= XAE_FCC_FCRX_MASK; 2647 else 2648 fcc_reg &= ~XAE_FCC_FCRX_MASK; 2649 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 2650 } 2651 2652 static const struct phylink_mac_ops axienet_phylink_ops = { 2653 .mac_select_pcs = axienet_mac_select_pcs, 2654 .mac_config = axienet_mac_config, 2655 .mac_link_down = axienet_mac_link_down, 2656 .mac_link_up = axienet_mac_link_up, 2657 }; 2658 2659 /** 2660 * axienet_dma_err_handler - Work queue task for Axi DMA Error 2661 * @work: pointer to work_struct 2662 * 2663 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 2664 * Tx/Rx BDs. 
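 *
 * Runs from the lp->dma_err_task work item set up in
 * axienet_init_legacy_dma(), and returns early if the interface is being
 * stopped (lp->stopping).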
2665 */ 2666 static void axienet_dma_err_handler(struct work_struct *work) 2667 { 2668 u32 i; 2669 u32 axienet_status; 2670 struct axidma_bd *cur_p; 2671 struct axienet_local *lp = container_of(work, struct axienet_local, 2672 dma_err_task); 2673 struct net_device *ndev = lp->ndev; 2674 2675 /* Don't bother if we are going to stop anyway */ 2676 if (READ_ONCE(lp->stopping)) 2677 return; 2678 2679 napi_disable(&lp->napi_tx); 2680 napi_disable(&lp->napi_rx); 2681 2682 axienet_setoptions(ndev, lp->options & 2683 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2684 2685 axienet_dma_stop(lp); 2686 netdev_reset_queue(ndev); 2687 2688 for (i = 0; i < lp->tx_bd_num; i++) { 2689 cur_p = &lp->tx_bd_v[i]; 2690 if (cur_p->cntrl) { 2691 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 2692 2693 dma_unmap_single(lp->dev, addr, 2694 (cur_p->cntrl & 2695 XAXIDMA_BD_CTRL_LENGTH_MASK), 2696 DMA_TO_DEVICE); 2697 } 2698 if (cur_p->skb) 2699 dev_kfree_skb_irq(cur_p->skb); 2700 cur_p->phys = 0; 2701 cur_p->phys_msb = 0; 2702 cur_p->cntrl = 0; 2703 cur_p->status = 0; 2704 cur_p->app0 = 0; 2705 cur_p->app1 = 0; 2706 cur_p->app2 = 0; 2707 cur_p->app3 = 0; 2708 cur_p->app4 = 0; 2709 cur_p->skb = NULL; 2710 } 2711 2712 for (i = 0; i < lp->rx_bd_num; i++) { 2713 cur_p = &lp->rx_bd_v[i]; 2714 cur_p->status = 0; 2715 cur_p->app0 = 0; 2716 cur_p->app1 = 0; 2717 cur_p->app2 = 0; 2718 cur_p->app3 = 0; 2719 cur_p->app4 = 0; 2720 } 2721 2722 lp->tx_bd_ci = 0; 2723 lp->tx_bd_tail = 0; 2724 lp->rx_bd_ci = 0; 2725 2726 axienet_dma_start(lp); 2727 2728 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 2729 axienet_status &= ~XAE_RCW1_RX_MASK; 2730 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 2731 2732 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 2733 if (axienet_status & XAE_INT_RXRJECT_MASK) 2734 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 2735 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 2736 XAE_INT_RECV_ERROR_MASK : 0); 2737 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 2738 2739 /* Sync default options with HW but leave receiver and 2740 * transmitter disabled. 2741 */ 2742 axienet_setoptions(ndev, lp->options & 2743 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2744 axienet_set_mac_address(ndev, NULL); 2745 axienet_set_multicast_list(ndev); 2746 napi_enable(&lp->napi_rx); 2747 napi_enable(&lp->napi_tx); 2748 axienet_setoptions(ndev, lp->options); 2749 } 2750 2751 /** 2752 * axienet_probe - Axi Ethernet probe function. 2753 * @pdev: Pointer to platform device structure. 2754 * 2755 * Return: 0, on success 2756 * Non-zero error value on failure. 2757 * 2758 * This is the probe routine for Axi Ethernet driver. This is called before 2759 * any other driver routines are invoked. It allocates and sets up the Ethernet 2760 * device. Parses through device tree and populates fields of 2761 * axienet_local. It registers the Ethernet device. 
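 *
 * Depending on whether the device tree node provides a "dmas" property, the
 * driver is set up to use either the dmaengine-based datapath or the legacy
 * in-driver Axi DMA handling.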
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	mutex_init(&lp->stats_lock);
	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
		lp->features |= XAE_FEATURE_STATS;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum any contiguous range */
			ndev->features |= NETIF_F_HW_CSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
2876 * Here we check for memory allocated for Rx/Tx in the hardware from 2877 * the device-tree and accordingly set flags. 2878 */ 2879 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 2880 2881 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 2882 "xlnx,switch-x-sgmii"); 2883 2884 /* Start with the proprietary, and broken phy_type */ 2885 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 2886 if (!ret) { 2887 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 2888 switch (value) { 2889 case XAE_PHY_TYPE_MII: 2890 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2891 break; 2892 case XAE_PHY_TYPE_GMII: 2893 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2894 break; 2895 case XAE_PHY_TYPE_RGMII_2_0: 2896 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2897 break; 2898 case XAE_PHY_TYPE_SGMII: 2899 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2900 break; 2901 case XAE_PHY_TYPE_1000BASE_X: 2902 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2903 break; 2904 default: 2905 ret = -EINVAL; 2906 goto cleanup_clk; 2907 } 2908 } else { 2909 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2910 if (ret) 2911 goto cleanup_clk; 2912 } 2913 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2914 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2915 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2916 ret = -EINVAL; 2917 goto cleanup_clk; 2918 } 2919 2920 if (!of_property_present(pdev->dev.of_node, "dmas")) { 2921 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2922 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2923 2924 if (np) { 2925 struct resource dmares; 2926 2927 ret = of_address_to_resource(np, 0, &dmares); 2928 if (ret) { 2929 dev_err(&pdev->dev, 2930 "unable to get DMA resource\n"); 2931 of_node_put(np); 2932 goto cleanup_clk; 2933 } 2934 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2935 &dmares); 2936 lp->rx_irq = irq_of_parse_and_map(np, 1); 2937 lp->tx_irq = irq_of_parse_and_map(np, 0); 2938 of_node_put(np); 2939 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2940 } else { 2941 /* Check for these resources directly on the Ethernet node. */ 2942 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); 2943 lp->rx_irq = platform_get_irq(pdev, 1); 2944 lp->tx_irq = platform_get_irq(pdev, 0); 2945 lp->eth_irq = platform_get_irq_optional(pdev, 2); 2946 } 2947 if (IS_ERR(lp->dma_regs)) { 2948 dev_err(&pdev->dev, "could not map DMA regs\n"); 2949 ret = PTR_ERR(lp->dma_regs); 2950 goto cleanup_clk; 2951 } 2952 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { 2953 dev_err(&pdev->dev, "could not determine irqs\n"); 2954 ret = -ENOMEM; 2955 goto cleanup_clk; 2956 } 2957 2958 /* Reset core now that clocks are enabled, prior to accessing MDIO */ 2959 ret = __axienet_device_reset(lp); 2960 if (ret) 2961 goto cleanup_clk; 2962 2963 /* Autodetect the need for 64-bit DMA pointers. 2964 * When the IP is configured for a bus width bigger than 32 bits, 2965 * writing the MSB registers is mandatory, even if they are all 0. 2966 * We can detect this case by writing all 1's to one such register 2967 * and see if that sticks: when the IP is configured for 32 bits 2968 * only, those registers are RES0. 2969 * Those MSB registers were introduced in IP v7.1, which we check first. 
2970 */ 2971 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) { 2972 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; 2973 2974 iowrite32(0x0, desc); 2975 if (ioread32(desc) == 0) { /* sanity check */ 2976 iowrite32(0xffffffff, desc); 2977 if (ioread32(desc) > 0) { 2978 lp->features |= XAE_FEATURE_DMA_64BIT; 2979 addr_width = 64; 2980 dev_info(&pdev->dev, 2981 "autodetected 64-bit DMA range\n"); 2982 } 2983 iowrite32(0x0, desc); 2984 } 2985 } 2986 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { 2987 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); 2988 ret = -EINVAL; 2989 goto cleanup_clk; 2990 } 2991 2992 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); 2993 if (ret) { 2994 dev_err(&pdev->dev, "No suitable DMA available\n"); 2995 goto cleanup_clk; 2996 } 2997 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); 2998 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); 2999 } else { 3000 struct xilinx_vdma_config cfg; 3001 struct dma_chan *tx_chan; 3002 3003 lp->eth_irq = platform_get_irq_optional(pdev, 0); 3004 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { 3005 ret = lp->eth_irq; 3006 goto cleanup_clk; 3007 } 3008 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); 3009 if (IS_ERR(tx_chan)) { 3010 ret = PTR_ERR(tx_chan); 3011 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); 3012 goto cleanup_clk; 3013 } 3014 3015 cfg.reset = 1; 3016 /* As name says VDMA but it has support for DMA channel reset */ 3017 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg); 3018 if (ret < 0) { 3019 dev_err(&pdev->dev, "Reset channel failed\n"); 3020 dma_release_channel(tx_chan); 3021 goto cleanup_clk; 3022 } 3023 3024 dma_release_channel(tx_chan); 3025 lp->use_dmaengine = 1; 3026 } 3027 3028 if (lp->use_dmaengine) 3029 ndev->netdev_ops = &axienet_netdev_dmaengine_ops; 3030 else 3031 ndev->netdev_ops = &axienet_netdev_ops; 3032 /* Check for Ethernet core IRQ (optional) */ 3033 if (lp->eth_irq <= 0) 3034 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); 3035 3036 /* Retrieve the MAC address */ 3037 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); 3038 if (!ret) { 3039 axienet_set_mac_address(ndev, mac_addr); 3040 } else { 3041 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", 3042 ret); 3043 axienet_set_mac_address(ndev, NULL); 3044 } 3045 3046 spin_lock_init(&lp->rx_cr_lock); 3047 spin_lock_init(&lp->tx_cr_lock); 3048 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work); 3049 lp->rx_dim_enabled = true; 3050 lp->rx_dim.profile_ix = 1; 3051 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 3052 XAXIDMA_DFT_RX_USEC); 3053 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD, 3054 XAXIDMA_DFT_TX_USEC); 3055 3056 ret = axienet_mdio_setup(lp); 3057 if (ret) 3058 dev_warn(&pdev->dev, 3059 "error registering MDIO bus: %d\n", ret); 3060 3061 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 3062 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 3063 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 3064 if (!np) { 3065 /* Deprecated: Always use "pcs-handle" for pcs_phy. 3066 * Falling back to "phy-handle" here is only for 3067 * backward compatibility with old device trees. 
3068 */ 3069 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 3070 } 3071 if (!np) { 3072 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 3073 ret = -EINVAL; 3074 goto cleanup_mdio; 3075 } 3076 lp->pcs_phy = of_mdio_find_device(np); 3077 if (!lp->pcs_phy) { 3078 ret = -EPROBE_DEFER; 3079 of_node_put(np); 3080 goto cleanup_mdio; 3081 } 3082 of_node_put(np); 3083 lp->pcs.ops = &axienet_pcs_ops; 3084 lp->pcs.poll = true; 3085 } 3086 3087 lp->phylink_config.dev = &ndev->dev; 3088 lp->phylink_config.type = PHYLINK_NETDEV; 3089 lp->phylink_config.mac_managed_pm = true; 3090 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 3091 MAC_10FD | MAC_100FD | MAC_1000FD; 3092 3093 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 3094 if (lp->switch_x_sgmii) { 3095 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 3096 lp->phylink_config.supported_interfaces); 3097 __set_bit(PHY_INTERFACE_MODE_SGMII, 3098 lp->phylink_config.supported_interfaces); 3099 } 3100 3101 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 3102 lp->phy_mode, 3103 &axienet_phylink_ops); 3104 if (IS_ERR(lp->phylink)) { 3105 ret = PTR_ERR(lp->phylink); 3106 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 3107 goto cleanup_mdio; 3108 } 3109 3110 ret = register_netdev(lp->ndev); 3111 if (ret) { 3112 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 3113 goto cleanup_phylink; 3114 } 3115 3116 return 0; 3117 3118 cleanup_phylink: 3119 phylink_destroy(lp->phylink); 3120 3121 cleanup_mdio: 3122 if (lp->pcs_phy) 3123 put_device(&lp->pcs_phy->dev); 3124 if (lp->mii_bus) 3125 axienet_mdio_teardown(lp); 3126 cleanup_clk: 3127 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 3128 clk_disable_unprepare(lp->axi_clk); 3129 3130 free_netdev: 3131 free_netdev(ndev); 3132 3133 return ret; 3134 } 3135 3136 static void axienet_remove(struct platform_device *pdev) 3137 { 3138 struct net_device *ndev = platform_get_drvdata(pdev); 3139 struct axienet_local *lp = netdev_priv(ndev); 3140 3141 unregister_netdev(ndev); 3142 3143 if (lp->phylink) 3144 phylink_destroy(lp->phylink); 3145 3146 if (lp->pcs_phy) 3147 put_device(&lp->pcs_phy->dev); 3148 3149 axienet_mdio_teardown(lp); 3150 3151 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 3152 clk_disable_unprepare(lp->axi_clk); 3153 3154 free_netdev(ndev); 3155 } 3156 3157 static void axienet_shutdown(struct platform_device *pdev) 3158 { 3159 struct net_device *ndev = platform_get_drvdata(pdev); 3160 3161 rtnl_lock(); 3162 netif_device_detach(ndev); 3163 3164 if (netif_running(ndev)) 3165 dev_close(ndev); 3166 3167 rtnl_unlock(); 3168 } 3169 3170 static int axienet_suspend(struct device *dev) 3171 { 3172 struct net_device *ndev = dev_get_drvdata(dev); 3173 3174 if (!netif_running(ndev)) 3175 return 0; 3176 3177 netif_device_detach(ndev); 3178 3179 rtnl_lock(); 3180 axienet_stop(ndev); 3181 rtnl_unlock(); 3182 3183 return 0; 3184 } 3185 3186 static int axienet_resume(struct device *dev) 3187 { 3188 struct net_device *ndev = dev_get_drvdata(dev); 3189 3190 if (!netif_running(ndev)) 3191 return 0; 3192 3193 rtnl_lock(); 3194 axienet_open(ndev); 3195 rtnl_unlock(); 3196 3197 netif_device_attach(ndev); 3198 3199 return 0; 3200 } 3201 3202 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 3203 axienet_suspend, axienet_resume); 3204 3205 static struct platform_driver axienet_driver = { 3206 .probe = axienet_probe, 3207 .remove = axienet_remove, 3208 .shutdown = axienet_shutdown, 
3209 .driver = { 3210 .name = "xilinx_axienet", 3211 .pm = &axienet_pm_ops, 3212 .of_match_table = axienet_of_match, 3213 }, 3214 }; 3215 3216 module_platform_driver(axienet_driver); 3217 3218 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 3219 MODULE_AUTHOR("Xilinx"); 3220 MODULE_LICENSE("GPL"); 3221