// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

static u64 axienet_dma_rate(struct axienet_local *lp)
{
	if (lp->axi_clk)
		return clk_get_rate(lp->axi_clk);
	return 125000000; /* arbitrary guess if no clock rate set */
}

/**
 * axienet_calc_cr() - Calculate control register value
 * @lp: Device private data
 * @count: Number of completions before an interrupt
 * @usec: Microseconds after the last completion before an interrupt
 *
 * Calculate a control register value based on the coalescing settings. The
 * run/stop bit is not set.
 */
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
	u32 cr;

	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
	     XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (count > 1) {
		u64 clk_rate = axienet_dma_rate(lp);
		u32 timer;

		/* 1 Timeout Interval = 125 * (clock period of SG clock) */
		timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
						XAXIDMA_DELAY_SCALE);

		timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
		cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
		      XAXIDMA_IRQ_DELAY_MASK;
	}

	return cr;
}

/**
 * axienet_coalesce_params() - Extract coalesce parameters from the CR
 * @lp: Device private data
 * @cr: The control register to parse
 * @count: Number of packets before an interrupt
 * @usec: Idle time (in usec) before an interrupt
 */
static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
				    u32 *count, u32 *usec)
{
	u64 clk_rate = axienet_dma_rate(lp);
	u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);

	*count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
	*usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	spin_lock_irq(&lp->rx_cr_lock);

	/* Start updating the Rx channel control register */
	lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
	lp->rx_dma_started = true;

	spin_unlock_irq(&lp->rx_cr_lock);
	spin_lock_irq(&lp->tx_cr_lock);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run.
	 * But only after we write to the tail pointer register does the Tx
	 * channel start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	lp->tx_dma_started = true;

	spin_unlock_irq(&lp->tx_cr_lock);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure.
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
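 * The first four octets of the address go into UAW0 and the last two into
 * the low half of UAW1; the remaining UAW1 bits are preserved.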
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
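 * Promiscuous mode is requested through the frame filter's PM bit. With
 * IFF_ALLMULTI, or more multicast addresses than table entries, a single
 * catch-all entry matching only the multicast bit is installed instead.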
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	reg = axienet_ior(lp, XAE_FMI_OFFSET);
	reg &= ~XAE_FMI_PM_MASK;
	if (ndev->flags & IFF_PROMISC)
		reg |= XAE_FMI_PM_MASK;
	else
		reg &= ~XAE_FMI_PM_MASK;
	axienet_iow(lp, XAE_FMI_OFFSET, reg);

	if (ndev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		reg &= 0xFFFFFF00;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
		axienet_iow(lp, XAE_AF1_OFFSET, 0);
		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
		axienet_iow(lp, XAE_AM1_OFFSET, 0);
		axienet_iow(lp, XAE_FFE_OFFSET, 1);
		i = 1;
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg &= 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg &= 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
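 * Each table entry maps an option flag to the affected register and the
 * mask that is set or cleared in it.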
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}

static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}

static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp:		Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	spin_lock_irq(&lp->rx_cr_lock);

	cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	lp->rx_dma_started = false;

	spin_unlock_irq(&lp->rx_cr_lock);
	synchronize_irq(lp->rx_irq);

	spin_lock_irq(&lp->tx_cr_lock);

	cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	lp->tx_dma_started = false;

	spin_unlock_irq(&lp->tx_cr_lock);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp:		Pointer to the axienet_local structure
 * @first_bd:	Index of first descriptor to clean up
 * @nr_bds:	Max number of descriptors to clean up
 * @force:	Whether to clean descriptors even if not complete
 * @sizep:	Pointer to a u32 filled with the total sum of all bytes
 *		in all cleaned-up descriptors. Ignored if NULL.
 * @budget:	NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
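		 * The scan stops at the first descriptor that the DMA
		 * engine still owns.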
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data:	Pointer to the axienet_local structure.
 * @result:	error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success and for errors other than lack of space;
 *	   NETDEV_TX_BUSY when no free element is available in the TX skb
 *	   ring buffer.
 *
 * This function is invoked to initiate transmission. It sets up the skb,
 * registers the DMA callback and submits the DMA transaction.
 * Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
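 * These values are handed to the DMA engine together with the prepared
 * slave-sg transaction (the app_metadata words).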
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
						    sg_len, DMA_MEM_TO_DEV,
						    DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->tx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
		spin_unlock_irq(&lp->tx_cr_lock);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data:	Pointer to the axienet_local structure.
 * @result:	error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
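 * The received frame length is read back from the APP words exposed as
 * descriptor metadata by the DMA engine.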
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi:	Pointer to NAPI structure.
 * @budget:	Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		if (READ_ONCE(lp->rx_dim_enabled)) {
			struct dim_sample sample = {
				.time = ktime_get(),
				/* Safe because we are the only writer */
				.pkt_ctr = u64_stats_read(&lp->rx_packets),
				.byte_ctr = u64_stats_read(&lp->rx_bytes),
				.event_ctr = READ_ONCE(lp->rx_irqs),
			};

			net_dim(&lp->rx_dim, &sample);
		}

		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		spin_lock_irq(&lp->rx_cr_lock);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
		spin_unlock_irq(&lp->rx_cr_lock);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		if (napi_schedule_prep(&lp->napi_tx)) {
			u32 cr;

			spin_lock(&lp->tx_cr_lock);
			cr = lp->tx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			spin_unlock(&lp->tx_cr_lock);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
		if (napi_schedule_prep(&lp->napi_rx)) {
			u32 cr;

			spin_lock(&lp->rx_cr_lock);
			cr = lp->rx_dma_cr;
			cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			spin_unlock(&lp->rx_cr_lock);

			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
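 * Currently this only accounts Rx FIFO overruns and rejected frames in the
 * interface statistics.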
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_dropped++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit an rx descriptor to dmaengine.
 * Allocate an skbuff, map it for DMA, obtain a dmaengine descriptor,
 * then add the callback information and submit the descriptor.
 *
 * @ndev:	net_device pointer
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dmaengine initialization code.
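 * It requests the Tx and Rx DMA channels, allocates the Tx/Rx skb rings and
 * pre-submits the initial set of Rx descriptors.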
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the legacy DMA code.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the legacy DMA initialization code. It also installs the
 * interrupt service routines and enables the interrupt lines.
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also installs the interrupt service routines and enables the interrupt
 * lines. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Start the statistics refresh work */
	schedule_delayed_work(&lp->stats_work, 0);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	cancel_work_sync(&lp->rx_dim.work);
	cancel_delayed_work_sync(&lp->stats_work);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device.
 * It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	if (!lp->use_dmaengine) {
		WRITE_ONCE(lp->stopping, true);
		flush_work(&lp->dma_err_task);

		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	cancel_work_sync(&lp->rx_dim.work);
	cancel_delayed_work_sync(&lp->stats_work);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the interface is up; -EINVAL if the new
 *	   MTU cannot be supported with the configured Rx buffer memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->tx_irq, ndev);
	axienet_tx_irq(lp->rx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		stats->rx_length_errors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
		stats->rx_frame_errors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
				   stats->rx_length_errors +
				   stats->rx_crc_errors +
				   stats->rx_frame_errors;
		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);

		stats->tx_aborted_errors =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		stats->tx_fifo_errors =
			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
		stats->tx_window_errors =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
				   stats->tx_aborted_errors +
				   stats->tx_fifo_errors +
				   stats->tx_window_errors;
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};

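/* The two net_device_ops tables above are identical except for
 * .ndo_start_xmit: axienet_netdev_ops drives the in-driver Axi DMA rings,
 * while axienet_netdev_dmaengine_ops hands frames to the dmaengine-backed
 * transmit path. Which table is installed is decided once at probe time
 * based on lp->use_dmaengine.
 */
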
/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	if (!lp->use_dmaengine) {
		data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
		data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
		data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
		data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
		data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
		data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
	}
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, or a negative error value on failure.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

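/* The coalesce helpers below keep a software copy of the DMA control
 * register (lp->rx_dma_cr / lp->tx_dma_cr) under a spinlock and only mask in
 * the requested bits. The hardware register is rewritten immediately only if
 * the DMA engine is already running, and the interrupt-enable bits are left
 * untouched while NAPI has them masked, so in-progress polling is not
 * disturbed.
 */
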
/**
 * axienet_update_coalesce_rx() - Set RX CR
 * @lp: Device private data
 * @cr: Value to write to the RX CR
 * @mask: Bits to set from @cr
 */
static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->rx_cr_lock);
	lp->rx_dma_cr &= ~mask;
	lp->rx_dma_cr |= cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->rx_dma_started) {
		u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);

		/* Don't enable IRQs if they are disabled by NAPI */
		if (reg & XAXIDMA_IRQ_ALL_MASK)
			cr = lp->rx_dma_cr;
		else
			cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	}
	spin_unlock_irq(&lp->rx_cr_lock);
}

/**
 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
 * @lp: Device private data
 */
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
	return min(1 << (lp->rx_dim.profile_ix << 1), 255);
}

/**
 * axienet_rx_dim_work() - Adjust RX DIM settings
 * @work: The work struct
 */
static void axienet_rx_dim_work(struct work_struct *work)
{
	struct axienet_local *lp =
		container_of(work, struct axienet_local, rx_dim.work);
	u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
	u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
		   XAXIDMA_IRQ_ERROR_MASK;

	axienet_update_coalesce_rx(lp, cr, mask);
	lp->rx_dim.state = DIM_START_MEASURE;
}

/**
 * axienet_update_coalesce_tx() - Set TX CR
 * @lp: Device private data
 * @cr: Value to write to the TX CR
 * @mask: Bits to set from @cr
 */
static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
				       u32 mask)
{
	spin_lock_irq(&lp->tx_cr_lock);
	lp->tx_dma_cr &= ~mask;
	lp->tx_dma_cr |= cr;
	/* If DMA isn't started, then the settings will be applied the next
	 * time dma_start() is called.
	 */
	if (lp->tx_dma_started) {
		u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);

		/* Don't enable IRQs if they are disabled by NAPI */
		if (reg & XAXIDMA_IRQ_ALL_MASK)
			cr = lp->tx_dma_cr;
		else
			cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	}
	spin_unlock_irq(&lp->tx_cr_lock);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);
	u32 cr;

	ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;

	spin_lock_irq(&lp->rx_cr_lock);
	cr = lp->rx_dma_cr;
	spin_unlock_irq(&lp->rx_cr_lock);
	axienet_coalesce_params(lp, cr,
				&ecoalesce->rx_max_coalesced_frames,
				&ecoalesce->rx_coalesce_usecs);

	spin_lock_irq(&lp->tx_cr_lock);
	cr = lp->tx_dma_cr;
	spin_unlock_irq(&lp->tx_cr_lock);
	axienet_coalesce_params(lp, cr,
				&ecoalesce->tx_max_coalesced_frames,
				&ecoalesce->tx_coalesce_usecs);
	return 0;
}

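/* Illustrative ethtool usage for the coalesce hooks (device name and values
 * are examples only):
 *
 *	ethtool -c eth0
 *	ethtool -C eth0 rx-frames 16 rx-usecs 50
 *	ethtool -C eth0 adaptive-rx on
 *
 * adaptive-rx toggles the DIM-based RX coalescing handled by
 * axienet_rx_dim_work() above.
 */
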
/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack:	extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);
	bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
	bool old_dim = lp->rx_dim_enabled;
	u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;

	if (ecoalesce->rx_max_coalesced_frames > 255 ||
	    ecoalesce->tx_max_coalesced_frames > 255) {
		NL_SET_ERR_MSG(extack, "frames must be less than 256");
		return -EINVAL;
	}

	if (!ecoalesce->rx_max_coalesced_frames ||
	    !ecoalesce->tx_max_coalesced_frames) {
		NL_SET_ERR_MSG(extack, "frames must be non-zero");
		return -EINVAL;
	}

	if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
	     !ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->tx_max_coalesced_frames > 1 &&
	     !ecoalesce->tx_coalesce_usecs)) {
		NL_SET_ERR_MSG(extack,
			       "usecs must be non-zero when frames is greater than one");
		return -EINVAL;
	}

	if (new_dim && !old_dim) {
		cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
				     ecoalesce->rx_coalesce_usecs);
	} else if (!new_dim) {
		if (old_dim) {
			WRITE_ONCE(lp->rx_dim_enabled, false);
			napi_synchronize(&lp->napi_rx);
			flush_work(&lp->rx_dim.work);
		}

		cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
				     ecoalesce->rx_coalesce_usecs);
	} else {
		/* Dummy value for count just to calculate timer */
		cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
		mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
	}

	axienet_update_coalesce_rx(lp, cr, mask);
	if (new_dim && !old_dim)
		WRITE_ONCE(lp->rx_dim_enabled, true);

	cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
			     ecoalesce->tx_coalesce_usecs);
	axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
					       struct ethtool_stats *stats,
					       u64 *data)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		data[0] = axienet_stat(lp, STAT_RX_BYTES);
		data[1] = axienet_stat(lp, STAT_TX_BYTES);
		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
		data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
		data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
		data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
		data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
		data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
	"Received bytes",
	"Transmitted bytes",
	"RX Good VLAN Tagged Frames",
	"TX Good VLAN Tagged Frames",
	"TX Good PFC Frames",
	"RX Good PFC Frames",
	"User Defined Counter 0",
	"User Defined Counter 1",
	"User Defined Counter 2",
};

static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, axienet_ethtool_stats_strings,
		       sizeof(axienet_ethtool_stats_strings));
		break;
	}
}

static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
{
	struct axienet_local *lp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (lp->features & XAE_FEATURE_STATS)
			return ARRAY_SIZE(axienet_ethtool_stats_strings);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

static void
axienet_ethtools_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		pause_stats->tx_pause_frames =
			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
		pause_stats->rx_pause_frames =
			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static void
axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		mac_stats->FramesTransmittedOK =
			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
		mac_stats->SingleCollisionFrames =
			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
		mac_stats->MultipleCollisionFrames =
			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
		mac_stats->FramesReceivedOK =
			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
		mac_stats->FrameCheckSequenceErrors =
			axienet_stat(lp, STAT_RX_FCS_ERRORS);
		mac_stats->AlignmentErrors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		mac_stats->FramesWithDeferredXmissions =
			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
		mac_stats->LateCollisions =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		mac_stats->FramesAbortedDueToXSColls =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		mac_stats->MulticastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
		mac_stats->FramesWithExcessiveDeferral =
			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
		mac_stats->MulticastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
		mac_stats->InRangeLengthErrors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static void
axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
				   struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		ctrl_stats->MACControlFramesTransmitted =
			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
		ctrl_stats->MACControlFramesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
		ctrl_stats->UnsupportedOpcodesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
	{   64,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519, 16384 },
	{ },
};

static void
axienet_ethtool_get_rmon_stats(struct net_device *dev,
			       struct ethtool_rmon_stats *rmon_stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		rmon_stats->undersize_pkts =
			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
		rmon_stats->oversize_pkts =
			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
		rmon_stats->fragments =
			axienet_stat(lp, STAT_FRAGMENT_FRAMES);

		rmon_stats->hist[0] =
			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
		rmon_stats->hist[1] =
			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
		rmon_stats->hist[2] =
			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
		rmon_stats->hist[3] =
			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
		rmon_stats->hist[4] =
			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
		rmon_stats->hist[5] =
			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist[6] =
			rmon_stats->oversize_pkts;

		rmon_stats->hist_tx[0] =
			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
		rmon_stats->hist_tx[1] =
			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
		rmon_stats->hist_tx[2] =
			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
		rmon_stats->hist_tx[3] =
			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
		rmon_stats->hist_tx[4] =
			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
		rmon_stats->hist_tx[5] =
			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist_tx[6] =
			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));

	*ranges = axienet_rmon_ranges;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
	.get_strings = axienet_ethtools_get_strings,
	.get_sset_count = axienet_ethtools_get_sset_count,
	.get_pause_stats = axienet_ethtools_get_pause_stats,
	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
};

static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  unsigned int neg_mode,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
					XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
					 neg_mode);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

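/* The DMA error worker below is the recovery path for the legacy in-driver
 * DMA rings (it is not used with the dmaengine path). It quiesces NAPI and
 * the MAC, stops and restarts the DMA engine, clears every Tx/Rx buffer
 * descriptor, and finally re-applies the MAC options, bailing out early if
 * the interface is already being stopped.
 */
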
/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work:	pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	/* Don't bother if we are going to stop anyway */
	if (READ_ONCE(lp->stopping))
		return;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
	axienet_setoptions(ndev, lp->options);
}

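/* A rough sketch of the device-tree properties axienet_probe() looks at
 * below (node name, values and phandle labels are illustrative only; the
 * dt-bindings document is authoritative):
 *
 *	ethernet@40c00000 {
 *		compatible = ...;			// one of axienet_of_match
 *		interrupts = <...>;			// optional Ethernet core IRQ
 *		phy-mode = "sgmii";			// or xlnx,phy-type (deprecated)
 *		xlnx,rxmem = <0x8000>;			// Rx memory, gates the max MTU
 *		xlnx,txcsum = <2>;			// 1 = partial, 2 = full Tx csum
 *		xlnx,rxcsum = <2>;			// 1 = partial, 2 = full Rx csum
 *		axistream-connected = <&axi_dma>;	// legacy DMA regs + IRQs
 *		// or: "dmas"/"dma-names" when the dmaengine path is used
 *		pcs-handle = <&pcs_phy>;		// for SGMII/1000BaseX
 *	};
 */
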
/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev:	Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	mutex_init(&lp->stats_lock);
	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
		lp->features |= XAE_FEATURE_STATS;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum any contiguous range */
			ndev->features |= NETIF_F_HW_CSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		}
	}
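
	/* Note that the "xlnx,txcsum"/"xlnx,rxcsum" values above describe how
	 * the IP was synthesised; at this point they only select which
	 * NETIF_F_* offload flags are advertised to the stack.
	 */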
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

	if (!of_property_present(pdev->dev.of_node, "dmas")) {
		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);

		if (np) {
			struct resource dmares;

			ret = of_address_to_resource(np, 0, &dmares);
			if (ret) {
				dev_err(&pdev->dev,
					"unable to get DMA resource\n");
				of_node_put(np);
				goto cleanup_clk;
			}
			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
							     &dmares);
			lp->rx_irq = irq_of_parse_and_map(np, 1);
			lp->tx_irq = irq_of_parse_and_map(np, 0);
			of_node_put(np);
			lp->eth_irq = platform_get_irq_optional(pdev, 0);
		} else {
			/* Check for these resources directly on the Ethernet node. */
			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
			lp->rx_irq = platform_get_irq(pdev, 1);
			lp->tx_irq = platform_get_irq(pdev, 0);
			lp->eth_irq = platform_get_irq_optional(pdev, 2);
		}
		if (IS_ERR(lp->dma_regs)) {
			dev_err(&pdev->dev, "could not map DMA regs\n");
			ret = PTR_ERR(lp->dma_regs);
			goto cleanup_clk;
		}
		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
			dev_err(&pdev->dev, "could not determine irqs\n");
			ret = -ENOMEM;
			goto cleanup_clk;
		}

		/* Reset core now that clocks are enabled, prior to accessing MDIO */
		ret = __axienet_device_reset(lp);
		if (ret)
			goto cleanup_clk;

		/* Autodetect the need for 64-bit DMA pointers.
		 * When the IP is configured for a bus width bigger than 32 bits,
		 * writing the MSB registers is mandatory, even if they are all 0.
		 * We can detect this case by writing all 1's to one such register
		 * and see if that sticks: when the IP is configured for 32 bits
		 * only, those registers are RES0.
		 * Those MSB registers were introduced in IP v7.1, which we check first.
		 */
		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

			iowrite32(0x0, desc);
			if (ioread32(desc) == 0) {	/* sanity check */
				iowrite32(0xffffffff, desc);
				if (ioread32(desc) > 0) {
					lp->features |= XAE_FEATURE_DMA_64BIT;
					addr_width = 64;
					dev_info(&pdev->dev,
						 "autodetected 64-bit DMA range\n");
				}
				iowrite32(0x0, desc);
			}
		}
		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
			ret = -EINVAL;
			goto cleanup_clk;
		}

		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto cleanup_clk;
		}
		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
	} else {
		struct xilinx_vdma_config cfg;
		struct dma_chan *tx_chan;

		lp->eth_irq = platform_get_irq_optional(pdev, 0);
		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
			ret = lp->eth_irq;
			goto cleanup_clk;
		}
		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
		if (IS_ERR(tx_chan)) {
			ret = PTR_ERR(tx_chan);
			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
			goto cleanup_clk;
		}

		cfg.reset = 1;
		/* As name says VDMA but it has support for DMA channel reset */
		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
		if (ret < 0) {
			dev_err(&pdev->dev, "Reset channel failed\n");
			dma_release_channel(tx_chan);
			goto cleanup_clk;
		}

		dma_release_channel(tx_chan);
		lp->use_dmaengine = 1;
	}

	if (lp->use_dmaengine)
		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
	else
		ndev->netdev_ops = &axienet_netdev_ops;
	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	spin_lock_init(&lp->rx_cr_lock);
	spin_lock_init(&lp->tx_cr_lock);
	INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
	lp->rx_dim_enabled = true;
	lp->rx_dim.profile_ix = 1;
	lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
					XAXIDMA_DFT_RX_USEC);
	lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
					XAXIDMA_DFT_TX_USEC);

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.neg_mode = true;
		lp->pcs.poll = true;
	}

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static void axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	rtnl_lock();
	axienet_stop(ndev);
	rtnl_unlock();

	return 0;
}

static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	axienet_open(ndev);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
				axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.pm = &axienet_pm_ops,
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");