// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/circ_buf.h>
#include <net/netdev_queues.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
#define DMA_NUM_APP_WORDS		5
#define LEN_APP				4
#define RX_BUF_NUM_DEFAULT		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

static void axienet_rx_submit_desc(struct net_device *ndev);

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
{
	return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
}

static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
{
	return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
}

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
 * driver stop api is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
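 * Both rings are circular: each descriptor's next pointer is programmed to
 * point at the following descriptor, and the last descriptor wraps back to
 * the first.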
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	reg = axienet_ior(lp, XAE_FMI_OFFSET);
	reg &= ~XAE_FMI_PM_MASK;
	if (ndev->flags & IFF_PROMISC)
		reg |= XAE_FMI_PM_MASK;
	else
		reg &= ~XAE_FMI_PM_MASK;
	axienet_iow(lp, XAE_FMI_OFFSET, reg);

	if (ndev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		reg &= 0xFFFFFF00;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
		axienet_iow(lp, XAE_AF1_OFFSET, 0);
		axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
		axienet_iow(lp, XAE_AM1_OFFSET, 0);
		axienet_iow(lp, XAE_FFE_OFFSET, 1);
		i = 1;
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg &= 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
			axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg &= 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
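 * The requested options are also accumulated in lp->options so that a later
 * device reset can reapply the same configuration.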
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
{
	u32 counter;

	if (lp->reset_in_progress)
		return lp->hw_stat_base[stat];

	counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
	return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
}

static void axienet_stats_update(struct axienet_local *lp, bool reset)
{
	enum temac_stat stat;

	write_seqcount_begin(&lp->hw_stats_seqcount);
	lp->reset_in_progress = reset;
	for (stat = 0; stat < STAT_COUNT; stat++) {
		u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

		lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
		lp->hw_last_counter[stat] = counter;
	}
	write_seqcount_end(&lp->hw_stats_seqcount);
}

static void axienet_refresh_stats(struct work_struct *work)
{
	struct axienet_local *lp = container_of(work, struct axienet_local,
						stats_work.work);

	mutex_lock(&lp->stats_lock);
	axienet_stats_update(lp, false);
	mutex_unlock(&lp->stats_lock);

	/* Just less than 2^32 bytes at 2.5 GBit/s */
	schedule_delayed_work(&lp->stats_work, 13 * HZ);
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Save statistics counters in case they will be reset */
	mutex_lock(&lp->stats_lock);
	if (lp->features & XAE_FEATURE_STATS)
		axienet_stats_update(lp, true);

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		goto out;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		goto out;
	}

	/* Update statistics counters with new values */
	if (lp->features & XAE_FEATURE_STATS) {
		enum temac_stat stat;

		write_seqcount_begin(&lp->hw_stats_seqcount);
		lp->reset_in_progress = false;
		for (stat = 0; stat < STAT_COUNT; stat++) {
			u32 counter =
				axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);

			lp->hw_stat_base[stat] +=
				lp->hw_last_counter[stat] - counter;
			lp->hw_last_counter[stat] = counter;
		}
		write_seqcount_end(&lp->hw_stats_seqcount);
	}

out:
	mutex_unlock(&lp->stats_lock);
	return ret;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
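 * After the reset, the MAC address, multicast filter and frame size options
 * are reprogrammed, since resetting the core clears its configuration.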
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (!lp->use_dmaengine) {
		ret = __axienet_device_reset(lp);
		if (ret)
			return ret;

		ret = axienet_dma_bd_init(ndev);
		if (ret) {
			netdev_err(ndev, "%s: descriptor allocation failed\n",
				   __func__);
			return ret;
		}
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_dma_tx_cb - DMA engine callback for TX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for TX channel to notify
 * that the transmit is done.
 */
static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	struct axienet_local *lp = data;
	struct netdev_queue *txq;
	int len;

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
	len = skbuf_dma->skb->len;
	txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
	u64_stats_update_begin(&lp->tx_stat_sync);
	u64_stats_add(&lp->tx_bytes, len);
	u64_stats_add(&lp->tx_packets, 1);
	u64_stats_update_end(&lp->tx_stat_sync);
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
	dev_consume_skb_any(skbuf_dma->skb);
	netif_txq_completed_wake(txq, 1, len,
				 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
				 2 * MAX_SKB_FRAGS);
}

/**
 * axienet_start_xmit_dmaengine - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success, or for errors unrelated to ring space.
 *	   NETDEV_TX_BUSY when there is no free slot in the TX skb ring buffer.
 *
 * This function is invoked to initiate transmission. It maps the skb into a
 * scatterlist, registers the dma callback and submits the dma transaction.
 * Additionally, if checksum offloading is supported, it populates the AXI
 * Stream Control fields with appropriate values.
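 * The queue is stopped early when the ring may not have room for another
 * worst-case (maximally fragmented) frame.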
 */
static netdev_tx_t
axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_tx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
	struct skbuf_dma_descriptor *skbuf_dma;
	struct dma_device *dma_dev;
	struct netdev_queue *txq;
	u32 csum_start_off;
	u32 csum_index_off;
	int sg_len;
	int ret;

	dma_dev = lp->tx_chan->device;
	sg_len = skb_shinfo(skb)->nr_frags + 1;
	if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
	if (!skbuf_dma)
		goto xmit_error_drop_skb;

	lp->tx_ring_head++;
	sg_init_table(skbuf_dma->sgl, sg_len);
	ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
	if (ret < 0)
		goto xmit_error_drop_skb;

	ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
	if (!ret)
		goto xmit_error_drop_skb;

	/* Fill up app fields for checksum */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			app_metadata[0] |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			app_metadata[0] |= 1;
			app_metadata[1] = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
	}

	dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
						    sg_len, DMA_MEM_TO_DEV,
						    DMA_PREP_INTERRUPT, (void *)app_metadata);
	if (!dma_tx_desc)
		goto xmit_error_unmap_sg;

	skbuf_dma->skb = skb;
	skbuf_dma->sg_len = sg_len;
	dma_tx_desc->callback_param = lp;
	dma_tx_desc->callback_result = axienet_dma_tx_cb;
	dmaengine_submit(dma_tx_desc);
	dma_async_issue_pending(lp->tx_chan);
	txq = skb_get_tx_queue(lp->ndev, skb);
	netdev_tx_sent_queue(txq, skb->len);
	netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
			     MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);

	return NETDEV_TX_OK;

xmit_error_unmap_sg:
	dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
xmit_error_drop_skb:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
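 * When fewer packets than the budget were processed, NAPI polling is
 * completed and the TX completion interrupts are re-enabled.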
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_dma_rx_cb - DMA engine callback for RX channel.
 * @data: Pointer to the axienet_local structure.
 * @result: error reporting through dmaengine_result.
 * This function is called by dmaengine driver for RX channel to notify
 * that the packet is received.
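 * It unmaps the receive buffer, passes the skb up the stack, updates the
 * RX statistics and submits a fresh RX descriptor to keep the channel fed.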
 */
static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
{
	struct skbuf_dma_descriptor *skbuf_dma;
	size_t meta_len, meta_max_len, rx_len;
	struct axienet_local *lp = data;
	struct sk_buff *skb;
	u32 *app_metadata;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
	skb = skbuf_dma->skb;
	app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
						       &meta_max_len);
	dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
			 DMA_FROM_DEVICE);
	/* TODO: Derive app word index programmatically */
	rx_len = (app_metadata[LEN_APP] & 0xFFFF);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, lp->ndev);
	skb->ip_summed = CHECKSUM_NONE;

	__netif_rx(skb);
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, 1);
	u64_stats_add(&lp->rx_bytes, rx_len);
	u64_stats_update_end(&lp->rx_stat_sync);
	axienet_rx_submit_desc(lp->ndev);
	dma_async_issue_pending(lp->rx_chan);
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
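		 * The cached value in lp->tx_dma_cr still has the interrupt
		 * enables set, so the poll routine can restore them when it
		 * completes.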
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_tx)) {
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_rx)) {
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_dropped++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
 * then add the callback information and submit the descriptor.
 *
 * @ndev: net_device pointer
 *
 */
static void axienet_rx_submit_desc(struct net_device *ndev)
{
	struct dma_async_tx_descriptor *dma_rx_desc = NULL;
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	struct sk_buff *skb;
	dma_addr_t addr;

	skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
	if (!skbuf_dma)
		return;

	lp->rx_ring_head++;
	skb = netdev_alloc_skb(ndev, lp->max_frm_size);
	if (!skb)
		return;

	sg_init_table(skbuf_dma->sgl, 1);
	addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, addr))) {
		if (net_ratelimit())
			netdev_err(ndev, "DMA mapping error\n");
		goto rx_submit_err_free_skb;
	}
	sg_dma_address(skbuf_dma->sgl) = addr;
	sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
	dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
					      1, DMA_DEV_TO_MEM,
					      DMA_PREP_INTERRUPT);
	if (!dma_rx_desc)
		goto rx_submit_err_unmap_skb;

	skbuf_dma->skb = skb;
	skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
	skbuf_dma->desc = dma_rx_desc;
	dma_rx_desc->callback_param = lp;
	dma_rx_desc->callback_result = axienet_dma_rx_cb;
	dmaengine_submit(dma_rx_desc);

	return;

rx_submit_err_unmap_skb:
	dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
rx_submit_err_free_skb:
	dev_kfree_skb(skb);
}

/**
 * axienet_init_dmaengine - init the dmaengine code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dmaengine initialization code.
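 * It requests the TX and RX DMA channels, allocates the skb rings used to
 * track in-flight buffers, and pre-submits RX descriptors to the RX channel.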
 */
static int axienet_init_dmaengine(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct skbuf_dma_descriptor *skbuf_dma;
	int i, ret;

	lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
	if (IS_ERR(lp->tx_chan)) {
		dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
		return PTR_ERR(lp->tx_chan);
	}

	lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
	if (IS_ERR(lp->rx_chan)) {
		ret = PTR_ERR(lp->rx_chan);
		dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
		goto err_dma_release_tx;
	}

	lp->tx_ring_tail = 0;
	lp->tx_ring_head = 0;
	lp->rx_ring_tail = 0;
	lp->rx_ring_head = 0;
	lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
				  GFP_KERNEL);
	if (!lp->tx_skb_ring) {
		ret = -ENOMEM;
		goto err_dma_release_rx;
	}
	for (i = 0; i < TX_BD_NUM_MAX; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_tx_skb_ring;
		}
		lp->tx_skb_ring[i] = skbuf_dma;
	}

	lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
				  GFP_KERNEL);
	if (!lp->rx_skb_ring) {
		ret = -ENOMEM;
		goto err_free_tx_skb_ring;
	}
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
		skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
		if (!skbuf_dma) {
			ret = -ENOMEM;
			goto err_free_rx_skb_ring;
		}
		lp->rx_skb_ring[i] = skbuf_dma;
	}
	/* TODO: Instead of BD_NUM_DEFAULT use runtime support */
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		axienet_rx_submit_desc(ndev);
	dma_async_issue_pending(lp->rx_chan);

	return 0;

err_free_rx_skb_ring:
	for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
		kfree(lp->rx_skb_ring[i]);
	kfree(lp->rx_skb_ring);
err_free_tx_skb_ring:
	for (i = 0; i < TX_BD_NUM_MAX; i++)
		kfree(lp->tx_skb_ring[i]);
	kfree(lp->tx_skb_ring);
err_dma_release_rx:
	dma_release_channel(lp->rx_chan);
err_dma_release_tx:
	dma_release_channel(lp->tx_chan);
	return ret;
}

/**
 * axienet_init_legacy_dma - init the dma legacy code.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the dma initialization code. It also registers the interrupt
 * service routines and enables the interrupt lines.
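 * The TX and RX DMA interrupts are requested as shared IRQs, and NAPI is
 * enabled for both directions before the handlers are installed.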
 *
 */
static int axienet_init_legacy_dma(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also registers the interrupt service routines and enables the interrupt
 * lines. Axi Ethernet core is reset through Axi DMA core. Buffer descriptors
 * are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Start the statistics refresh work */
	schedule_delayed_work(&lp->stats_work, 0);

	if (lp->use_dmaengine) {
		/* Enable interrupts for Axi Ethernet core (if defined) */
		if (lp->eth_irq > 0) {
			ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
					  ndev->name, ndev);
			if (ret)
				goto err_phy;
		}

		ret = axienet_init_dmaengine(ndev);
		if (ret < 0)
			goto err_free_eth_irq;
	} else {
		ret = axienet_init_legacy_dma(ndev);
		if (ret)
			goto err_phy;
	}

	return 0;

err_free_eth_irq:
	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
err_phy:
	cancel_delayed_work_sync(&lp->stats_work);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
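 * When the dmaengine path is in use, the DMA channels are terminated and
 * released instead of stopping the legacy DMA engine.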
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	int i;

	if (!lp->use_dmaengine) {
		WRITE_ONCE(lp->stopping, true);
		flush_work(&lp->dma_err_task);

		napi_disable(&lp->napi_tx);
		napi_disable(&lp->napi_rx);
	}

	cancel_delayed_work_sync(&lp->stats_work);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	if (!lp->use_dmaengine) {
		axienet_dma_stop(lp);
		cancel_work_sync(&lp->dma_err_task);
		free_irq(lp->tx_irq, ndev);
		free_irq(lp->rx_irq, ndev);
		axienet_dma_bd_release(ndev);
	} else {
		dmaengine_terminate_sync(lp->tx_chan);
		dmaengine_synchronize(lp->tx_chan);
		dmaengine_terminate_sync(lp->rx_chan);
		dmaengine_synchronize(lp->rx_chan);

		for (i = 0; i < TX_BD_NUM_MAX; i++)
			kfree(lp->tx_skb_ring[i]);
		kfree(lp->tx_skb_ring);
		for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
			kfree(lp->rx_skb_ring[i]);
		kfree(lp->rx_skb_ring);

		dma_release_channel(lp->rx_chan);
		dma_release_channel(lp->tx_chan);
	}

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the device is running, -EINVAL if the new
 * MTU does not fit in the configured receive buffer memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
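 * It is only used by netpoll (CONFIG_NET_POLL_CONTROLLER) and is not part of
 * the normal datapath.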
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		stats->rx_length_errors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
		stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
		stats->rx_frame_errors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
				   axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
				   stats->rx_length_errors +
				   stats->rx_crc_errors +
				   stats->rx_frame_errors;
		stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);

		stats->tx_aborted_errors =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		stats->tx_fifo_errors =
			axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
		stats->tx_window_errors =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
				   stats->tx_aborted_errors +
				   stats->tx_fifo_errors +
				   stats->tx_window_errors;
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

static const struct net_device_ops axienet_netdev_dmaengine_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit_dmaengine,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
Ethernet driver information. 1833 * @ndev: Pointer to net_device structure 1834 * @ed: Pointer to ethtool_drvinfo structure 1835 * 1836 * This implements ethtool command for getting the driver information. 1837 * Issue "ethtool -i ethX" under linux prompt to execute this function. 1838 */ 1839 static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1840 struct ethtool_drvinfo *ed) 1841 { 1842 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); 1843 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); 1844 } 1845 1846 /** 1847 * axienet_ethtools_get_regs_len - Get the total regs length present in the 1848 * AxiEthernet core. 1849 * @ndev: Pointer to net_device structure 1850 * 1851 * This implements ethtool command for getting the total register length 1852 * information. 1853 * 1854 * Return: the total regs length 1855 */ 1856 static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1857 { 1858 return sizeof(u32) * AXIENET_REGS_N; 1859 } 1860 1861 /** 1862 * axienet_ethtools_get_regs - Dump the contents of all registers present 1863 * in AxiEthernet core. 1864 * @ndev: Pointer to net_device structure 1865 * @regs: Pointer to ethtool_regs structure 1866 * @ret: Void pointer used to return the contents of the registers. 1867 * 1868 * This implements ethtool command for getting the Axi Ethernet register dump. 1869 * Issue "ethtool -d ethX" to execute this function. 1870 */ 1871 static void axienet_ethtools_get_regs(struct net_device *ndev, 1872 struct ethtool_regs *regs, void *ret) 1873 { 1874 u32 *data = (u32 *)ret; 1875 size_t len = sizeof(u32) * AXIENET_REGS_N; 1876 struct axienet_local *lp = netdev_priv(ndev); 1877 1878 regs->version = 0; 1879 regs->len = len; 1880 1881 memset(data, 0, len); 1882 data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1883 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1884 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1885 data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1886 data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1887 data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1888 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1889 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1890 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1891 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1892 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1893 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1894 data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1895 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1896 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1897 data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1898 data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1899 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1900 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1901 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1902 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1903 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1904 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1905 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1906 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1907 data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1908 data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1909 data[31] = axienet_ior(lp, XAE_AF1_OFFSET); 1910 if (!lp->use_dmaengine) { 1911 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1912 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 1913 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET); 1914 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET); 1915 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1916 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET); 1917 data[38] = 
axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET); 1918 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET); 1919 } 1920 } 1921 1922 static void 1923 axienet_ethtools_get_ringparam(struct net_device *ndev, 1924 struct ethtool_ringparam *ering, 1925 struct kernel_ethtool_ringparam *kernel_ering, 1926 struct netlink_ext_ack *extack) 1927 { 1928 struct axienet_local *lp = netdev_priv(ndev); 1929 1930 ering->rx_max_pending = RX_BD_NUM_MAX; 1931 ering->rx_mini_max_pending = 0; 1932 ering->rx_jumbo_max_pending = 0; 1933 ering->tx_max_pending = TX_BD_NUM_MAX; 1934 ering->rx_pending = lp->rx_bd_num; 1935 ering->rx_mini_pending = 0; 1936 ering->rx_jumbo_pending = 0; 1937 ering->tx_pending = lp->tx_bd_num; 1938 } 1939 1940 static int 1941 axienet_ethtools_set_ringparam(struct net_device *ndev, 1942 struct ethtool_ringparam *ering, 1943 struct kernel_ethtool_ringparam *kernel_ering, 1944 struct netlink_ext_ack *extack) 1945 { 1946 struct axienet_local *lp = netdev_priv(ndev); 1947 1948 if (ering->rx_pending > RX_BD_NUM_MAX || 1949 ering->rx_mini_pending || 1950 ering->rx_jumbo_pending || 1951 ering->tx_pending < TX_BD_NUM_MIN || 1952 ering->tx_pending > TX_BD_NUM_MAX) 1953 return -EINVAL; 1954 1955 if (netif_running(ndev)) 1956 return -EBUSY; 1957 1958 lp->rx_bd_num = ering->rx_pending; 1959 lp->tx_bd_num = ering->tx_pending; 1960 return 0; 1961 } 1962 1963 /** 1964 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1965 * Tx and Rx paths. 1966 * @ndev: Pointer to net_device structure 1967 * @epauseparm: Pointer to ethtool_pauseparam structure. 1968 * 1969 * This implements ethtool command for getting axi ethernet pause frame 1970 * setting. Issue "ethtool -a ethX" to execute this function. 1971 */ 1972 static void 1973 axienet_ethtools_get_pauseparam(struct net_device *ndev, 1974 struct ethtool_pauseparam *epauseparm) 1975 { 1976 struct axienet_local *lp = netdev_priv(ndev); 1977 1978 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); 1979 } 1980 1981 /** 1982 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1983 * settings. 1984 * @ndev: Pointer to net_device structure 1985 * @epauseparm:Pointer to ethtool_pauseparam structure 1986 * 1987 * This implements ethtool command for enabling flow control on Rx and Tx 1988 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1989 * function. 1990 * 1991 * Return: 0 on success, -EFAULT if device is running 1992 */ 1993 static int 1994 axienet_ethtools_set_pauseparam(struct net_device *ndev, 1995 struct ethtool_pauseparam *epauseparm) 1996 { 1997 struct axienet_local *lp = netdev_priv(ndev); 1998 1999 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); 2000 } 2001 2002 /** 2003 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 2004 * @ndev: Pointer to net_device structure 2005 * @ecoalesce: Pointer to ethtool_coalesce structure 2006 * @kernel_coal: ethtool CQE mode setting structure 2007 * @extack: extack for reporting error messages 2008 * 2009 * This implements ethtool command for getting the DMA interrupt coalescing 2010 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 2011 * execute this function. 
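 *
 * The values reported here are the driver's cached settings
 * (coalesce_count_rx/tx and coalesce_usec_rx/tx in struct axienet_local),
 * which axienet_probe() initialises to the XAXIDMA_DFT_* defaults and which
 * axienet_ethtools_set_coalesce() below may later update.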
2012 *
2013 * Return: 0 always
2014 */
2015 static int
2016 axienet_ethtools_get_coalesce(struct net_device *ndev,
2017 struct ethtool_coalesce *ecoalesce,
2018 struct kernel_ethtool_coalesce *kernel_coal,
2019 struct netlink_ext_ack *extack)
2020 {
2021 struct axienet_local *lp = netdev_priv(ndev);
2022
2023 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2024 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2025 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2026 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2027 return 0;
2028 }
2029
2030 /**
2031 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2032 * @ndev: Pointer to net_device structure
2033 * @ecoalesce: Pointer to ethtool_coalesce structure
2034 * @kernel_coal: ethtool CQE mode setting structure
2035 * @extack: extack for reporting error messages
2036 *
2037 * This implements ethtool command for setting the DMA interrupt coalescing
2038 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under the
2039 * Linux prompt to execute this function.
2040 *
2041 * Return: 0 on success, non-zero error value on failure.
2042 */
2043 static int
2044 axienet_ethtools_set_coalesce(struct net_device *ndev,
2045 struct ethtool_coalesce *ecoalesce,
2046 struct kernel_ethtool_coalesce *kernel_coal,
2047 struct netlink_ext_ack *extack)
2048 {
2049 struct axienet_local *lp = netdev_priv(ndev);
2050
2051 if (netif_running(ndev)) {
2052 NL_SET_ERR_MSG(extack,
2053 "Please stop netif before applying configuration");
2054 return -EBUSY;
2055 }
2056
2057 if (ecoalesce->rx_max_coalesced_frames)
2058 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2059 if (ecoalesce->rx_coalesce_usecs)
2060 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2061 if (ecoalesce->tx_max_coalesced_frames)
2062 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2063 if (ecoalesce->tx_coalesce_usecs)
2064 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2065
2066 return 0;
2067 }
2068
2069 static int
2070 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2071 struct ethtool_link_ksettings *cmd)
2072 {
2073 struct axienet_local *lp = netdev_priv(ndev);
2074
2075 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2076 }
2077
2078 static int
2079 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2080 const struct ethtool_link_ksettings *cmd)
2081 {
2082 struct axienet_local *lp = netdev_priv(ndev);
2083
2084 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2085 }
2086
2087 static int axienet_ethtools_nway_reset(struct net_device *dev)
2088 {
2089 struct axienet_local *lp = netdev_priv(dev);
2090
2091 return phylink_ethtool_nway_reset(lp->phylink);
2092 }
2093
2094 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2095 struct ethtool_stats *stats,
2096 u64 *data)
2097 {
2098 struct axienet_local *lp = netdev_priv(dev);
2099 unsigned int start;
2100
2101 do {
2102 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2103 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2104 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2105 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2106 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2107 data[4] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2108 data[5] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2109 data[6] = axienet_stat(lp, STAT_USER_DEFINED0);
2110 data[7] = axienet_stat(lp, STAT_USER_DEFINED1);
2111 data[8] = axienet_stat(lp, STAT_USER_DEFINED2);
2112 } while (read_seqcount_retry(&lp->hw_stats_seqcount,
start)); 2113 } 2114 2115 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = { 2116 "Received bytes", 2117 "Transmitted bytes", 2118 "RX Good VLAN Tagged Frames", 2119 "TX Good VLAN Tagged Frames", 2120 "TX Good PFC Frames", 2121 "RX Good PFC Frames", 2122 "User Defined Counter 0", 2123 "User Defined Counter 1", 2124 "User Defined Counter 2", 2125 }; 2126 2127 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data) 2128 { 2129 switch (stringset) { 2130 case ETH_SS_STATS: 2131 memcpy(data, axienet_ethtool_stats_strings, 2132 sizeof(axienet_ethtool_stats_strings)); 2133 break; 2134 } 2135 } 2136 2137 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset) 2138 { 2139 struct axienet_local *lp = netdev_priv(dev); 2140 2141 switch (sset) { 2142 case ETH_SS_STATS: 2143 if (lp->features & XAE_FEATURE_STATS) 2144 return ARRAY_SIZE(axienet_ethtool_stats_strings); 2145 fallthrough; 2146 default: 2147 return -EOPNOTSUPP; 2148 } 2149 } 2150 2151 static void 2152 axienet_ethtools_get_pause_stats(struct net_device *dev, 2153 struct ethtool_pause_stats *pause_stats) 2154 { 2155 struct axienet_local *lp = netdev_priv(dev); 2156 unsigned int start; 2157 2158 if (!(lp->features & XAE_FEATURE_STATS)) 2159 return; 2160 2161 do { 2162 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2163 pause_stats->tx_pause_frames = 2164 axienet_stat(lp, STAT_TX_PAUSE_FRAMES); 2165 pause_stats->rx_pause_frames = 2166 axienet_stat(lp, STAT_RX_PAUSE_FRAMES); 2167 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2168 } 2169 2170 static void 2171 axienet_ethtool_get_eth_mac_stats(struct net_device *dev, 2172 struct ethtool_eth_mac_stats *mac_stats) 2173 { 2174 struct axienet_local *lp = netdev_priv(dev); 2175 unsigned int start; 2176 2177 if (!(lp->features & XAE_FEATURE_STATS)) 2178 return; 2179 2180 do { 2181 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2182 mac_stats->FramesTransmittedOK = 2183 axienet_stat(lp, STAT_TX_GOOD_FRAMES); 2184 mac_stats->SingleCollisionFrames = 2185 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES); 2186 mac_stats->MultipleCollisionFrames = 2187 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES); 2188 mac_stats->FramesReceivedOK = 2189 axienet_stat(lp, STAT_RX_GOOD_FRAMES); 2190 mac_stats->FrameCheckSequenceErrors = 2191 axienet_stat(lp, STAT_RX_FCS_ERRORS); 2192 mac_stats->AlignmentErrors = 2193 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS); 2194 mac_stats->FramesWithDeferredXmissions = 2195 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES); 2196 mac_stats->LateCollisions = 2197 axienet_stat(lp, STAT_TX_LATE_COLLISIONS); 2198 mac_stats->FramesAbortedDueToXSColls = 2199 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS); 2200 mac_stats->MulticastFramesXmittedOK = 2201 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES); 2202 mac_stats->BroadcastFramesXmittedOK = 2203 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES); 2204 mac_stats->FramesWithExcessiveDeferral = 2205 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL); 2206 mac_stats->MulticastFramesReceivedOK = 2207 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); 2208 mac_stats->BroadcastFramesReceivedOK = 2209 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES); 2210 mac_stats->InRangeLengthErrors = 2211 axienet_stat(lp, STAT_RX_LENGTH_ERRORS); 2212 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2213 } 2214 2215 static void 2216 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev, 2217 struct ethtool_eth_ctrl_stats *ctrl_stats) 2218 { 2219 struct axienet_local *lp = netdev_priv(dev); 
2220 unsigned int start; 2221 2222 if (!(lp->features & XAE_FEATURE_STATS)) 2223 return; 2224 2225 do { 2226 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2227 ctrl_stats->MACControlFramesTransmitted = 2228 axienet_stat(lp, STAT_TX_CONTROL_FRAMES); 2229 ctrl_stats->MACControlFramesReceived = 2230 axienet_stat(lp, STAT_RX_CONTROL_FRAMES); 2231 ctrl_stats->UnsupportedOpcodesReceived = 2232 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS); 2233 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2234 } 2235 2236 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = { 2237 { 64, 64 }, 2238 { 65, 127 }, 2239 { 128, 255 }, 2240 { 256, 511 }, 2241 { 512, 1023 }, 2242 { 1024, 1518 }, 2243 { 1519, 16384 }, 2244 { }, 2245 }; 2246 2247 static void 2248 axienet_ethtool_get_rmon_stats(struct net_device *dev, 2249 struct ethtool_rmon_stats *rmon_stats, 2250 const struct ethtool_rmon_hist_range **ranges) 2251 { 2252 struct axienet_local *lp = netdev_priv(dev); 2253 unsigned int start; 2254 2255 if (!(lp->features & XAE_FEATURE_STATS)) 2256 return; 2257 2258 do { 2259 start = read_seqcount_begin(&lp->hw_stats_seqcount); 2260 rmon_stats->undersize_pkts = 2261 axienet_stat(lp, STAT_UNDERSIZE_FRAMES); 2262 rmon_stats->oversize_pkts = 2263 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES); 2264 rmon_stats->fragments = 2265 axienet_stat(lp, STAT_FRAGMENT_FRAMES); 2266 2267 rmon_stats->hist[0] = 2268 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES); 2269 rmon_stats->hist[1] = 2270 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES); 2271 rmon_stats->hist[2] = 2272 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES); 2273 rmon_stats->hist[3] = 2274 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES); 2275 rmon_stats->hist[4] = 2276 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES); 2277 rmon_stats->hist[5] = 2278 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES); 2279 rmon_stats->hist[6] = 2280 rmon_stats->oversize_pkts; 2281 2282 rmon_stats->hist_tx[0] = 2283 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES); 2284 rmon_stats->hist_tx[1] = 2285 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES); 2286 rmon_stats->hist_tx[2] = 2287 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES); 2288 rmon_stats->hist_tx[3] = 2289 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES); 2290 rmon_stats->hist_tx[4] = 2291 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES); 2292 rmon_stats->hist_tx[5] = 2293 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES); 2294 rmon_stats->hist_tx[6] = 2295 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES); 2296 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); 2297 2298 *ranges = axienet_rmon_ranges; 2299 } 2300 2301 static const struct ethtool_ops axienet_ethtool_ops = { 2302 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 2303 ETHTOOL_COALESCE_USECS, 2304 .get_drvinfo = axienet_ethtools_get_drvinfo, 2305 .get_regs_len = axienet_ethtools_get_regs_len, 2306 .get_regs = axienet_ethtools_get_regs, 2307 .get_link = ethtool_op_get_link, 2308 .get_ringparam = axienet_ethtools_get_ringparam, 2309 .set_ringparam = axienet_ethtools_set_ringparam, 2310 .get_pauseparam = axienet_ethtools_get_pauseparam, 2311 .set_pauseparam = axienet_ethtools_set_pauseparam, 2312 .get_coalesce = axienet_ethtools_get_coalesce, 2313 .set_coalesce = axienet_ethtools_set_coalesce, 2314 .get_link_ksettings = axienet_ethtools_get_link_ksettings, 2315 .set_link_ksettings = axienet_ethtools_set_link_ksettings, 2316 .nway_reset = axienet_ethtools_nway_reset, 2317 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats, 2318 .get_strings = 
axienet_ethtools_get_strings, 2319 .get_sset_count = axienet_ethtools_get_sset_count, 2320 .get_pause_stats = axienet_ethtools_get_pause_stats, 2321 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats, 2322 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats, 2323 .get_rmon_stats = axienet_ethtool_get_rmon_stats, 2324 }; 2325 2326 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs) 2327 { 2328 return container_of(pcs, struct axienet_local, pcs); 2329 } 2330 2331 static void axienet_pcs_get_state(struct phylink_pcs *pcs, 2332 struct phylink_link_state *state) 2333 { 2334 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2335 2336 phylink_mii_c22_pcs_get_state(pcs_phy, state); 2337 } 2338 2339 static void axienet_pcs_an_restart(struct phylink_pcs *pcs) 2340 { 2341 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2342 2343 phylink_mii_c22_pcs_an_restart(pcs_phy); 2344 } 2345 2346 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, 2347 phy_interface_t interface, 2348 const unsigned long *advertising, 2349 bool permit_pause_to_mac) 2350 { 2351 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; 2352 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; 2353 struct axienet_local *lp = netdev_priv(ndev); 2354 int ret; 2355 2356 if (lp->switch_x_sgmii) { 2357 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG, 2358 interface == PHY_INTERFACE_MODE_SGMII ? 2359 XLNX_MII_STD_SELECT_SGMII : 0); 2360 if (ret < 0) { 2361 netdev_warn(ndev, 2362 "Failed to switch PHY interface: %d\n", 2363 ret); 2364 return ret; 2365 } 2366 } 2367 2368 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising, 2369 neg_mode); 2370 if (ret < 0) 2371 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret); 2372 2373 return ret; 2374 } 2375 2376 static const struct phylink_pcs_ops axienet_pcs_ops = { 2377 .pcs_get_state = axienet_pcs_get_state, 2378 .pcs_config = axienet_pcs_config, 2379 .pcs_an_restart = axienet_pcs_an_restart, 2380 }; 2381 2382 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config, 2383 phy_interface_t interface) 2384 { 2385 struct net_device *ndev = to_net_dev(config->dev); 2386 struct axienet_local *lp = netdev_priv(ndev); 2387 2388 if (interface == PHY_INTERFACE_MODE_1000BASEX || 2389 interface == PHY_INTERFACE_MODE_SGMII) 2390 return &lp->pcs; 2391 2392 return NULL; 2393 } 2394 2395 static void axienet_mac_config(struct phylink_config *config, unsigned int mode, 2396 const struct phylink_link_state *state) 2397 { 2398 /* nothing meaningful to do */ 2399 } 2400 2401 static void axienet_mac_link_down(struct phylink_config *config, 2402 unsigned int mode, 2403 phy_interface_t interface) 2404 { 2405 /* nothing meaningful to do */ 2406 } 2407 2408 static void axienet_mac_link_up(struct phylink_config *config, 2409 struct phy_device *phy, 2410 unsigned int mode, phy_interface_t interface, 2411 int speed, int duplex, 2412 bool tx_pause, bool rx_pause) 2413 { 2414 struct net_device *ndev = to_net_dev(config->dev); 2415 struct axienet_local *lp = netdev_priv(ndev); 2416 u32 emmc_reg, fcc_reg; 2417 2418 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 2419 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 2420 2421 switch (speed) { 2422 case SPEED_1000: 2423 emmc_reg |= XAE_EMMC_LINKSPD_1000; 2424 break; 2425 case SPEED_100: 2426 emmc_reg |= XAE_EMMC_LINKSPD_100; 2427 break; 2428 case SPEED_10: 2429 emmc_reg |= XAE_EMMC_LINKSPD_10; 2430 break; 2431 default: 2432 dev_err(&ndev->dev, 
2433 "Speed other than 10, 100 or 1Gbps is not supported\n"); 2434 break; 2435 } 2436 2437 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 2438 2439 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); 2440 if (tx_pause) 2441 fcc_reg |= XAE_FCC_FCTX_MASK; 2442 else 2443 fcc_reg &= ~XAE_FCC_FCTX_MASK; 2444 if (rx_pause) 2445 fcc_reg |= XAE_FCC_FCRX_MASK; 2446 else 2447 fcc_reg &= ~XAE_FCC_FCRX_MASK; 2448 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg); 2449 } 2450 2451 static const struct phylink_mac_ops axienet_phylink_ops = { 2452 .mac_select_pcs = axienet_mac_select_pcs, 2453 .mac_config = axienet_mac_config, 2454 .mac_link_down = axienet_mac_link_down, 2455 .mac_link_up = axienet_mac_link_up, 2456 }; 2457 2458 /** 2459 * axienet_dma_err_handler - Work queue task for Axi DMA Error 2460 * @work: pointer to work_struct 2461 * 2462 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 2463 * Tx/Rx BDs. 2464 */ 2465 static void axienet_dma_err_handler(struct work_struct *work) 2466 { 2467 u32 i; 2468 u32 axienet_status; 2469 struct axidma_bd *cur_p; 2470 struct axienet_local *lp = container_of(work, struct axienet_local, 2471 dma_err_task); 2472 struct net_device *ndev = lp->ndev; 2473 2474 /* Don't bother if we are going to stop anyway */ 2475 if (READ_ONCE(lp->stopping)) 2476 return; 2477 2478 napi_disable(&lp->napi_tx); 2479 napi_disable(&lp->napi_rx); 2480 2481 axienet_setoptions(ndev, lp->options & 2482 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2483 2484 axienet_dma_stop(lp); 2485 2486 for (i = 0; i < lp->tx_bd_num; i++) { 2487 cur_p = &lp->tx_bd_v[i]; 2488 if (cur_p->cntrl) { 2489 dma_addr_t addr = desc_get_phys_addr(lp, cur_p); 2490 2491 dma_unmap_single(lp->dev, addr, 2492 (cur_p->cntrl & 2493 XAXIDMA_BD_CTRL_LENGTH_MASK), 2494 DMA_TO_DEVICE); 2495 } 2496 if (cur_p->skb) 2497 dev_kfree_skb_irq(cur_p->skb); 2498 cur_p->phys = 0; 2499 cur_p->phys_msb = 0; 2500 cur_p->cntrl = 0; 2501 cur_p->status = 0; 2502 cur_p->app0 = 0; 2503 cur_p->app1 = 0; 2504 cur_p->app2 = 0; 2505 cur_p->app3 = 0; 2506 cur_p->app4 = 0; 2507 cur_p->skb = NULL; 2508 } 2509 2510 for (i = 0; i < lp->rx_bd_num; i++) { 2511 cur_p = &lp->rx_bd_v[i]; 2512 cur_p->status = 0; 2513 cur_p->app0 = 0; 2514 cur_p->app1 = 0; 2515 cur_p->app2 = 0; 2516 cur_p->app3 = 0; 2517 cur_p->app4 = 0; 2518 } 2519 2520 lp->tx_bd_ci = 0; 2521 lp->tx_bd_tail = 0; 2522 lp->rx_bd_ci = 0; 2523 2524 axienet_dma_start(lp); 2525 2526 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 2527 axienet_status &= ~XAE_RCW1_RX_MASK; 2528 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 2529 2530 axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 2531 if (axienet_status & XAE_INT_RXRJECT_MASK) 2532 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 2533 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? 2534 XAE_INT_RECV_ERROR_MASK : 0); 2535 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 2536 2537 /* Sync default options with HW but leave receiver and 2538 * transmitter disabled. 2539 */ 2540 axienet_setoptions(ndev, lp->options & 2541 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 2542 axienet_set_mac_address(ndev, NULL); 2543 axienet_set_multicast_list(ndev); 2544 napi_enable(&lp->napi_rx); 2545 napi_enable(&lp->napi_tx); 2546 axienet_setoptions(ndev, lp->options); 2547 } 2548 2549 /** 2550 * axienet_probe - Axi Ethernet probe function. 2551 * @pdev: Pointer to platform device structure. 2552 * 2553 * Return: 0, on success 2554 * Non-zero error value on failure. 2555 * 2556 * This is the probe routine for Axi Ethernet driver. 
This is called before
2557 * any other driver routines are invoked. It allocates and sets up the
2558 * Ethernet device, parses the device tree to populate the fields of
2559 * axienet_local, and registers the Ethernet device.
2560 */
2561 static int axienet_probe(struct platform_device *pdev)
2562 {
2563 int ret;
2564 struct device_node *np;
2565 struct axienet_local *lp;
2566 struct net_device *ndev;
2567 struct resource *ethres;
2568 u8 mac_addr[ETH_ALEN];
2569 int addr_width = 32;
2570 u32 value;
2571
2572 ndev = alloc_etherdev(sizeof(*lp));
2573 if (!ndev)
2574 return -ENOMEM;
2575
2576 platform_set_drvdata(pdev, ndev);
2577
2578 SET_NETDEV_DEV(ndev, &pdev->dev);
2579 ndev->features = NETIF_F_SG;
2580 ndev->ethtool_ops = &axienet_ethtool_ops;
2581
2582 /* MTU range: 64 - 9000 */
2583 ndev->min_mtu = 64;
2584 ndev->max_mtu = XAE_JUMBO_MTU;
2585
2586 lp = netdev_priv(ndev);
2587 lp->ndev = ndev;
2588 lp->dev = &pdev->dev;
2589 lp->options = XAE_OPTION_DEFAULTS;
2590 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2591 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2592
2593 u64_stats_init(&lp->rx_stat_sync);
2594 u64_stats_init(&lp->tx_stat_sync);
2595
2596 mutex_init(&lp->stats_lock);
2597 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2598 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2599
2600 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2601 if (!lp->axi_clk) {
2602 /* For backward compatibility, if named AXI clock is not present,
2603 * treat the first clock specified as the AXI clock.
2604 */
2605 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2606 }
2607 if (IS_ERR(lp->axi_clk)) {
2608 ret = PTR_ERR(lp->axi_clk);
2609 goto free_netdev;
2610 }
2611 ret = clk_prepare_enable(lp->axi_clk);
2612 if (ret) {
2613 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2614 goto free_netdev;
2615 }
2616
2617 lp->misc_clks[0].id = "axis_clk";
2618 lp->misc_clks[1].id = "ref_clk";
2619 lp->misc_clks[2].id = "mgt_clk";
2620
2621 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2622 if (ret)
2623 goto cleanup_clk;
2624
2625 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2626 if (ret)
2627 goto cleanup_clk;
2628
2629 /* Map device registers */
2630 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2631 if (IS_ERR(lp->regs)) {
2632 ret = PTR_ERR(lp->regs);
2633 goto cleanup_clk;
2634 }
2635 lp->regs_start = ethres->start;
2636
2637 /* Setup checksum offload, but default to off if not specified */
2638 lp->features = 0;
2639
2640 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2641 lp->features |= XAE_FEATURE_STATS;
2642
2643 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2644 if (!ret) {
2645 switch (value) {
2646 case 1:
2647 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2648 /* Can checksum any contiguous range */
2649 ndev->features |= NETIF_F_HW_CSUM;
2650 break;
2651 case 2:
2652 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2653 /* Can checksum TCP/UDP over IPv4.
*/ 2654 ndev->features |= NETIF_F_IP_CSUM; 2655 break; 2656 } 2657 } 2658 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); 2659 if (!ret) { 2660 switch (value) { 2661 case 1: 2662 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 2663 ndev->features |= NETIF_F_RXCSUM; 2664 break; 2665 case 2: 2666 lp->features |= XAE_FEATURE_FULL_RX_CSUM; 2667 ndev->features |= NETIF_F_RXCSUM; 2668 break; 2669 } 2670 } 2671 /* For supporting jumbo frames, the Axi Ethernet hardware must have 2672 * a larger Rx/Tx Memory. Typically, the size must be large so that 2673 * we can enable jumbo option and start supporting jumbo frames. 2674 * Here we check for memory allocated for Rx/Tx in the hardware from 2675 * the device-tree and accordingly set flags. 2676 */ 2677 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); 2678 2679 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, 2680 "xlnx,switch-x-sgmii"); 2681 2682 /* Start with the proprietary, and broken phy_type */ 2683 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); 2684 if (!ret) { 2685 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); 2686 switch (value) { 2687 case XAE_PHY_TYPE_MII: 2688 lp->phy_mode = PHY_INTERFACE_MODE_MII; 2689 break; 2690 case XAE_PHY_TYPE_GMII: 2691 lp->phy_mode = PHY_INTERFACE_MODE_GMII; 2692 break; 2693 case XAE_PHY_TYPE_RGMII_2_0: 2694 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; 2695 break; 2696 case XAE_PHY_TYPE_SGMII: 2697 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; 2698 break; 2699 case XAE_PHY_TYPE_1000BASE_X: 2700 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; 2701 break; 2702 default: 2703 ret = -EINVAL; 2704 goto cleanup_clk; 2705 } 2706 } else { 2707 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); 2708 if (ret) 2709 goto cleanup_clk; 2710 } 2711 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && 2712 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { 2713 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); 2714 ret = -EINVAL; 2715 goto cleanup_clk; 2716 } 2717 2718 if (!of_property_present(pdev->dev.of_node, "dmas")) { 2719 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 2720 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); 2721 2722 if (np) { 2723 struct resource dmares; 2724 2725 ret = of_address_to_resource(np, 0, &dmares); 2726 if (ret) { 2727 dev_err(&pdev->dev, 2728 "unable to get DMA resource\n"); 2729 of_node_put(np); 2730 goto cleanup_clk; 2731 } 2732 lp->dma_regs = devm_ioremap_resource(&pdev->dev, 2733 &dmares); 2734 lp->rx_irq = irq_of_parse_and_map(np, 1); 2735 lp->tx_irq = irq_of_parse_and_map(np, 0); 2736 of_node_put(np); 2737 lp->eth_irq = platform_get_irq_optional(pdev, 0); 2738 } else { 2739 /* Check for these resources directly on the Ethernet node. 
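 * In this layout the DMA registers are platform resource 1, platform IRQ 0 is
 * the TX DMA interrupt, IRQ 1 is the RX DMA interrupt, and IRQ 2 is the
 * optional Ethernet core interrupt.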
*/
2740 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2741 lp->rx_irq = platform_get_irq(pdev, 1);
2742 lp->tx_irq = platform_get_irq(pdev, 0);
2743 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2744 }
2745 if (IS_ERR(lp->dma_regs)) {
2746 dev_err(&pdev->dev, "could not map DMA regs\n");
2747 ret = PTR_ERR(lp->dma_regs);
2748 goto cleanup_clk;
2749 }
2750 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2751 dev_err(&pdev->dev, "could not determine irqs\n");
2752 ret = -ENOMEM;
2753 goto cleanup_clk;
2754 }
2755
2756 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2757 ret = __axienet_device_reset(lp);
2758 if (ret)
2759 goto cleanup_clk;
2760
2761 /* Autodetect the need for 64-bit DMA pointers.
2762 * When the IP is configured for a bus width bigger than 32 bits,
2763 * writing the MSB registers is mandatory, even if they are all 0.
2764 * We can detect this case by writing all 1's to one such register
2765 * and see if that sticks: when the IP is configured for 32 bits
2766 * only, those registers are RES0.
2767 * Those MSB registers were introduced in IP v7.1, which we check first.
2768 */
2769 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2770 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2771
2772 iowrite32(0x0, desc);
2773 if (ioread32(desc) == 0) { /* sanity check */
2774 iowrite32(0xffffffff, desc);
2775 if (ioread32(desc) > 0) {
2776 lp->features |= XAE_FEATURE_DMA_64BIT;
2777 addr_width = 64;
2778 dev_info(&pdev->dev,
2779 "autodetected 64-bit DMA range\n");
2780 }
2781 iowrite32(0x0, desc);
2782 }
2783 }
2784 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2785 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2786 ret = -EINVAL;
2787 goto cleanup_clk;
2788 }
2789
2790 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2791 if (ret) {
2792 dev_err(&pdev->dev, "No suitable DMA available\n");
2793 goto cleanup_clk;
2794 }
2795 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2796 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2797 } else {
2798 struct xilinx_vdma_config cfg;
2799 struct dma_chan *tx_chan;
2800
2801 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2802 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2803 ret = lp->eth_irq;
2804 goto cleanup_clk;
2805 }
2806 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2807 if (IS_ERR(tx_chan)) {
2808 ret = PTR_ERR(tx_chan);
2809 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2810 goto cleanup_clk;
2811 }
2812
2813 cfg.reset = 1;
2814 /* The API is named for VDMA, but it also supports resetting this DMA channel */
2815 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2816 if (ret < 0) {
2817 dev_err(&pdev->dev, "Reset channel failed\n");
2818 dma_release_channel(tx_chan);
2819 goto cleanup_clk;
2820 }
2821
2822 dma_release_channel(tx_chan);
2823 lp->use_dmaengine = 1;
2824 }
2825
2826 if (lp->use_dmaengine)
2827 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2828 else
2829 ndev->netdev_ops = &axienet_netdev_ops;
2830 /* Check for Ethernet core IRQ (optional) */
2831 if (lp->eth_irq <= 0)
2832 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2833
2834 /* Retrieve the MAC address */
2835 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2836 if (!ret) {
2837 axienet_set_mac_address(ndev, mac_addr);
2838 } else {
2839 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2840 ret);
2841 axienet_set_mac_address(ndev, NULL);
2842 }
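/* Interrupt coalescing defaults; these can be changed at runtime with
 * "ethtool -C" via axienet_ethtools_set_coalesce(), which requires the
 * interface to be down.
 */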
2843 2844 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 2845 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 2846 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; 2847 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; 2848 2849 ret = axienet_mdio_setup(lp); 2850 if (ret) 2851 dev_warn(&pdev->dev, 2852 "error registering MDIO bus: %d\n", ret); 2853 2854 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || 2855 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { 2856 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); 2857 if (!np) { 2858 /* Deprecated: Always use "pcs-handle" for pcs_phy. 2859 * Falling back to "phy-handle" here is only for 2860 * backward compatibility with old device trees. 2861 */ 2862 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 2863 } 2864 if (!np) { 2865 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); 2866 ret = -EINVAL; 2867 goto cleanup_mdio; 2868 } 2869 lp->pcs_phy = of_mdio_find_device(np); 2870 if (!lp->pcs_phy) { 2871 ret = -EPROBE_DEFER; 2872 of_node_put(np); 2873 goto cleanup_mdio; 2874 } 2875 of_node_put(np); 2876 lp->pcs.ops = &axienet_pcs_ops; 2877 lp->pcs.neg_mode = true; 2878 lp->pcs.poll = true; 2879 } 2880 2881 lp->phylink_config.dev = &ndev->dev; 2882 lp->phylink_config.type = PHYLINK_NETDEV; 2883 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | 2884 MAC_10FD | MAC_100FD | MAC_1000FD; 2885 2886 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); 2887 if (lp->switch_x_sgmii) { 2888 __set_bit(PHY_INTERFACE_MODE_1000BASEX, 2889 lp->phylink_config.supported_interfaces); 2890 __set_bit(PHY_INTERFACE_MODE_SGMII, 2891 lp->phylink_config.supported_interfaces); 2892 } 2893 2894 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, 2895 lp->phy_mode, 2896 &axienet_phylink_ops); 2897 if (IS_ERR(lp->phylink)) { 2898 ret = PTR_ERR(lp->phylink); 2899 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); 2900 goto cleanup_mdio; 2901 } 2902 2903 ret = register_netdev(lp->ndev); 2904 if (ret) { 2905 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); 2906 goto cleanup_phylink; 2907 } 2908 2909 return 0; 2910 2911 cleanup_phylink: 2912 phylink_destroy(lp->phylink); 2913 2914 cleanup_mdio: 2915 if (lp->pcs_phy) 2916 put_device(&lp->pcs_phy->dev); 2917 if (lp->mii_bus) 2918 axienet_mdio_teardown(lp); 2919 cleanup_clk: 2920 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2921 clk_disable_unprepare(lp->axi_clk); 2922 2923 free_netdev: 2924 free_netdev(ndev); 2925 2926 return ret; 2927 } 2928 2929 static void axienet_remove(struct platform_device *pdev) 2930 { 2931 struct net_device *ndev = platform_get_drvdata(pdev); 2932 struct axienet_local *lp = netdev_priv(ndev); 2933 2934 unregister_netdev(ndev); 2935 2936 if (lp->phylink) 2937 phylink_destroy(lp->phylink); 2938 2939 if (lp->pcs_phy) 2940 put_device(&lp->pcs_phy->dev); 2941 2942 axienet_mdio_teardown(lp); 2943 2944 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); 2945 clk_disable_unprepare(lp->axi_clk); 2946 2947 free_netdev(ndev); 2948 } 2949 2950 static void axienet_shutdown(struct platform_device *pdev) 2951 { 2952 struct net_device *ndev = platform_get_drvdata(pdev); 2953 2954 rtnl_lock(); 2955 netif_device_detach(ndev); 2956 2957 if (netif_running(ndev)) 2958 dev_close(ndev); 2959 2960 rtnl_unlock(); 2961 } 2962 2963 static int axienet_suspend(struct device *dev) 2964 { 2965 struct net_device *ndev = dev_get_drvdata(dev); 2966 2967 if (!netif_running(ndev)) 2968 return 0; 2969 
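/* The interface is up: detach it from the stack and close it under RTNL;
 * axienet_resume() reopens and reattaches it.
 */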
2970 netif_device_detach(ndev); 2971 2972 rtnl_lock(); 2973 axienet_stop(ndev); 2974 rtnl_unlock(); 2975 2976 return 0; 2977 } 2978 2979 static int axienet_resume(struct device *dev) 2980 { 2981 struct net_device *ndev = dev_get_drvdata(dev); 2982 2983 if (!netif_running(ndev)) 2984 return 0; 2985 2986 rtnl_lock(); 2987 axienet_open(ndev); 2988 rtnl_unlock(); 2989 2990 netif_device_attach(ndev); 2991 2992 return 0; 2993 } 2994 2995 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops, 2996 axienet_suspend, axienet_resume); 2997 2998 static struct platform_driver axienet_driver = { 2999 .probe = axienet_probe, 3000 .remove = axienet_remove, 3001 .shutdown = axienet_shutdown, 3002 .driver = { 3003 .name = "xilinx_axienet", 3004 .pm = &axienet_pm_ops, 3005 .of_match_table = axienet_of_match, 3006 }, 3007 }; 3008 3009 module_platform_driver(axienet_driver); 3010 3011 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 3012 MODULE_AUTHOR("Xilinx"); 3013 MODULE_LICENSE("GPL"); 3014
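/* Illustrative sketch only, not copied from the DT bindings: a minimal
 * device-tree node of the shape axienet_probe() parses when the DMA engine is
 * described via the "axistream-connected" phandle (rather than "dmas").
 * Addresses, interrupt numbers and labels such as &axi_dma, &clks and &phy0
 * are placeholders; optional properties handled above ("xlnx,txcsum",
 * "xlnx,rxcsum", "xlnx,switch-x-sgmii", "pcs-handle", ...) would be added as
 * required by the hardware configuration.
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		interrupts = <2>;
 *		clocks = <&clks 0>, <&clks 1>, <&clks 2>, <&clks 3>;
 *		clock-names = "s_axi_lite_clk", "axis_clk", "ref_clk", "mgt_clk";
 *		phy-mode = "rgmii-id";
 *		phy-handle = <&phy0>;
 *		xlnx,rxmem = <0x8000>;
 *		axistream-connected = <&axi_dma>;
 *	};
 */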