/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for the Tx and Rx DMA rings - 2^n for best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
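
/* Each entry above is applied by axienet_setoptions() as a
 * read-modify-write of the named register: the m_or bits are cleared
 * first, then OR-ed back in only when the corresponding option is
 * requested.
 */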

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver's stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	for (i = 0; i < RX_BD_NUM; i++) {
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 lp->max_frm_size, DMA_FROM_DEVICE);
		dev_kfree_skb((struct sk_buff *)
			      (lp->rx_bd_v[i].sw_id_offset));
	}

	if (lp->rx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}
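
/* The BD rings themselves live in coherent DMA memory (freed above with
 * dma_free_coherent()), while the packet buffers they point at are
 * streaming dma_map_single() mappings that must be unmapped one by one.
 */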

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register will the Tx channel start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
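
/* Note: axienet_dma_bd_init() above stashes each Rx skb pointer in the
 * u32 sw_id_offset field of its BD, which implicitly assumes 32-bit
 * kernel pointers.
 */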

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into the net_device_ops structure entry
 * ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}
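
/* The UAW0/UAW1 writes above pack the MAC address least-significant
 * byte first: dev_addr[0..3] go into UAW0 and dev_addr[4..5] into the
 * low half of UAW1.
 */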

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not, we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
				   __func__);
			break;
		}
	}
}
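
/* With udelay(1) per iteration, the loop above polls the reset bit up
 * to DELAY_OF_ONE_MILLISEC times (presumably about one millisecond in
 * total, as the macro name suggests) before giving up with an error.
 */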

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since the Axi DMA reset
 * lines are connected to the Axi Ethernet reset lines, this in turn resets
 * the Axi Ethernet core. No separate hardware reset is done for the Axi
 * Ethernet core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev,
					"Speed other than 10, 100 or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			netdev_err(ndev,
				   "Error setting Axi Ethernet mac speed\n");
		}
	}
}
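
/* axienet_adjust_link() folds the PHY state into one value
 * (speed | duplex << 1 | link) so a changed link is detected with a
 * single comparison against lp->last_link.
 */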

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that the CPU can regain ownership of
 * the buffer. It finally invokes "netif_wake_queue" to restart transmission
 * if required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		/*cur_p->phys = 0;*/
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++lp->tx_bd_ci;
		lp->tx_bd_ci %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
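
/* Checking only the BD at (tail + num_frag) above is sufficient: BDs
 * complete in order, so if that descriptor is still owned by hardware
 * the ring cannot yet hold a frame spanning num_frag + 1 descriptors.
 */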

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		++lp->tx_bd_tail;
		lp->tx_bd_tail %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++lp->tx_bd_tail;
	lp->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
}
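
/* Checksum offload encoding used above: app0 = 1 requests partial Tx
 * checksumming with the start/insert byte offsets packed into app1,
 * while app0 = 2 requests full checksum offload in hardware.
 */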

/**
 * axienet_recv - Called from the Axi DMA Rx Isr to complete the received
 * BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		/*skb_checksum_none_assert(skb);*/
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		++lp->rx_bd_ci;
		lp->rx_bd_ci %= RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
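
/* tail_p above tracks the last BD handed back to software; writing it
 * to XAXIDMA_RX_TDESC_OFFSET returns the freshly re-armed BDs to the
 * DMA engine for reuse.
 */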

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
		axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);
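
/* On a DMA error, both ISRs above mask all Tx and Rx DMA interrupts and
 * defer the actual recovery (channel reset plus BD re-initialization)
 * to axienet_dma_err_handler(), scheduled as a tasklet.
 */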

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		if (lp->phy_type == XAE_PHY_TYPE_GMII) {
			lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
						     axienet_adjust_link, 0,
						     PHY_INTERFACE_MODE_GMII);
		} else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
			lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
						     axienet_adjust_link, 0,
						     PHY_INTERFACE_MODE_RGMII_ID);
		}

		if (!lp->phy_dev)
			dev_err(lp->dev, "of_phy_connect() failed\n");
		else
			phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}
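
/* Note the teardown order in axienet_stop() above: the Tx/Rx channels
 * are halted (RS bit cleared) and the MAC options disabled before the
 * IRQs are freed and the BD rings are released.
 */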

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the device is running; -EINVAL if the
 * requested mtu does not fit the hardware limits.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 *
 * Return: 0 on success, -ENODEV if PHY doesn't exist
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;

	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}
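
/* The get/set routines above and below use the legacy ethtool_cmd
 * interface: phy_ethtool_gset()/phy_ethtool_sset() simply forward the
 * request to the attached PHY device.
 */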

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -ENODEV if PHY doesn't exist
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;

	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 * AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}
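
/* The length reported above must match the dump written by
 * axienet_ethtools_get_regs() below (and the regdump_len advertised in
 * get_drvinfo): sizeof(u32) * AXIENET_REGS_N bytes.
 */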

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 * in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 * Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting the Axi Ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);

	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}
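
/* Pause state above is read back directly from the FCC register;
 * autoneg is reported as 0 because flow-control autonegotiation is not
 * wired up to the PHY in this driver.
 */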

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 * settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, -EFAULT if device is running
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}
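
/* Asymmetry worth noting: get_coalesce above reads the live values from
 * the DMA control registers, while set_coalesce below only caches them
 * in lp->coalesce_count_rx/tx, to be programmed into hardware by
 * axienet_dma_bd_init() on the next reset.
 */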

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0 on success, non-zero error value on failure.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_settings = axienet_ethtools_get_settings,
	.set_settings = axienet_ethtools_set_settings,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
};
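
/* The DMA error handler below runs in tasklet (softirq) context, which
 * is why it uses dev_kfree_skb_irq() when discarding in-flight Tx skbs.
 */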

/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data: Pointer to the axienet_local structure, cast to unsigned long
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards.
	 */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
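
	/* Unlike the Tx ring above, the Rx BDs keep their phys and
	 * sw_id_offset fields across the reset, so the original receive
	 * buffers are reused as-is.
	 */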

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register will the Tx channel start transmitting.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}
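
/* Illustrative device-tree snippet consumed by axienet_probe() below.
 * The property names are the ones the probe actually reads; the node
 * name, addresses and values are placeholders, not taken from a real
 * design:
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		local-mac-address = [00 0a 35 00 00 00];
 *		xlnx,rxmem = <0x1000>;
 *		xlnx,txcsum = <0x1>;	(0 = none, 1 = partial, 2 = full)
 *		xlnx,rxcsum = <0x1>;
 *		xlnx,phy-type = <0x1>;
 *		phy-handle = <&phy0>;
 *		axistream-connected = <&axi_dma>;
 *	};
 */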

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	u8 mac_addr[ETH_ALEN];
	struct resource *ethres, dmares;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
		ret = PTR_ERR(lp->regs);
		goto free_netdev;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* To support jumbo frames, the Axi Ethernet hardware must have been
	 * built with a sufficiently large Rx/Tx memory. Read the memory size
	 * configured for Rx/Tx from the device tree; the jumbo option is
	 * enabled later only when that memory can hold the requested frame
	 * size.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
	of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (!np) {
		/* of_parse_phandle() returns NULL (not an ERR_PTR) when the
		 * phandle is missing, so check for NULL here.
		 */
		dev_err(&pdev->dev, "could not find DMA node\n");
		ret = -EINVAL;
		goto free_netdev;
	}
	ret = of_address_to_resource(np, 0, &dmares);
	if (ret) {
		dev_err(&pdev->dev, "unable to get DMA resource\n");
		goto free_netdev;
	}
	lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto free_netdev;
	}
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto free_netdev;
	}

	/* Retrieve the MAC address */
	ret = of_property_read_u8_array(pdev->dev.of_node,
					"local-mac-address", mac_addr,
					ETH_ALEN);
	if (ret) {
		dev_err(&pdev->dev, "could not find MAC address\n");
		goto free_netdev;
	}
	axienet_set_mac_address(ndev, (void *)mac_addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (lp->phy_node) {
		ret = axienet_mdio_setup(lp, pdev->dev.of_node);
		if (ret)
			dev_warn(&pdev->dev, "error registering MDIO bus\n");
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto free_netdev;
	}

	return 0;

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");