/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet IP, which is used in the
 * Virtex-6 and Spartan-6 device families.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for the Tx and Rx DMA rings - 2^n for best performance */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};
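/*
 * Each entry above maps one XAE_OPTION_* flag to the register and mask that
 * implement it; axienet_setoptions() further below walks this table and does
 * a read-modify-write per entry, along these lines (illustrative):
 *
 *	reg = axienet_ior(lp, tp->reg) & ~tp->m_or;
 *	if (options & tp->opt)
 *		reg |= tp->m_or;
 *	axienet_iow(lp, tp->reg, reg);
 */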
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 * @value: Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* The Rx ring may not exist if we got here from the
	 * axienet_dma_bd_init error path, so check before touching it. */
	if (lp->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (lp->rx_bd_v[i].sw_id_offset));
		}
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}
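/*
 * Both rings are circular: each descriptor's 'next' field holds the bus
 * address of the following descriptor, and the last one points back at the
 * first. With TX_BD_NUM = 64, for example, descriptor 63's next is
 * tx_bd_p + sizeof(*tx_bd_v) * ((63 + 1) % 64), i.e. the ring base again.
 * Keeping the counts at powers of two keeps the producer/consumer index
 * wrap (index %= *_BD_NUM) cheap.
 */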
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * returns: 0, on success
 *	    -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					  &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					  &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		/* Stash the skb pointer so the Rx path can retrieve it */
		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register does it start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
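/*
 * Note on the channel control register updates above: the coalesce count and
 * the delay timer occupy separate bit fields selected by
 * XAXIDMA_COALESCE_MASK and XAXIDMA_DELAY_MASK, so each update clears its
 * field first and ORs the new value in at the field's shift, e.g.
 * (illustrative):
 *
 *	cr = (cr & ~XAXIDMA_COALESCE_MASK) |
 *	     (count << XAXIDMA_COALESCE_SHIFT);
 *
 * The intent is that an interrupt fires after 'count' completed BDs, or
 * after the delay timer expires while fewer than 'count' are pending.
 */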
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_random_addr(ndev->dev_addr);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}
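/*
 * Illustrative packing for the UAW0/UAW1 writes above: for the (hypothetical)
 * address 00:0a:35:01:02:03, the first four octets pack little-endian into
 * UAW0 as 0x01350a00, and the last two into the low half of UAW1 as 0x0302;
 * the UAW1 bits outside XAE_UAW1_UNICASTADDR_MASK are preserved.
 */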
/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
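/*
 * CAM access pattern used above, for reference: the low byte of the filter
 * mask index (FMI) register selects which of the XAE_MULTICAST_CAM_TABLE_NUM
 * entries the subsequent AF0/AF1 writes land in, so entry i is programmed
 * roughly as (illustrative):
 *
 *	axienet_iow(lp, XAE_FMI_OFFSET, (fmi & 0xFFFFFF00) | i);
 *	axienet_iow(lp, XAE_AF0_OFFSET, mac_low_32_bits);
 *	axienet_iow(lp, XAE_AF1_OFFSET, mac_high_16_bits);
 */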
/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset the Axi Ethernet core as well. The
	 * reset process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process. */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since the Axi DMA reset
 * lines are connected to the Axi Ethernet reset lines, this in turn resets
 * the Axi Ethernet core. No separate hardware reset is done for the Axi
 * Ethernet core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev, "Speed other than 10, 100 or 1Gbps is not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev, "Error setting Axi Ethernet mac speed\n");
		}
	}
}
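/*
 * Note on the link_state encoding above: speed, duplex and link are folded
 * into one word (speed | duplex << 1 | link) only so that a change in any of
 * them shows up as a single comparison against lp->last_link; the fields are
 * never decoded back out of the combined value.
 */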
/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 *			     Axi DMA Tx channel.
 * @ndev: Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++lp->tx_bd_ci;
		lp->tx_bd_ci %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * returns: 0, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;
	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
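/*
 * Worked example for the check above, assuming TX_BD_NUM = 64: with
 * tx_bd_tail at 62, a 3-fragment skb probes descriptor (62 + 3) % 64 = 1.
 * If that BD still has a status bit set, the hardware has not consumed it
 * yet, and the whole frame is refused with NETDEV_TX_BUSY rather than
 * being partially queued.
 */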
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *	    NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		++lp->tx_bd_tail;
		lp->tx_bd_tail %= TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++lp->tx_bd_tail;
	lp->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
}
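/*
 * Checksum offload encoding used in axienet_start_xmit, for reference:
 * app0 bit 1 requests full checksum offload, app0 bit 0 requests partial
 * offload, in which case app1 carries the two offsets the hardware needs,
 * e.g. (illustrative):
 *
 *	cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 */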
/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev: Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb)
			return;

		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		++lp->rx_bd_ci;
		lp->rx_bd_ci %= RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}
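/*
 * Rx checksum decode used above, for reference: with full offload the
 * hardware reports a status code in app2 (shifted down by 3 after masking
 * with XAE_FULL_CSUM_STATUS_MASK) that is compared against the validated
 * TCP/UDP codes; with partial offload the raw 16-bit checksum arrives in
 * app3 and is handed to the stack via CHECKSUM_COMPLETE.
 */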
/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}
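/*
 * Error handling strategy shared by both ISRs above: on
 * XAXIDMA_IRQ_ERROR_MASK the handlers mask further interrupts on both DMA
 * channels and defer recovery to axienet_dma_err_handler, a tasklet that
 * redoes the full DMA/Ethernet reset and BD ring rebuild outside of
 * hard-IRQ context.
 */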
static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
		     (unsigned long) lp);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	tasklet_kill(&lp->dma_err_tasklet);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_kill(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}
/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 *	    requested mtu is out of range for the hardware.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}
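/*
 * The resulting frame-size bounds, for reference: with jumbo-capable
 * hardware the MTU may range from 64 to XAE_JUMBO_MTU, otherwise 64 to
 * XAE_MTU; the on-wire buffer size is derived later, in
 * axienet_device_reset, as mtu + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE when the
 * jumbo option is active.
 */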
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled
 * prior to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);
	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev: Pointer to net_device structure
 * @ecmd: Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;
	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}
/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);
	epauseparm->autoneg = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * returns: 0 on success, -EFAULT if the device is running.
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}
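/*
 * Pause-frame bookkeeping, for reference: both ethtool handlers above map
 * directly onto the two enable bits of the flow control configuration
 * register, XAE_FCC_FCTX_MASK for Tx pause and XAE_FCC_FCRX_MASK for Rx
 * pause, so e.g. "ethtool -A ethX tx on rx off" sets the former and clears
 * the latter.
 */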
/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 *
 * returns: 0 on success, -EFAULT if the device is running, or -EOPNOTSUPP
 *	    for parameters the hardware does not support.
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	/* Only the frame-count thresholds are programmable; reject every
	 * other coalesce parameter the ethtool interface can carry. */
	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_settings = axienet_ethtools_get_settings,
	.set_settings = axienet_ethtools_set_settings,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
};
/**
 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data: Data passed
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(unsigned long data)
{
	u32 axienet_status;
	u32 cr, i;
	int mdio_mcreg;
	struct axienet_local *lp = (struct axienet_local *) data;
	struct net_device *ndev = lp->ndev;
	struct axidma_bd *cur_p;

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	axienet_mdio_wait_until_ready(lp);
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. So if MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
		    ~XAE_MDIO_MC_MDIOEN_MASK));

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	axienet_mdio_wait_until_ready(lp);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->phys)
			dma_unmap_single(ndev->dev.parent, cur_p->phys,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
		cur_p->phys = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->sw_id_offset = 0;
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but only after we write to the
	 * tail pointer register does it start transmitting. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);
}
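/*
 * For orientation, axienet_of_probe below consumes a device-tree node along
 * these lines (an illustrative sketch only - addresses and values here are
 * made up; see the binding documentation for the authoritative format):
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		local-mac-address = [00 0a 35 01 02 03];
 *		xlnx,rxmem = <0x4000>;
 *		xlnx,txcsum = <0x1>;
 *		xlnx,rxcsum = <0x1>;
 *		phy-handle = <&phy0>;
 *		axistream-connected = <&axi_dma>;
 *	};
 */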
/**
 * axienet_of_probe - Axi Ethernet probe function.
 * @op: Pointer to platform device structure.
 *
 * returns: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_of_probe(struct platform_device *op)
{
	__be32 *p;
	int size, ret = 0;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	const void *addr;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	ether_setup(ndev);
	platform_set_drvdata(op, ndev);

	SET_NETDEV_DEV(ndev, &op->dev);
	ndev->flags &= ~IFF_MULTICAST;	/* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &op->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	/* Map device registers */
	lp->regs = of_iomap(op->dev.of_node, 0);
	if (!lp->regs) {
		dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
		ret = -ENOMEM;
		goto nodev;
	}
	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
	if (p) {
		switch (be32_to_cpup(p)) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be at least
	 * 16384 bytes, so that we can enable the jumbo option and start
	 * supporting jumbo frames. Here we check for memory allocated for
	 * Rx/Tx in the hardware from the device-tree and accordingly set
	 * flags. */
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
	if (p) {
		if ((be32_to_cpup(p)) >= 0x4000)
			lp->jumbo_support = 1;
	}
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
				       NULL);
	if (p)
		lp->temac_type = be32_to_cpup(p);
	p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
	if (p)
		lp->phy_type = be32_to_cpup(p);

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
	if (!np) {
		dev_err(&op->dev, "could not find DMA node\n");
		ret = -ENODEV;
		goto err_iounmap;
	}
	lp->dma_regs = of_iomap(np, 0);
	if (!lp->dma_regs) {
		dev_err(&op->dev, "unable to map DMA registers\n");
		of_node_put(np);
		ret = -ENOMEM;
		goto err_iounmap;
	}
	dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
	lp->rx_irq = irq_of_parse_and_map(np, 1);
	lp->tx_irq = irq_of_parse_and_map(np, 0);
	of_node_put(np);
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&op->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto err_iounmap_2;
	}

	/* Retrieve the MAC address */
	addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
	if ((!addr) || (size != ETH_ALEN)) {
		dev_err(&op->dev, "could not find MAC address\n");
		ret = -ENODEV;
		goto err_iounmap_2;
	}
	axienet_set_mac_address(ndev, (void *) addr);

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
	ret = axienet_mdio_setup(lp, op->dev.of_node);
	if (ret)
		dev_warn(&op->dev, "error registering MDIO bus\n");

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto err_iounmap_2;
	}

	return 0;

err_iounmap_2:
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
err_iounmap:
	iounmap(lp->regs);
nodev:
	free_netdev(ndev);
	ndev = NULL;
	return ret;
}

static int axienet_of_remove(struct platform_device *op)
{
	struct net_device *ndev = platform_get_drvdata(op);
	struct axienet_local *lp = netdev_priv(ndev);

	axienet_mdio_teardown(lp);
	unregister_netdev(ndev);

	of_node_put(lp->phy_node);
	lp->phy_node = NULL;

	iounmap(lp->regs);
	if (lp->dma_regs)
		iounmap(lp->dma_regs);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver axienet_of_driver = {
	.probe = axienet_of_probe,
	.remove = axienet_of_remove,
	.driver = {
		.name = "xilinx_axienet",
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_of_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");