/*
 * Copyright(c) 2015 EZchip Technologies.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include "nps_enet.h"

#define DRV_NAME			"nps_mgt_enet"

static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv)
{
	u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
	u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;

	return (!tx_ctrl_ct && priv->tx_skb);
}

static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 i, len = DIV_ROUND_UP(frame_len, sizeof(u32));

	/* Empty Rx FIFO buffer by reading all words */
	for (i = 0; i < len; i++)
		nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
}

static void nps_enet_read_rx_fifo(struct net_device *ndev,
				  unsigned char *dst, u32 length)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 i, last = length & (sizeof(u32) - 1);
	u32 *reg = (u32 *)dst, len = length / sizeof(u32);
	bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));

	/* In case dst is not aligned we need an intermediate buffer */
	if (dst_is_aligned) {
		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
		reg += len;
	} else { /* !dst_is_aligned */
		for (i = 0; i < len; i++, reg++) {
			u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);

			put_unaligned_be32(buf, reg);
		}
	}
	/* copy last bytes (if any) */
	if (last) {
		u32 buf;

		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
		memcpy((u8 *)reg, &buf, last);
	}
}

static u32 nps_enet_rx_handler(struct net_device *ndev)
{
	u32 frame_len, err = 0;
	u32 work_done = 0;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
	u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
	u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;

	frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;

	/* Check if we got RX */
	if (!rx_ctrl_cr)
		return work_done;

	/* If we got here there is work for us */
	work_done++;

	/* Check Rx error */
	if (rx_ctrl_er) {
		ndev->stats.rx_errors++;
		err = 1;
	}

	/* Check Rx CRC error */
	if (rx_ctrl_crc) {
		ndev->stats.rx_crc_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	/* Check frame length, minimum is 64 bytes */
	if (unlikely(frame_len < ETH_ZLEN)) {
		ndev->stats.rx_length_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	if (err)
		goto rx_irq_clean;

	/* Skb allocation */
	skb = netdev_alloc_skb_ip_align(ndev, frame_len);
	if (unlikely(!skb)) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
		goto rx_irq_clean;
	}

	/* Copy frame from Rx fifo into the skb */
	nps_enet_read_rx_fifo(ndev, skb->data, frame_len);

	skb_put(skb, frame_len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += frame_len;
	netif_receive_skb(skb);

	goto rx_irq_frame_done;

rx_irq_clean:
	/* Clean Rx fifo */
	nps_enet_clean_rx_fifo(ndev, frame_len);

rx_irq_frame_done:
	/* Ack Rx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);

	return work_done;
}

static void nps_enet_tx_handler(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
	u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
	u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;

	/* Check if we got TX */
	if (!nps_enet_is_tx_pending(priv))
		return;

	/* Ack Tx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);

	/* Check Tx transmit error */
	if (unlikely(tx_ctrl_et)) {
		ndev->stats.tx_errors++;
	} else {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += tx_ctrl_nt;
	}

	dev_kfree_skb(priv->tx_skb);
	priv->tx_skb = NULL;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

/**
 * nps_enet_poll - NAPI poll handler.
 * @napi: Pointer to napi_struct structure.
 * @budget: How many frames to process on one call.
 *
 * returns: Number of processed frames
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 work_done;

	nps_enet_tx_handler(ndev);
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;

		napi_complete(napi);

		/* set tx_done and rx_rdy bits */
		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* If a Tx interrupt fires while interrupts are masked we
		 * lose it, since Tx is an edge interrupt. Specifically, a
		 * Tx completion that arrives between nps_enet_tx_handler
		 * above and the interrupt enable would leave Tx stuck until
		 * the next Rx interrupt. The two lines below handle this by
		 * re-adding ourselves to the poll list.
		 */
		if (nps_enet_is_tx_pending(priv)) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			napi_reschedule(napi);
		}
	}

	return work_done;
}

/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * The EZchip ENET has two interrupt causes; the bits raised in the CTRL
 * registers tell us why the interrupt fired: one cause is Rx, the other
 * is Tx (completion).
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;

	if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
		if (likely(napi_schedule_prep(&priv->napi))) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}

	return IRQ_HANDLED;
}

static void nps_enet_set_hw_mac_address(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_1_value = 0;
	u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;

	/* set MAC address in HW */
	ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT;
	ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT;
	ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT;
	ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT;
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK)
		 | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT;
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK)
		 | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT;

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
			 ge_mac_cfg_1_value);

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
			 *ge_mac_cfg_2_value);
}

/**
 * nps_enet_hw_reset - Reset the network device.
 * @ndev: Pointer to the network device.
 *
 * This function resets the PCS and Tx FIFO.
 * The programming model is to set the relevant reset bits,
 * wait for the reset to propagate, and then clear the
 * reset bits. This ensures that the reset procedure
 * completes successfully on the device.
 */
static void nps_enet_hw_reset(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_rst_value = 0, phase_fifo_ctl_value = 0;

	/* PCS reset sequence */
	ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
	usleep_range(10, 20);
	ge_rst_value = 0;
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);

	/* Tx fifo reset sequence */
	phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT;
	phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
			 phase_fifo_ctl_value);
	usleep_range(10, 20);
	phase_fifo_ctl_value = 0;
	nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
			 phase_fifo_ctl_value);
}

static void nps_enet_hw_enable_control(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0;
	u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
	u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value;
	s32 max_frame_length;

	/* Enable Rx and Tx statistics */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK)
		 | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT;

	/* Discard packets with different MAC address */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
		 | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;

	/* Discard multicast packets */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
		 | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
			 *ge_mac_cfg_2_value);

	/* Discard packets bigger than max frame length */
	max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
		*ge_mac_cfg_3_value =
			(*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK)
			| max_frame_length << CFG_3_MAX_LEN_SHIFT;
	}

	/* Enable interrupts */
	buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
	buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
			 buf_int_enable_value);

	/* Write device MAC address to HW */
	nps_enet_set_hw_mac_address(ndev);

	/* Rx and Tx HW features */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT;

	/* IFG configuration */
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT;

	/* preamble configuration */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT;

	/* enable flow control frames */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT;
	*ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK)
		 | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT;

	/* Enable Rx and Tx */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT;

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
			 *ge_mac_cfg_3_value);
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
			 ge_mac_cfg_0_value);
}

static void nps_enet_hw_disable_control(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	/* Disable interrupts */
	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);

	/* Disable Rx and Tx */
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, 0);
}

static void nps_enet_send_frame(struct net_device *ndev,
				struct sk_buff *skb)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 tx_ctrl_value = 0;
	short length = skb->len;
	u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
	u32 *src = (void *)skb->data;
	bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));

	/* In case src is not aligned we need an intermediate buffer */
	if (src_is_aligned)
		iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len);
	else /* !src_is_aligned */
		for (i = 0; i < len; i++, src++)
			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
					 get_unaligned_be32(src));

	/* Write the length of the frame */
	tx_ctrl_value |= length << TX_CTL_NT_SHIFT;

	tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
	/* Send frame */
	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
}

/**
 * nps_enet_set_mac_address - Set the MAC address for this device.
 * @ndev: Pointer to net_device structure.
 * @p: 6 byte Address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns: -EBUSY if the net device is busy or 0 if the address is set
 *          successfully.
 */
static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	s32 res;

	if (netif_running(ndev))
		return -EBUSY;

	res = eth_mac_addr(ndev, p);
	if (!res) {
		ether_addr_copy(ndev->dev_addr, addr->sa_data);
		nps_enet_set_hw_mac_address(ndev);
	}

	return res;
}

/**
 * nps_enet_set_rx_mode - Change the receive filtering mode.
 * @ndev: Pointer to the network device.
 *
 * This function enables/disables promiscuous mode.
 */
static void nps_enet_set_rx_mode(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value;

	if (ndev->flags & IFF_PROMISC) {
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
			 | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT;
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
			 | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT;

	} else {
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
			 | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
			 | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
	}

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
}

/**
 * nps_enet_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the ENET device and starts the Tx queue.
 */
static s32 nps_enet_open(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 err;

	/* Reset private variables */
	priv->tx_skb = NULL;
	priv->ge_mac_cfg_2_value = 0;
	priv->ge_mac_cfg_3_value = 0;

	/* ge_mac_cfg_3 default values */
	priv->ge_mac_cfg_3_value |=
		NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;

	priv->ge_mac_cfg_3_value |=
		NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;

	/* Disable HW device */
	nps_enet_hw_disable_control(ndev);

	/* irq Rx allocation */
	err = request_irq(priv->irq, nps_enet_irq_handler,
			  0, "enet-rx-tx", ndev);
	if (err)
		return err;

	napi_enable(&priv->napi);

	/* Enable HW device */
	nps_enet_hw_reset(ndev);
	nps_enet_hw_enable_control(ndev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * nps_enet_stop - Close the network device.
 * @ndev: Pointer to the network device.
 *
 * This function stops the Tx queue and disables interrupts for the ENET
 * device.
 */
static s32 nps_enet_stop(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	nps_enet_hw_disable_control(ndev);
	free_irq(priv->irq, ndev);

	return 0;
}

/**
 * nps_enet_start_xmit - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	/* This driver handles one frame at a time */
	netif_stop_queue(ndev);

	priv->tx_skb = skb;

	/* make sure tx_skb is actually written to the memory
	 * before the HW is informed and the IRQ is fired.
	 */
	wmb();

	nps_enet_send_frame(ndev, skb);

	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nps_enet_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	nps_enet_irq_handler(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif

static const struct net_device_ops nps_netdev_ops = {
	.ndo_open = nps_enet_open,
	.ndo_stop = nps_enet_stop,
	.ndo_start_xmit = nps_enet_start_xmit,
	.ndo_set_mac_address = nps_enet_set_mac_address,
	.ndo_set_rx_mode = nps_enet_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nps_enet_poll_controller,
#endif
};

static s32 nps_enet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct nps_enet_priv *priv;
	s32 err = 0;
	const char *mac_addr;
	struct resource *res_regs;

	if (!dev->of_node)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);
	priv = netdev_priv(ndev);

	/* The EZ NET specific entries in the device structure. */
	ndev->netdev_ops = &nps_netdev_ops;
	ndev->watchdog_timeo = (400 * HZ / 1000);
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs_base = devm_ioremap_resource(dev, res_regs);
	if (IS_ERR(priv->regs_base)) {
		err = PTR_ERR(priv->regs_base);
		goto out_netdev;
	}
	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);

	/* set kernel MAC address to dev */
	mac_addr = of_get_mac_address(dev->of_node);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);

	/* Get IRQ number; platform_get_irq() returns a negative errno
	 * on failure, so check for that rather than for zero.
	 */
	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
		err = -ENODEV;
		goto out_netdev;
	}

	netif_napi_add(ndev, &priv->napi, nps_enet_poll,
		       NPS_ENET_NAPI_POLL_WEIGHT);

	/* Register the driver. Should be the last thing in probe */
	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
			ndev->name, (s32)err);
		goto out_netif_api;
	}

	dev_info(dev, "(rx/tx=%d)\n", priv->irq);
	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
out_netdev:
	if (err)
		free_netdev(ndev);

	return err;
}

static s32 nps_enet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nps_enet_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	/* Delete the NAPI instance before freeing ndev, since priv lives
	 * inside the netdev's private area.
	 */
	netif_napi_del(&priv->napi);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id nps_enet_dt_ids[] = {
	{ .compatible = "ezchip,nps-mgt-enet" },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);

static struct platform_driver nps_enet_driver = {
	.probe = nps_enet_probe,
	.remove = nps_enet_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = nps_enet_dt_ids,
	},
};

module_platform_driver(nps_enet_driver);

MODULE_AUTHOR("EZchip Semiconductor");
MODULE_LICENSE("GPL v2");
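
/* Illustrative device-tree usage (sketch, not part of the driver): probe
 * above requires an of_node with a "reg" memory region, an interrupt, and
 * optionally a MAC address property (read via of_get_mac_address). The node
 * below is a minimal example; the unit address, register window size and
 * interrupt number are placeholders for a given SoC. Only the compatible
 * string is taken from nps_enet_dt_ids above.
 *
 *	ethernet@f0003000 {
 *		compatible = "ezchip,nps-mgt-enet";
 *		reg = <0xf0003000 0x44>;
 *		interrupts = <7>;
 *	};
 */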