// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Microchip ENCX24J600 ethernet driver
 *
 * Copyright (C) 2015 Gridpoint
 * Author: Jon Ringle <jringle@gridpoint.com>
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/spi/spi.h>

#include "encx24j600_hw.h"

#define DRV_NAME "encx24j600"
#define DRV_VERSION "1.0"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0000);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* SRAM memory layout:
 *
 * 0x0000-0x05ff TX buffers 1.5KB (1*1536) reside in the GP area in SRAM
 * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
 */
#define ENC_TX_BUF_START 0x0000U
#define ENC_RX_BUF_START 0x0600U
#define ENC_RX_BUF_END   0x5fffU
#define ENC_SRAM_SIZE    0x6000U

enum {
	RXFILTER_NORMAL,
	RXFILTER_MULTI,
	RXFILTER_PROMISC
};

struct encx24j600_priv {
	struct net_device *ndev;
	struct mutex lock; /* device access lock */
	struct encx24j600_context ctx;
	struct sk_buff *tx_skb;
	struct task_struct *kworker_task;
	struct kthread_worker kworker;
	struct kthread_work tx_work;
	struct kthread_work setrx_work;
	u16 next_packet;
	bool hw_enabled;
	bool full_duplex;
	bool autoneg;
	u16 speed;
	int rxfilter;
	u32 msg_enable;
};

static void dump_packet(const char *msg, int len, const char *data)
{
	pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
	print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
}

static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
				struct rsv *rsv)
{
	struct net_device *dev = priv->ndev;

	netdev_info(dev, "RX packet Len:%d\n", rsv->len);
	netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
		   rsv->next_packet);
	netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_RXOK),
		   RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
	netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
		   RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
		   RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
	netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
		   RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
		   RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
		   RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
	netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
		   RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
		   RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
		   RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
		   RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
}

static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
{
	struct net_device *dev = priv->ndev;
	unsigned int val = 0;
	int ret = regmap_read(priv->ctx.regmap, reg, &val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
			  __func__, ret, reg);
	return val;
}

static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_write(priv->ctx.regmap, reg, val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
			  __func__, ret, reg, val);
}

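/* Register access helpers built on top of the regmap created by
 * devm_regmap_init_encx24j600(); SPI transfer errors are logged and
 * otherwise ignored by the callers.
 */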
static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
				  u16 mask, u16 val)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
			  __func__, ret, reg, val, mask);
}

static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
{
	struct net_device *dev = priv->ndev;
	unsigned int val = 0;
	int ret = regmap_read(priv->ctx.phymap, reg, &val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
			  __func__, ret, reg);
	return val;
}

static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_write(priv->ctx.phymap, reg, val);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
			  __func__, ret, reg, val);
}

static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
{
	encx24j600_update_reg(priv, reg, mask, 0);
}

static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
{
	encx24j600_update_reg(priv, reg, mask, mask);
}

static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
{
	struct net_device *dev = priv->ndev;
	int ret = regmap_write(priv->ctx.regmap, cmd, 0);

	if (unlikely(ret))
		netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
			  __func__, ret, cmd);
}

static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
			       size_t count)
{
	int ret;

	mutex_lock(&priv->ctx.mutex);
	ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
	mutex_unlock(&priv->ctx.mutex);

	return ret;
}

static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
				const u8 *data, size_t count)
{
	int ret;

	mutex_lock(&priv->ctx.mutex);
	ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
	mutex_unlock(&priv->ctx.mutex);

	return ret;
}

static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
{
	u16 phcon1 = encx24j600_read_phy(priv, PHCON1);

	if (priv->autoneg == AUTONEG_ENABLE) {
		phcon1 |= ANEN | RENEG;
	} else {
		phcon1 &= ~ANEN;
		if (priv->speed == SPEED_100)
			phcon1 |= SPD100;
		else
			phcon1 &= ~SPD100;

		if (priv->full_duplex)
			phcon1 |= PFULDPX;
		else
			phcon1 &= ~PFULDPX;
	}
	encx24j600_write_phy(priv, PHCON1, phcon1);
}

/* Waits for autonegotiation to complete. */
static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;
	unsigned long timeout = jiffies + msecs_to_jiffies(2000);
	u16 phstat1;
	u16 estat;

	phstat1 = encx24j600_read_phy(priv, PHSTAT1);
	while ((phstat1 & ANDONE) == 0) {
		if (time_after(jiffies, timeout)) {
			u16 phstat3;

			netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");

			priv->autoneg = AUTONEG_DISABLE;
			phstat3 = encx24j600_read_phy(priv, PHSTAT3);
			priv->speed = (phstat3 & PHY3SPD100) ? SPEED_100 : SPEED_10;
			priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
			encx24j600_update_phcon1(priv);
			netif_notice(priv, drv, dev, "Using parallel detection: %s/%s",
				     priv->speed == SPEED_100 ? "100" : "10",
				     priv->full_duplex ? "Full" : "Half");

			return -ETIMEDOUT;
		}
		cpu_relax();
		phstat1 = encx24j600_read_phy(priv, PHSTAT1);
	}

	estat = encx24j600_read_reg(priv, ESTAT);
	if (estat & PHYDPX) {
		encx24j600_set_bits(priv, MACON2, FULDPX);
		encx24j600_write_reg(priv, MABBIPG, 0x15);
	} else {
		encx24j600_clr_bits(priv, MACON2, FULDPX);
		encx24j600_write_reg(priv, MABBIPG, 0x12);
		/* Maximum retransmission attempts */
		encx24j600_write_reg(priv, MACLCON, 0x370f);
	}

	return 0;
}

/* Access the PHY to determine link status */
static void encx24j600_check_link_status(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;
	u16 estat;

	estat = encx24j600_read_reg(priv, ESTAT);

	if (estat & PHYLNK) {
		if (priv->autoneg == AUTONEG_ENABLE)
			encx24j600_wait_for_autoneg(priv);

		netif_carrier_on(dev);
		netif_info(priv, ifup, dev, "link up\n");
	} else {
		netif_info(priv, ifdown, dev, "link down\n");

		/* Re-enable autoneg since we won't know what we might be
		 * connected to when the link is brought back up again.
		 */
		priv->autoneg = AUTONEG_ENABLE;
		priv->full_duplex = true;
		priv->speed = SPEED_100;
		netif_carrier_off(dev);
	}
}

static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;

	netif_dbg(priv, intr, dev, "%s", __func__);
	encx24j600_check_link_status(priv);
	encx24j600_clr_bits(priv, EIR, LINKIF);
}

static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
{
	struct net_device *dev = priv->ndev;

	if (!priv->tx_skb) {
		BUG();
		return;
	}

	mutex_lock(&priv->lock);

	if (err)
		dev->stats.tx_errors++;
	else
		dev->stats.tx_packets++;

	dev->stats.tx_bytes += priv->tx_skb->len;

	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);

	netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");

	dev_kfree_skb(priv->tx_skb);
	priv->tx_skb = NULL;

	netif_wake_queue(dev);

	mutex_unlock(&priv->lock);
}

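/* Copy one received frame out of the RX SRAM window into a freshly
 * allocated skb and hand it to the network stack.  The receive status
 * vector (rsv) has already been read by the caller.
 */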
static int encx24j600_receive_packet(struct encx24j600_priv *priv,
				     struct rsv *rsv)
{
	struct net_device *dev = priv->ndev;
	struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);

	if (!skb) {
		pr_err_ratelimited("RX: OOM: packet dropped\n");
		dev->stats.rx_dropped++;
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);

	if (netif_msg_pktdata(priv))
		dump_packet("RX", skb->len, skb->data);

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_COMPLETE;

	/* Maintain stats */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += rsv->len;

	netif_rx(skb);

	return 0;
}

static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
{
	struct net_device *dev = priv->ndev;

	while (packet_count--) {
		struct rsv rsv;
		u16 newrxtail;

		encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
		encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));

		if (netif_msg_rx_status(priv))
			encx24j600_dump_rsv(priv, __func__, &rsv);

		if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
		    (rsv.len > MAX_FRAMELEN)) {
			netif_err(priv, rx_err, dev, "RX Error %04x\n",
				  rsv.rxstat);
			dev->stats.rx_errors++;

			if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
				dev->stats.rx_crc_errors++;
			if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
				dev->stats.rx_frame_errors++;
			if (rsv.len > MAX_FRAMELEN)
				dev->stats.rx_over_errors++;
		} else {
			encx24j600_receive_packet(priv, &rsv);
		}

		priv->next_packet = rsv.next_packet;

		newrxtail = priv->next_packet - 2;
		if (newrxtail == ENC_RX_BUF_START)
			newrxtail = SRAM_SIZE - 2;

		encx24j600_cmd(priv, SETPKTDEC);
		encx24j600_write_reg(priv, ERXTAIL, newrxtail);
	}
}

static irqreturn_t encx24j600_isr(int irq, void *dev_id)
{
	struct encx24j600_priv *priv = dev_id;
	struct net_device *dev = priv->ndev;
	int eir;

	/* Clear interrupts */
	encx24j600_cmd(priv, CLREIE);

	eir = encx24j600_read_reg(priv, EIR);

	if (eir & LINKIF)
		encx24j600_int_link_handler(priv);

	if (eir & TXIF)
		encx24j600_tx_complete(priv, false);

	if (eir & TXABTIF)
		encx24j600_tx_complete(priv, true);

	if (eir & RXABTIF) {
		if (eir & PCFULIF) {
			/* Packet counter is full */
			netif_err(priv, rx_err, dev, "Packet counter full\n");
		}
		dev->stats.rx_dropped++;
		encx24j600_clr_bits(priv, EIR, RXABTIF);
	}

	if (eir & PKTIF) {
		u8 packet_count;

		mutex_lock(&priv->lock);

		packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
		while (packet_count) {
			encx24j600_rx_packets(priv, packet_count);
			packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
		}

		mutex_unlock(&priv->lock);
	}

	/* Enable interrupts */
	encx24j600_cmd(priv, SETEIE);

	return IRQ_HANDLED;
}

static int encx24j600_soft_reset(struct encx24j600_priv *priv)
{
	int ret = 0;
	int timeout;
	u16 eudast;

	/* Write and verify a test value to EUDAST */
	regcache_cache_bypass(priv->ctx.regmap, true);
	timeout = 10;
	do {
		encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
		eudast = encx24j600_read_reg(priv, EUDAST);
		usleep_range(25, 100);
	} while ((eudast != EUDAST_TEST_VAL) && --timeout);
	regcache_cache_bypass(priv->ctx.regmap, false);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		goto err_out;
	}

	/* Wait for CLKRDY to become set */
	timeout = 10;
	while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
		usleep_range(25, 100);

	if (timeout == 0) {
		ret = -ETIMEDOUT;
		goto err_out;
	}

	/* Issue a System Reset command */
	encx24j600_cmd(priv, SETETHRST);
	usleep_range(25, 100);

	/* Confirm that EUDAST has 0000h after system reset */
	if (encx24j600_read_reg(priv, EUDAST) != 0) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Wait for PHY register and status bits to become available */
	usleep_range(256, 1000);

err_out:
	return ret;
}

static int encx24j600_hw_reset(struct encx24j600_priv *priv)
{
	int ret;

	mutex_lock(&priv->lock);
	ret = encx24j600_soft_reset(priv);
	mutex_unlock(&priv->lock);

	return ret;
}

static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
{
	encx24j600_set_bits(priv, ECON2, TXRST);
	encx24j600_clr_bits(priv, ECON2, TXRST);
}

static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
{
	/* Reset TX */
	encx24j600_reset_hw_tx(priv);

	/* Clear the TXIF and TXABTIF flags if they were previously set */
	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);

	/* Write the Tx Buffer pointer */
	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
}

static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
{
	encx24j600_cmd(priv, DISABLERX);

	/* Set up RX packet start address in the SRAM */
	encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);

	/* Preload the RX Data pointer to the beginning of the RX area */
	encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);

	priv->next_packet = ENC_RX_BUF_START;

	/* Set up RX end address in the SRAM */
	encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);

	/* Reset the user data pointers */
	encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
	encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);

	/* Set Max Frame length */
	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
}

static void encx24j600_dump_config(struct encx24j600_priv *priv,
				   const char *msg)
{
	pr_info(DRV_NAME ": %s\n", msg);

	/* CHIP configuration */
	pr_info(DRV_NAME " ECON1: %04X\n", encx24j600_read_reg(priv, ECON1));
	pr_info(DRV_NAME " ECON2: %04X\n", encx24j600_read_reg(priv, ECON2));
	pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv, ERXFCON));
	pr_info(DRV_NAME " ESTAT: %04X\n", encx24j600_read_reg(priv, ESTAT));
	pr_info(DRV_NAME " EIR: %04X\n", encx24j600_read_reg(priv, EIR));
	pr_info(DRV_NAME " EIDLED: %04X\n", encx24j600_read_reg(priv, EIDLED));

	/* MAC layer configuration */
	pr_info(DRV_NAME " MACON1: %04X\n", encx24j600_read_reg(priv, MACON1));
	pr_info(DRV_NAME " MACON2: %04X\n", encx24j600_read_reg(priv, MACON2));
	pr_info(DRV_NAME " MAIPG: %04X\n", encx24j600_read_reg(priv, MAIPG));
	pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv, MACLCON));
	pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv, MABBIPG));

	/* PHY configuration */
	pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
	pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
	pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
	pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv, PHANLPA));
	pr_info(DRV_NAME " PHANE: %04X\n", encx24j600_read_phy(priv, PHANE));
	pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv, PHSTAT1));
	pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv, PHSTAT2));
	pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv, PHSTAT3));
}

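/* Apply the filter mode selected in priv->rxfilter to MACON1/ERXFCON:
 * promiscuous passes all frames, multi additionally accepts multicast,
 * normal accepts only CRC-valid unicast and broadcast frames.
 */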
static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
{
	switch (priv->rxfilter) {
	case RXFILTER_PROMISC:
		encx24j600_set_bits(priv, MACON1, PASSALL);
		encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
		break;
	case RXFILTER_MULTI:
		encx24j600_clr_bits(priv, MACON1, PASSALL);
		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
		break;
	case RXFILTER_NORMAL:
	default:
		encx24j600_clr_bits(priv, MACON1, PASSALL);
		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
		break;
	}
}

static void encx24j600_hw_init(struct encx24j600_priv *priv)
{
	u16 macon2;

	priv->hw_enabled = false;

	/* PHY Leds: link status,
	 * LEDA: Link State + collision events
	 * LEDB: Link State + transmit/receive events
	 */
	encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);

	/* Loopback disabled */
	encx24j600_write_reg(priv, MACON1, 0x9);

	/* interpacket gap value */
	encx24j600_write_reg(priv, MAIPG, 0x0c12);

	/* Write the auto negotiation pattern */
	encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);

	encx24j600_update_phcon1(priv);
	encx24j600_check_link_status(priv);

	macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
	if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
		macon2 |= FULDPX;

	encx24j600_set_bits(priv, MACON2, macon2);

	priv->rxfilter = RXFILTER_NORMAL;
	encx24j600_set_rxfilter_mode(priv);

	/* Program the Maximum frame length */
	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);

	/* Init Tx pointers */
	encx24j600_hw_init_tx(priv);

	/* Init Rx pointers */
	encx24j600_hw_init_rx(priv);

	if (netif_msg_hw(priv))
		encx24j600_dump_config(priv, "Hw is initialized");
}

static void encx24j600_hw_enable(struct encx24j600_priv *priv)
{
	/* Clear the interrupt flags in case any were set */
	encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
					PKTIF | LINKIF));

	/* Enable the interrupts */
	encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
					 PKTIE | LINKIE | INTIE));

	/* Enable RX */
	encx24j600_cmd(priv, ENABLERX);

	priv->hw_enabled = true;
}

static void encx24j600_hw_disable(struct encx24j600_priv *priv)
{
	/* Disable all interrupts */
	encx24j600_write_reg(priv, EIE, 0);

	/* Disable RX */
	encx24j600_cmd(priv, DISABLERX);

	priv->hw_enabled = false;
}

static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
			      u8 duplex)
{
	struct encx24j600_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!priv->hw_enabled) {
		/* link is in low power mode now; duplex setting
		 * will take effect on next encx24j600_hw_init()
		 */
		if (speed == SPEED_10 || speed == SPEED_100) {
			priv->autoneg = (autoneg == AUTONEG_ENABLE);
			priv->full_duplex = (duplex == DUPLEX_FULL);
			priv->speed = speed;
		} else {
			netif_warn(priv, link, dev, "unsupported link speed setting\n");
			/* speeds other than SPEED_10 and SPEED_100
			 * are not supported by the chip
			 */
			ret = -EOPNOTSUPP;
		}
	} else {
		netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
		ret = -EBUSY;
	}
	return ret;
}

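/* Read the MAC address currently programmed into the MAADR1..MAADR3
 * register pairs (two octets per register, low octet first).
 */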
static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
				      unsigned char *ethaddr)
{
	unsigned short val;

	val = encx24j600_read_reg(priv, MAADR1);

	ethaddr[0] = val & 0x00ff;
	ethaddr[1] = (val & 0xff00) >> 8;

	val = encx24j600_read_reg(priv, MAADR2);

	ethaddr[2] = val & 0x00ffU;
	ethaddr[3] = (val & 0xff00U) >> 8;

	val = encx24j600_read_reg(priv, MAADR3);

	ethaddr[4] = val & 0x00ffU;
	ethaddr[5] = (val & 0xff00U) >> 8;
}

/* Program the hardware MAC address from dev->dev_addr. */
static int encx24j600_set_hw_macaddr(struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	if (priv->hw_enabled) {
		netif_info(priv, drv, dev, "Hardware must be disabled to set MAC address\n");
		return -EBUSY;
	}

	mutex_lock(&priv->lock);

	netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
		   dev->name, dev->dev_addr);

	encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
			     dev->dev_addr[5] << 8));
	encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
			     dev->dev_addr[3] << 8));
	encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
			     dev->dev_addr[1] << 8));

	mutex_unlock(&priv->lock);

	return 0;
}

/* Store the new hardware address in dev->dev_addr, and update the MAC. */
static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *address = addr;

	if (netif_running(dev))
		return -EBUSY;
	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, address->sa_data);
	return encx24j600_set_hw_macaddr(dev);
}

static int encx24j600_open(struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				       DRV_NAME, priv);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "request irq %d failed (ret = %d)\n",
			   priv->ctx.spi->irq, ret);
		return ret;
	}

	encx24j600_hw_disable(priv);
	encx24j600_hw_init(priv);
	encx24j600_hw_enable(priv);
	netif_start_queue(dev);

	return 0;
}

static int encx24j600_stop(struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	free_irq(priv->ctx.spi->irq, priv);
	return 0;
}

static void encx24j600_setrx_proc(struct kthread_work *ws)
{
	struct encx24j600_priv *priv =
		container_of(ws, struct encx24j600_priv, setrx_work);

	mutex_lock(&priv->lock);
	encx24j600_set_rxfilter_mode(priv);
	mutex_unlock(&priv->lock);
}

static void encx24j600_set_multicast_list(struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);
	int oldfilter = priv->rxfilter;

	if (dev->flags & IFF_PROMISC) {
		netif_dbg(priv, link, dev, "promiscuous mode\n");
		priv->rxfilter = RXFILTER_PROMISC;
	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
		netif_dbg(priv, link, dev, "%smulticast mode\n",
			  (dev->flags & IFF_ALLMULTI) ? "all-" : "");
		priv->rxfilter = RXFILTER_MULTI;
	} else {
		netif_dbg(priv, link, dev, "normal mode\n");
		priv->rxfilter = RXFILTER_NORMAL;
	}

	if (oldfilter != priv->rxfilter)
		kthread_queue_work(&priv->kworker, &priv->setrx_work);
}

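/* Copy the queued skb into the general purpose (TX) SRAM area and start
 * transmission; runs from the kthread worker with priv->lock held.
 */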
static void encx24j600_hw_tx(struct encx24j600_priv *priv)
{
	struct net_device *dev = priv->ndev;

	netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
		   priv->tx_skb->len);

	if (netif_msg_pktdata(priv))
		dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);

	if (encx24j600_read_reg(priv, EIR) & TXABTIF)
		/* Last transmission aborted due to error. Reset TX interface */
		encx24j600_reset_hw_tx(priv);

	/* Clear the TXIF flag if it was previously set */
	encx24j600_clr_bits(priv, EIR, TXIF);

	/* Set the data pointer to the TX buffer address in the SRAM */
	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);

	/* Copy the packet into the SRAM */
	encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
			     priv->tx_skb->len);

	/* Program the Tx buffer start pointer */
	encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);

	/* Program the packet length */
	encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);

	/* Start the transmission */
	encx24j600_cmd(priv, SETTXRTS);
}

static void encx24j600_tx_proc(struct kthread_work *ws)
{
	struct encx24j600_priv *priv =
		container_of(ws, struct encx24j600_priv, tx_work);

	mutex_lock(&priv->lock);
	encx24j600_hw_tx(priv);
	mutex_unlock(&priv->lock);
}

static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	/* save the timestamp */
	netif_trans_update(dev);

	/* Remember the skb for deferred processing */
	priv->tx_skb = skb;

	kthread_queue_work(&priv->kworker, &priv->tx_work);

	return NETDEV_TX_OK;
}

/* Deal with a transmit timeout */
static void encx24j600_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
		  jiffies, jiffies - dev_trans_start(dev));

	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int encx24j600_get_regs_len(struct net_device *dev)
{
	return SFR_REG_COUNT;
}

static void encx24j600_get_regs(struct net_device *dev,
				struct ethtool_regs *regs, void *p)
{
	struct encx24j600_priv *priv = netdev_priv(dev);
	u16 *buff = p;
	u8 reg;

	regs->version = 1;
	mutex_lock(&priv->lock);
	for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
		unsigned int val = 0;
		/* ignore errors for unreadable registers */
		regmap_read(priv->ctx.regmap, reg, &val);
		buff[reg] = val & 0xffff;
	}
	mutex_unlock(&priv->lock);
}

static void encx24j600_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->bus_info, dev_name(dev->dev.parent),
		sizeof(info->bus_info));
}

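/* ethtool link settings: report the cached speed/duplex/autoneg state.
 * Changing them is only allowed while the MAC is disabled, see
 * encx24j600_setlink().
 */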
static int encx24j600_get_link_ksettings(struct net_device *dev,
					 struct ethtool_link_ksettings *cmd)
{
	struct encx24j600_priv *priv = netdev_priv(dev);
	u32 supported;

	supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		    SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		    SUPPORTED_Autoneg | SUPPORTED_TP;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);

	cmd->base.speed = priv->speed;
	cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = PORT_TP;
	cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}

static int
encx24j600_set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	return encx24j600_setlink(dev, cmd->base.autoneg,
				  cmd->base.speed, cmd->base.duplex);
}

static u32 encx24j600_get_msglevel(struct net_device *dev)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
{
	struct encx24j600_priv *priv = netdev_priv(dev);

	priv->msg_enable = val;
}

static const struct ethtool_ops encx24j600_ethtool_ops = {
	.get_drvinfo = encx24j600_get_drvinfo,
	.get_msglevel = encx24j600_get_msglevel,
	.set_msglevel = encx24j600_set_msglevel,
	.get_regs_len = encx24j600_get_regs_len,
	.get_regs = encx24j600_get_regs,
	.get_link_ksettings = encx24j600_get_link_ksettings,
	.set_link_ksettings = encx24j600_set_link_ksettings,
};

static const struct net_device_ops encx24j600_netdev_ops = {
	.ndo_open = encx24j600_open,
	.ndo_stop = encx24j600_stop,
	.ndo_start_xmit = encx24j600_tx,
	.ndo_set_rx_mode = encx24j600_set_multicast_list,
	.ndo_set_mac_address = encx24j600_set_mac_address,
	.ndo_tx_timeout = encx24j600_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};

static int encx24j600_spi_probe(struct spi_device *spi)
{
	int ret;

	struct net_device *ndev;
	struct encx24j600_priv *priv;
	u16 eidled;
	u8 addr[ETH_ALEN];

	ndev = alloc_etherdev(sizeof(struct encx24j600_priv));

	if (!ndev) {
		ret = -ENOMEM;
		goto error_out;
	}

	priv = netdev_priv(ndev);
	spi_set_drvdata(spi, priv);
	dev_set_drvdata(&spi->dev, priv);
	SET_NETDEV_DEV(ndev, &spi->dev);

	priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	priv->ndev = ndev;

	/* Default PHY configuration */
	priv->full_duplex = true;
	priv->autoneg = AUTONEG_ENABLE;
	priv->speed = SPEED_100;

	priv->ctx.spi = spi;
	ndev->irq = spi->irq;
	ndev->netdev_ops = &encx24j600_netdev_ops;

	ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
	if (ret)
		goto out_free;

	mutex_init(&priv->lock);

	/* Reset device and check if it is connected */
	if (encx24j600_hw_reset(priv)) {
		netif_err(priv, probe, ndev,
			  DRV_NAME ": Chip is not detected\n");
		ret = -EIO;
		goto out_free;
	}

	/* Initialize the device HW to the consistent state */
	encx24j600_hw_init(priv);

	kthread_init_worker(&priv->kworker);
	kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
	kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);

	priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
					 "encx24j600");

	if (IS_ERR(priv->kworker_task)) {
		ret = PTR_ERR(priv->kworker_task);
		goto out_free;
	}
	/* Get the MAC address from the chip */
	encx24j600_hw_get_macaddr(priv, addr);
	eth_hw_addr_set(ndev, addr);

	ndev->ethtool_ops = &encx24j600_ethtool_ops;

	ret = register_netdev(ndev);
	if (unlikely(ret)) {
		netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
			  ret);
		goto out_stop;
	}

	eidled = encx24j600_read_reg(priv, EIDLED);
	if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
		ret = -EINVAL;
		goto out_unregister;
	}

	netif_info(priv, probe, ndev, "Silicon rev ID: 0x%02x\n",
		   (eidled & REVID_MASK) >> REVID_SHIFT);

	netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);

	return ret;

out_unregister:
	unregister_netdev(priv->ndev);
out_stop:
	kthread_stop(priv->kworker_task);
out_free:
	free_netdev(ndev);

error_out:
	return ret;
}

static void encx24j600_spi_remove(struct spi_device *spi)
{
	struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);

	unregister_netdev(priv->ndev);
	kthread_stop(priv->kworker_task);

	free_netdev(priv->ndev);
}

static const struct spi_device_id encx24j600_spi_id_table[] = {
	{ .name = "encx24j600" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);

static struct spi_driver encx24j600_spi_net_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.bus = &spi_bus_type,
	},
	.probe = encx24j600_spi_probe,
	.remove = encx24j600_spi_remove,
	.id_table = encx24j600_spi_id_table,
};

module_spi_driver(encx24j600_spi_net_driver);

MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
MODULE_LICENSE("GPL");