// SPDX-License-Identifier: GPL-2.0-or-later
/* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
    Copyright (c) 2001, 2002 by D-Link Corporation
    Written by Edward Peng <edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.

*/

#include "dl2k.h"
#include <linux/dma-mapping.h>

#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;	/* Rx frame count each interrupt */
static int rx_timeout = 200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce = 16;	/* HW xmit count each TxDMAComplete */


MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0);	/* HW xmit count each TxDMAComplete */


/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)

static void dl2k_enable_int(struct netdev_private *np)
{
	void __iomem *ioaddr = np->ioaddr;

	dw16(IntEnable, DEFAULT_INTR);
}

static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;

static int rio_open (struct net_device *dev);
static void rio_timer (struct timer_list *t);
static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);

static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit		= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_rx_mode	= set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
};

static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	void *ring_space;
	dma_addr_t ring_dma;

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
				   strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				   strcmp (media[card_idx], "6") == 0) {
				np->speed = 1000;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				   strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
			vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;
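		/* Note: np->rx_coalesce (frames) and np->rx_timeout (in
		 * 640 ns units) are combined into RxDMAIntCtrl by
		 * rio_hw_init(); tx_coalesce is clamped below to fit the
		 * Tx ring.
		 */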

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	/* MTU range: 68 - 1536 or 8000 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;

	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
		       tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
		       np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}

static int
find_miiphy (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i, phy_found = 0;

	np->phy_addr = 1;

	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phy_addr = i;
			phy_found++;
		}
	}
	if (!phy_found) {
		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
		return -ENODEV;
	}
	return 0;
}
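
/* EEPROM layout as used below: the first 256 bytes are read as 128
 * little-endian words.  On D-Link boards a CRC-32 over the first 252
 * bytes is checked against psrom->crc, the station address is taken
 * from the PSROM_t layout, and a "Software Information Block" of
 * {cell-id, next-offset, data...} cells starts at offset 0x30.
 */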
static int
parse_eeprom (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i, j;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;

	int cid, next;

	for (i = 0; i < 128; i++)
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
				dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->chip_id == CHIP_IP1000A) {
		np->led_mode = psrom->led_mode;
		return 0;
	}

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}

static void rio_set_led_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 mode;

	if (np->chip_id != CHIP_IP1000A)
		return;

	mode = dr32(ASICCtrl);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if (np->led_mode & 0x01)
		mode |= IPG_AC_LED_MODE;
	if (np->led_mode & 0x02)
		mode |= IPG_AC_LED_MODE_BIT_1;
	if (np->led_mode & 0x08)
		mode |= IPG_AC_LED_SPEED;

	dw32(ASICCtrl, mode);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}

static void free_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	/* Free all the skbuffs in the queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = np->rx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
					 skb->len, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
			np->rx_skbuff[i] = NULL;
		}
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
			np->tx_skbuff[i] = NULL;
		}
	}
}

static void rio_reset_ring(struct netdev_private *np)
{
	int i;

	np->cur_rx = 0;
	np->cur_tx = 0;
	np->old_rx = 0;
	np->old_tx = 0;

	for (i = 0; i < TX_RING_SIZE; i++)
		np->tx_ring[i].status = cpu_to_le64(TFDDone);

	for (i = 0; i < RX_RING_SIZE; i++)
		np->rx_ring[i].status = 0;
}
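
/* Descriptor fraginfo layout used throughout this driver: bits 0..47
 * hold the DMA address of the buffer (see desc_to_dma()), bits 48..63
 * hold the fragment length.
 */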

/* allocate and initialize Tx and Rx descriptors */
static int alloc_list(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	rio_reset_ring(np);
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
					      ((i + 1) % TX_RING_SIZE) *
					      sizeof(struct netdev_desc));
	}

	/* Initialize Rx descriptors & allocate buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocate a fixed-size skbuff */
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (!skb) {
			free_list(dev);
			return -ENOMEM;
		}

		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
					      ((i + 1) % RX_RING_SIZE) *
					      sizeof(struct netdev_desc));
		/* Rubicon now supports 40 bits of addressing space. */
		np->rx_ring[i].fraginfo =
		    cpu_to_le64(pci_map_single(
				np->pdev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	return 0;
}

static void rio_hw_init(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;
	u16 macctrl;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	rio_set_led_mode(dev);

	/* DebugCtrl bits 4, 5, 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	if (np->chip_id == CHIP_IP1000A &&
	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
		/* PHY magic taken from ipg driver, undocumented registers */
		mii_write(dev, np->phy_addr, 31, 0x0001);
		mii_write(dev, np->phy_addr, 27, 0x01e0);
		mii_write(dev, np->phy_addr, 31, 0x0002);
		mii_write(dev, np->phy_addr, 27, 0xeb8e);
		mii_write(dev, np->phy_addr, 31, 0x0000);
		mii_write(dev, np->phy_addr, 30, 0x005e);
		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
	}

	if (np->phy_media)
		mii_set_media_pcs(dev);
	else
		mii_set_media(dev);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);

	/* Set station address */
	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
	 */
	for (i = 0; i < 3; i++)
		dw16(StationAddr0 + 2 * i,
		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information is tagged by the TFC's VID, CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);
}

static void rio_hw_stop(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	/* Disable interrupts */
	dw16(IntEnable, 0);

	/* Stop Tx and Rx logics */
	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
}

static int rio_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	const int irq = np->pdev->irq;
	int i;

	i = alloc_list(dev);
	if (i)
		return i;

	rio_hw_init(dev);

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i) {
		rio_hw_stop(dev);
		free_list(dev);
		return i;
	}

	timer_setup(&np->timer, rio_timer, 0);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}

static void
rio_timer (struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = pci_get_drvdata(np->pdev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = netdev_alloc_skb_ip_align(dev,
								np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, dr32(TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	netif_trans_update(dev); /* prevent tx timeout */
}
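
/* Tx descriptor status word as assembled in start_xmit() below: the
 * ring entry number goes in the low bits, an optional VLAN tag request
 * (VLANTagInsert with the VID in bits 32..43 and skb->priority in bits
 * 45..47), WordAlignDisable, a fragment count of one, and, on
 * coalescing boundaries, TxDMAIndicate to request a Tx completion
 * interrupt for that frame.
 */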

static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct netdev_desc *txdesc;
	unsigned entry;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
	/* Schedule ISR */
	dw32(CountDown, 10000);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}

	return NETDEV_TX_OK;
}

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	unsigned int_status;
	int cnt = max_intrloop;
	int handled = 0;

	while (1) {
		int_status = dr16(IntStatus);
		dw16(IntStatus, int_status);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = dr32(TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		dw32(CountDown, 100);
	return IRQ_RETVAL(handled);
}
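
/* The CountDown register is used to schedule a deferred interrupt for
 * Tx housekeeping: start_xmit() arms it with 10000 and rio_interrupt()
 * re-arms it with 100 while cur_tx != old_tx, and IntRequested is
 * handled together with TxDMAComplete so leftover Tx buffers are
 * eventually reclaimed by rio_free_tx().
 */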

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;

	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_consume_skb_irq(skb);
		else
			dev_kfree_skb(skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}

static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int frame_id;
	int i;

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	dev->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		dev->stats.tx_fifo_errors++;
		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
		/* Transmit Underrun needs to set TxReset, DMAReset, FIFOReset */
		dw16(ASICCtrl + 2,
		     TxReset | DMAReset | FIFOReset | NetworkReset);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		dw32(TFDListPtr0, np->tx_ring_dma +
		     np->old_tx * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		dev->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		dw16(ASICCtrl + 2, TxReset | FIFOReset);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if (!(dr16(ASICCtrl + 2) & ResetBusy))
				break;
			mdelay (1);
		}
		rio_set_led_mode(dev);
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
	if (tx_status & 0x08)
		dev->stats.collisions++;
	/* Restart the Tx */
	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
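
/* Rx path: a frame is complete only when RFDDone, FrameStart and
 * FrameEnd are all set in the descriptor status; the low 16 bits hold
 * the frame length (the chip has already stripped the CRC).  Frames no
 * longer than copy_thresh are copied into a fresh skb so the original
 * Rx buffer can be reused in place.
 */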

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			dev->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				dev->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				dev->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				dev->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				dev->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						 np->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (np->pdev->pci_rev_id >= 0x0c &&
			    !(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
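
/* Link change handling: after a LinkEvent interrupt the PHY (or the
 * PCS block for fiber) is re-read and the MACCtrl duplex/flow-control
 * bits are rewritten to match the renegotiated link; tx_coalesce
 * reverts to 1 below gigabit speed.
 */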

static void
rio_error (struct net_device *dev, int int_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u16 macctrl;

	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			if (np->speed == 1000)
				np->tx_coalesce = tx_coalesce;
			else
				np->tx_coalesce = 1;
			macctrl = 0;
			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ?
				TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ?
				RxFlowControlEnable : 0;
			dw16(MACCtrl, macctrl);
			np->link_status = 1;
			netif_carrier_on(dev);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
			np->link_status = 0;
			netif_carrier_off(dev);
		}
	}

	/* UpdateStats statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}

	/* PCI Error, a catastrophic error related to the bus interface
	   occurs, set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		dw16(ASICCtrl + 2, GlobalReset | HostReset);
		mdelay (500);
		rio_set_led_mode(dev);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */

	dev->stats.rx_packets += dr32(FramesRcvOk);
	dev->stats.tx_packets += dr32(FramesXmtOk);
	dev->stats.rx_bytes += dr32(OctetRcvOk);
	dev->stats.tx_bytes += dr32(OctetXmtOk);

	dev->stats.multicast = dr32(McstFramesRcvdOk);
	dev->stats.collisions += dr32(SingleColFrames)
			      + dr32(MultiColFrames);

	/* detailed tx errors */
	stat_reg = dr16(FramesAbortXSColls);
	dev->stats.tx_aborted_errors += stat_reg;
	dev->stats.tx_errors += stat_reg;

	stat_reg = dr16(CarrierSenseErrors);
	dev->stats.tx_carrier_errors += stat_reg;
	dev->stats.tx_errors += stat_reg;

	/* Clear all other statistic registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr32(LateCollisions);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return &dev->stats;
}

static int
clear_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif

	/* All statistics registers need to be acknowledged,
	   else statistic overflow could cause problems */
	dr32(FramesRcvOk);
	dr32(FramesXmtOk);
	dr32(OctetRcvOk);
	dr32(OctetXmtOk);

	dr32(McstFramesRcvdOk);
	dr32(SingleColFrames);
	dr32(MultiColFrames);
	dr32(LateCollisions);
	/* detailed rx errors */
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);

	/* detailed tx errors */
	dr16(FramesAbortXSColls);
	dr16(CarrierSenseErrors);

	/* Clear all other statistic registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return 0;
}

static void
set_multicast (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u32 hash_table[2];
	u16 rx_mode = 0;

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		/* Receive broadcast frames and multicast frames filtering
		   by Hashtable */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		netdev_for_each_mc_addr(ha, dev) {
			int bit, index = 0;
			int crc = ether_crc_le(ETH_ALEN, ha->addr);
			/* The inverted high significant 6 bits of CRC are
			   used as an index to hashtable */
			for (bit = 0; bit < 6; bit++)
				if (crc & (1 << (31 - bit)))
					index |= (1 << bit);
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	dw32(HashTable0, hash_table[0]);
	dw32(HashTable1, hash_table[1]);
	dw16(ReceiveMode, rx_mode);
}

static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, "dl2k", sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}

static int rio_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 supported, advertising;

	if (np->phy_media) {
		/* fiber device */
		supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
		advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	} else {
		/* copper device */
		supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
			SUPPORTED_Autoneg | SUPPORTED_MII;
		advertising = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
			ADVERTISED_Autoneg | ADVERTISED_MII;
		cmd->base.port = PORT_MII;
	}
	if (np->link_status) {
		cmd->base.speed = np->speed;
		cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}
	if (np->an_enable)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	cmd->base.phy_address = np->phy_addr;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int rio_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	u8 duplex = cmd->base.duplex;

	netif_carrier_off(dev);
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		if (np->an_enable) {
			return 0;
		} else {
			np->an_enable = 1;
			mii_set_media(dev);
			return 0;
		}
	} else {
		np->an_enable = 0;
		if (np->speed == 1000) {
			speed = SPEED_100;
			duplex = DUPLEX_FULL;
			printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
		}
		switch (speed) {
		case SPEED_10:
			np->speed = 10;
			np->full_duplex = (duplex == DUPLEX_FULL);
			break;
		case SPEED_100:
			np->speed = 100;
			np->full_duplex = (duplex == DUPLEX_FULL);
			break;
		case SPEED_1000: /* not supported */
		default:
			return -EINVAL;
		}
		mii_set_media(dev);
	}
	return 0;
}

static u32 rio_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return np->link_status;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = rio_get_drvinfo,
	.get_link = rio_get_link,
	.get_link_ksettings = rio_get_link_ksettings,
	.set_link_ksettings = rio_set_link_ksettings,
};

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = netdev_priv(dev);
	struct mii_ioctl_data *miidata = if_mii(rq);

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCGMIIPHY:
		miidata->phy_id = phy_addr;
		break;
	case SIOCGMIIREG:
		miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the eeprom to avoid failures on
   some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
	void __iomem *ioaddr = np->eeprom_addr;
	int i = 1000;

	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
	while (i-- > 0) {
		if (!(dr16(EepromCtrl) & EEP_BUSY))
			return dr16(EepromData);
	}
	return 0;
}

enum phy_ctrl_bits {
	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
	MII_DUPLEX = 0x08,
};

#define mii_delay() dr8(PhyCtrl)
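
/* MII management is bit-banged through PhyCtrl: MII_CLK toggles the
 * management clock, MII_WRITE selects output mode with MII_DATA1 as the
 * data bit, mii_getbit() clears those bits (MII_READ) and samples bit 1
 * of PhyCtrl, and mii_delay() is just a dummy register read used as a
 * settling delay.  Frames follow the IEEE 802.3 clause 22 format
 * (preamble, ST, OP, PHY and register address, turnaround, 16 data
 * bits) as built in mii_read() and mii_write() below.
 */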
static void
mii_sendbit (struct net_device *dev, u32 data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
}

static int
mii_getbit (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	u8 data;

	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
	dw8(PhyCtrl, data);
	mii_delay ();
	dw8(PhyCtrl, data | MII_CLK);
	mii_delay ();
	return (dr8(PhyCtrl) >> 1) & 1;
}

static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
	}
}

static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
	u32 cmd;
	int i;
	u32 retval = 0;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP = 0110'b for read operation */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);
	/* Turnaround */
	if (mii_getbit (dev))
		goto err_out;
	/* Read data */
	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);
		retval <<= 1;
	}
	/* End cycle */
	mii_getbit (dev);
	return (retval >> 1) & 0xffff;

err_out:
	return 0;
}

static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
	u32 cmd;

	/* Preamble */
	mii_send_bits (dev, 0xffffffff, 32);
	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	/* End cycle */
	mii_getbit (dev);
	return 0;
}

static int
mii_wait_link (struct net_device *dev, int wait)
{
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	do {
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		if (bmsr & BMSR_LSTATUS)
			return 0;
		mdelay (1);
	} while (--wait > 0);
	return -1;
}

static int
mii_get_media (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	__u16 mscr;
	__u16 mssr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, MII_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
			mii_read (dev, phy_addr, MII_LPA);
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mssr = mii_read (dev, phy_addr, MII_STAT1000);
		if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
			np->speed = 1000;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
		} else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
			np->speed = 1000;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_100FULL) {
			np->speed = 100;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_100HALF) {
			np->speed = 100;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
		} else if (negotiate & ADVERTISE_10FULL) {
			np->speed = 10;
			np->full_duplex = 1;
			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
		} else if (negotiate & ADVERTISE_10HALF) {
			np->speed = 10;
			np->full_duplex = 0;
			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
		}
		if (negotiate & ADVERTISE_PAUSE_CAP) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & ADVERTISE_PAUSE_ASYM) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
		switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
		case BMCR_SPEED1000:
			printk (KERN_INFO "Operating at 1000 Mbps, ");
			break;
		case BMCR_SPEED100:
			printk (KERN_INFO "Operating at 100 Mbps, ");
			break;
		case 0:
			printk (KERN_INFO "Operating at 10 Mbps, ");
		}
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media (struct net_device *dev)
{
	__u16 pscr;
	__u16 bmcr;
	__u16 bmsr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Does user set speed? */
	if (np->an_enable) {
		/* Advertise capabilities */
		bmsr = mii_read (dev, phy_addr, MII_BMSR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~(ADVERTISE_100FULL | ADVERTISE_10FULL |
			  ADVERTISE_100HALF | ADVERTISE_10HALF |
			  ADVERTISE_100BASE4);
		if (bmsr & BMSR_100FULL)
			anar |= ADVERTISE_100FULL;
		if (bmsr & BMSR_100HALF)
			anar |= ADVERTISE_100HALF;
		if (bmsr & BMSR_100BASE4)
			anar |= ADVERTISE_100BASE4;
		if (bmsr & BMSR_10FULL)
			anar |= ADVERTISE_10FULL;
		if (bmsr & BMSR_10HALF)
			anar |= ADVERTISE_10HALF;
		anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Enable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr |= 3 << 5;	/* 11'b */
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* 1) Disable Auto crossover */
		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
		pscr &= ~(3 << 5);
		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);

		/* 2) PHY Reset */
		bmcr = mii_read (dev, phy_addr, MII_BMCR);
		bmcr |= BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		/* 3) Power Down */
		bmcr = 0x1940;	/* must be 0x1940 */
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay (100);	/* wait a certain time */

		/* 4) Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);

		/* 5) Set media and Power Up */
		bmcr = BMCR_PDOWN;
		if (np->speed == 100) {
			bmcr |= BMCR_SPEED100;
			printk (KERN_INFO "Manual 100 Mbps, ");
		} else if (np->speed == 10) {
			printk (KERN_INFO "Manual 10 Mbps, ");
		}
		if (np->full_duplex) {
			bmcr |= BMCR_FULLDPLX;
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
#if 0
		/* Set 1000BaseT Master/Slave setting */
		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE = 0;
#endif
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
	}
	return 0;
}

static int
mii_get_media_pcs (struct net_device *dev)
{
	__u16 negotiate;
	__u16 bmsr;
	int phy_addr;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
	if (np->an_enable) {
		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
			/* Auto-Negotiation not completed */
			return -1;
		}
		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
			mii_read (dev, phy_addr, PCS_ANLPAR);
		np->speed = 1000;
		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
			np->full_duplex = 1;
		} else {
			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
			np->full_duplex = 0;
		}
		if (negotiate & PCS_ANAR_PAUSE) {
			np->tx_flow &= 1;
			np->rx_flow &= 1;
		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
			np->tx_flow = 0;
			np->rx_flow &= 1;
		}
		/* else tx_flow, rx_flow = user select */
	} else {
		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
		printk (KERN_INFO "Operating at 1000 Mbps, ");
		if (bmcr & BMCR_FULLDPLX) {
			printk (KERN_CONT "Full duplex\n");
		} else {
			printk (KERN_CONT "Half duplex\n");
		}
	}
	if (np->tx_flow)
		printk(KERN_INFO "Enable Tx Flow Control\n");
	else
		printk(KERN_INFO "Disable Tx Flow Control\n");
	if (np->rx_flow)
		printk(KERN_INFO "Enable Rx Flow Control\n");
	else
		printk(KERN_INFO "Disable Rx Flow Control\n");

	return 0;
}

static int
mii_set_media_pcs (struct net_device *dev)
{
	__u16 bmcr;
	__u16 esr;
	__u16 anar;
	int phy_addr;
	struct netdev_private *np;
	np = netdev_priv(dev);
	phy_addr = np->phy_addr;

	/* Auto-Negotiation? */
	if (np->an_enable) {
		/* Advertise capabilities */
		esr = mii_read (dev, phy_addr, PCS_ESR);
		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
			~PCS_ANAR_HALF_DUPLEX &
			~PCS_ANAR_FULL_DUPLEX;
		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
			anar |= PCS_ANAR_HALF_DUPLEX;
		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
			anar |= PCS_ANAR_FULL_DUPLEX;
		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
		mii_write (dev, phy_addr, MII_ADVERTISE, anar);

		/* Soft reset PHY */
		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(1);
	} else {
		/* Force speed setting */
		/* PHY Reset */
		bmcr = BMCR_RESET;
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);
		if (np->full_duplex) {
			bmcr = BMCR_FULLDPLX;
			printk (KERN_INFO "Manual full duplex\n");
		} else {
			bmcr = 0;
			printk (KERN_INFO "Manual half duplex\n");
		}
		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		mdelay(10);

		/* Advertise nothing */
		mii_write (dev, phy_addr, MII_ADVERTISE, 0);
	}
	return 0;
}


static int
rio_close (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct pci_dev *pdev = np->pdev;

	netif_stop_queue (dev);

	rio_hw_stop(dev);

	free_irq(pdev->irq, dev);
	del_timer_sync (&np->timer);

	free_list(dev);

	return 0;
}

static void
rio_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev (dev);
		pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
				     np->rx_ring_dma);
		pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
				     np->tx_ring_dma);
#ifdef MEM_MAPPING
		pci_iounmap(pdev, np->ioaddr);
#endif
		pci_iounmap(pdev, np->eeprom_addr);
		free_netdev (dev);
		pci_release_regions (pdev);
		pci_disable_device (pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rio_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);
	del_timer_sync(&np->timer);
	rio_hw_stop(dev);

	return 0;
}

static int rio_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct netdev_private *np = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rio_reset_ring(np);
	rio_hw_init(dev);
	np->timer.expires = jiffies + 1 * HZ;
	add_timer(&np->timer);
	netif_device_attach(dev);
	dl2k_enable_int(np);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
#define RIO_PM_OPS    (&rio_pm_ops)

#else

#define RIO_PM_OPS	NULL

#endif /* CONFIG_PM_SLEEP */

static struct pci_driver rio_driver = {
	.name		= "dl2k",
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,
	.remove		= rio_remove1,
	.driver.pm	= RIO_PM_OPS,
};

module_pci_driver(rio_driver);

/* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */