1 /* 2 * Driver for Marvell Discovery (MV643XX) and Marvell Orion ethernet ports 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 4 * 5 * Based on the 64360 driver from: 6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il> 7 * Rabeeh Khoury <rabeeh@marvell.com> 8 * 9 * Copyright (C) 2003 PMC-Sierra, Inc., 10 * written by Manish Lachwani 11 * 12 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> 13 * 14 * Copyright (C) 2004-2006 MontaVista Software, Inc. 15 * Dale Farnsworth <dale@farnsworth.org> 16 * 17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 18 * <sjhill@realitydiluted.com> 19 * 20 * Copyright (C) 2007-2008 Marvell Semiconductor 21 * Lennert Buytenhek <buytenh@marvell.com> 22 * 23 * This program is free software; you can redistribute it and/or 24 * modify it under the terms of the GNU General Public License 25 * as published by the Free Software Foundation; either version 2 26 * of the License, or (at your option) any later version. 27 * 28 * This program is distributed in the hope that it will be useful, 29 * but WITHOUT ANY WARRANTY; without even the implied warranty of 30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 * GNU General Public License for more details. 32 * 33 * You should have received a copy of the GNU General Public License 34 * along with this program; if not, write to the Free Software 35 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 36 */ 37 38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 39 40 #include <linux/init.h> 41 #include <linux/dma-mapping.h> 42 #include <linux/in.h> 43 #include <linux/ip.h> 44 #include <linux/tcp.h> 45 #include <linux/udp.h> 46 #include <linux/etherdevice.h> 47 #include <linux/delay.h> 48 #include <linux/ethtool.h> 49 #include <linux/platform_device.h> 50 #include <linux/module.h> 51 #include <linux/kernel.h> 52 #include <linux/spinlock.h> 53 #include <linux/workqueue.h> 54 #include <linux/phy.h> 55 #include <linux/mv643xx_eth.h> 56 #include <linux/io.h> 57 #include <linux/types.h> 58 #include <linux/inet_lro.h> 59 #include <linux/slab.h> 60 #include <asm/system.h> 61 62 static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 63 static char mv643xx_eth_driver_version[] = "1.4"; 64 65 66 /* 67 * Registers shared between all ports. 68 */ 69 #define PHY_ADDR 0x0000 70 #define SMI_REG 0x0004 71 #define SMI_BUSY 0x10000000 72 #define SMI_READ_VALID 0x08000000 73 #define SMI_OPCODE_READ 0x04000000 74 #define SMI_OPCODE_WRITE 0x00000000 75 #define ERR_INT_CAUSE 0x0080 76 #define ERR_INT_SMI_DONE 0x00000010 77 #define ERR_INT_MASK 0x0084 78 #define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 79 #define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 80 #define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 81 #define WINDOW_BAR_ENABLE 0x0290 82 #define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) 83 84 /* 85 * Main per-port registers. These live at offset 0x0400 for 86 * port #0, 0x0800 for port #1, and 0x0c00 for port #2. 87 */ 88 #define PORT_CONFIG 0x0000 89 #define UNICAST_PROMISCUOUS_MODE 0x00000001 90 #define PORT_CONFIG_EXT 0x0004 91 #define MAC_ADDR_LOW 0x0014 92 #define MAC_ADDR_HIGH 0x0018 93 #define SDMA_CONFIG 0x001c 94 #define TX_BURST_SIZE_16_64BIT 0x01000000 95 #define TX_BURST_SIZE_4_64BIT 0x00800000 96 #define BLM_TX_NO_SWAP 0x00000020 97 #define BLM_RX_NO_SWAP 0x00000010 98 #define RX_BURST_SIZE_16_64BIT 0x00000008 99 #define RX_BURST_SIZE_4_64BIT 0x00000004 100 #define PORT_SERIAL_CONTROL 0x003c 101 #define SET_MII_SPEED_TO_100 0x01000000 102 #define SET_GMII_SPEED_TO_1000 0x00800000 103 #define SET_FULL_DUPLEX_MODE 0x00200000 104 #define MAX_RX_PACKET_9700BYTE 0x000a0000 105 #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 106 #define DO_NOT_FORCE_LINK_FAIL 0x00000400 107 #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 108 #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 109 #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 110 #define FORCE_LINK_PASS 0x00000002 111 #define SERIAL_PORT_ENABLE 0x00000001 112 #define PORT_STATUS 0x0044 113 #define TX_FIFO_EMPTY 0x00000400 114 #define TX_IN_PROGRESS 0x00000080 115 #define PORT_SPEED_MASK 0x00000030 116 #define PORT_SPEED_1000 0x00000010 117 #define PORT_SPEED_100 0x00000020 118 #define PORT_SPEED_10 0x00000000 119 #define FLOW_CONTROL_ENABLED 0x00000008 120 #define FULL_DUPLEX 0x00000004 121 #define LINK_UP 0x00000002 122 #define TXQ_COMMAND 0x0048 123 #define TXQ_FIX_PRIO_CONF 0x004c 124 #define TX_BW_RATE 0x0050 125 #define TX_BW_MTU 0x0058 126 #define TX_BW_BURST 0x005c 127 #define INT_CAUSE 0x0060 128 #define INT_TX_END 0x07f80000 129 #define INT_TX_END_0 0x00080000 130 #define INT_RX 0x000003fc 131 #define INT_RX_0 0x00000004 132 #define INT_EXT 0x00000002 133 #define INT_CAUSE_EXT 0x0064 134 #define INT_EXT_LINK_PHY 0x00110000 135 #define INT_EXT_TX 0x000000ff 136 #define INT_MASK 0x0068 137 #define INT_MASK_EXT 0x006c 138 #define TX_FIFO_URGENT_THRESHOLD 0x0074 139 #define TXQ_FIX_PRIO_CONF_MOVED 0x00dc 140 #define TX_BW_RATE_MOVED 0x00e0 141 #define TX_BW_MTU_MOVED 0x00e8 142 #define TX_BW_BURST_MOVED 0x00ec 143 #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4)) 144 #define RXQ_COMMAND 0x0280 145 #define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2)) 146 #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4)) 147 #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4)) 148 #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4)) 149 150 /* 151 * Misc per-port registers. 152 */ 153 #define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) 154 #define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) 155 #define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) 156 #define UNICAST_TABLE(p) (0x1600 + ((p) << 10)) 157 158 159 /* 160 * SDMA configuration register default value. 161 */ 162 #if defined(__BIG_ENDIAN) 163 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 164 (RX_BURST_SIZE_4_64BIT | \ 165 TX_BURST_SIZE_4_64BIT) 166 #elif defined(__LITTLE_ENDIAN) 167 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 168 (RX_BURST_SIZE_4_64BIT | \ 169 BLM_RX_NO_SWAP | \ 170 BLM_TX_NO_SWAP | \ 171 TX_BURST_SIZE_4_64BIT) 172 #else 173 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 174 #endif 175 176 177 /* 178 * Misc definitions. 179 */ 180 #define DEFAULT_RX_QUEUE_SIZE 128 181 #define DEFAULT_TX_QUEUE_SIZE 256 182 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) 183 184 185 /* 186 * RX/TX descriptors. 187 */ 188 #if defined(__BIG_ENDIAN) 189 struct rx_desc { 190 u16 byte_cnt; /* Descriptor buffer byte count */ 191 u16 buf_size; /* Buffer size */ 192 u32 cmd_sts; /* Descriptor command status */ 193 u32 next_desc_ptr; /* Next descriptor pointer */ 194 u32 buf_ptr; /* Descriptor buffer pointer */ 195 }; 196 197 struct tx_desc { 198 u16 byte_cnt; /* buffer byte count */ 199 u16 l4i_chk; /* CPU provided TCP checksum */ 200 u32 cmd_sts; /* Command/status field */ 201 u32 next_desc_ptr; /* Pointer to next descriptor */ 202 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 203 }; 204 #elif defined(__LITTLE_ENDIAN) 205 struct rx_desc { 206 u32 cmd_sts; /* Descriptor command status */ 207 u16 buf_size; /* Buffer size */ 208 u16 byte_cnt; /* Descriptor buffer byte count */ 209 u32 buf_ptr; /* Descriptor buffer pointer */ 210 u32 next_desc_ptr; /* Next descriptor pointer */ 211 }; 212 213 struct tx_desc { 214 u32 cmd_sts; /* Command/status field */ 215 u16 l4i_chk; /* CPU provided TCP checksum */ 216 u16 byte_cnt; /* buffer byte count */ 217 u32 buf_ptr; /* pointer to buffer for this descriptor*/ 218 u32 next_desc_ptr; /* Pointer to next descriptor */ 219 }; 220 #else 221 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 222 #endif 223 224 /* RX & TX descriptor command */ 225 #define BUFFER_OWNED_BY_DMA 0x80000000 226 227 /* RX & TX descriptor status */ 228 #define ERROR_SUMMARY 0x00000001 229 230 /* RX descriptor status */ 231 #define LAYER_4_CHECKSUM_OK 0x40000000 232 #define RX_ENABLE_INTERRUPT 0x20000000 233 #define RX_FIRST_DESC 0x08000000 234 #define RX_LAST_DESC 0x04000000 235 #define RX_IP_HDR_OK 0x02000000 236 #define RX_PKT_IS_IPV4 0x01000000 237 #define RX_PKT_IS_ETHERNETV2 0x00800000 238 #define RX_PKT_LAYER4_TYPE_MASK 0x00600000 239 #define RX_PKT_LAYER4_TYPE_TCP_IPV4 0x00000000 240 #define RX_PKT_IS_VLAN_TAGGED 0x00080000 241 242 /* TX descriptor command */ 243 #define TX_ENABLE_INTERRUPT 0x00800000 244 #define GEN_CRC 0x00400000 245 #define TX_FIRST_DESC 0x00200000 246 #define TX_LAST_DESC 0x00100000 247 #define ZERO_PADDING 0x00080000 248 #define GEN_IP_V4_CHECKSUM 0x00040000 249 #define GEN_TCP_UDP_CHECKSUM 0x00020000 250 #define UDP_FRAME 0x00010000 251 #define MAC_HDR_EXTRA_4_BYTES 0x00008000 252 #define MAC_HDR_EXTRA_8_BYTES 0x00000200 253 254 #define TX_IHL_SHIFT 11 255 256 257 /* global *******************************************************************/ 258 struct mv643xx_eth_shared_private { 259 /* 260 * Ethernet controller base address. 261 */ 262 void __iomem *base; 263 264 /* 265 * Points at the right SMI instance to use. 266 */ 267 struct mv643xx_eth_shared_private *smi; 268 269 /* 270 * Provides access to local SMI interface. 271 */ 272 struct mii_bus *smi_bus; 273 274 /* 275 * If we have access to the error interrupt pin (which is 276 * somewhat misnamed as it not only reflects internal errors 277 * but also reflects SMI completion), use that to wait for 278 * SMI access completion instead of polling the SMI busy bit. 279 */ 280 int err_interrupt; 281 wait_queue_head_t smi_busy_wait; 282 283 /* 284 * Per-port MBUS window access register value. 285 */ 286 u32 win_protect; 287 288 /* 289 * Hardware-specific parameters. 290 */ 291 unsigned int t_clk; 292 int extended_rx_coal_limit; 293 int tx_bw_control; 294 int tx_csum_limit; 295 }; 296 297 #define TX_BW_CONTROL_ABSENT 0 298 #define TX_BW_CONTROL_OLD_LAYOUT 1 299 #define TX_BW_CONTROL_NEW_LAYOUT 2 300 301 static int mv643xx_eth_open(struct net_device *dev); 302 static int mv643xx_eth_stop(struct net_device *dev); 303 304 305 /* per-port *****************************************************************/ 306 struct mib_counters { 307 u64 good_octets_received; 308 u32 bad_octets_received; 309 u32 internal_mac_transmit_err; 310 u32 good_frames_received; 311 u32 bad_frames_received; 312 u32 broadcast_frames_received; 313 u32 multicast_frames_received; 314 u32 frames_64_octets; 315 u32 frames_65_to_127_octets; 316 u32 frames_128_to_255_octets; 317 u32 frames_256_to_511_octets; 318 u32 frames_512_to_1023_octets; 319 u32 frames_1024_to_max_octets; 320 u64 good_octets_sent; 321 u32 good_frames_sent; 322 u32 excessive_collision; 323 u32 multicast_frames_sent; 324 u32 broadcast_frames_sent; 325 u32 unrec_mac_control_received; 326 u32 fc_sent; 327 u32 good_fc_received; 328 u32 bad_fc_received; 329 u32 undersize_received; 330 u32 fragments_received; 331 u32 oversize_received; 332 u32 jabber_received; 333 u32 mac_receive_error; 334 u32 bad_crc_event; 335 u32 collision; 336 u32 late_collision; 337 }; 338 339 struct lro_counters { 340 u32 lro_aggregated; 341 u32 lro_flushed; 342 u32 lro_no_desc; 343 }; 344 345 struct rx_queue { 346 int index; 347 348 int rx_ring_size; 349 350 int rx_desc_count; 351 int rx_curr_desc; 352 int rx_used_desc; 353 354 struct rx_desc *rx_desc_area; 355 dma_addr_t rx_desc_dma; 356 int rx_desc_area_size; 357 struct sk_buff **rx_skb; 358 359 struct net_lro_mgr lro_mgr; 360 struct net_lro_desc lro_arr[8]; 361 }; 362 363 struct tx_queue { 364 int index; 365 366 int tx_ring_size; 367 368 int tx_desc_count; 369 int tx_curr_desc; 370 int tx_used_desc; 371 372 struct tx_desc *tx_desc_area; 373 dma_addr_t tx_desc_dma; 374 int tx_desc_area_size; 375 376 struct sk_buff_head tx_skb; 377 378 unsigned long tx_packets; 379 unsigned long tx_bytes; 380 unsigned long tx_dropped; 381 }; 382 383 struct mv643xx_eth_private { 384 struct mv643xx_eth_shared_private *shared; 385 void __iomem *base; 386 int port_num; 387 388 struct net_device *dev; 389 390 struct phy_device *phy; 391 392 struct timer_list mib_counters_timer; 393 spinlock_t mib_counters_lock; 394 struct mib_counters mib_counters; 395 396 struct lro_counters lro_counters; 397 398 struct work_struct tx_timeout_task; 399 400 struct napi_struct napi; 401 u32 int_mask; 402 u8 oom; 403 u8 work_link; 404 u8 work_tx; 405 u8 work_tx_end; 406 u8 work_rx; 407 u8 work_rx_refill; 408 409 int skb_size; 410 struct sk_buff_head rx_recycle; 411 412 /* 413 * RX state. 414 */ 415 int rx_ring_size; 416 unsigned long rx_desc_sram_addr; 417 int rx_desc_sram_size; 418 int rxq_count; 419 struct timer_list rx_oom; 420 struct rx_queue rxq[8]; 421 422 /* 423 * TX state. 424 */ 425 int tx_ring_size; 426 unsigned long tx_desc_sram_addr; 427 int tx_desc_sram_size; 428 int txq_count; 429 struct tx_queue txq[8]; 430 }; 431 432 433 /* port register accessors **************************************************/ 434 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) 435 { 436 return readl(mp->shared->base + offset); 437 } 438 439 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) 440 { 441 return readl(mp->base + offset); 442 } 443 444 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) 445 { 446 writel(data, mp->shared->base + offset); 447 } 448 449 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) 450 { 451 writel(data, mp->base + offset); 452 } 453 454 455 /* rxq/txq helper functions *************************************************/ 456 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) 457 { 458 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); 459 } 460 461 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq) 462 { 463 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]); 464 } 465 466 static void rxq_enable(struct rx_queue *rxq) 467 { 468 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 469 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); 470 } 471 472 static void rxq_disable(struct rx_queue *rxq) 473 { 474 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 475 u8 mask = 1 << rxq->index; 476 477 wrlp(mp, RXQ_COMMAND, mask << 8); 478 while (rdlp(mp, RXQ_COMMAND) & mask) 479 udelay(10); 480 } 481 482 static void txq_reset_hw_ptr(struct tx_queue *txq) 483 { 484 struct mv643xx_eth_private *mp = txq_to_mp(txq); 485 u32 addr; 486 487 addr = (u32)txq->tx_desc_dma; 488 addr += txq->tx_curr_desc * sizeof(struct tx_desc); 489 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); 490 } 491 492 static void txq_enable(struct tx_queue *txq) 493 { 494 struct mv643xx_eth_private *mp = txq_to_mp(txq); 495 wrlp(mp, TXQ_COMMAND, 1 << txq->index); 496 } 497 498 static void txq_disable(struct tx_queue *txq) 499 { 500 struct mv643xx_eth_private *mp = txq_to_mp(txq); 501 u8 mask = 1 << txq->index; 502 503 wrlp(mp, TXQ_COMMAND, mask << 8); 504 while (rdlp(mp, TXQ_COMMAND) & mask) 505 udelay(10); 506 } 507 508 static void txq_maybe_wake(struct tx_queue *txq) 509 { 510 struct mv643xx_eth_private *mp = txq_to_mp(txq); 511 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 512 513 if (netif_tx_queue_stopped(nq)) { 514 __netif_tx_lock(nq, smp_processor_id()); 515 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1) 516 netif_tx_wake_queue(nq); 517 __netif_tx_unlock(nq); 518 } 519 } 520 521 522 /* rx napi ******************************************************************/ 523 static int 524 mv643xx_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, 525 u64 *hdr_flags, void *priv) 526 { 527 unsigned long cmd_sts = (unsigned long)priv; 528 529 /* 530 * Make sure that this packet is Ethernet II, is not VLAN 531 * tagged, is IPv4, has a valid IP header, and is TCP. 532 */ 533 if ((cmd_sts & (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | 534 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_MASK | 535 RX_PKT_IS_VLAN_TAGGED)) != 536 (RX_IP_HDR_OK | RX_PKT_IS_IPV4 | 537 RX_PKT_IS_ETHERNETV2 | RX_PKT_LAYER4_TYPE_TCP_IPV4)) 538 return -1; 539 540 skb_reset_network_header(skb); 541 skb_set_transport_header(skb, ip_hdrlen(skb)); 542 *iphdr = ip_hdr(skb); 543 *tcph = tcp_hdr(skb); 544 *hdr_flags = LRO_IPV4 | LRO_TCP; 545 546 return 0; 547 } 548 549 static int rxq_process(struct rx_queue *rxq, int budget) 550 { 551 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 552 struct net_device_stats *stats = &mp->dev->stats; 553 int lro_flush_needed; 554 int rx; 555 556 lro_flush_needed = 0; 557 rx = 0; 558 while (rx < budget && rxq->rx_desc_count) { 559 struct rx_desc *rx_desc; 560 unsigned int cmd_sts; 561 struct sk_buff *skb; 562 u16 byte_cnt; 563 564 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; 565 566 cmd_sts = rx_desc->cmd_sts; 567 if (cmd_sts & BUFFER_OWNED_BY_DMA) 568 break; 569 rmb(); 570 571 skb = rxq->rx_skb[rxq->rx_curr_desc]; 572 rxq->rx_skb[rxq->rx_curr_desc] = NULL; 573 574 rxq->rx_curr_desc++; 575 if (rxq->rx_curr_desc == rxq->rx_ring_size) 576 rxq->rx_curr_desc = 0; 577 578 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, 579 rx_desc->buf_size, DMA_FROM_DEVICE); 580 rxq->rx_desc_count--; 581 rx++; 582 583 mp->work_rx_refill |= 1 << rxq->index; 584 585 byte_cnt = rx_desc->byte_cnt; 586 587 /* 588 * Update statistics. 589 * 590 * Note that the descriptor byte count includes 2 dummy 591 * bytes automatically inserted by the hardware at the 592 * start of the packet (which we don't count), and a 4 593 * byte CRC at the end of the packet (which we do count). 594 */ 595 stats->rx_packets++; 596 stats->rx_bytes += byte_cnt - 2; 597 598 /* 599 * In case we received a packet without first / last bits 600 * on, or the error summary bit is set, the packet needs 601 * to be dropped. 602 */ 603 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY)) 604 != (RX_FIRST_DESC | RX_LAST_DESC)) 605 goto err; 606 607 /* 608 * The -4 is for the CRC in the trailer of the 609 * received packet 610 */ 611 skb_put(skb, byte_cnt - 2 - 4); 612 613 if (cmd_sts & LAYER_4_CHECKSUM_OK) 614 skb->ip_summed = CHECKSUM_UNNECESSARY; 615 skb->protocol = eth_type_trans(skb, mp->dev); 616 617 if (skb->dev->features & NETIF_F_LRO && 618 skb->ip_summed == CHECKSUM_UNNECESSARY) { 619 lro_receive_skb(&rxq->lro_mgr, skb, (void *)cmd_sts); 620 lro_flush_needed = 1; 621 } else 622 netif_receive_skb(skb); 623 624 continue; 625 626 err: 627 stats->rx_dropped++; 628 629 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 630 (RX_FIRST_DESC | RX_LAST_DESC)) { 631 if (net_ratelimit()) 632 netdev_err(mp->dev, 633 "received packet spanning multiple descriptors\n"); 634 } 635 636 if (cmd_sts & ERROR_SUMMARY) 637 stats->rx_errors++; 638 639 dev_kfree_skb(skb); 640 } 641 642 if (lro_flush_needed) 643 lro_flush_all(&rxq->lro_mgr); 644 645 if (rx < budget) 646 mp->work_rx &= ~(1 << rxq->index); 647 648 return rx; 649 } 650 651 static int rxq_refill(struct rx_queue *rxq, int budget) 652 { 653 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 654 int refilled; 655 656 refilled = 0; 657 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { 658 struct sk_buff *skb; 659 int rx; 660 struct rx_desc *rx_desc; 661 int size; 662 663 skb = __skb_dequeue(&mp->rx_recycle); 664 if (skb == NULL) 665 skb = dev_alloc_skb(mp->skb_size); 666 667 if (skb == NULL) { 668 mp->oom = 1; 669 goto oom; 670 } 671 672 if (SKB_DMA_REALIGN) 673 skb_reserve(skb, SKB_DMA_REALIGN); 674 675 refilled++; 676 rxq->rx_desc_count++; 677 678 rx = rxq->rx_used_desc++; 679 if (rxq->rx_used_desc == rxq->rx_ring_size) 680 rxq->rx_used_desc = 0; 681 682 rx_desc = rxq->rx_desc_area + rx; 683 684 size = skb->end - skb->data; 685 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 686 skb->data, size, 687 DMA_FROM_DEVICE); 688 rx_desc->buf_size = size; 689 rxq->rx_skb[rx] = skb; 690 wmb(); 691 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT; 692 wmb(); 693 694 /* 695 * The hardware automatically prepends 2 bytes of 696 * dummy data to each received packet, so that the 697 * IP header ends up 16-byte aligned. 698 */ 699 skb_reserve(skb, 2); 700 } 701 702 if (refilled < budget) 703 mp->work_rx_refill &= ~(1 << rxq->index); 704 705 oom: 706 return refilled; 707 } 708 709 710 /* tx ***********************************************************************/ 711 static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) 712 { 713 int frag; 714 715 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 716 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 717 718 if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) 719 return 1; 720 } 721 722 return 0; 723 } 724 725 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 726 { 727 struct mv643xx_eth_private *mp = txq_to_mp(txq); 728 int nr_frags = skb_shinfo(skb)->nr_frags; 729 int frag; 730 731 for (frag = 0; frag < nr_frags; frag++) { 732 skb_frag_t *this_frag; 733 int tx_index; 734 struct tx_desc *desc; 735 736 this_frag = &skb_shinfo(skb)->frags[frag]; 737 tx_index = txq->tx_curr_desc++; 738 if (txq->tx_curr_desc == txq->tx_ring_size) 739 txq->tx_curr_desc = 0; 740 desc = &txq->tx_desc_area[tx_index]; 741 742 /* 743 * The last fragment will generate an interrupt 744 * which will free the skb on TX completion. 745 */ 746 if (frag == nr_frags - 1) { 747 desc->cmd_sts = BUFFER_OWNED_BY_DMA | 748 ZERO_PADDING | TX_LAST_DESC | 749 TX_ENABLE_INTERRUPT; 750 } else { 751 desc->cmd_sts = BUFFER_OWNED_BY_DMA; 752 } 753 754 desc->l4i_chk = 0; 755 desc->byte_cnt = skb_frag_size(this_frag); 756 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, 757 this_frag, 0, 758 skb_frag_size(this_frag), 759 DMA_TO_DEVICE); 760 } 761 } 762 763 static inline __be16 sum16_as_be(__sum16 sum) 764 { 765 return (__force __be16)sum; 766 } 767 768 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 769 { 770 struct mv643xx_eth_private *mp = txq_to_mp(txq); 771 int nr_frags = skb_shinfo(skb)->nr_frags; 772 int tx_index; 773 struct tx_desc *desc; 774 u32 cmd_sts; 775 u16 l4i_chk; 776 int length; 777 778 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 779 l4i_chk = 0; 780 781 if (skb->ip_summed == CHECKSUM_PARTIAL) { 782 int hdr_len; 783 int tag_bytes; 784 785 BUG_ON(skb->protocol != htons(ETH_P_IP) && 786 skb->protocol != htons(ETH_P_8021Q)); 787 788 hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; 789 tag_bytes = hdr_len - ETH_HLEN; 790 if (skb->len - hdr_len > mp->shared->tx_csum_limit || 791 unlikely(tag_bytes & ~12)) { 792 if (skb_checksum_help(skb) == 0) 793 goto no_csum; 794 kfree_skb(skb); 795 return 1; 796 } 797 798 if (tag_bytes & 4) 799 cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 800 if (tag_bytes & 8) 801 cmd_sts |= MAC_HDR_EXTRA_8_BYTES; 802 803 cmd_sts |= GEN_TCP_UDP_CHECKSUM | 804 GEN_IP_V4_CHECKSUM | 805 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 806 807 switch (ip_hdr(skb)->protocol) { 808 case IPPROTO_UDP: 809 cmd_sts |= UDP_FRAME; 810 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); 811 break; 812 case IPPROTO_TCP: 813 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); 814 break; 815 default: 816 BUG(); 817 } 818 } else { 819 no_csum: 820 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 821 cmd_sts |= 5 << TX_IHL_SHIFT; 822 } 823 824 tx_index = txq->tx_curr_desc++; 825 if (txq->tx_curr_desc == txq->tx_ring_size) 826 txq->tx_curr_desc = 0; 827 desc = &txq->tx_desc_area[tx_index]; 828 829 if (nr_frags) { 830 txq_submit_frag_skb(txq, skb); 831 length = skb_headlen(skb); 832 } else { 833 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT; 834 length = skb->len; 835 } 836 837 desc->l4i_chk = l4i_chk; 838 desc->byte_cnt = length; 839 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, 840 length, DMA_TO_DEVICE); 841 842 __skb_queue_tail(&txq->tx_skb, skb); 843 844 skb_tx_timestamp(skb); 845 846 /* ensure all other descriptors are written before first cmd_sts */ 847 wmb(); 848 desc->cmd_sts = cmd_sts; 849 850 /* clear TX_END status */ 851 mp->work_tx_end &= ~(1 << txq->index); 852 853 /* ensure all descriptors are written before poking hardware */ 854 wmb(); 855 txq_enable(txq); 856 857 txq->tx_desc_count += nr_frags + 1; 858 859 return 0; 860 } 861 862 static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 863 { 864 struct mv643xx_eth_private *mp = netdev_priv(dev); 865 int length, queue; 866 struct tx_queue *txq; 867 struct netdev_queue *nq; 868 869 queue = skb_get_queue_mapping(skb); 870 txq = mp->txq + queue; 871 nq = netdev_get_tx_queue(dev, queue); 872 873 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 874 txq->tx_dropped++; 875 netdev_printk(KERN_DEBUG, dev, 876 "failed to linearize skb with tiny unaligned fragment\n"); 877 return NETDEV_TX_BUSY; 878 } 879 880 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 881 if (net_ratelimit()) 882 netdev_err(dev, "tx queue full?!\n"); 883 kfree_skb(skb); 884 return NETDEV_TX_OK; 885 } 886 887 length = skb->len; 888 889 if (!txq_submit_skb(txq, skb)) { 890 int entries_left; 891 892 txq->tx_bytes += length; 893 txq->tx_packets++; 894 895 entries_left = txq->tx_ring_size - txq->tx_desc_count; 896 if (entries_left < MAX_SKB_FRAGS + 1) 897 netif_tx_stop_queue(nq); 898 } 899 900 return NETDEV_TX_OK; 901 } 902 903 904 /* tx napi ******************************************************************/ 905 static void txq_kick(struct tx_queue *txq) 906 { 907 struct mv643xx_eth_private *mp = txq_to_mp(txq); 908 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 909 u32 hw_desc_ptr; 910 u32 expected_ptr; 911 912 __netif_tx_lock(nq, smp_processor_id()); 913 914 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) 915 goto out; 916 917 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); 918 expected_ptr = (u32)txq->tx_desc_dma + 919 txq->tx_curr_desc * sizeof(struct tx_desc); 920 921 if (hw_desc_ptr != expected_ptr) 922 txq_enable(txq); 923 924 out: 925 __netif_tx_unlock(nq); 926 927 mp->work_tx_end &= ~(1 << txq->index); 928 } 929 930 static int txq_reclaim(struct tx_queue *txq, int budget, int force) 931 { 932 struct mv643xx_eth_private *mp = txq_to_mp(txq); 933 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 934 int reclaimed; 935 936 __netif_tx_lock(nq, smp_processor_id()); 937 938 reclaimed = 0; 939 while (reclaimed < budget && txq->tx_desc_count > 0) { 940 int tx_index; 941 struct tx_desc *desc; 942 u32 cmd_sts; 943 struct sk_buff *skb; 944 945 tx_index = txq->tx_used_desc; 946 desc = &txq->tx_desc_area[tx_index]; 947 cmd_sts = desc->cmd_sts; 948 949 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 950 if (!force) 951 break; 952 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA; 953 } 954 955 txq->tx_used_desc = tx_index + 1; 956 if (txq->tx_used_desc == txq->tx_ring_size) 957 txq->tx_used_desc = 0; 958 959 reclaimed++; 960 txq->tx_desc_count--; 961 962 skb = NULL; 963 if (cmd_sts & TX_LAST_DESC) 964 skb = __skb_dequeue(&txq->tx_skb); 965 966 if (cmd_sts & ERROR_SUMMARY) { 967 netdev_info(mp->dev, "tx error\n"); 968 mp->dev->stats.tx_errors++; 969 } 970 971 if (cmd_sts & TX_FIRST_DESC) { 972 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, 973 desc->byte_cnt, DMA_TO_DEVICE); 974 } else { 975 dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr, 976 desc->byte_cnt, DMA_TO_DEVICE); 977 } 978 979 if (skb != NULL) { 980 if (skb_queue_len(&mp->rx_recycle) < 981 mp->rx_ring_size && 982 skb_recycle_check(skb, mp->skb_size)) 983 __skb_queue_head(&mp->rx_recycle, skb); 984 else 985 dev_kfree_skb(skb); 986 } 987 } 988 989 __netif_tx_unlock(nq); 990 991 if (reclaimed < budget) 992 mp->work_tx &= ~(1 << txq->index); 993 994 return reclaimed; 995 } 996 997 998 /* tx rate control **********************************************************/ 999 /* 1000 * Set total maximum TX rate (shared by all TX queues for this port) 1001 * to 'rate' bits per second, with a maximum burst of 'burst' bytes. 1002 */ 1003 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) 1004 { 1005 int token_rate; 1006 int mtu; 1007 int bucket_size; 1008 1009 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1010 if (token_rate > 1023) 1011 token_rate = 1023; 1012 1013 mtu = (mp->dev->mtu + 255) >> 8; 1014 if (mtu > 63) 1015 mtu = 63; 1016 1017 bucket_size = (burst + 255) >> 8; 1018 if (bucket_size > 65535) 1019 bucket_size = 65535; 1020 1021 switch (mp->shared->tx_bw_control) { 1022 case TX_BW_CONTROL_OLD_LAYOUT: 1023 wrlp(mp, TX_BW_RATE, token_rate); 1024 wrlp(mp, TX_BW_MTU, mtu); 1025 wrlp(mp, TX_BW_BURST, bucket_size); 1026 break; 1027 case TX_BW_CONTROL_NEW_LAYOUT: 1028 wrlp(mp, TX_BW_RATE_MOVED, token_rate); 1029 wrlp(mp, TX_BW_MTU_MOVED, mtu); 1030 wrlp(mp, TX_BW_BURST_MOVED, bucket_size); 1031 break; 1032 } 1033 } 1034 1035 static void txq_set_rate(struct tx_queue *txq, int rate, int burst) 1036 { 1037 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1038 int token_rate; 1039 int bucket_size; 1040 1041 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1042 if (token_rate > 1023) 1043 token_rate = 1023; 1044 1045 bucket_size = (burst + 255) >> 8; 1046 if (bucket_size > 65535) 1047 bucket_size = 65535; 1048 1049 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); 1050 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); 1051 } 1052 1053 static void txq_set_fixed_prio_mode(struct tx_queue *txq) 1054 { 1055 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1056 int off; 1057 u32 val; 1058 1059 /* 1060 * Turn on fixed priority mode. 1061 */ 1062 off = 0; 1063 switch (mp->shared->tx_bw_control) { 1064 case TX_BW_CONTROL_OLD_LAYOUT: 1065 off = TXQ_FIX_PRIO_CONF; 1066 break; 1067 case TX_BW_CONTROL_NEW_LAYOUT: 1068 off = TXQ_FIX_PRIO_CONF_MOVED; 1069 break; 1070 } 1071 1072 if (off) { 1073 val = rdlp(mp, off); 1074 val |= 1 << txq->index; 1075 wrlp(mp, off, val); 1076 } 1077 } 1078 1079 1080 /* mii management interface *************************************************/ 1081 static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) 1082 { 1083 struct mv643xx_eth_shared_private *msp = dev_id; 1084 1085 if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) { 1086 writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE); 1087 wake_up(&msp->smi_busy_wait); 1088 return IRQ_HANDLED; 1089 } 1090 1091 return IRQ_NONE; 1092 } 1093 1094 static int smi_is_done(struct mv643xx_eth_shared_private *msp) 1095 { 1096 return !(readl(msp->base + SMI_REG) & SMI_BUSY); 1097 } 1098 1099 static int smi_wait_ready(struct mv643xx_eth_shared_private *msp) 1100 { 1101 if (msp->err_interrupt == NO_IRQ) { 1102 int i; 1103 1104 for (i = 0; !smi_is_done(msp); i++) { 1105 if (i == 10) 1106 return -ETIMEDOUT; 1107 msleep(10); 1108 } 1109 1110 return 0; 1111 } 1112 1113 if (!smi_is_done(msp)) { 1114 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp), 1115 msecs_to_jiffies(100)); 1116 if (!smi_is_done(msp)) 1117 return -ETIMEDOUT; 1118 } 1119 1120 return 0; 1121 } 1122 1123 static int smi_bus_read(struct mii_bus *bus, int addr, int reg) 1124 { 1125 struct mv643xx_eth_shared_private *msp = bus->priv; 1126 void __iomem *smi_reg = msp->base + SMI_REG; 1127 int ret; 1128 1129 if (smi_wait_ready(msp)) { 1130 pr_warn("SMI bus busy timeout\n"); 1131 return -ETIMEDOUT; 1132 } 1133 1134 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1135 1136 if (smi_wait_ready(msp)) { 1137 pr_warn("SMI bus busy timeout\n"); 1138 return -ETIMEDOUT; 1139 } 1140 1141 ret = readl(smi_reg); 1142 if (!(ret & SMI_READ_VALID)) { 1143 pr_warn("SMI bus read not valid\n"); 1144 return -ENODEV; 1145 } 1146 1147 return ret & 0xffff; 1148 } 1149 1150 static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) 1151 { 1152 struct mv643xx_eth_shared_private *msp = bus->priv; 1153 void __iomem *smi_reg = msp->base + SMI_REG; 1154 1155 if (smi_wait_ready(msp)) { 1156 pr_warn("SMI bus busy timeout\n"); 1157 return -ETIMEDOUT; 1158 } 1159 1160 writel(SMI_OPCODE_WRITE | (reg << 21) | 1161 (addr << 16) | (val & 0xffff), smi_reg); 1162 1163 if (smi_wait_ready(msp)) { 1164 pr_warn("SMI bus busy timeout\n"); 1165 return -ETIMEDOUT; 1166 } 1167 1168 return 0; 1169 } 1170 1171 1172 /* statistics ***************************************************************/ 1173 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1174 { 1175 struct mv643xx_eth_private *mp = netdev_priv(dev); 1176 struct net_device_stats *stats = &dev->stats; 1177 unsigned long tx_packets = 0; 1178 unsigned long tx_bytes = 0; 1179 unsigned long tx_dropped = 0; 1180 int i; 1181 1182 for (i = 0; i < mp->txq_count; i++) { 1183 struct tx_queue *txq = mp->txq + i; 1184 1185 tx_packets += txq->tx_packets; 1186 tx_bytes += txq->tx_bytes; 1187 tx_dropped += txq->tx_dropped; 1188 } 1189 1190 stats->tx_packets = tx_packets; 1191 stats->tx_bytes = tx_bytes; 1192 stats->tx_dropped = tx_dropped; 1193 1194 return stats; 1195 } 1196 1197 static void mv643xx_eth_grab_lro_stats(struct mv643xx_eth_private *mp) 1198 { 1199 u32 lro_aggregated = 0; 1200 u32 lro_flushed = 0; 1201 u32 lro_no_desc = 0; 1202 int i; 1203 1204 for (i = 0; i < mp->rxq_count; i++) { 1205 struct rx_queue *rxq = mp->rxq + i; 1206 1207 lro_aggregated += rxq->lro_mgr.stats.aggregated; 1208 lro_flushed += rxq->lro_mgr.stats.flushed; 1209 lro_no_desc += rxq->lro_mgr.stats.no_desc; 1210 } 1211 1212 mp->lro_counters.lro_aggregated = lro_aggregated; 1213 mp->lro_counters.lro_flushed = lro_flushed; 1214 mp->lro_counters.lro_no_desc = lro_no_desc; 1215 } 1216 1217 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1218 { 1219 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1220 } 1221 1222 static void mib_counters_clear(struct mv643xx_eth_private *mp) 1223 { 1224 int i; 1225 1226 for (i = 0; i < 0x80; i += 4) 1227 mib_read(mp, i); 1228 } 1229 1230 static void mib_counters_update(struct mv643xx_eth_private *mp) 1231 { 1232 struct mib_counters *p = &mp->mib_counters; 1233 1234 spin_lock_bh(&mp->mib_counters_lock); 1235 p->good_octets_received += mib_read(mp, 0x00); 1236 p->bad_octets_received += mib_read(mp, 0x08); 1237 p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1238 p->good_frames_received += mib_read(mp, 0x10); 1239 p->bad_frames_received += mib_read(mp, 0x14); 1240 p->broadcast_frames_received += mib_read(mp, 0x18); 1241 p->multicast_frames_received += mib_read(mp, 0x1c); 1242 p->frames_64_octets += mib_read(mp, 0x20); 1243 p->frames_65_to_127_octets += mib_read(mp, 0x24); 1244 p->frames_128_to_255_octets += mib_read(mp, 0x28); 1245 p->frames_256_to_511_octets += mib_read(mp, 0x2c); 1246 p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1247 p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1248 p->good_octets_sent += mib_read(mp, 0x38); 1249 p->good_frames_sent += mib_read(mp, 0x40); 1250 p->excessive_collision += mib_read(mp, 0x44); 1251 p->multicast_frames_sent += mib_read(mp, 0x48); 1252 p->broadcast_frames_sent += mib_read(mp, 0x4c); 1253 p->unrec_mac_control_received += mib_read(mp, 0x50); 1254 p->fc_sent += mib_read(mp, 0x54); 1255 p->good_fc_received += mib_read(mp, 0x58); 1256 p->bad_fc_received += mib_read(mp, 0x5c); 1257 p->undersize_received += mib_read(mp, 0x60); 1258 p->fragments_received += mib_read(mp, 0x64); 1259 p->oversize_received += mib_read(mp, 0x68); 1260 p->jabber_received += mib_read(mp, 0x6c); 1261 p->mac_receive_error += mib_read(mp, 0x70); 1262 p->bad_crc_event += mib_read(mp, 0x74); 1263 p->collision += mib_read(mp, 0x78); 1264 p->late_collision += mib_read(mp, 0x7c); 1265 spin_unlock_bh(&mp->mib_counters_lock); 1266 1267 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); 1268 } 1269 1270 static void mib_counters_timer_wrapper(unsigned long _mp) 1271 { 1272 struct mv643xx_eth_private *mp = (void *)_mp; 1273 1274 mib_counters_update(mp); 1275 } 1276 1277 1278 /* interrupt coalescing *****************************************************/ 1279 /* 1280 * Hardware coalescing parameters are set in units of 64 t_clk 1281 * cycles. I.e.: 1282 * 1283 * coal_delay_in_usec = 64000000 * register_value / t_clk_rate 1284 * 1285 * register_value = coal_delay_in_usec * t_clk_rate / 64000000 1286 * 1287 * In the ->set*() methods, we round the computed register value 1288 * to the nearest integer. 1289 */ 1290 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) 1291 { 1292 u32 val = rdlp(mp, SDMA_CONFIG); 1293 u64 temp; 1294 1295 if (mp->shared->extended_rx_coal_limit) 1296 temp = ((val & 0x02000000) >> 10) | ((val & 0x003fff80) >> 7); 1297 else 1298 temp = (val & 0x003fff00) >> 8; 1299 1300 temp *= 64000000; 1301 do_div(temp, mp->shared->t_clk); 1302 1303 return (unsigned int)temp; 1304 } 1305 1306 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1307 { 1308 u64 temp; 1309 u32 val; 1310 1311 temp = (u64)usec * mp->shared->t_clk; 1312 temp += 31999999; 1313 do_div(temp, 64000000); 1314 1315 val = rdlp(mp, SDMA_CONFIG); 1316 if (mp->shared->extended_rx_coal_limit) { 1317 if (temp > 0xffff) 1318 temp = 0xffff; 1319 val &= ~0x023fff80; 1320 val |= (temp & 0x8000) << 10; 1321 val |= (temp & 0x7fff) << 7; 1322 } else { 1323 if (temp > 0x3fff) 1324 temp = 0x3fff; 1325 val &= ~0x003fff00; 1326 val |= (temp & 0x3fff) << 8; 1327 } 1328 wrlp(mp, SDMA_CONFIG, val); 1329 } 1330 1331 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) 1332 { 1333 u64 temp; 1334 1335 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1336 temp *= 64000000; 1337 do_div(temp, mp->shared->t_clk); 1338 1339 return (unsigned int)temp; 1340 } 1341 1342 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) 1343 { 1344 u64 temp; 1345 1346 temp = (u64)usec * mp->shared->t_clk; 1347 temp += 31999999; 1348 do_div(temp, 64000000); 1349 1350 if (temp > 0x3fff) 1351 temp = 0x3fff; 1352 1353 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); 1354 } 1355 1356 1357 /* ethtool ******************************************************************/ 1358 struct mv643xx_eth_stats { 1359 char stat_string[ETH_GSTRING_LEN]; 1360 int sizeof_stat; 1361 int netdev_off; 1362 int mp_off; 1363 }; 1364 1365 #define SSTAT(m) \ 1366 { #m, FIELD_SIZEOF(struct net_device_stats, m), \ 1367 offsetof(struct net_device, stats.m), -1 } 1368 1369 #define MIBSTAT(m) \ 1370 { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1371 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1372 1373 #define LROSTAT(m) \ 1374 { #m, FIELD_SIZEOF(struct lro_counters, m), \ 1375 -1, offsetof(struct mv643xx_eth_private, lro_counters.m) } 1376 1377 static const struct mv643xx_eth_stats mv643xx_eth_stats[] = { 1378 SSTAT(rx_packets), 1379 SSTAT(tx_packets), 1380 SSTAT(rx_bytes), 1381 SSTAT(tx_bytes), 1382 SSTAT(rx_errors), 1383 SSTAT(tx_errors), 1384 SSTAT(rx_dropped), 1385 SSTAT(tx_dropped), 1386 MIBSTAT(good_octets_received), 1387 MIBSTAT(bad_octets_received), 1388 MIBSTAT(internal_mac_transmit_err), 1389 MIBSTAT(good_frames_received), 1390 MIBSTAT(bad_frames_received), 1391 MIBSTAT(broadcast_frames_received), 1392 MIBSTAT(multicast_frames_received), 1393 MIBSTAT(frames_64_octets), 1394 MIBSTAT(frames_65_to_127_octets), 1395 MIBSTAT(frames_128_to_255_octets), 1396 MIBSTAT(frames_256_to_511_octets), 1397 MIBSTAT(frames_512_to_1023_octets), 1398 MIBSTAT(frames_1024_to_max_octets), 1399 MIBSTAT(good_octets_sent), 1400 MIBSTAT(good_frames_sent), 1401 MIBSTAT(excessive_collision), 1402 MIBSTAT(multicast_frames_sent), 1403 MIBSTAT(broadcast_frames_sent), 1404 MIBSTAT(unrec_mac_control_received), 1405 MIBSTAT(fc_sent), 1406 MIBSTAT(good_fc_received), 1407 MIBSTAT(bad_fc_received), 1408 MIBSTAT(undersize_received), 1409 MIBSTAT(fragments_received), 1410 MIBSTAT(oversize_received), 1411 MIBSTAT(jabber_received), 1412 MIBSTAT(mac_receive_error), 1413 MIBSTAT(bad_crc_event), 1414 MIBSTAT(collision), 1415 MIBSTAT(late_collision), 1416 LROSTAT(lro_aggregated), 1417 LROSTAT(lro_flushed), 1418 LROSTAT(lro_no_desc), 1419 }; 1420 1421 static int 1422 mv643xx_eth_get_settings_phy(struct mv643xx_eth_private *mp, 1423 struct ethtool_cmd *cmd) 1424 { 1425 int err; 1426 1427 err = phy_read_status(mp->phy); 1428 if (err == 0) 1429 err = phy_ethtool_gset(mp->phy, cmd); 1430 1431 /* 1432 * The MAC does not support 1000baseT_Half. 1433 */ 1434 cmd->supported &= ~SUPPORTED_1000baseT_Half; 1435 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1436 1437 return err; 1438 } 1439 1440 static int 1441 mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp, 1442 struct ethtool_cmd *cmd) 1443 { 1444 u32 port_status; 1445 1446 port_status = rdlp(mp, PORT_STATUS); 1447 1448 cmd->supported = SUPPORTED_MII; 1449 cmd->advertising = ADVERTISED_MII; 1450 switch (port_status & PORT_SPEED_MASK) { 1451 case PORT_SPEED_10: 1452 ethtool_cmd_speed_set(cmd, SPEED_10); 1453 break; 1454 case PORT_SPEED_100: 1455 ethtool_cmd_speed_set(cmd, SPEED_100); 1456 break; 1457 case PORT_SPEED_1000: 1458 ethtool_cmd_speed_set(cmd, SPEED_1000); 1459 break; 1460 default: 1461 cmd->speed = -1; 1462 break; 1463 } 1464 cmd->duplex = (port_status & FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF; 1465 cmd->port = PORT_MII; 1466 cmd->phy_address = 0; 1467 cmd->transceiver = XCVR_INTERNAL; 1468 cmd->autoneg = AUTONEG_DISABLE; 1469 cmd->maxtxpkt = 1; 1470 cmd->maxrxpkt = 1; 1471 1472 return 0; 1473 } 1474 1475 static int 1476 mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1477 { 1478 struct mv643xx_eth_private *mp = netdev_priv(dev); 1479 1480 if (mp->phy != NULL) 1481 return mv643xx_eth_get_settings_phy(mp, cmd); 1482 else 1483 return mv643xx_eth_get_settings_phyless(mp, cmd); 1484 } 1485 1486 static int 1487 mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1488 { 1489 struct mv643xx_eth_private *mp = netdev_priv(dev); 1490 1491 if (mp->phy == NULL) 1492 return -EINVAL; 1493 1494 /* 1495 * The MAC does not support 1000baseT_Half. 1496 */ 1497 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1498 1499 return phy_ethtool_sset(mp->phy, cmd); 1500 } 1501 1502 static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1503 struct ethtool_drvinfo *drvinfo) 1504 { 1505 strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32); 1506 strncpy(drvinfo->version, mv643xx_eth_driver_version, 32); 1507 strncpy(drvinfo->fw_version, "N/A", 32); 1508 strncpy(drvinfo->bus_info, "platform", 32); 1509 drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats); 1510 } 1511 1512 static int mv643xx_eth_nway_reset(struct net_device *dev) 1513 { 1514 struct mv643xx_eth_private *mp = netdev_priv(dev); 1515 1516 if (mp->phy == NULL) 1517 return -EINVAL; 1518 1519 return genphy_restart_aneg(mp->phy); 1520 } 1521 1522 static int 1523 mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1524 { 1525 struct mv643xx_eth_private *mp = netdev_priv(dev); 1526 1527 ec->rx_coalesce_usecs = get_rx_coal(mp); 1528 ec->tx_coalesce_usecs = get_tx_coal(mp); 1529 1530 return 0; 1531 } 1532 1533 static int 1534 mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1535 { 1536 struct mv643xx_eth_private *mp = netdev_priv(dev); 1537 1538 set_rx_coal(mp, ec->rx_coalesce_usecs); 1539 set_tx_coal(mp, ec->tx_coalesce_usecs); 1540 1541 return 0; 1542 } 1543 1544 static void 1545 mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1546 { 1547 struct mv643xx_eth_private *mp = netdev_priv(dev); 1548 1549 er->rx_max_pending = 4096; 1550 er->tx_max_pending = 4096; 1551 1552 er->rx_pending = mp->rx_ring_size; 1553 er->tx_pending = mp->tx_ring_size; 1554 } 1555 1556 static int 1557 mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) 1558 { 1559 struct mv643xx_eth_private *mp = netdev_priv(dev); 1560 1561 if (er->rx_mini_pending || er->rx_jumbo_pending) 1562 return -EINVAL; 1563 1564 mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096; 1565 mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096; 1566 1567 if (netif_running(dev)) { 1568 mv643xx_eth_stop(dev); 1569 if (mv643xx_eth_open(dev)) { 1570 netdev_err(dev, 1571 "fatal error on re-opening device after ring param change\n"); 1572 return -ENOMEM; 1573 } 1574 } 1575 1576 return 0; 1577 } 1578 1579 1580 static int 1581 mv643xx_eth_set_features(struct net_device *dev, u32 features) 1582 { 1583 struct mv643xx_eth_private *mp = netdev_priv(dev); 1584 u32 rx_csum = features & NETIF_F_RXCSUM; 1585 1586 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); 1587 1588 return 0; 1589 } 1590 1591 static void mv643xx_eth_get_strings(struct net_device *dev, 1592 uint32_t stringset, uint8_t *data) 1593 { 1594 int i; 1595 1596 if (stringset == ETH_SS_STATS) { 1597 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1598 memcpy(data + i * ETH_GSTRING_LEN, 1599 mv643xx_eth_stats[i].stat_string, 1600 ETH_GSTRING_LEN); 1601 } 1602 } 1603 } 1604 1605 static void mv643xx_eth_get_ethtool_stats(struct net_device *dev, 1606 struct ethtool_stats *stats, 1607 uint64_t *data) 1608 { 1609 struct mv643xx_eth_private *mp = netdev_priv(dev); 1610 int i; 1611 1612 mv643xx_eth_get_stats(dev); 1613 mib_counters_update(mp); 1614 mv643xx_eth_grab_lro_stats(mp); 1615 1616 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1617 const struct mv643xx_eth_stats *stat; 1618 void *p; 1619 1620 stat = mv643xx_eth_stats + i; 1621 1622 if (stat->netdev_off >= 0) 1623 p = ((void *)mp->dev) + stat->netdev_off; 1624 else 1625 p = ((void *)mp) + stat->mp_off; 1626 1627 data[i] = (stat->sizeof_stat == 8) ? 1628 *(uint64_t *)p : *(uint32_t *)p; 1629 } 1630 } 1631 1632 static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset) 1633 { 1634 if (sset == ETH_SS_STATS) 1635 return ARRAY_SIZE(mv643xx_eth_stats); 1636 1637 return -EOPNOTSUPP; 1638 } 1639 1640 static const struct ethtool_ops mv643xx_eth_ethtool_ops = { 1641 .get_settings = mv643xx_eth_get_settings, 1642 .set_settings = mv643xx_eth_set_settings, 1643 .get_drvinfo = mv643xx_eth_get_drvinfo, 1644 .nway_reset = mv643xx_eth_nway_reset, 1645 .get_link = ethtool_op_get_link, 1646 .get_coalesce = mv643xx_eth_get_coalesce, 1647 .set_coalesce = mv643xx_eth_set_coalesce, 1648 .get_ringparam = mv643xx_eth_get_ringparam, 1649 .set_ringparam = mv643xx_eth_set_ringparam, 1650 .get_strings = mv643xx_eth_get_strings, 1651 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1652 .get_sset_count = mv643xx_eth_get_sset_count, 1653 }; 1654 1655 1656 /* address handling *********************************************************/ 1657 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) 1658 { 1659 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); 1660 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); 1661 1662 addr[0] = (mac_h >> 24) & 0xff; 1663 addr[1] = (mac_h >> 16) & 0xff; 1664 addr[2] = (mac_h >> 8) & 0xff; 1665 addr[3] = mac_h & 0xff; 1666 addr[4] = (mac_l >> 8) & 0xff; 1667 addr[5] = mac_l & 0xff; 1668 } 1669 1670 static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) 1671 { 1672 wrlp(mp, MAC_ADDR_HIGH, 1673 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); 1674 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); 1675 } 1676 1677 static u32 uc_addr_filter_mask(struct net_device *dev) 1678 { 1679 struct netdev_hw_addr *ha; 1680 u32 nibbles; 1681 1682 if (dev->flags & IFF_PROMISC) 1683 return 0; 1684 1685 nibbles = 1 << (dev->dev_addr[5] & 0x0f); 1686 netdev_for_each_uc_addr(ha, dev) { 1687 if (memcmp(dev->dev_addr, ha->addr, 5)) 1688 return 0; 1689 if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0) 1690 return 0; 1691 1692 nibbles |= 1 << (ha->addr[5] & 0x0f); 1693 } 1694 1695 return nibbles; 1696 } 1697 1698 static void mv643xx_eth_program_unicast_filter(struct net_device *dev) 1699 { 1700 struct mv643xx_eth_private *mp = netdev_priv(dev); 1701 u32 port_config; 1702 u32 nibbles; 1703 int i; 1704 1705 uc_addr_set(mp, dev->dev_addr); 1706 1707 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; 1708 1709 nibbles = uc_addr_filter_mask(dev); 1710 if (!nibbles) { 1711 port_config |= UNICAST_PROMISCUOUS_MODE; 1712 nibbles = 0xffff; 1713 } 1714 1715 for (i = 0; i < 16; i += 4) { 1716 int off = UNICAST_TABLE(mp->port_num) + i; 1717 u32 v; 1718 1719 v = 0; 1720 if (nibbles & 1) 1721 v |= 0x00000001; 1722 if (nibbles & 2) 1723 v |= 0x00000100; 1724 if (nibbles & 4) 1725 v |= 0x00010000; 1726 if (nibbles & 8) 1727 v |= 0x01000000; 1728 nibbles >>= 4; 1729 1730 wrl(mp, off, v); 1731 } 1732 1733 wrlp(mp, PORT_CONFIG, port_config); 1734 } 1735 1736 static int addr_crc(unsigned char *addr) 1737 { 1738 int crc = 0; 1739 int i; 1740 1741 for (i = 0; i < 6; i++) { 1742 int j; 1743 1744 crc = (crc ^ addr[i]) << 8; 1745 for (j = 7; j >= 0; j--) { 1746 if (crc & (0x100 << j)) 1747 crc ^= 0x107 << j; 1748 } 1749 } 1750 1751 return crc; 1752 } 1753 1754 static void mv643xx_eth_program_multicast_filter(struct net_device *dev) 1755 { 1756 struct mv643xx_eth_private *mp = netdev_priv(dev); 1757 u32 *mc_spec; 1758 u32 *mc_other; 1759 struct netdev_hw_addr *ha; 1760 int i; 1761 1762 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1763 int port_num; 1764 u32 accept; 1765 1766 oom: 1767 port_num = mp->port_num; 1768 accept = 0x01010101; 1769 for (i = 0; i < 0x100; i += 4) { 1770 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); 1771 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); 1772 } 1773 return; 1774 } 1775 1776 mc_spec = kmalloc(0x200, GFP_ATOMIC); 1777 if (mc_spec == NULL) 1778 goto oom; 1779 mc_other = mc_spec + (0x100 >> 2); 1780 1781 memset(mc_spec, 0, 0x100); 1782 memset(mc_other, 0, 0x100); 1783 1784 netdev_for_each_mc_addr(ha, dev) { 1785 u8 *a = ha->addr; 1786 u32 *table; 1787 int entry; 1788 1789 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { 1790 table = mc_spec; 1791 entry = a[5]; 1792 } else { 1793 table = mc_other; 1794 entry = addr_crc(a); 1795 } 1796 1797 table[entry >> 2] |= 1 << (8 * (entry & 3)); 1798 } 1799 1800 for (i = 0; i < 0x100; i += 4) { 1801 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]); 1802 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]); 1803 } 1804 1805 kfree(mc_spec); 1806 } 1807 1808 static void mv643xx_eth_set_rx_mode(struct net_device *dev) 1809 { 1810 mv643xx_eth_program_unicast_filter(dev); 1811 mv643xx_eth_program_multicast_filter(dev); 1812 } 1813 1814 static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) 1815 { 1816 struct sockaddr *sa = addr; 1817 1818 if (!is_valid_ether_addr(sa->sa_data)) 1819 return -EINVAL; 1820 1821 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); 1822 1823 netif_addr_lock_bh(dev); 1824 mv643xx_eth_program_unicast_filter(dev); 1825 netif_addr_unlock_bh(dev); 1826 1827 return 0; 1828 } 1829 1830 1831 /* rx/tx queue initialisation ***********************************************/ 1832 static int rxq_init(struct mv643xx_eth_private *mp, int index) 1833 { 1834 struct rx_queue *rxq = mp->rxq + index; 1835 struct rx_desc *rx_desc; 1836 int size; 1837 int i; 1838 1839 rxq->index = index; 1840 1841 rxq->rx_ring_size = mp->rx_ring_size; 1842 1843 rxq->rx_desc_count = 0; 1844 rxq->rx_curr_desc = 0; 1845 rxq->rx_used_desc = 0; 1846 1847 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1848 1849 if (index == 0 && size <= mp->rx_desc_sram_size) { 1850 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1851 mp->rx_desc_sram_size); 1852 rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1853 } else { 1854 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1855 size, &rxq->rx_desc_dma, 1856 GFP_KERNEL); 1857 } 1858 1859 if (rxq->rx_desc_area == NULL) { 1860 netdev_err(mp->dev, 1861 "can't allocate rx ring (%d bytes)\n", size); 1862 goto out; 1863 } 1864 memset(rxq->rx_desc_area, 0, size); 1865 1866 rxq->rx_desc_area_size = size; 1867 rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), 1868 GFP_KERNEL); 1869 if (rxq->rx_skb == NULL) { 1870 netdev_err(mp->dev, "can't allocate rx skb ring\n"); 1871 goto out_free; 1872 } 1873 1874 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1875 for (i = 0; i < rxq->rx_ring_size; i++) { 1876 int nexti; 1877 1878 nexti = i + 1; 1879 if (nexti == rxq->rx_ring_size) 1880 nexti = 0; 1881 1882 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1883 nexti * sizeof(struct rx_desc); 1884 } 1885 1886 rxq->lro_mgr.dev = mp->dev; 1887 memset(&rxq->lro_mgr.stats, 0, sizeof(rxq->lro_mgr.stats)); 1888 rxq->lro_mgr.features = LRO_F_NAPI; 1889 rxq->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1890 rxq->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1891 rxq->lro_mgr.max_desc = ARRAY_SIZE(rxq->lro_arr); 1892 rxq->lro_mgr.max_aggr = 32; 1893 rxq->lro_mgr.frag_align_pad = 0; 1894 rxq->lro_mgr.lro_arr = rxq->lro_arr; 1895 rxq->lro_mgr.get_skb_header = mv643xx_get_skb_header; 1896 1897 memset(&rxq->lro_arr, 0, sizeof(rxq->lro_arr)); 1898 1899 return 0; 1900 1901 1902 out_free: 1903 if (index == 0 && size <= mp->rx_desc_sram_size) 1904 iounmap(rxq->rx_desc_area); 1905 else 1906 dma_free_coherent(mp->dev->dev.parent, size, 1907 rxq->rx_desc_area, 1908 rxq->rx_desc_dma); 1909 1910 out: 1911 return -ENOMEM; 1912 } 1913 1914 static void rxq_deinit(struct rx_queue *rxq) 1915 { 1916 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 1917 int i; 1918 1919 rxq_disable(rxq); 1920 1921 for (i = 0; i < rxq->rx_ring_size; i++) { 1922 if (rxq->rx_skb[i]) { 1923 dev_kfree_skb(rxq->rx_skb[i]); 1924 rxq->rx_desc_count--; 1925 } 1926 } 1927 1928 if (rxq->rx_desc_count) { 1929 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", 1930 rxq->rx_desc_count); 1931 } 1932 1933 if (rxq->index == 0 && 1934 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1935 iounmap(rxq->rx_desc_area); 1936 else 1937 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, 1938 rxq->rx_desc_area, rxq->rx_desc_dma); 1939 1940 kfree(rxq->rx_skb); 1941 } 1942 1943 static int txq_init(struct mv643xx_eth_private *mp, int index) 1944 { 1945 struct tx_queue *txq = mp->txq + index; 1946 struct tx_desc *tx_desc; 1947 int size; 1948 int i; 1949 1950 txq->index = index; 1951 1952 txq->tx_ring_size = mp->tx_ring_size; 1953 1954 txq->tx_desc_count = 0; 1955 txq->tx_curr_desc = 0; 1956 txq->tx_used_desc = 0; 1957 1958 size = txq->tx_ring_size * sizeof(struct tx_desc); 1959 1960 if (index == 0 && size <= mp->tx_desc_sram_size) { 1961 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 1962 mp->tx_desc_sram_size); 1963 txq->tx_desc_dma = mp->tx_desc_sram_addr; 1964 } else { 1965 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, 1966 size, &txq->tx_desc_dma, 1967 GFP_KERNEL); 1968 } 1969 1970 if (txq->tx_desc_area == NULL) { 1971 netdev_err(mp->dev, 1972 "can't allocate tx ring (%d bytes)\n", size); 1973 return -ENOMEM; 1974 } 1975 memset(txq->tx_desc_area, 0, size); 1976 1977 txq->tx_desc_area_size = size; 1978 1979 tx_desc = (struct tx_desc *)txq->tx_desc_area; 1980 for (i = 0; i < txq->tx_ring_size; i++) { 1981 struct tx_desc *txd = tx_desc + i; 1982 int nexti; 1983 1984 nexti = i + 1; 1985 if (nexti == txq->tx_ring_size) 1986 nexti = 0; 1987 1988 txd->cmd_sts = 0; 1989 txd->next_desc_ptr = txq->tx_desc_dma + 1990 nexti * sizeof(struct tx_desc); 1991 } 1992 1993 skb_queue_head_init(&txq->tx_skb); 1994 1995 return 0; 1996 } 1997 1998 static void txq_deinit(struct tx_queue *txq) 1999 { 2000 struct mv643xx_eth_private *mp = txq_to_mp(txq); 2001 2002 txq_disable(txq); 2003 txq_reclaim(txq, txq->tx_ring_size, 1); 2004 2005 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 2006 2007 if (txq->index == 0 && 2008 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 2009 iounmap(txq->tx_desc_area); 2010 else 2011 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, 2012 txq->tx_desc_area, txq->tx_desc_dma); 2013 } 2014 2015 2016 /* netdev ops and related ***************************************************/ 2017 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) 2018 { 2019 u32 int_cause; 2020 u32 int_cause_ext; 2021 2022 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; 2023 if (int_cause == 0) 2024 return 0; 2025 2026 int_cause_ext = 0; 2027 if (int_cause & INT_EXT) { 2028 int_cause &= ~INT_EXT; 2029 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); 2030 } 2031 2032 if (int_cause) { 2033 wrlp(mp, INT_CAUSE, ~int_cause); 2034 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 2035 ~(rdlp(mp, TXQ_COMMAND) & 0xff); 2036 mp->work_rx |= (int_cause & INT_RX) >> 2; 2037 } 2038 2039 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 2040 if (int_cause_ext) { 2041 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); 2042 if (int_cause_ext & INT_EXT_LINK_PHY) 2043 mp->work_link = 1; 2044 mp->work_tx |= int_cause_ext & INT_EXT_TX; 2045 } 2046 2047 return 1; 2048 } 2049 2050 static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 2051 { 2052 struct net_device *dev = (struct net_device *)dev_id; 2053 struct mv643xx_eth_private *mp = netdev_priv(dev); 2054 2055 if (unlikely(!mv643xx_eth_collect_events(mp))) 2056 return IRQ_NONE; 2057 2058 wrlp(mp, INT_MASK, 0); 2059 napi_schedule(&mp->napi); 2060 2061 return IRQ_HANDLED; 2062 } 2063 2064 static void handle_link_event(struct mv643xx_eth_private *mp) 2065 { 2066 struct net_device *dev = mp->dev; 2067 u32 port_status; 2068 int speed; 2069 int duplex; 2070 int fc; 2071 2072 port_status = rdlp(mp, PORT_STATUS); 2073 if (!(port_status & LINK_UP)) { 2074 if (netif_carrier_ok(dev)) { 2075 int i; 2076 2077 netdev_info(dev, "link down\n"); 2078 2079 netif_carrier_off(dev); 2080 2081 for (i = 0; i < mp->txq_count; i++) { 2082 struct tx_queue *txq = mp->txq + i; 2083 2084 txq_reclaim(txq, txq->tx_ring_size, 1); 2085 txq_reset_hw_ptr(txq); 2086 } 2087 } 2088 return; 2089 } 2090 2091 switch (port_status & PORT_SPEED_MASK) { 2092 case PORT_SPEED_10: 2093 speed = 10; 2094 break; 2095 case PORT_SPEED_100: 2096 speed = 100; 2097 break; 2098 case PORT_SPEED_1000: 2099 speed = 1000; 2100 break; 2101 default: 2102 speed = -1; 2103 break; 2104 } 2105 duplex = (port_status & FULL_DUPLEX) ? 1 : 0; 2106 fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; 2107 2108 netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", 2109 speed, duplex ? "full" : "half", fc ? "en" : "dis"); 2110 2111 if (!netif_carrier_ok(dev)) 2112 netif_carrier_on(dev); 2113 } 2114 2115 static int mv643xx_eth_poll(struct napi_struct *napi, int budget) 2116 { 2117 struct mv643xx_eth_private *mp; 2118 int work_done; 2119 2120 mp = container_of(napi, struct mv643xx_eth_private, napi); 2121 2122 if (unlikely(mp->oom)) { 2123 mp->oom = 0; 2124 del_timer(&mp->rx_oom); 2125 } 2126 2127 work_done = 0; 2128 while (work_done < budget) { 2129 u8 queue_mask; 2130 int queue; 2131 int work_tbd; 2132 2133 if (mp->work_link) { 2134 mp->work_link = 0; 2135 handle_link_event(mp); 2136 work_done++; 2137 continue; 2138 } 2139 2140 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2141 if (likely(!mp->oom)) 2142 queue_mask |= mp->work_rx_refill; 2143 2144 if (!queue_mask) { 2145 if (mv643xx_eth_collect_events(mp)) 2146 continue; 2147 break; 2148 } 2149 2150 queue = fls(queue_mask) - 1; 2151 queue_mask = 1 << queue; 2152 2153 work_tbd = budget - work_done; 2154 if (work_tbd > 16) 2155 work_tbd = 16; 2156 2157 if (mp->work_tx_end & queue_mask) { 2158 txq_kick(mp->txq + queue); 2159 } else if (mp->work_tx & queue_mask) { 2160 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); 2161 txq_maybe_wake(mp->txq + queue); 2162 } else if (mp->work_rx & queue_mask) { 2163 work_done += rxq_process(mp->rxq + queue, work_tbd); 2164 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2165 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2166 } else { 2167 BUG(); 2168 } 2169 } 2170 2171 if (work_done < budget) { 2172 if (mp->oom) 2173 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2174 napi_complete(napi); 2175 wrlp(mp, INT_MASK, mp->int_mask); 2176 } 2177 2178 return work_done; 2179 } 2180 2181 static inline void oom_timer_wrapper(unsigned long data) 2182 { 2183 struct mv643xx_eth_private *mp = (void *)data; 2184 2185 napi_schedule(&mp->napi); 2186 } 2187 2188 static void phy_reset(struct mv643xx_eth_private *mp) 2189 { 2190 int data; 2191 2192 data = phy_read(mp->phy, MII_BMCR); 2193 if (data < 0) 2194 return; 2195 2196 data |= BMCR_RESET; 2197 if (phy_write(mp->phy, MII_BMCR, data) < 0) 2198 return; 2199 2200 do { 2201 data = phy_read(mp->phy, MII_BMCR); 2202 } while (data >= 0 && data & BMCR_RESET); 2203 } 2204 2205 static void port_start(struct mv643xx_eth_private *mp) 2206 { 2207 u32 pscr; 2208 int i; 2209 2210 /* 2211 * Perform PHY reset, if there is a PHY. 2212 */ 2213 if (mp->phy != NULL) { 2214 struct ethtool_cmd cmd; 2215 2216 mv643xx_eth_get_settings(mp->dev, &cmd); 2217 phy_reset(mp); 2218 mv643xx_eth_set_settings(mp->dev, &cmd); 2219 } 2220 2221 /* 2222 * Configure basic link parameters. 2223 */ 2224 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2225 2226 pscr |= SERIAL_PORT_ENABLE; 2227 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2228 2229 pscr |= DO_NOT_FORCE_LINK_FAIL; 2230 if (mp->phy == NULL) 2231 pscr |= FORCE_LINK_PASS; 2232 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2233 2234 /* 2235 * Configure TX path and queues. 2236 */ 2237 tx_set_rate(mp, 1000000000, 16777216); 2238 for (i = 0; i < mp->txq_count; i++) { 2239 struct tx_queue *txq = mp->txq + i; 2240 2241 txq_reset_hw_ptr(txq); 2242 txq_set_rate(txq, 1000000000, 16777216); 2243 txq_set_fixed_prio_mode(txq); 2244 } 2245 2246 /* 2247 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2248 * frames to RX queue #0, and include the pseudo-header when 2249 * calculating receive checksums. 2250 */ 2251 mv643xx_eth_set_features(mp->dev, mp->dev->features); 2252 2253 /* 2254 * Treat BPDUs as normal multicasts, and disable partition mode. 2255 */ 2256 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); 2257 2258 /* 2259 * Add configured unicast addresses to address filter table. 2260 */ 2261 mv643xx_eth_program_unicast_filter(mp->dev); 2262 2263 /* 2264 * Enable the receive queues. 2265 */ 2266 for (i = 0; i < mp->rxq_count; i++) { 2267 struct rx_queue *rxq = mp->rxq + i; 2268 u32 addr; 2269 2270 addr = (u32)rxq->rx_desc_dma; 2271 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2272 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); 2273 2274 rxq_enable(rxq); 2275 } 2276 } 2277 2278 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2279 { 2280 int skb_size; 2281 2282 /* 2283 * Reserve 2+14 bytes for an ethernet header (the hardware 2284 * automatically prepends 2 bytes of dummy data to each 2285 * received packet), 16 bytes for up to four VLAN tags, and 2286 * 4 bytes for the trailing FCS -- 36 bytes total. 2287 */ 2288 skb_size = mp->dev->mtu + 36; 2289 2290 /* 2291 * Make sure that the skb size is a multiple of 8 bytes, as 2292 * the lower three bits of the receive descriptor's buffer 2293 * size field are ignored by the hardware. 2294 */ 2295 mp->skb_size = (skb_size + 7) & ~7; 2296 2297 /* 2298 * If NET_SKB_PAD is smaller than a cache line, 2299 * netdev_alloc_skb() will cause skb->data to be misaligned 2300 * to a cache line boundary. If this is the case, include 2301 * some extra space to allow re-aligning the data area. 2302 */ 2303 mp->skb_size += SKB_DMA_REALIGN; 2304 } 2305 2306 static int mv643xx_eth_open(struct net_device *dev) 2307 { 2308 struct mv643xx_eth_private *mp = netdev_priv(dev); 2309 int err; 2310 int i; 2311 2312 wrlp(mp, INT_CAUSE, 0); 2313 wrlp(mp, INT_CAUSE_EXT, 0); 2314 rdlp(mp, INT_CAUSE_EXT); 2315 2316 err = request_irq(dev->irq, mv643xx_eth_irq, 2317 IRQF_SHARED, dev->name, dev); 2318 if (err) { 2319 netdev_err(dev, "can't assign irq\n"); 2320 return -EAGAIN; 2321 } 2322 2323 mv643xx_eth_recalc_skb_size(mp); 2324 2325 napi_enable(&mp->napi); 2326 2327 skb_queue_head_init(&mp->rx_recycle); 2328 2329 mp->int_mask = INT_EXT; 2330 2331 for (i = 0; i < mp->rxq_count; i++) { 2332 err = rxq_init(mp, i); 2333 if (err) { 2334 while (--i >= 0) 2335 rxq_deinit(mp->rxq + i); 2336 goto out; 2337 } 2338 2339 rxq_refill(mp->rxq + i, INT_MAX); 2340 mp->int_mask |= INT_RX_0 << i; 2341 } 2342 2343 if (mp->oom) { 2344 mp->rx_oom.expires = jiffies + (HZ / 10); 2345 add_timer(&mp->rx_oom); 2346 } 2347 2348 for (i = 0; i < mp->txq_count; i++) { 2349 err = txq_init(mp, i); 2350 if (err) { 2351 while (--i >= 0) 2352 txq_deinit(mp->txq + i); 2353 goto out_free; 2354 } 2355 mp->int_mask |= INT_TX_END_0 << i; 2356 } 2357 2358 port_start(mp); 2359 2360 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); 2361 wrlp(mp, INT_MASK, mp->int_mask); 2362 2363 return 0; 2364 2365 2366 out_free: 2367 for (i = 0; i < mp->rxq_count; i++) 2368 rxq_deinit(mp->rxq + i); 2369 out: 2370 free_irq(dev->irq, dev); 2371 2372 return err; 2373 } 2374 2375 static void port_reset(struct mv643xx_eth_private *mp) 2376 { 2377 unsigned int data; 2378 int i; 2379 2380 for (i = 0; i < mp->rxq_count; i++) 2381 rxq_disable(mp->rxq + i); 2382 for (i = 0; i < mp->txq_count; i++) 2383 txq_disable(mp->txq + i); 2384 2385 while (1) { 2386 u32 ps = rdlp(mp, PORT_STATUS); 2387 2388 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2389 break; 2390 udelay(10); 2391 } 2392 2393 /* Reset the Enable bit in the Configuration Register */ 2394 data = rdlp(mp, PORT_SERIAL_CONTROL); 2395 data &= ~(SERIAL_PORT_ENABLE | 2396 DO_NOT_FORCE_LINK_FAIL | 2397 FORCE_LINK_PASS); 2398 wrlp(mp, PORT_SERIAL_CONTROL, data); 2399 } 2400 2401 static int mv643xx_eth_stop(struct net_device *dev) 2402 { 2403 struct mv643xx_eth_private *mp = netdev_priv(dev); 2404 int i; 2405 2406 wrlp(mp, INT_MASK_EXT, 0x00000000); 2407 wrlp(mp, INT_MASK, 0x00000000); 2408 rdlp(mp, INT_MASK); 2409 2410 napi_disable(&mp->napi); 2411 2412 del_timer_sync(&mp->rx_oom); 2413 2414 netif_carrier_off(dev); 2415 2416 free_irq(dev->irq, dev); 2417 2418 port_reset(mp); 2419 mv643xx_eth_get_stats(dev); 2420 mib_counters_update(mp); 2421 del_timer_sync(&mp->mib_counters_timer); 2422 2423 skb_queue_purge(&mp->rx_recycle); 2424 2425 for (i = 0; i < mp->rxq_count; i++) 2426 rxq_deinit(mp->rxq + i); 2427 for (i = 0; i < mp->txq_count; i++) 2428 txq_deinit(mp->txq + i); 2429 2430 return 0; 2431 } 2432 2433 static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2434 { 2435 struct mv643xx_eth_private *mp = netdev_priv(dev); 2436 2437 if (mp->phy != NULL) 2438 return phy_mii_ioctl(mp->phy, ifr, cmd); 2439 2440 return -EOPNOTSUPP; 2441 } 2442 2443 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2444 { 2445 struct mv643xx_eth_private *mp = netdev_priv(dev); 2446 2447 if (new_mtu < 64 || new_mtu > 9500) 2448 return -EINVAL; 2449 2450 dev->mtu = new_mtu; 2451 mv643xx_eth_recalc_skb_size(mp); 2452 tx_set_rate(mp, 1000000000, 16777216); 2453 2454 if (!netif_running(dev)) 2455 return 0; 2456 2457 /* 2458 * Stop and then re-open the interface. This will allocate RX 2459 * skbs of the new MTU. 2460 * There is a possible danger that the open will not succeed, 2461 * due to memory being full. 2462 */ 2463 mv643xx_eth_stop(dev); 2464 if (mv643xx_eth_open(dev)) { 2465 netdev_err(dev, 2466 "fatal error on re-opening device after MTU change\n"); 2467 } 2468 2469 return 0; 2470 } 2471 2472 static void tx_timeout_task(struct work_struct *ugly) 2473 { 2474 struct mv643xx_eth_private *mp; 2475 2476 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2477 if (netif_running(mp->dev)) { 2478 netif_tx_stop_all_queues(mp->dev); 2479 port_reset(mp); 2480 port_start(mp); 2481 netif_tx_wake_all_queues(mp->dev); 2482 } 2483 } 2484 2485 static void mv643xx_eth_tx_timeout(struct net_device *dev) 2486 { 2487 struct mv643xx_eth_private *mp = netdev_priv(dev); 2488 2489 netdev_info(dev, "tx timeout\n"); 2490 2491 schedule_work(&mp->tx_timeout_task); 2492 } 2493 2494 #ifdef CONFIG_NET_POLL_CONTROLLER 2495 static void mv643xx_eth_netpoll(struct net_device *dev) 2496 { 2497 struct mv643xx_eth_private *mp = netdev_priv(dev); 2498 2499 wrlp(mp, INT_MASK, 0x00000000); 2500 rdlp(mp, INT_MASK); 2501 2502 mv643xx_eth_irq(dev->irq, dev); 2503 2504 wrlp(mp, INT_MASK, mp->int_mask); 2505 } 2506 #endif 2507 2508 2509 /* platform glue ************************************************************/ 2510 static void 2511 mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp, 2512 struct mbus_dram_target_info *dram) 2513 { 2514 void __iomem *base = msp->base; 2515 u32 win_enable; 2516 u32 win_protect; 2517 int i; 2518 2519 for (i = 0; i < 6; i++) { 2520 writel(0, base + WINDOW_BASE(i)); 2521 writel(0, base + WINDOW_SIZE(i)); 2522 if (i < 4) 2523 writel(0, base + WINDOW_REMAP_HIGH(i)); 2524 } 2525 2526 win_enable = 0x3f; 2527 win_protect = 0; 2528 2529 for (i = 0; i < dram->num_cs; i++) { 2530 struct mbus_dram_window *cs = dram->cs + i; 2531 2532 writel((cs->base & 0xffff0000) | 2533 (cs->mbus_attr << 8) | 2534 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 2535 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 2536 2537 win_enable &= ~(1 << i); 2538 win_protect |= 3 << (2 * i); 2539 } 2540 2541 writel(win_enable, base + WINDOW_BAR_ENABLE); 2542 msp->win_protect = win_protect; 2543 } 2544 2545 static void infer_hw_params(struct mv643xx_eth_shared_private *msp) 2546 { 2547 /* 2548 * Check whether we have a 14-bit coal limit field in bits 2549 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2550 * SDMA config register. 2551 */ 2552 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG); 2553 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000) 2554 msp->extended_rx_coal_limit = 1; 2555 else 2556 msp->extended_rx_coal_limit = 0; 2557 2558 /* 2559 * Check whether the MAC supports TX rate control, and if 2560 * yes, whether its associated registers are in the old or 2561 * the new place. 2562 */ 2563 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED); 2564 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) { 2565 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2566 } else { 2567 writel(7, msp->base + 0x0400 + TX_BW_RATE); 2568 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7) 2569 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2570 else 2571 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2572 } 2573 } 2574 2575 static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2576 { 2577 static int mv643xx_eth_version_printed; 2578 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2579 struct mv643xx_eth_shared_private *msp; 2580 struct resource *res; 2581 int ret; 2582 2583 if (!mv643xx_eth_version_printed++) 2584 pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", 2585 mv643xx_eth_driver_version); 2586 2587 ret = -EINVAL; 2588 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2589 if (res == NULL) 2590 goto out; 2591 2592 ret = -ENOMEM; 2593 msp = kzalloc(sizeof(*msp), GFP_KERNEL); 2594 if (msp == NULL) 2595 goto out; 2596 2597 msp->base = ioremap(res->start, resource_size(res)); 2598 if (msp->base == NULL) 2599 goto out_free; 2600 2601 /* 2602 * Set up and register SMI bus. 2603 */ 2604 if (pd == NULL || pd->shared_smi == NULL) { 2605 msp->smi_bus = mdiobus_alloc(); 2606 if (msp->smi_bus == NULL) 2607 goto out_unmap; 2608 2609 msp->smi_bus->priv = msp; 2610 msp->smi_bus->name = "mv643xx_eth smi"; 2611 msp->smi_bus->read = smi_bus_read; 2612 msp->smi_bus->write = smi_bus_write, 2613 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id); 2614 msp->smi_bus->parent = &pdev->dev; 2615 msp->smi_bus->phy_mask = 0xffffffff; 2616 if (mdiobus_register(msp->smi_bus) < 0) 2617 goto out_free_mii_bus; 2618 msp->smi = msp; 2619 } else { 2620 msp->smi = platform_get_drvdata(pd->shared_smi); 2621 } 2622 2623 msp->err_interrupt = NO_IRQ; 2624 init_waitqueue_head(&msp->smi_busy_wait); 2625 2626 /* 2627 * Check whether the error interrupt is hooked up. 2628 */ 2629 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2630 if (res != NULL) { 2631 int err; 2632 2633 err = request_irq(res->start, mv643xx_eth_err_irq, 2634 IRQF_SHARED, "mv643xx_eth", msp); 2635 if (!err) { 2636 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK); 2637 msp->err_interrupt = res->start; 2638 } 2639 } 2640 2641 /* 2642 * (Re-)program MBUS remapping windows if we are asked to. 2643 */ 2644 if (pd != NULL && pd->dram != NULL) 2645 mv643xx_eth_conf_mbus_windows(msp, pd->dram); 2646 2647 /* 2648 * Detect hardware parameters. 2649 */ 2650 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; 2651 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2652 pd->tx_csum_limit : 9 * 1024; 2653 infer_hw_params(msp); 2654 2655 platform_set_drvdata(pdev, msp); 2656 2657 return 0; 2658 2659 out_free_mii_bus: 2660 mdiobus_free(msp->smi_bus); 2661 out_unmap: 2662 iounmap(msp->base); 2663 out_free: 2664 kfree(msp); 2665 out: 2666 return ret; 2667 } 2668 2669 static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2670 { 2671 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2672 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2673 2674 if (pd == NULL || pd->shared_smi == NULL) { 2675 mdiobus_unregister(msp->smi_bus); 2676 mdiobus_free(msp->smi_bus); 2677 } 2678 if (msp->err_interrupt != NO_IRQ) 2679 free_irq(msp->err_interrupt, msp); 2680 iounmap(msp->base); 2681 kfree(msp); 2682 2683 return 0; 2684 } 2685 2686 static struct platform_driver mv643xx_eth_shared_driver = { 2687 .probe = mv643xx_eth_shared_probe, 2688 .remove = mv643xx_eth_shared_remove, 2689 .driver = { 2690 .name = MV643XX_ETH_SHARED_NAME, 2691 .owner = THIS_MODULE, 2692 }, 2693 }; 2694 2695 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) 2696 { 2697 int addr_shift = 5 * mp->port_num; 2698 u32 data; 2699 2700 data = rdl(mp, PHY_ADDR); 2701 data &= ~(0x1f << addr_shift); 2702 data |= (phy_addr & 0x1f) << addr_shift; 2703 wrl(mp, PHY_ADDR, data); 2704 } 2705 2706 static int phy_addr_get(struct mv643xx_eth_private *mp) 2707 { 2708 unsigned int data; 2709 2710 data = rdl(mp, PHY_ADDR); 2711 2712 return (data >> (5 * mp->port_num)) & 0x1f; 2713 } 2714 2715 static void set_params(struct mv643xx_eth_private *mp, 2716 struct mv643xx_eth_platform_data *pd) 2717 { 2718 struct net_device *dev = mp->dev; 2719 2720 if (is_valid_ether_addr(pd->mac_addr)) 2721 memcpy(dev->dev_addr, pd->mac_addr, 6); 2722 else 2723 uc_addr_get(mp, dev->dev_addr); 2724 2725 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2726 if (pd->rx_queue_size) 2727 mp->rx_ring_size = pd->rx_queue_size; 2728 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2729 mp->rx_desc_sram_size = pd->rx_sram_size; 2730 2731 mp->rxq_count = pd->rx_queue_count ? : 1; 2732 2733 mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2734 if (pd->tx_queue_size) 2735 mp->tx_ring_size = pd->tx_queue_size; 2736 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2737 mp->tx_desc_sram_size = pd->tx_sram_size; 2738 2739 mp->txq_count = pd->tx_queue_count ? : 1; 2740 } 2741 2742 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, 2743 int phy_addr) 2744 { 2745 struct mii_bus *bus = mp->shared->smi->smi_bus; 2746 struct phy_device *phydev; 2747 int start; 2748 int num; 2749 int i; 2750 2751 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) { 2752 start = phy_addr_get(mp) & 0x1f; 2753 num = 32; 2754 } else { 2755 start = phy_addr & 0x1f; 2756 num = 1; 2757 } 2758 2759 phydev = NULL; 2760 for (i = 0; i < num; i++) { 2761 int addr = (start + i) & 0x1f; 2762 2763 if (bus->phy_map[addr] == NULL) 2764 mdiobus_scan(bus, addr); 2765 2766 if (phydev == NULL) { 2767 phydev = bus->phy_map[addr]; 2768 if (phydev != NULL) 2769 phy_addr_set(mp, addr); 2770 } 2771 } 2772 2773 return phydev; 2774 } 2775 2776 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) 2777 { 2778 struct phy_device *phy = mp->phy; 2779 2780 phy_reset(mp); 2781 2782 phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII); 2783 2784 if (speed == 0) { 2785 phy->autoneg = AUTONEG_ENABLE; 2786 phy->speed = 0; 2787 phy->duplex = 0; 2788 phy->advertising = phy->supported | ADVERTISED_Autoneg; 2789 } else { 2790 phy->autoneg = AUTONEG_DISABLE; 2791 phy->advertising = 0; 2792 phy->speed = speed; 2793 phy->duplex = duplex; 2794 } 2795 phy_start_aneg(phy); 2796 } 2797 2798 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2799 { 2800 u32 pscr; 2801 2802 pscr = rdlp(mp, PORT_SERIAL_CONTROL); 2803 if (pscr & SERIAL_PORT_ENABLE) { 2804 pscr &= ~SERIAL_PORT_ENABLE; 2805 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2806 } 2807 2808 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2809 if (mp->phy == NULL) { 2810 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2811 if (speed == SPEED_1000) 2812 pscr |= SET_GMII_SPEED_TO_1000; 2813 else if (speed == SPEED_100) 2814 pscr |= SET_MII_SPEED_TO_100; 2815 2816 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL; 2817 2818 pscr |= DISABLE_AUTO_NEG_FOR_DUPLEX; 2819 if (duplex == DUPLEX_FULL) 2820 pscr |= SET_FULL_DUPLEX_MODE; 2821 } 2822 2823 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 2824 } 2825 2826 static const struct net_device_ops mv643xx_eth_netdev_ops = { 2827 .ndo_open = mv643xx_eth_open, 2828 .ndo_stop = mv643xx_eth_stop, 2829 .ndo_start_xmit = mv643xx_eth_xmit, 2830 .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, 2831 .ndo_set_mac_address = mv643xx_eth_set_mac_address, 2832 .ndo_validate_addr = eth_validate_addr, 2833 .ndo_do_ioctl = mv643xx_eth_ioctl, 2834 .ndo_change_mtu = mv643xx_eth_change_mtu, 2835 .ndo_set_features = mv643xx_eth_set_features, 2836 .ndo_tx_timeout = mv643xx_eth_tx_timeout, 2837 .ndo_get_stats = mv643xx_eth_get_stats, 2838 #ifdef CONFIG_NET_POLL_CONTROLLER 2839 .ndo_poll_controller = mv643xx_eth_netpoll, 2840 #endif 2841 }; 2842 2843 static int mv643xx_eth_probe(struct platform_device *pdev) 2844 { 2845 struct mv643xx_eth_platform_data *pd; 2846 struct mv643xx_eth_private *mp; 2847 struct net_device *dev; 2848 struct resource *res; 2849 int err; 2850 2851 pd = pdev->dev.platform_data; 2852 if (pd == NULL) { 2853 dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); 2854 return -ENODEV; 2855 } 2856 2857 if (pd->shared == NULL) { 2858 dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); 2859 return -ENODEV; 2860 } 2861 2862 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8); 2863 if (!dev) 2864 return -ENOMEM; 2865 2866 mp = netdev_priv(dev); 2867 platform_set_drvdata(pdev, mp); 2868 2869 mp->shared = platform_get_drvdata(pd->shared); 2870 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); 2871 mp->port_num = pd->port_number; 2872 2873 mp->dev = dev; 2874 2875 set_params(mp, pd); 2876 netif_set_real_num_tx_queues(dev, mp->txq_count); 2877 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2878 2879 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2880 mp->phy = phy_scan(mp, pd->phy_addr); 2881 2882 if (mp->phy != NULL) 2883 phy_init(mp, pd->speed, pd->duplex); 2884 2885 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2886 2887 init_pscr(mp, pd->speed, pd->duplex); 2888 2889 2890 mib_counters_clear(mp); 2891 2892 init_timer(&mp->mib_counters_timer); 2893 mp->mib_counters_timer.data = (unsigned long)mp; 2894 mp->mib_counters_timer.function = mib_counters_timer_wrapper; 2895 mp->mib_counters_timer.expires = jiffies + 30 * HZ; 2896 add_timer(&mp->mib_counters_timer); 2897 2898 spin_lock_init(&mp->mib_counters_lock); 2899 2900 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2901 2902 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2903 2904 init_timer(&mp->rx_oom); 2905 mp->rx_oom.data = (unsigned long)mp; 2906 mp->rx_oom.function = oom_timer_wrapper; 2907 2908 2909 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2910 BUG_ON(!res); 2911 dev->irq = res->start; 2912 2913 dev->netdev_ops = &mv643xx_eth_netdev_ops; 2914 2915 dev->watchdog_timeo = 2 * HZ; 2916 dev->base_addr = 0; 2917 2918 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 2919 NETIF_F_RXCSUM | NETIF_F_LRO; 2920 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 2921 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2922 2923 dev->priv_flags |= IFF_UNICAST_FLT; 2924 2925 SET_NETDEV_DEV(dev, &pdev->dev); 2926 2927 if (mp->shared->win_protect) 2928 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); 2929 2930 netif_carrier_off(dev); 2931 2932 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); 2933 2934 set_rx_coal(mp, 250); 2935 set_tx_coal(mp, 0); 2936 2937 err = register_netdev(dev); 2938 if (err) 2939 goto out; 2940 2941 netdev_notice(dev, "port %d with MAC address %pM\n", 2942 mp->port_num, dev->dev_addr); 2943 2944 if (mp->tx_desc_sram_size > 0) 2945 netdev_notice(dev, "configured with sram\n"); 2946 2947 return 0; 2948 2949 out: 2950 free_netdev(dev); 2951 2952 return err; 2953 } 2954 2955 static int mv643xx_eth_remove(struct platform_device *pdev) 2956 { 2957 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2958 2959 unregister_netdev(mp->dev); 2960 if (mp->phy != NULL) 2961 phy_detach(mp->phy); 2962 cancel_work_sync(&mp->tx_timeout_task); 2963 free_netdev(mp->dev); 2964 2965 platform_set_drvdata(pdev, NULL); 2966 2967 return 0; 2968 } 2969 2970 static void mv643xx_eth_shutdown(struct platform_device *pdev) 2971 { 2972 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2973 2974 /* Mask all interrupts on ethernet port */ 2975 wrlp(mp, INT_MASK, 0); 2976 rdlp(mp, INT_MASK); 2977 2978 if (netif_running(mp->dev)) 2979 port_reset(mp); 2980 } 2981 2982 static struct platform_driver mv643xx_eth_driver = { 2983 .probe = mv643xx_eth_probe, 2984 .remove = mv643xx_eth_remove, 2985 .shutdown = mv643xx_eth_shutdown, 2986 .driver = { 2987 .name = MV643XX_ETH_NAME, 2988 .owner = THIS_MODULE, 2989 }, 2990 }; 2991 2992 static int __init mv643xx_eth_init_module(void) 2993 { 2994 int rc; 2995 2996 rc = platform_driver_register(&mv643xx_eth_shared_driver); 2997 if (!rc) { 2998 rc = platform_driver_register(&mv643xx_eth_driver); 2999 if (rc) 3000 platform_driver_unregister(&mv643xx_eth_shared_driver); 3001 } 3002 3003 return rc; 3004 } 3005 module_init(mv643xx_eth_init_module); 3006 3007 static void __exit mv643xx_eth_cleanup_module(void) 3008 { 3009 platform_driver_unregister(&mv643xx_eth_driver); 3010 platform_driver_unregister(&mv643xx_eth_shared_driver); 3011 } 3012 module_exit(mv643xx_eth_cleanup_module); 3013 3014 MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, " 3015 "Manish Lachwani, Dale Farnsworth and Lennert Buytenhek"); 3016 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 3017 MODULE_LICENSE("GPL"); 3018 MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 3019 MODULE_ALIAS("platform:" MV643XX_ETH_NAME); 3020