/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, see <http://www.gnu.org/licenses/>.           *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

/* (Re-)arm the periodic MAC statistics accumulation work @secs from now. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

/*
 * Cancel a pending, not-yet-running statistics update.  Note that
 * cancel_delayed_work() does not wait for an already-running instance;
 * callers synchronize with the work via adapter->work_lock.
 */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

/* Upper/lower bounds for the SGE queue sizes configurable via ethtool. */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES 32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock selections for the T1B re-clocking below. */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}

/* Log the current carrier state (and speed/duplex when up) for a port. */
static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		netdev_info(p->dev, "link down\n");
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		netdev_info(p->dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

/*
 * Called on a link state change: propagate carrier state to the net device,
 * log the transition, and on multi-port adapters reprogram the TX scheduler
 * with the new port speed.
 */
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

/*
 * Bring a port's MAC up: reset it, program its address and RX filters,
 * start autonegotiation/link, then enable both directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/* Enable the checksum offloads the netdev features advertise. */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time HW init, done on the first port open only. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Try MSI unless disabled by module parameter; fall back to INTx. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
				   t1_interrupt_thread,
				   adapter->params.has_msi ? 0 : IRQF_SHARED,
				   adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}

/* ndo_open: bring the adapter up on first open, then start this port. */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	/* First port opened brings up the shared adapter state. */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	/* Only the first opened port needs to arm the stats timer. */
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}

/* ndo_stop: quiesce this port; tear the adapter down on last close. */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation.
		 */
		smp_mb__after_atomic();
		/*
		 * Briefly take work_lock so a mac_stats_task instance that is
		 * mid-flight (and may re-arm itself) finishes before we cancel.
		 */
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

/* ndo_get_stats: fold the full HW MAC counters into dev->stats. */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &dev->stats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->RxFCSErrors;
	ns->rx_frame_errors = pstats->RxAlignErrors;
	ns->rx_fifo_errors = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors =
		pstats->TxLateCollisions;
	return ns;
}

/* ethtool get_msglevel: report the driver's message-enable bitmap. */
static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}

/* ethtool set_msglevel: set the driver's message-enable bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}

/*
 * Names for the ethtool -S statistics; order must match the *data++
 * sequence emitted by get_stats() below.
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

/* Size of the register dump returned by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct
						   ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

/* Number of strings for each ethtool string set (only stats supported). */
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * ethtool -S: emit MAC, per-port SGE, interrupt and (optionally) ESPI
 * counters, in exactly the order declared in stats_strings[].
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* ESPI counters exist only on adapters with an ESPI block. */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}

/*
 * Copy the 32-bit registers in [start, end] into buf at the same offsets
 * they occupy in BAR0, so the dump is self-describing.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

/* ethtool get_regs: dump the interesting T2 register blocks. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL,
		       A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

/* ethtool: report link settings from the cached link_config. */
static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	u32 supported, advertising;

	supported = p->link_config.supported;
	advertising = p->link_config.advertising;

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy->mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

/*
 * Map a (speed, duplex) pair to the matching SUPPORTED_* capability bit;
 * returns 0 for unsupported combinations (e.g. 10G half duplex).
 */
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

/* All ADVERTISED_* link-mode bits this driver can honor. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full \
			 | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

/*
 * ethtool: change speed/duplex/autoneg.  Forced 1000 Mb/s is rejected
 * (t1 hardware restriction); with autoneg, a single advertised mode is
 * honored, otherwise we fall back to advertising everything supported.
 */
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		/* More than one bit set: advertise all supported modes. */
		if (advertising & (advertising - 1))
			advertising = lc->supported;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

/* ethtool: report pause-frame (flow control) configuration. */
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

/* ethtool: change pause-frame (flow control) configuration. */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Let autonegotiation resolve the new pause settings. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		/* Forced mode: apply to the MAC directly. */
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

/*
 * ethtool -g: report SGE ring sizes.  T1B keeps jumbo buffers on free
 * list 1, other chips on free list 0 (hence the jumbo_fl index flip).
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
			  struct kernel_ethtool_ringparam *kernel_e,
			  struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

/* ethtool -G: resize the SGE rings; only allowed before first ifup. */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
			 struct kernel_ethtool_ringparam *kernel_e,
			 struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ?
				      1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	/* Rings are sized at HW init; cannot be changed afterwards. */
	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

/* ethtool -C: apply interrupt-coalescing parameters to the SGE. */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
			struct kernel_ethtool_coalesce *kernel_coal,
			struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

/* ethtool -c: report the current interrupt-coalescing parameters. */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
			struct kernel_ethtool_coalesce *kernel_coal,
			struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

/* Only ASIC adapters expose a serial EEPROM. */
static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ?
				     EEPROM_SIZE : 0;
}

/* Magic identifying this adapter family's EEPROM contents. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool -e: read the EEPROM.  Reads whole 32-bit words covering the
 * requested range into an aligned bounce buffer, then copies the exact
 * byte span out (the core caps offset/len at get_eeprom_len()).
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

/* ndo_eth_ioctl: forward MII ioctls to the PHY's MDIO layer. */
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

/* ndo_change_mtu: program the MAC first; only commit dev->mtu on success. */
static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if ((ret = mac->ops->set_mtu(mac,
new_mtu))) 846 return ret; 847 dev->mtu = new_mtu; 848 return 0; 849 } 850 851 static int t1_set_mac_addr(struct net_device *dev, void *p) 852 { 853 struct adapter *adapter = dev->ml_priv; 854 struct cmac *mac = adapter->port[dev->if_port].mac; 855 struct sockaddr *addr = p; 856 857 if (!mac->ops->macaddress_set) 858 return -EOPNOTSUPP; 859 860 eth_hw_addr_set(dev, addr->sa_data); 861 mac->ops->macaddress_set(mac, dev->dev_addr); 862 return 0; 863 } 864 865 static netdev_features_t t1_fix_features(struct net_device *dev, 866 netdev_features_t features) 867 { 868 /* 869 * Since there is no support for separate rx/tx vlan accel 870 * enable/disable make sure tx flag is always in same state as rx. 871 */ 872 if (features & NETIF_F_HW_VLAN_CTAG_RX) 873 features |= NETIF_F_HW_VLAN_CTAG_TX; 874 else 875 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 876 877 return features; 878 } 879 880 static int t1_set_features(struct net_device *dev, netdev_features_t features) 881 { 882 netdev_features_t changed = dev->features ^ features; 883 struct adapter *adapter = dev->ml_priv; 884 885 if (changed & NETIF_F_HW_VLAN_CTAG_RX) 886 t1_vlan_mode(adapter, features); 887 888 return 0; 889 } 890 #ifdef CONFIG_NET_POLL_CONTROLLER 891 static void t1_netpoll(struct net_device *dev) 892 { 893 unsigned long flags; 894 struct adapter *adapter = dev->ml_priv; 895 896 local_irq_save(flags); 897 t1_interrupt(adapter->pdev->irq, adapter); 898 local_irq_restore(flags); 899 } 900 #endif 901 902 /* 903 * Periodic accumulation of MAC statistics. This is used only if the MAC 904 * does not have any other way to prevent stats counter overflow. 
905 */ 906 static void mac_stats_task(struct work_struct *work) 907 { 908 int i; 909 struct adapter *adapter = 910 container_of(work, struct adapter, stats_update_task.work); 911 912 for_each_port(adapter, i) { 913 struct port_info *p = &adapter->port[i]; 914 915 if (netif_running(p->dev)) 916 p->mac->ops->statistics_update(p->mac, 917 MAC_STATS_UPDATE_FAST); 918 } 919 920 /* Schedule the next statistics update if any port is active. */ 921 spin_lock(&adapter->work_lock); 922 if (adapter->open_device_map & PORT_MASK) 923 schedule_mac_stats_update(adapter, 924 adapter->params.stats_update_period); 925 spin_unlock(&adapter->work_lock); 926 } 927 928 static const struct net_device_ops cxgb_netdev_ops = { 929 .ndo_open = cxgb_open, 930 .ndo_stop = cxgb_close, 931 .ndo_start_xmit = t1_start_xmit, 932 .ndo_get_stats = t1_get_stats, 933 .ndo_validate_addr = eth_validate_addr, 934 .ndo_set_rx_mode = t1_set_rxmode, 935 .ndo_eth_ioctl = t1_ioctl, 936 .ndo_change_mtu = t1_change_mtu, 937 .ndo_set_mac_address = t1_set_mac_addr, 938 .ndo_fix_features = t1_fix_features, 939 .ndo_set_features = t1_set_features, 940 #ifdef CONFIG_NET_POLL_CONTROLLER 941 .ndo_poll_controller = t1_netpoll, 942 #endif 943 }; 944 945 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 946 { 947 int i, err, pci_using_dac = 0; 948 unsigned long mmio_start, mmio_len; 949 const struct board_info *bi; 950 struct adapter *adapter = NULL; 951 struct port_info *pi; 952 953 err = pci_enable_device(pdev); 954 if (err) 955 return err; 956 957 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 958 pr_err("%s: cannot find PCI device memory base address\n", 959 pci_name(pdev)); 960 err = -ENODEV; 961 goto out_disable_pdev; 962 } 963 964 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 965 pci_using_dac = 1; 966 967 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 968 pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n", 969 pci_name(pdev)); 970 err = -ENODEV; 
			goto out_disable_pdev;
		}

	} else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* The adapter struct lives in the first port's priv area. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			/* First iteration: set up shared adapter state. */
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;	/* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	/* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port net_device initialization. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on TX. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;

		/* Board-specific maximum MTU. */
		switch (bi->board) {
		case CHBT_BOARD_CHT110:
		case CHBT_BOARD_N110:
		case CHBT_BOARD_N210:
		case CHBT_BOARD_CHT210:
			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
			break;
		case CHBT_BOARD_CHN204:
			netdev->max_mtu = VSC7326_MAX_MTU;
			break;
		default:
			netdev->max_mtu = ETH_DATA_LEN;
			break;
		}
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go. If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully. However we must
	 * register at least one net device.
1098 */ 1099 for (i = 0; i < bi->port_number; ++i) { 1100 err = register_netdev(adapter->port[i].dev); 1101 if (err) 1102 pr_warn("%s: cannot register net device %s, skipping\n", 1103 pci_name(pdev), adapter->port[i].dev->name); 1104 else { 1105 /* 1106 * Change the name we use for messages to the name of 1107 * the first successfully registered interface. 1108 */ 1109 if (!adapter->registered_device_map) 1110 adapter->name = adapter->port[i].dev->name; 1111 1112 __set_bit(i, &adapter->registered_device_map); 1113 } 1114 } 1115 if (!adapter->registered_device_map) { 1116 pr_err("%s: could not register any net devices\n", 1117 pci_name(pdev)); 1118 err = -EINVAL; 1119 goto out_release_adapter_res; 1120 } 1121 1122 pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n", 1123 adapter->name, bi->desc, adapter->params.chip_revision, 1124 adapter->params.pci.is_pcix ? "PCIX" : "PCI", 1125 adapter->params.pci.speed, adapter->params.pci.width); 1126 1127 /* 1128 * Set the T1B ASIC and memory clocks. 1129 */ 1130 if (t1powersave) 1131 adapter->t1powersave = LCLOCK; /* HW default is powersave mode. 
*/
	else
		adapter->t1powersave = HCLOCK;	/* run at full (high) clock */
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		/* Free ports in reverse; slots that were never allocated are NULL. */
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

/*
 * Serially shift the low @nbits bits of @bitdata, most-significant bit
 * first, out to the clock-synthesizer programming pins wired to ELMER0's
 * GPO register: for each bit, place it on S_DATA, then drive S_CLOCK low
 * and back high.  NOTE(review): the device presumably latches the data
 * bit on the rising S_CLOCK edge — inferred from the low-then-high
 * sequence, confirm against the synthesizer datasheet.
 *
 * Uses the raw __t1_tpi_* accessors; the only caller (t1_clock) holds
 * adapter->tpi_lock around these calls, so the GPO read-modify-write is
 * not interrupted.
 */
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	/* GPO bit positions of the synthesizer serial clock and data pins */
	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		/* Put the next data bit on S_DATA, preserving all other GPO bits */
		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}

/*
 * Switch a T1B ASIC between its power-save (low) and full-speed (high)
 * clocks by serially reprogramming the core and memory clock synthesizers
 * through ELMER0 GPO pins.
 *
 * @mode: bit 0 selects the target mode, HCLOCK (0) or LCLOCK (1);
 *        if bit 1 is set the call is a query only and returns 0 without
 *        touching the hardware.
 *
 * Return: 0 on success or query, -ENODEV on non-T1B chips (they cannot
 * be re-clocked), -EALREADY if the ASIC is already in the requested mode.
 *
 * The whole serial programming sequence — including the udelay(50)
 * spacing between GPO accesses — is timing-sensitive and must not be
 * reordered; it runs under tpi_lock so nothing else interleaves TPI
 * accesses with the bit stream.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	/*
	 * T/N/M field values and bit widths for the core and memory clock
	 * synthesizers, plus GPO control-bit positions.  NOTE(review):
	 * NP_LOAD and S_LOAD_CORE/S_LOAD_MEM presumably strobe the new
	 * parameters into the synthesizers — confirm against the board
	 * schematic / synthesizer datasheet.
	 */
	enum {
		M_CORE_BITS = 9,
		T_CORE_VAL = 0,
		T_CORE_BITS = 2,
		N_CORE_VAL = 0,
		N_CORE_BITS = 2,
		M_MEM_BITS = 9,
		T_MEM_VAL = 0,
		T_MEM_BITS = 2,
		N_MEM_VAL = 0,
		N_MEM_BITS = 2,
		NP_LOAD = 1 << 17,
		S_LOAD_MEM = 1 << 5,
		S_LOAD_CORE = 1 << 6,
		S_CLOCK = 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Pick the M divider values for the requested speed. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the new values. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}

/*
 * Soft-reset the chip by bouncing it through PCI power states via the
 * power-management control/status register.  NOTE(review): 3 and 0 in
 * the PMCSR power-state field are presumably D3hot and D0 per the PCI PM
 * spec — confirm A_PCICFG_PM_CSR maps to the standard PMCSR.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

/*
 * PCI remove callback: unregister every net device that registered
 * successfully in init_one (tracked in registered_device_map), tear down
 * the software modules, unmap the register BAR, free all port net devices,
 * release PCI resources and finally soft-reset the chip.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/*
	 * Free the port devices in reverse order.  NOTE(review): relies on
	 * for_each_port leaving i at the port count — verify its definition.
	 */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}

/* PCI driver glue: probe/remove entry points for Chelsio T1 devices. */
static struct pci_driver cxgb_pci_driver = {
	.name = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
};

module_pci_driver(cxgb_pci_driver);