// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder_bgx"
#define DRV_VERSION	"1.0"

/* RX_DMAC_CTL configuration */
enum MCAST_MODE {
	MCAST_MODE_REJECT = 0x0,
	MCAST_MODE_ACCEPT = 0x1,
	MCAST_MODE_CAM_FILTER = 0x2,
	RSVD = 0x3
};

#define BCAST_ACCEPT      BIT(0)
#define CAM_ACCEPT        BIT(3)
#define MCAST_MODE_MASK   0x3
#define BGX_MCAST_MODE(x) (x << 1)

struct dmac_map {
	u64 vf_map;
	u64 dmac;
};

struct lmac {
	struct bgx *bgx;
	/* actual number of DMACs configured */
	u8 dmacs_cfg;
	/* overall number of DMACs that could be configured per LMAC */
	u8 dmacs_count;
	struct dmac_map *dmacs; /* DMAC:VFs tracking filter array */
	u8 mac[ETH_ALEN];
	u8 lmac_type;
	u8 lane_to_sds;
	bool use_training;
	bool autoneg;
	bool link_up;
	int lmacid; /* ID within BGX */
	int lmacid_bd; /* ID on board */
	struct net_device *netdev;
	struct phy_device *phydev;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	bool is_sgmii;
	struct delayed_work dwork;
	struct workqueue_struct *check_link;
};

struct bgx {
	u8 bgx_id;
	struct lmac lmac[MAX_LMAC_PER_BGX];
	u8 lmac_count;
	u8 max_lmac;
	u8 acpi_lmac_idx;
	void __iomem *reg_base;
	struct pci_dev *pdev;
	bool is_dlm;
	bool is_rgx;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver. The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant, and only add overhead.
 */

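/* Note on CSR addressing (an inference from the (u32)lmac << 20 shift used
 * in the accessors below, not a statement from the hardware manual): each
 * LMAC appears to own a 1 MB register window inside the BGX BAR, so a
 * per-LMAC CSR is reached at reg_base + ((u32)lmac << 20) + offset.
 */
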
/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}

static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
{
	int timeout = 100;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	return 1;
}

static int max_bgx_per_node;
static void set_max_bgx_per_node(struct pci_dev *pdev)
{
	u16 sdevid;

	if (max_bgx_per_node)
		return;

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	switch (sdevid) {
	case PCI_SUBSYS_DEVID_81XX_BGX:
	case PCI_SUBSYS_DEVID_81XX_RGX:
		max_bgx_per_node = MAX_BGX_PER_CN81XX;
		break;
	case PCI_SUBSYS_DEVID_83XX_BGX:
		max_bgx_per_node = MAX_BGX_PER_CN83XX;
		break;
	case PCI_SUBSYS_DEVID_88XX_BGX:
	default:
		max_bgx_per_node = MAX_BGX_PER_CN88XX;
		break;
	}
}

static struct bgx *get_bgx(int node, int bgx_idx)
{
	int idx = (node * max_bgx_per_node) + bgx_idx;

	return bgx_vnic[idx];
}

/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
	int i;
	unsigned map = 0;

	for (i = 0; i < max_bgx_per_node; i++) {
		if (bgx_vnic[(node * max_bgx_per_node) + i])
			map |= (1 << i);
	}

	return map;
}
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (bgx)
		return bgx->lmac_count;

	return 0;
}
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];
	link->mac_type = lmac->lmac_type;
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
}
EXPORT_SYMBOL(bgx_set_lmac_mac);

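/* DMAC CAM bookkeeping, summarised from the helpers below (dmacs_count
 * itself is assigned in bgx_lmac_enable()): the RX_DMAC_COUNT CAM entries
 * are split evenly between the enabled LMACs, and LMAC 'n' uses entries
 * (n * dmacs_count) through (n * dmacs_count + dmacs_count - 1).
 */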
static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
{
	struct lmac *lmac = NULL;
	u8 idx = 0;

	lmac = &bgx->lmac[lmacid];
	/* reset CAM filters */
	for (idx = 0; idx < lmac->dmacs_count; idx++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
			      ((lmacid * lmac->dmacs_count) + idx) *
			      sizeof(u64), 0);
}

static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
{
	int i = 0;

	if (!lmac)
		return;

	/* We've got a reset filters request from one of the attached VFs,
	 * while the others might want to keep their configuration. So in
	 * this case iterate over all configured filters and decrease the
	 * number of references; if an address drops to zero refs, remove
	 * it from the list.
	 */
	for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
		lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
		if (!lmac->dmacs[i].vf_map) {
			lmac->dmacs_cfg--;
			lmac->dmacs[i].dmac = 0;
			lmac->dmacs[i].vf_map = 0;
		}
	}
}

static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
{
	u8 i = 0;

	if (!lmac)
		return -1;

	/* At the same time we could have several VFs 'attached' to some
	 * particular LMAC, and each VF is represented as a network interface
	 * to the kernel. So from the user's perspective it should be possible
	 * to manipulate each VF's receive modes. However, from the PF driver's
	 * perspective we need to keep track of filter configurations for
	 * different VFs to prevent duplicate filter values.
	 */
	for (i = 0; i < lmac->dmacs_cfg; i++) {
		if (lmac->dmacs[i].dmac == dmac) {
			lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
			return -1;
		}
	}

	if (!(lmac->dmacs_cfg < lmac->dmacs_count))
		return -1;

	/* keep it for further tracking */
	lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
	lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
	lmac->dmacs_cfg++;
	return 0;
}

static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
				       u64 cam_dmac, u8 idx)
{
	struct lmac *lmac = NULL;
	u64 cfg = 0;

	/* skip zero addresses as meaningless */
	if (!cam_dmac || !bgx)
		return -1;

	lmac = &bgx->lmac[lmacid];

	/* configure DCAM filtering for designated LMAC */
	cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
		RX_DMACX_CAM_EN | cam_dmac;
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
		      ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
	return 0;
}

void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
			     u64 cam_dmac, u8 vf_id)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac = NULL;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	if (!cam_dmac)
		cam_dmac = ether_addr_to_u64(lmac->mac);

	/* since we might have several VFs attached to a particular LMAC
	 * and the kernel could call mcast config for each of them with the
	 * same MAC, check if the requested MAC is already in the filtering
	 * list and update/prepare the list of MACs to be applied later to
	 * HW filters.
	 */
	bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
}
EXPORT_SYMBOL(bgx_set_dmac_cam_filter);

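/* CMRX_RX_DMAC_CTL usage as implied by the BCAST_ACCEPT, CAM_ACCEPT and
 * BGX_MCAST_MODE() definitions at the top of this file (an inference from
 * those macros, not a datasheet quote): bit 0 accepts broadcast frames,
 * bits 2:1 select the multicast mode (reject/accept/CAM filter) and bit 3
 * enables DMAC CAM based acceptance.
 */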
void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac = NULL;
	u64 cfg = 0;
	u8 i = 0;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
	if (mode & BGX_XCAST_BCAST_ACCEPT)
		cfg |= BCAST_ACCEPT;
	else
		cfg &= ~BCAST_ACCEPT;

	/* disable all MCASTs and DMAC filtering */
	cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));

	/* check requested bits and set filtering mode appropriately */
	if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
		cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
	} else if (mode & BGX_XCAST_MCAST_FILTER) {
		cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
		for (i = 0; i < lmac->dmacs_cfg; i++)
			bgx_set_dmac_cam_filter_mac(bgx, lmacid,
						    lmac->dmacs[i].dmac, i);
	}
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_set_xcast_mode);

void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);

	if (!bgx)
		return;

	bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
	bgx_flush_dmac_cam_filter(bgx, lmacid);
	bgx_set_xcast_mode(node, bgx_idx, lmacid,
			   (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
}
EXPORT_SYMBOL(bgx_reset_xcast_mode);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable) {
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;

		/* enable TX FIFO Underflow interrupt */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
			       GMI_TXX_INT_UNDFLW);
	} else {
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);

		/* Disable TX FIFO Underflow interrupt */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
			       GMI_TXX_INT_UNDFLW);
	}
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	if (bgx->is_rgx)
		xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
}
EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);

/* Enables or disables timestamp insertion by BGX for Rx packets */
void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 csr_offset, cfg;

	if (!bgx)
		return;

	lmac = &bgx->lmac[lmacid];

	if (lmac->lmac_type == BGX_MODE_SGMII ||
	    lmac->lmac_type == BGX_MODE_QSGMII ||
	    lmac->lmac_type == BGX_MODE_RGMII)
		csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
	else
		csr_offset = BGX_SMUX_RX_FRM_CTL;

	cfg = bgx_reg_read(bgx, lmacid, csr_offset);

	if (enable)
		cfg |= BGX_PKT_RX_PTP_EN;
	else
		cfg &= ~BGX_PKT_RX_PTP_EN;
	bgx_reg_write(bgx, lmacid, csr_offset, cfg);
}
EXPORT_SYMBOL(bgx_config_timestamping);

void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	pfc->fc_rx = cfg & RX_EN;
	pfc->fc_tx = cfg & TX_EN;
	pfc->autoneg = 0;
}
EXPORT_SYMBOL(bgx_lmac_get_pfc);

void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
	struct pfc *pfc = (struct pfc *)pause;
	struct bgx *bgx = get_bgx(node, bgx_idx);
	struct lmac *lmac;
	u64 cfg;

	if (!bgx)
		return;
	lmac = &bgx->lmac[lmacid];
	if (lmac->is_sgmii)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
	cfg &= ~(RX_EN | TX_EN);
	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
}
EXPORT_SYMBOL(bgx_lmac_set_pfc);

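/* Rough summary of the per-speed GMI settings programmed by the switch
 * statement in bgx_sgmii_change_link_state() below (derived from that code,
 * not from the hardware reference manual):
 *   10 Mbps:   speed=0, speed_msb=1, slot_time=0, samp_pt=50, TX slot 64
 *   100 Mbps:  speed=0, speed_msb=0, slot_time=0, samp_pt=5,  TX slot 64
 *   1000 Mbps: speed=1, speed_msb=0, slot_time=1, samp_pt=1,  TX slot 512
 */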
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	u64 cmr_cfg;
	u64 port_cfg = 0;
	u64 misc_ctl = 0;
	bool tx_en, rx_en;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	tx_en = cmr_cfg & CMR_PKT_TX_EN;
	rx_en = cmr_cfg & CMR_PKT_RX_EN;
	cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
			 GMI_PORT_CFG_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
			bgx->bgx_id, lmac->lmacid);
		return;
	}

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED;		/* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME;	/* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	/* Restore CMR config settings */
	cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
		xcv_setup_link(lmac->link_up, lmac->last_speed);
}

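/* The PHY state-change callback below receives a dummy net_device whose
 * private area holds a single pointer back to the owning struct lmac (set
 * up in bgx_probe() via alloc_netdev_dummy(sizeof(struct lmac *))), which
 * is why the handler treats netdev_priv() as a 'struct lmac **'.
 */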
static void bgx_lmac_handler(struct net_device *netdev)
{
	struct phy_device *phydev;
	struct lmac *lmac, **priv;
	int link_changed = 0;

	priv = netdev_priv(netdev);
	lmac = *priv;
	phydev = lmac->phydev;

	if (!phydev->link && lmac->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {
		link_changed = 1;
	}

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (!link_changed)
		return;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);
}

u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	if (idx > 8)
		lmac = 0;
	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return 0;

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
}
EXPORT_SYMBOL(bgx_get_tx_stats);

/* Configure BGX LMAC in internal loopback mode */
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = get_bgx(node, bgx_idx);
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}
EXPORT_SYMBOL(bgx_lmac_internal_loopback);

static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
{
	int lmacid = lmac->lmacid;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= PCS_MRX_CTL_RST_AN;
	if (lmac->phydev) {
		cfg |= PCS_MRX_CTL_AN_EN;
	} else {
		/* In scenarios where PHY driver is not present or it's a
		 * non-standard PHY, FW sets AN_EN to inform Linux driver
		 * to do auto-neg and link polling or not.
		 */
		if (cfg & PCS_MRX_CTL_AN_EN)
			lmac->autoneg = true;
	}
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (lmac->lmac_type == BGX_MODE_QSGMII) {
		/* Disable disparity check for QSGMII */
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISC_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}

static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
{
	u64 cfg;
	int lmacid = lmac->lmacid;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->lmac_type == BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_INTLV_RDISP);

	/* Clear receive packet disable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (lmac->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Enable receive and transmission of pause frames */
	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
		      BCK_EN | DRP_EN | TX_EN | RX_EN));
	/* Configure pause time and interval */
	bgx_reg_write(bgx, lmacid,
		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
	cfg &= ~0xFFFFull;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return 0;
}

static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
		return -1;
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
		return -1;
	}

	/* Check for MAC RX faults */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
	/* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
	cfg &= SMU_RX_CTL_STATUS;
	if (!cfg)
		return 0;

	/* Rx local/remote fault seen.
	 * Do lmac reinit to see if condition recovers
	 */
	bgx_lmac_xaui_init(bgx, lmac);

	return -1;
}

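/* bgx_poll_for_sgmii_link() below decodes BGX_GMP_PCS_ANX_AN_RESULTS with
 * plain shifts; the field meanings assumed here (speed in bits 4:3, duplex
 * in bit 1) are read off those shifts rather than quoted from the register
 * specification.
 */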
static void bgx_poll_for_sgmii_link(struct lmac *lmac)
{
	u64 pcs_link, an_result;
	u8 speed;

	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
				BGX_GMP_PCS_MRX_STATUS);

	/* Link state bit is sticky, read it again */
	if (!(pcs_link & PCS_MRX_STATUS_LINK))
		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
					BGX_GMP_PCS_MRX_STATUS);

	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		goto next_poll;
	}

	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
				 BGX_GMP_PCS_ANX_AN_RESULTS);

	speed = (an_result >> 3) & 0x3;
	lmac->last_duplex = (an_result >> 1) & 0x1;
	switch (speed) {
	case 0:
		lmac->last_speed = SPEED_10;
		break;
	case 1:
		lmac->last_speed = SPEED_100;
		break;
	case 2:
		lmac->last_speed = SPEED_1000;
		break;
	default:
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
		break;
	}

next_poll:

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up)
			bgx_sgmii_change_link_state(lmac);
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
}

static void bgx_poll_for_link(struct work_struct *work)
{
	struct lmac *lmac;
	u64 spu_link, smu_link;

	lmac = container_of(work, struct lmac, dwork.work);
	if (lmac->is_sgmii) {
		bgx_poll_for_sgmii_link(lmac);
		return;
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

	if ((spu_link & SPU_STATUS1_RCV_LNK) &&
	    !(smu_link & SMU_RX_CTL_STATUS)) {
		lmac->link_up = true;
		if (lmac->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = SPEED_40000;
		else
			lmac->last_speed = SPEED_10000;
		lmac->last_duplex = DUPLEX_FULL;
	} else {
		lmac->link_up = false;
		lmac->last_speed = SPEED_UNKNOWN;
		lmac->last_duplex = DUPLEX_UNKNOWN;
	}

	if (lmac->last_link != lmac->link_up) {
		if (lmac->link_up) {
			if (bgx_xaui_check_link(lmac)) {
				/* Errors, clear link_up state */
				lmac->link_up = false;
				lmac->last_speed = SPEED_UNKNOWN;
				lmac->last_duplex = DUPLEX_UNKNOWN;
			}
		}
		lmac->last_link = lmac->link_up;
	}

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
}

static int phy_interface_mode(u8 lmac_type)
{
	if (lmac_type == BGX_MODE_QSGMII)
		return PHY_INTERFACE_MODE_QSGMII;
	if (lmac_type == BGX_MODE_RGMII)
		return PHY_INTERFACE_MODE_RGMII_RXID;

	return PHY_INTERFACE_MODE_SGMII;
}

static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if ((lmac->lmac_type == BGX_MODE_SGMII) ||
	    (lmac->lmac_type == BGX_MODE_QSGMII) ||
	    (lmac->lmac_type == BGX_MODE_RGMII)) {
		lmac->is_sgmii = true;
		if (bgx_lmac_sgmii_init(bgx, lmac))
			return -1;
	} else {
		lmac->is_sgmii = false;
		if (bgx_lmac_xaui_init(bgx, lmac))
			return -1;
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* actual number of filters available to this LMAC */
	lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
	lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
			      GFP_KERNEL);
	if (!lmac->dmacs)
		return -ENOMEM;

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
		if (!lmac->phydev) {
			if (lmac->autoneg) {
				bgx_reg_write(bgx, lmacid,
					      BGX_GMP_PCS_LINKX_TIMER,
					      PCS_LINKX_TIMER_COUNT);
				goto poll;
			} else {
				/* Default to below link speed and duplex */
				lmac->link_up = true;
				lmac->last_speed = SPEED_1000;
				lmac->last_duplex = DUPLEX_FULL;
				bgx_sgmii_change_link_state(lmac);
				return 0;
			}
		}
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(lmac->netdev, lmac->phydev,
				       bgx_lmac_handler,
				       phy_interface_mode(lmac->lmac_type)))
			return -ENODEV;

		phy_start(lmac->phydev);
		return 0;
	}

poll:
	lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM);
	if (!lmac->check_link)
		return -ENOMEM;
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);

	return 0;
}

static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work_sync(&lmac->dwork);
		destroy_workqueue(lmac->check_link);
	}

	/* Disable packet reception */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_RX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Give chance for Rx/Tx FIFO to get drained */
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
	bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);

	/* Disable packet transmission */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_PKT_TX_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Disable serdes lanes */
	if (!lmac->is_sgmii)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	else
		bgx_reg_modify(bgx, lmacid,
			       BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_flush_dmac_cam_filter(bgx, lmacid);
	kfree(lmac->dmacs);

	if ((lmac->lmac_type != BGX_MODE_XFI) &&
	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
	    (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);

	lmac->phydev = NULL;
}

static void bgx_init_hw(struct bgx *bgx)
{
	int i;
	struct lmac *lmac;

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		lmac = &bgx->lmac[i];
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (lmac->lmac_type << 8) | lmac->lane_to_sds);
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
}

static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
{
	return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
}

static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
{
	struct device *dev = &bgx->pdev->dev;
	struct lmac *lmac;
	char str[27];

	if (!bgx->is_dlm && lmacid)
		return;

	lmac = &bgx->lmac[lmacid];
	if (!bgx->is_dlm)
		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
	else
		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);

	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
		dev_info(dev, "%s: SGMII\n", (char *)str);
		break;
	case BGX_MODE_XAUI:
		dev_info(dev, "%s: XAUI\n", (char *)str);
		break;
	case BGX_MODE_RXAUI:
		dev_info(dev, "%s: RXAUI\n", (char *)str);
		break;
	case BGX_MODE_XFI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XFI\n", (char *)str);
		else
			dev_info(dev, "%s: 10G_KR\n", (char *)str);
		break;
	case BGX_MODE_XLAUI:
		if (!lmac->use_training)
			dev_info(dev, "%s: XLAUI\n", (char *)str);
		else
			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
		break;
	case BGX_MODE_QSGMII:
		dev_info(dev, "%s: QSGMII\n", (char *)str);
		break;
	case BGX_MODE_RGMII:
		dev_info(dev, "%s: RGMII\n", (char *)str);
		break;
	case BGX_MODE_INVALID:
		/* Nothing to do */
		break;
	}
}

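/* lane_to_sds appears to pack one 2-bit SerDes index per LMAC lane, so 0xE4
 * is the identity mapping 3,2,1,0.  This interpretation is inferred from the
 * constants used in lmac_set_lane2sds() below; the authoritative encoding is
 * in the hardware manual.
 */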
static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
{
	switch (lmac->lmac_type) {
	case BGX_MODE_SGMII:
	case BGX_MODE_XFI:
		lmac->lane_to_sds = lmac->lmacid;
		break;
	case BGX_MODE_XAUI:
	case BGX_MODE_XLAUI:
	case BGX_MODE_RGMII:
		lmac->lane_to_sds = 0xE4;
		break;
	case BGX_MODE_RXAUI:
		lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
		break;
	case BGX_MODE_QSGMII:
		/* There is no way to determine if DLM0/2 is QSGMII or
		 * DLM1/3 is configured to QSGMII as bootloader will
		 * configure all LMACs, so take whatever is configured
		 * by low level firmware.
		 */
		lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
		break;
	default:
		lmac->lane_to_sds = 0;
		break;
	}
}

static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
{
	if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
	    (lmac->lmac_type != BGX_MODE_40G_KR)) {
		lmac->use_training = false;
		return;
	}

	lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
			     SPU_PMD_CRTL_TRAIN_EN;
}

static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
	struct lmac *lmac;
	u64 cmr_cfg;
	u8 lmac_type;
	u8 lane_to_sds;

	lmac = &bgx->lmac[idx];

	if (!bgx->is_dlm || bgx->is_rgx) {
		/* Read LMAC0 type to figure out QLM mode
		 * This is configured by low level firmware
		 */
		cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
		lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
		if (bgx->is_rgx)
			lmac->lmac_type = BGX_MODE_RGMII;
		lmac_set_training(bgx, lmac, 0);
		lmac_set_lane2sds(bgx, lmac);
		return;
	}

	/* For DLMs or SLMs on 80/81/83xx so many lane configurations
	 * are possible and vary across boards. Also Kernel doesn't have
	 * any way to identify board type/info and since firmware does,
	 * just take lmac type and serdes lane config as is.
	 */
	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
	lane_to_sds = (u8)(cmr_cfg & 0xFF);
	/* Check if config is reset value */
	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
		lmac->lmac_type = BGX_MODE_INVALID;
	else
		lmac->lmac_type = lmac_type;
	lmac->lane_to_sds = lane_to_sds;
	lmac_set_training(bgx, lmac, lmac->lmacid);
}

static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	u8 idx;

	/* Init all LMAC's type to invalid */
	for (idx = 0; idx < bgx->max_lmac; idx++) {
		lmac = &bgx->lmac[idx];
		lmac->lmacid = idx;
		lmac->lmac_type = BGX_MODE_INVALID;
		lmac->use_training = false;
	}

	/* It is assumed that low level firmware sets this value */
	bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (bgx->lmac_count > bgx->max_lmac)
		bgx->lmac_count = bgx->max_lmac;

	for (idx = 0; idx < bgx->lmac_count; idx++) {
		bgx_set_lmac_config(bgx, idx);
		bgx_print_qlm_mode(bgx, idx);
	}
}

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
				u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret;

	ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
	if (ret) {
		dev_err(dev, "MAC address invalid: %pM\n", mac);
		return -EINVAL;
	}

	dev_info(dev, "MAC address set to: %pM\n", mac);

	ether_addr_copy(dst, mac);
	return 0;
}

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
{
	struct bgx *bgx = context;
	struct device *dev = &bgx->pdev->dev;
	struct acpi_device *adev;

	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		goto out;

	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

	SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

	bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
	bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
	return AE_OK;
}

static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;
	char bgx_sel[5];

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");
		return AE_OK;
	}

	if (strncmp(string.pointer, bgx_sel, 4)) {
		kfree(string.pointer);
		return AE_OK;
	}

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;
}

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
	return 0;
}

#else

static int bgx_init_acpi_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)

static int bgx_init_of_phy(struct bgx *bgx)
{
	struct fwnode_handle *fwn;
	struct device_node *node = NULL;
	u8 lmac = 0;

	device_for_each_child_node(&bgx->pdev->dev, fwn) {
		struct phy_device *pd;
		struct device_node *phy_np;

		/* Should always be an OF node. But if it is not, we
		 * cannot handle it, so exit the loop.
		 */
		node = to_of_node(fwn);
		if (!node)
			break;

		of_get_mac_address(node, bgx->lmac[lmac].mac);

		SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;

		phy_np = of_parse_phandle(node, "phy-handle", 0);
		/* If there is no phy or defective firmware presents
		 * this cortina phy, for which there is no driver
		 * support, ignore it.
		 */
		if (phy_np &&
		    !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
			/* Wait until the phy drivers are available */
			pd = of_phy_find_device(phy_np);
			if (!pd)
				goto defer;
			bgx->lmac[lmac].phydev = pd;
		}

		lmac++;
		if (lmac == bgx->max_lmac) {
			of_node_put(node);
			break;
		}
	}
	return 0;

defer:
	/* We are bailing out, try not to leak device reference counts
	 * for phy devices we may have already found.
	 */
	while (lmac) {
		if (bgx->lmac[lmac].phydev) {
			put_device(&bgx->lmac[lmac].phydev->mdio.dev);
			bgx->lmac[lmac].phydev = NULL;
		}
		lmac--;
	}
	of_node_put(node);
	return -EPROBE_DEFER;
}

#else

static int bgx_init_of_phy(struct bgx *bgx)
{
	return -ENODEV;
}

#endif /* CONFIG_OF_MDIO */

static int bgx_init_phy(struct bgx *bgx)
{
	if (!acpi_disabled)
		return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
}

static irqreturn_t bgx_intr_handler(int irq, void *data)
{
	struct bgx *bgx = (struct bgx *)data;
	u64 status, val;
	int lmac;

	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
		if (status & GMI_TXX_INT_UNDFLW) {
			pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
				bgx->bgx_id, lmac);
			val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
			val &= ~CMR_EN;
			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
			val |= CMR_EN;
			bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
		}
		/* clear interrupts */
		bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
	}

	return IRQ_HANDLED;
}

static void bgx_register_intr(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
				    BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		pci_err(pdev, "Req for #%d msix vectors failed\n",
			BGX_LMAC_VEC_OFFSET);
		return;
	}
	ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
			      bgx, "BGX%d", bgx->bgx_id);
	if (ret)
		pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
}

static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;
	u8 lmac;
	u16 sdevid;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
	if (!bgx)
		return -ENOMEM;
	bgx->pdev = pdev;

	pci_set_drvdata(pdev, bgx);

	err = pcim_enable_device(pdev);
	if (err) {
		pci_set_drvdata(pdev, NULL);
		return dev_err_probe(dev, err, "Failed to enable PCI device\n");
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	set_max_bgx_per_node(pdev);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
		bgx->bgx_id = (pci_resource_start(pdev,
			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
		bgx->max_lmac = MAX_LMAC_PER_BGX;
		bgx_vnic[bgx->bgx_id] = bgx;
	} else {
		bgx->is_rgx = true;
		bgx->max_lmac = 1;
		bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
		bgx_vnic[bgx->bgx_id] = bgx;
		xcv_init_hw();
	}

	/* On 81xx all are DLMs and on 83xx there are 3 BGX QLMs and one
	 * BGX, i.e. BGX2, can be split across 2 DLMs.
	 */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
	    ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
		bgx->is_dlm = true;

	bgx_get_qlm_mode(bgx);

	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		struct lmac *lmacp, **priv;

		lmacp = &bgx->lmac[lmac];
		lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));

		if (!lmacp->netdev) {
			for (int i = 0; i < lmac; i++)
				free_netdev(bgx->lmac[i].netdev);
			err = -ENOMEM;
			goto err_enable;
		}

		priv = netdev_priv(lmacp->netdev);
		*priv = lmacp;
	}

	err = bgx_init_phy(bgx);
	if (err)
		goto err_enable;

	bgx_init_hw(bgx);

	bgx_register_intr(pdev);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			dev_err(dev, "BGX%d failed to enable lmac%d\n",
				bgx->bgx_id, lmac);
			while (lmac)
				bgx_lmac_disable(bgx, --lmac);
			goto err_enable;
		}
	}

	return 0;

err_enable:
	bgx_vnic[bgx->bgx_id] = NULL;
	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void bgx_remove(struct pci_dev *pdev)
{
	struct bgx *bgx = pci_get_drvdata(pdev);
	u8 lmac;

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		bgx_lmac_disable(bgx, lmac);
		free_netdev(bgx->lmac[lmac].netdev);
	}

	pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);
}

static void __exit bgx_cleanup_module(void)
{
	pci_unregister_driver(&bgx_driver);
}

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);