// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME "Marvell-CGX/RPM"
#define DRV_STRING "Marvell CGX/RPM Driver"

#define CGX_RX_STAT_GLOBAL_INDEX 9

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format(Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
	[CGX_LINK_NONE] = 0,
	[CGX_LINK_10M] = 10,
	[CGX_LINK_100M] = 100,
	[CGX_LINK_1G] = 1000,
	[CGX_LINK_2HG] = 2500,
	[CGX_LINK_5G] = 5000,
	[CGX_LINK_10G] = 10000,
	[CGX_LINK_20G] = 20000,
	[CGX_LINK_25G] = 25000,
	[CGX_LINK_40G] = 40000,
	[CGX_LINK_50G] = 50000,
	[CGX_LINK_80G] = 80000,
	[CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
	[LMAC_MODE_SGMII] = "SGMII",
	[LMAC_MODE_XAUI] = "XAUI",
	[LMAC_MODE_RXAUI] = "RXAUI",
	[LMAC_MODE_10G_R] = "10G_R",
	[LMAC_MODE_40G_R] = "40G_R",
	[LMAC_MODE_QSGMII] = "QSGMII",
	[LMAC_MODE_25G_R] = "25G_R",
	[LMAC_MODE_50G_R] = "50G_R",
	[LMAC_MODE_100G_R] = "100G_R",
	[LMAC_MODE_USXGMII] = "USXGMII",
	[LMAC_MODE_USGMII] = "USGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
	{ 0, } /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
	       (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
	if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
		return false;
	return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper function to get sequential index
 * given the enabled LMAC of a CGX
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
	int tmp, id = 0;

	for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
		if (tmp == lmac_id)
			break;
		id++;
	}

	return id;
}

struct mac_ops *get_mac_ops(void *cgxd)
{
	if (!cgxd)
		return cgxd;

	return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
	       offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
		     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

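	/* Walk the global cgx_list built up at probe time: the largest
	 * cgx_id seen, plus one, is the number of CGX/RPM blocks; 0 is
	 * returned when nothing has been probed yet.
	 */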
list_for_each_entry(cgx_dev, &cgx_list, cgx_list) 141 if (cgx_dev->cgx_id > idmax) 142 idmax = cgx_dev->cgx_id; 143 144 if (idmax < 0) 145 return 0; 146 147 return idmax + 1; 148 } 149 150 int cgx_get_lmac_cnt(void *cgxd) 151 { 152 struct cgx *cgx = cgxd; 153 154 if (!cgx) 155 return -ENODEV; 156 157 return cgx->lmac_count; 158 } 159 160 void *cgx_get_pdata(int cgx_id) 161 { 162 struct cgx *cgx_dev; 163 164 list_for_each_entry(cgx_dev, &cgx_list, cgx_list) { 165 if (cgx_dev->cgx_id == cgx_id) 166 return cgx_dev; 167 } 168 return NULL; 169 } 170 171 void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val) 172 { 173 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 174 175 /* Software must not access disabled LMAC registers */ 176 if (!is_lmac_valid(cgx_dev, lmac_id)) 177 return; 178 cgx_write(cgx_dev, lmac_id, offset, val); 179 } 180 181 u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset) 182 { 183 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 184 185 /* Software must not access disabled LMAC registers */ 186 if (!is_lmac_valid(cgx_dev, lmac_id)) 187 return 0; 188 189 return cgx_read(cgx_dev, lmac_id, offset); 190 } 191 192 int cgx_get_cgxid(void *cgxd) 193 { 194 struct cgx *cgx = cgxd; 195 196 if (!cgx) 197 return -EINVAL; 198 199 return cgx->cgx_id; 200 } 201 202 u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) 203 { 204 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 205 u64 cfg; 206 207 cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG); 208 209 return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; 210 } 211 212 /* Ensure the required lock for event queue(where asynchronous events are 213 * posted) is acquired before calling this API. Else an asynchronous event(with 214 * latest link status) can reach the destination before this function returns 215 * and could make the link status appear wrong. 
216 */ 217 int cgx_get_link_info(void *cgxd, int lmac_id, 218 struct cgx_link_user_info *linfo) 219 { 220 struct lmac *lmac = lmac_pdata(lmac_id, cgxd); 221 222 if (!lmac) 223 return -ENODEV; 224 225 *linfo = lmac->link_info; 226 return 0; 227 } 228 229 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) 230 { 231 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 232 struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); 233 struct mac_ops *mac_ops; 234 int index, id; 235 u64 cfg; 236 237 if (!lmac) 238 return -ENODEV; 239 240 /* access mac_ops to know csr_offset */ 241 mac_ops = cgx_dev->mac_ops; 242 243 /* copy 6bytes from macaddr */ 244 /* memcpy(&cfg, mac_addr, 6); */ 245 246 cfg = ether_addr_to_u64(mac_addr); 247 248 id = get_sequence_id_of_lmac(cgx_dev, lmac_id); 249 250 index = id * lmac->mac_to_index_bmap.max; 251 252 cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 253 cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); 254 255 cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); 256 cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | 257 CGX_DMAC_MCAST_MODE); 258 cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); 259 260 return 0; 261 } 262 263 u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) 264 { 265 struct mac_ops *mac_ops; 266 struct cgx *cgx = cgxd; 267 268 if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) 269 return 0; 270 271 cgx = cgxd; 272 /* Get mac_ops to know csr offset */ 273 mac_ops = cgx->mac_ops; 274 275 return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); 276 } 277 278 u64 cgx_read_dmac_entry(void *cgxd, int index) 279 { 280 struct mac_ops *mac_ops; 281 struct cgx *cgx; 282 283 if (!cgxd) 284 return 0; 285 286 cgx = cgxd; 287 mac_ops = cgx->mac_ops; 288 return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); 289 } 290 291 int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) 292 { 293 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 294 struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); 295 struct mac_ops *mac_ops; 296 int index, idx; 297 u64 cfg = 0; 298 int id; 299 300 if (!lmac) 301 return -ENODEV; 302 303 mac_ops = cgx_dev->mac_ops; 304 /* Get available index where entry is to be installed */ 305 idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); 306 if (idx < 0) 307 return idx; 308 309 id = get_sequence_id_of_lmac(cgx_dev, lmac_id); 310 311 index = id * lmac->mac_to_index_bmap.max + idx; 312 313 cfg = ether_addr_to_u64(mac_addr); 314 cfg |= CGX_DMAC_CAM_ADDR_ENABLE; 315 cfg |= ((u64)lmac_id << 49); 316 cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); 317 318 cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); 319 cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); 320 321 if (is_multicast_ether_addr(mac_addr)) { 322 cfg &= ~GENMASK_ULL(2, 1); 323 cfg |= CGX_DMAC_MCAST_MODE_CAM; 324 lmac->mcast_filters_count++; 325 } else if (!lmac->mcast_filters_count) { 326 cfg |= CGX_DMAC_MCAST_MODE; 327 } 328 329 cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); 330 331 return idx; 332 } 333 334 int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) 335 { 336 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 337 struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); 338 struct mac_ops *mac_ops; 339 u8 index = 0, id; 340 u64 cfg; 341 342 if (!lmac) 343 return -ENODEV; 344 345 mac_ops = cgx_dev->mac_ops; 346 /* Restore index 0 to its default init value as done during 347 * cgx_lmac_init 348 */ 349 set_bit(0, lmac->mac_to_index_bmap.bmap); 350 351 id = get_sequence_id_of_lmac(cgx_dev, lmac_id); 352 353 index = id * 
lmac->mac_to_index_bmap.max + index; 354 cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); 355 356 /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ 357 cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); 358 cfg &= ~CGX_DMAC_CAM_ACCEPT; 359 cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); 360 cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); 361 362 return 0; 363 } 364 365 /* Allows caller to change macaddress associated with index 366 * in dmac filter table including index 0 reserved for 367 * interface mac address 368 */ 369 int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) 370 { 371 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 372 struct mac_ops *mac_ops; 373 struct lmac *lmac; 374 u64 cfg; 375 int id; 376 377 lmac = lmac_pdata(lmac_id, cgx_dev); 378 if (!lmac) 379 return -ENODEV; 380 381 mac_ops = cgx_dev->mac_ops; 382 /* Validate the index */ 383 if (index >= lmac->mac_to_index_bmap.max) 384 return -EINVAL; 385 386 /* ensure index is already set */ 387 if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) 388 return -EINVAL; 389 390 id = get_sequence_id_of_lmac(cgx_dev, lmac_id); 391 392 index = id * lmac->mac_to_index_bmap.max + index; 393 394 cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); 395 cfg &= ~CGX_RX_DMAC_ADR_MASK; 396 cfg |= ether_addr_to_u64(mac_addr); 397 398 cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); 399 return 0; 400 } 401 402 int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) 403 { 404 struct cgx *cgx_dev = cgx_get_pdata(cgx_id); 405 struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); 406 struct mac_ops *mac_ops; 407 u8 mac[ETH_ALEN]; 408 u64 cfg; 409 int id; 410 411 if (!lmac) 412 return -ENODEV; 413 414 mac_ops = cgx_dev->mac_ops; 415 /* Validate the index */ 416 if (index >= lmac->mac_to_index_bmap.max) 417 return -EINVAL; 418 419 /* Skip deletion for reserved index i.e. 
index 0 */
	if (index == 0)
		return 0;

	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	/* Read MAC address to check whether it is ucast or mcast */
	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

	u64_to_ether_addr(cfg, mac);
	if (is_multicast_ether_addr(mac))
		lmac->mcast_filters_count--;

	if (!lmac->mcast_filters_count) {
		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE;
		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
	}

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

	if (lmac)
		return lmac->mac_to_index_bmap.max;

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index;
	u64 cfg;
	int id;

	mac_ops = cgx_dev->mac_ops;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
	return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u8 num_lmacs;
	u32 fifo_len;

	fifo_len = cgx->mac_ops->fifo_len;
	num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);

	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		/* LMAC0 gets half of the FIFO, rest 1/4th */
		if (lmac_id == 0)
			return fifo_len / 2;
		return fifo_len / 4;
	case 4:
	default:
		return fifo_len / 4;
	}
	return 0;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	lmac = lmac_pdata(lmac_id, cgx);
	if (lmac->lmac_type == LMAC_MODE_SGMII ||
	    lmac->lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
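	/* Note on the promiscuous-mode handling below: it works purely
	 * through the DMAC filter CAM. Enabling promiscuous mode clears
	 * CGX_DMAC_CAM_ADDR_ENABLE on every CAM entry in this LMAC's
	 * window; disabling it re-enables only the entries that still
	 * hold an address.
	 */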
struct lmac *lmac = lmac_pdata(lmac_id, cgx); 558 struct mac_ops *mac_ops; 559 u16 max_dmac; 560 int index, i; 561 u64 cfg = 0; 562 int id; 563 564 if (!cgx || !lmac) 565 return; 566 567 max_dmac = lmac->mac_to_index_bmap.max; 568 id = get_sequence_id_of_lmac(cgx, lmac_id); 569 570 mac_ops = cgx->mac_ops; 571 if (enable) { 572 /* Enable promiscuous mode on LMAC */ 573 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); 574 cfg &= ~CGX_DMAC_CAM_ACCEPT; 575 cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); 576 cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); 577 578 for (i = 0; i < max_dmac; i++) { 579 index = id * max_dmac + i; 580 cfg = cgx_read(cgx, 0, 581 (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); 582 cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; 583 cgx_write(cgx, 0, 584 (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); 585 } 586 } else { 587 /* Disable promiscuous mode */ 588 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); 589 cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; 590 cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); 591 for (i = 0; i < max_dmac; i++) { 592 index = id * max_dmac + i; 593 cfg = cgx_read(cgx, 0, 594 (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); 595 if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { 596 cfg |= CGX_DMAC_CAM_ADDR_ENABLE; 597 cgx_write(cgx, 0, 598 (CGXX_CMRX_RX_DMAC_CAM0 + 599 index * 0x8), 600 cfg); 601 } 602 } 603 } 604 } 605 606 static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id, 607 u8 *tx_pause, u8 *rx_pause) 608 { 609 struct cgx *cgx = cgxd; 610 u64 cfg; 611 612 if (is_dev_rpm(cgx)) 613 return 0; 614 615 if (!is_lmac_valid(cgx, lmac_id)) 616 return -ENODEV; 617 618 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 619 *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK); 620 621 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); 622 *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV); 623 return 0; 624 } 625 626 /* Enable or disable forwarding received pause frames to Tx block */ 627 void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable) 628 { 629 struct cgx *cgx = cgxd; 630 u8 rx_pause, tx_pause; 631 bool is_pfc_enabled; 632 struct lmac *lmac; 633 u64 cfg; 634 635 if (!cgx) 636 return; 637 638 lmac = lmac_pdata(lmac_id, cgx); 639 if (!lmac) 640 return; 641 642 /* Pause frames are not enabled just return */ 643 if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) 644 return; 645 646 cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause); 647 is_pfc_enabled = rx_pause ? 
false : true; 648 649 if (enable) { 650 if (!is_pfc_enabled) { 651 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); 652 cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; 653 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); 654 655 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 656 cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK; 657 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); 658 } else { 659 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); 660 cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN; 661 cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); 662 } 663 } else { 664 665 if (!is_pfc_enabled) { 666 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); 667 cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; 668 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); 669 670 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 671 cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; 672 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); 673 } else { 674 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); 675 cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN; 676 cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); 677 } 678 } 679 } 680 681 int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) 682 { 683 struct cgx *cgx = cgxd; 684 685 if (!is_lmac_valid(cgx, lmac_id)) 686 return -ENODEV; 687 *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); 688 return 0; 689 } 690 691 int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat) 692 { 693 struct cgx *cgx = cgxd; 694 695 if (!is_lmac_valid(cgx, lmac_id)) 696 return -ENODEV; 697 *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); 698 return 0; 699 } 700 701 u64 cgx_features_get(void *cgxd) 702 { 703 return ((struct cgx *)cgxd)->hw_features; 704 } 705 706 int cgx_stats_reset(void *cgxd, int lmac_id) 707 { 708 struct cgx *cgx = cgxd; 709 int stat_id; 710 711 if (!is_lmac_valid(cgx, lmac_id)) 712 return -ENODEV; 713 714 for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) { 715 if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX) 716 /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ 717 cgx_write(cgx, 0, 718 (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0); 719 else 720 cgx_write(cgx, lmac_id, 721 (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0); 722 } 723 724 for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++) 725 cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0); 726 727 return 0; 728 } 729 730 static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo) 731 { 732 if (!linfo->fec) 733 return 0; 734 735 switch (linfo->lmac_type_id) { 736 case LMAC_MODE_SGMII: 737 case LMAC_MODE_XAUI: 738 case LMAC_MODE_RXAUI: 739 case LMAC_MODE_QSGMII: 740 return 0; 741 case LMAC_MODE_10G_R: 742 case LMAC_MODE_25G_R: 743 case LMAC_MODE_100G_R: 744 case LMAC_MODE_USXGMII: 745 return 1; 746 case LMAC_MODE_40G_R: 747 return 4; 748 case LMAC_MODE_50G_R: 749 if (linfo->fec == OTX2_FEC_BASER) 750 return 2; 751 else 752 return 1; 753 default: 754 return 0; 755 } 756 } 757 758 int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp) 759 { 760 int stats, fec_stats_count = 0; 761 int corr_reg, uncorr_reg; 762 struct cgx *cgx = cgxd; 763 764 if (!is_lmac_valid(cgx, lmac_id)) 765 return -ENODEV; 766 767 if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) 768 return 0; 769 770 fec_stats_count = 771 cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info); 772 if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { 773 corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS; 774 uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS; 775 } else { 776 
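		/* Not BASE-R, so RS-FEC is in use: the corrected and
		 * uncorrected block counters are read from the RSFEC
		 * CSRs instead.
		 */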
corr_reg = CGXX_SPUX_RSFEC_CORR; 777 uncorr_reg = CGXX_SPUX_RSFEC_UNCORR; 778 } 779 for (stats = 0; stats < fec_stats_count; stats++) { 780 rsp->fec_corr_blks += 781 cgx_read(cgx, lmac_id, corr_reg + (stats * 8)); 782 rsp->fec_uncorr_blks += 783 cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8)); 784 } 785 return 0; 786 } 787 788 int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) 789 { 790 struct cgx *cgx = cgxd; 791 u64 cfg; 792 793 if (!is_lmac_valid(cgx, lmac_id)) 794 return -ENODEV; 795 796 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); 797 if (enable) 798 cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN; 799 else 800 cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN); 801 cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); 802 return 0; 803 } 804 805 int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) 806 { 807 struct cgx *cgx = cgxd; 808 u64 cfg, last; 809 810 if (!is_lmac_valid(cgx, lmac_id)) 811 return -ENODEV; 812 813 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); 814 last = cfg; 815 if (enable) 816 cfg |= DATA_PKT_TX_EN; 817 else 818 cfg &= ~DATA_PKT_TX_EN; 819 820 if (cfg != last) 821 cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); 822 return !!(last & DATA_PKT_TX_EN); 823 } 824 825 static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id, 826 u8 tx_pause, u8 rx_pause) 827 { 828 struct cgx *cgx = cgxd; 829 u64 cfg; 830 831 if (is_dev_rpm(cgx)) 832 return 0; 833 834 if (!is_lmac_valid(cgx, lmac_id)) 835 return -ENODEV; 836 837 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); 838 cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; 839 cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0; 840 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); 841 842 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 843 cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; 844 cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0; 845 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); 846 847 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); 848 cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV; 849 cfg |= tx_pause ? 
CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0; 850 cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); 851 852 cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP); 853 if (tx_pause) { 854 cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id); 855 } else { 856 cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id); 857 cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id); 858 } 859 cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg); 860 return 0; 861 } 862 863 static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable) 864 { 865 struct cgx *cgx = cgxd; 866 u64 cfg; 867 868 if (!is_lmac_valid(cgx, lmac_id)) 869 return; 870 871 if (enable) { 872 /* Set pause time and interval */ 873 cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME, 874 DEFAULT_PAUSE_TIME); 875 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL); 876 cfg &= ~0xFFFFULL; 877 cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL, 878 cfg | (DEFAULT_PAUSE_TIME / 2)); 879 880 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME, 881 DEFAULT_PAUSE_TIME); 882 883 cfg = cgx_read(cgx, lmac_id, 884 CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL); 885 cfg &= ~0xFFFFULL; 886 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL, 887 cfg | (DEFAULT_PAUSE_TIME / 2)); 888 } 889 890 /* ALL pause frames received are completely ignored */ 891 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 892 cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; 893 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); 894 895 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); 896 cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; 897 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); 898 899 /* Disable pause frames transmission */ 900 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL); 901 cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV; 902 cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg); 903 904 cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP); 905 cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id); 906 cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id); 907 cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg); 908 909 /* Disable all PFC classes by default */ 910 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); 911 cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg); 912 cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); 913 } 914 915 int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause, 916 int pfvf_idx) 917 { 918 struct cgx *cgx = cgxd; 919 struct lmac *lmac; 920 921 lmac = lmac_pdata(lmac_id, cgx); 922 if (!lmac) 923 return -ENODEV; 924 925 if (!rx_pause) 926 clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); 927 else 928 set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap); 929 930 if (!tx_pause) 931 clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); 932 else 933 set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap); 934 935 /* check if other pfvfs are using flow control */ 936 if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) { 937 dev_warn(&cgx->pdev->dev, 938 "Receive Flow control disable not permitted as its used by other PFVFs\n"); 939 return -EPERM; 940 } 941 942 if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) { 943 dev_warn(&cgx->pdev->dev, 944 "Transmit Flow control disable not permitted as its used by other PFVFs\n"); 945 return -EPERM; 946 } 947 948 return 0; 949 } 950 951 int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, 952 u8 rx_pause, u16 pfc_en) 953 { 954 struct cgx *cgx = cgxd; 955 u64 cfg; 956 957 if (!is_lmac_valid(cgx, lmac_id)) 958 return -ENODEV; 959 960 /* Return as no traffic classes are requested */ 961 if (tx_pause && !pfc_en) 962 return 0; 963 964 cfg = cgx_read(cgx, lmac_id, 
CGXX_SMUX_CBFC_CTL); 965 pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg); 966 967 if (rx_pause) { 968 cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN | 969 CGXX_SMUX_CBFC_CTL_BCK_EN | 970 CGXX_SMUX_CBFC_CTL_DRP_EN); 971 } else { 972 cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN | 973 CGXX_SMUX_CBFC_CTL_BCK_EN | 974 CGXX_SMUX_CBFC_CTL_DRP_EN); 975 } 976 977 if (tx_pause) { 978 cfg |= CGXX_SMUX_CBFC_CTL_TX_EN; 979 cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg); 980 } else { 981 cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN; 982 cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg); 983 } 984 985 cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg); 986 987 /* Write source MAC address which will be filled into PFC packet */ 988 cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id); 989 cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg); 990 991 return 0; 992 } 993 994 int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause, 995 u8 *rx_pause) 996 { 997 struct cgx *cgx = cgxd; 998 u64 cfg; 999 1000 if (!is_lmac_valid(cgx, lmac_id)) 1001 return -ENODEV; 1002 1003 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL); 1004 1005 *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN); 1006 *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN); 1007 1008 return 0; 1009 } 1010 1011 void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) 1012 { 1013 struct cgx *cgx = cgxd; 1014 u64 cfg; 1015 1016 if (!cgx) 1017 return; 1018 1019 if (enable) { 1020 /* Enable inbound PTP timestamping */ 1021 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); 1022 cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; 1023 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); 1024 1025 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 1026 cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE; 1027 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); 1028 } else { 1029 /* Disable inbound PTP stamping */ 1030 cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); 1031 cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE; 1032 cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); 1033 1034 cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); 1035 cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE; 1036 cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg); 1037 } 1038 } 1039 1040 /* CGX Firmware interface low level support */ 1041 int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) 1042 { 1043 struct cgx *cgx = lmac->cgx; 1044 struct device *dev; 1045 int err = 0; 1046 u64 cmd; 1047 1048 /* Ensure no other command is in progress */ 1049 err = mutex_lock_interruptible(&lmac->cmd_lock); 1050 if (err) 1051 return err; 1052 1053 /* Ensure command register is free */ 1054 cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG); 1055 if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) { 1056 err = -EBUSY; 1057 goto unlock; 1058 } 1059 1060 /* Update ownership in command request */ 1061 req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req); 1062 1063 /* Mark this lmac as pending, before we start */ 1064 lmac->cmd_pend = true; 1065 1066 /* Start command in hardware */ 1067 cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req); 1068 1069 /* Ensure command is completed without errors */ 1070 if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend, 1071 msecs_to_jiffies(CGX_CMD_TIMEOUT))) { 1072 dev = &cgx->pdev->dev; 1073 dev_err(dev, "cgx port %d:%d cmd %lld timeout\n", 1074 cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req)); 1075 err = LMAC_AF_ERR_CMD_TIMEOUT; 1076 goto unlock; 1077 } 1078 1079 /* we have a valid command response */ 1080 smp_rmb(); /* Ensure the latest updates are visible */ 1081 *resp = lmac->resp; 1082 1083 unlock: 1084 
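	/* Drop the command lock on every path (success, busy or timeout)
	 * so the next firmware command on this LMAC can proceed.
	 */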
mutex_unlock(&lmac->cmd_lock); 1085 1086 return err; 1087 } 1088 1089 int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id) 1090 { 1091 struct lmac *lmac; 1092 int err; 1093 1094 lmac = lmac_pdata(lmac_id, cgx); 1095 if (!lmac) 1096 return -ENODEV; 1097 1098 err = cgx_fwi_cmd_send(req, resp, lmac); 1099 1100 /* Check for valid response */ 1101 if (!err) { 1102 if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL) 1103 return -EIO; 1104 else 1105 return 0; 1106 } 1107 1108 return err; 1109 } 1110 1111 static int cgx_link_usertable_index_map(int speed) 1112 { 1113 switch (speed) { 1114 case SPEED_10: 1115 return CGX_LINK_10M; 1116 case SPEED_100: 1117 return CGX_LINK_100M; 1118 case SPEED_1000: 1119 return CGX_LINK_1G; 1120 case SPEED_2500: 1121 return CGX_LINK_2HG; 1122 case SPEED_5000: 1123 return CGX_LINK_5G; 1124 case SPEED_10000: 1125 return CGX_LINK_10G; 1126 case SPEED_20000: 1127 return CGX_LINK_20G; 1128 case SPEED_25000: 1129 return CGX_LINK_25G; 1130 case SPEED_40000: 1131 return CGX_LINK_40G; 1132 case SPEED_50000: 1133 return CGX_LINK_50G; 1134 case 80000: 1135 return CGX_LINK_80G; 1136 case SPEED_100000: 1137 return CGX_LINK_100G; 1138 case SPEED_UNKNOWN: 1139 return CGX_LINK_NONE; 1140 } 1141 return CGX_LINK_NONE; 1142 } 1143 1144 static void set_mod_args(struct cgx_set_link_mode_args *args, 1145 u32 speed, u8 duplex, u8 autoneg, u64 mode) 1146 { 1147 /* Fill default values incase of user did not pass 1148 * valid parameters 1149 */ 1150 if (args->duplex == DUPLEX_UNKNOWN) 1151 args->duplex = duplex; 1152 if (args->speed == SPEED_UNKNOWN) 1153 args->speed = speed; 1154 if (args->an == AUTONEG_UNKNOWN) 1155 args->an = autoneg; 1156 args->mode = mode; 1157 args->ports = 0; 1158 } 1159 1160 static void otx2_map_ethtool_link_modes(u64 bitmask, 1161 struct cgx_set_link_mode_args *args) 1162 { 1163 switch (bitmask) { 1164 case ETHTOOL_LINK_MODE_10baseT_Half_BIT: 1165 set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII)); 1166 break; 1167 case ETHTOOL_LINK_MODE_10baseT_Full_BIT: 1168 set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII)); 1169 break; 1170 case ETHTOOL_LINK_MODE_100baseT_Half_BIT: 1171 set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII)); 1172 break; 1173 case ETHTOOL_LINK_MODE_100baseT_Full_BIT: 1174 set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII)); 1175 break; 1176 case ETHTOOL_LINK_MODE_1000baseT_Half_BIT: 1177 set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII)); 1178 break; 1179 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: 1180 set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII)); 1181 break; 1182 case ETHTOOL_LINK_MODE_1000baseX_Full_BIT: 1183 set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX)); 1184 break; 1185 case ETHTOOL_LINK_MODE_10000baseT_Full_BIT: 1186 set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII)); 1187 break; 1188 case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT: 1189 set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C)); 1190 break; 1191 case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT: 1192 set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M)); 1193 break; 1194 case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT: 1195 set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR)); 1196 break; 1197 case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT: 1198 set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C)); 1199 break; 1200 case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT: 1201 set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR)); 1202 break; 1203 case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT: 1204 set_mod_args(args, 
25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR)); 1205 break; 1206 case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT: 1207 set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C)); 1208 break; 1209 case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT: 1210 set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M)); 1211 break; 1212 case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: 1213 set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4)); 1214 break; 1215 case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT: 1216 set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4)); 1217 break; 1218 case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT: 1219 set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C)); 1220 break; 1221 case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT: 1222 set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M)); 1223 break; 1224 case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT: 1225 set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR)); 1226 break; 1227 case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT: 1228 set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR)); 1229 break; 1230 case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT: 1231 set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C)); 1232 break; 1233 case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT: 1234 set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M)); 1235 break; 1236 case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT: 1237 set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4)); 1238 break; 1239 case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT: 1240 set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4)); 1241 break; 1242 default: 1243 set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX)); 1244 break; 1245 } 1246 } 1247 1248 static inline void link_status_user_format(u64 lstat, 1249 struct cgx_link_user_info *linfo, 1250 struct cgx *cgx, u8 lmac_id) 1251 { 1252 linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); 1253 linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); 1254 linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; 1255 linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat); 1256 linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat); 1257 linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat); 1258 1259 if (linfo->lmac_type_id >= LMAC_MODE_MAX) { 1260 dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d", 1261 linfo->lmac_type_id, cgx->cgx_id, lmac_id); 1262 strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type)); 1263 return; 1264 } 1265 1266 strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id], 1267 sizeof(linfo->lmac_type)); 1268 } 1269 1270 /* Hardware event handlers */ 1271 static inline void cgx_link_change_handler(u64 lstat, 1272 struct lmac *lmac) 1273 { 1274 struct cgx_link_user_info *linfo; 1275 struct cgx *cgx = lmac->cgx; 1276 struct cgx_link_event event; 1277 struct device *dev; 1278 int err_type; 1279 1280 dev = &cgx->pdev->dev; 1281 1282 link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id); 1283 err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat); 1284 1285 event.cgx_id = cgx->cgx_id; 1286 event.lmac_id = lmac->lmac_id; 1287 1288 /* update the local copy of link status */ 1289 lmac->link_info = event.link_uinfo; 1290 linfo = &lmac->link_info; 1291 1292 if (err_type == CGX_ERR_SPEED_CHANGE_INVALID) 1293 return; 1294 1295 /* Ensure callback doesn't get unregistered until we finish it */ 1296 spin_lock(&lmac->event_cb_lock); 1297 1298 if (!lmac->event_cb.notify_link_chg) { 1299 dev_dbg(dev, "cgx port %d:%d Link 
change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN ||
	    id == CGX_CMD_MODE_CHANGE)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	u64 event, offset, clear_bit;
	struct lmac *lmac = data;
	struct cgx *cgx;

	cgx = lmac->cgx;

	/* Clear SW_INT for RPM and CMR_INT for CGX */
	offset = cgx->mac_ops->int_register;
	clear_bit = cgx->mac_ops->int_ena_bit;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link changes initiated
		 * from software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
1383 */ 1384 cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); 1385 cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit); 1386 1387 return IRQ_HANDLED; 1388 } 1389 1390 /* APIs for PHY management using CGX firmware interface */ 1391 1392 /* callback registration for hardware events like link change */ 1393 int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) 1394 { 1395 struct cgx *cgx = cgxd; 1396 struct lmac *lmac; 1397 1398 lmac = lmac_pdata(lmac_id, cgx); 1399 if (!lmac) 1400 return -ENODEV; 1401 1402 lmac->event_cb = *cb; 1403 1404 return 0; 1405 } 1406 1407 int cgx_lmac_evh_unregister(void *cgxd, int lmac_id) 1408 { 1409 struct lmac *lmac; 1410 unsigned long flags; 1411 struct cgx *cgx = cgxd; 1412 1413 lmac = lmac_pdata(lmac_id, cgx); 1414 if (!lmac) 1415 return -ENODEV; 1416 1417 spin_lock_irqsave(&lmac->event_cb_lock, flags); 1418 lmac->event_cb.notify_link_chg = NULL; 1419 lmac->event_cb.data = NULL; 1420 spin_unlock_irqrestore(&lmac->event_cb_lock, flags); 1421 1422 return 0; 1423 } 1424 1425 int cgx_get_fwdata_base(u64 *base) 1426 { 1427 u64 req = 0, resp; 1428 struct cgx *cgx; 1429 int first_lmac; 1430 int err; 1431 1432 cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list); 1433 if (!cgx) 1434 return -ENXIO; 1435 1436 first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); 1437 req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req); 1438 err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac); 1439 if (!err) 1440 *base = FIELD_GET(RESP_FWD_BASE, resp); 1441 1442 return err; 1443 } 1444 1445 int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, 1446 int cgx_id, int lmac_id) 1447 { 1448 struct cgx *cgx = cgxd; 1449 u64 req = 0, resp; 1450 1451 if (!cgx) 1452 return -ENODEV; 1453 1454 if (args.mode) 1455 otx2_map_ethtool_link_modes(args.mode, &args); 1456 if (!args.speed && args.duplex && !args.an) 1457 return -EINVAL; 1458 1459 req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req); 1460 req = FIELD_SET(CMDMODECHANGE_SPEED, 1461 cgx_link_usertable_index_map(args.speed), req); 1462 req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req); 1463 req = FIELD_SET(CMDMODECHANGE_AN, args.an, req); 1464 req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req); 1465 req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req); 1466 1467 return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); 1468 } 1469 int cgx_set_fec(u64 fec, int cgx_id, int lmac_id) 1470 { 1471 u64 req = 0, resp; 1472 struct cgx *cgx; 1473 int err = 0; 1474 1475 cgx = cgx_get_pdata(cgx_id); 1476 if (!cgx) 1477 return -ENXIO; 1478 1479 req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req); 1480 req = FIELD_SET(CMDSETFEC, fec, req); 1481 err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); 1482 if (err) 1483 return err; 1484 1485 cgx->lmac_idmap[lmac_id]->link_info.fec = 1486 FIELD_GET(RESP_LINKSTAT_FEC, resp); 1487 return cgx->lmac_idmap[lmac_id]->link_info.fec; 1488 } 1489 1490 int cgx_get_phy_fec_stats(void *cgxd, int lmac_id) 1491 { 1492 struct cgx *cgx = cgxd; 1493 u64 req = 0, resp; 1494 1495 if (!cgx) 1496 return -ENODEV; 1497 1498 req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req); 1499 return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); 1500 } 1501 1502 static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) 1503 { 1504 u64 req = 0; 1505 u64 resp; 1506 1507 if (enable) { 1508 req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); 1509 /* On CN10K firmware offloads link bring up/down operations to ECP 1510 * On Octeontx2 link operations 
are handled by firmware itself 1511 * which can cause mbox errors so configure maximum time firmware 1512 * poll for Link as 1000 ms 1513 */ 1514 if (!is_dev_rpm(cgx)) 1515 req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req); 1516 1517 } else { 1518 req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); 1519 } 1520 return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); 1521 } 1522 1523 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) 1524 { 1525 int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); 1526 u64 req = 0; 1527 1528 req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); 1529 return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac); 1530 } 1531 1532 static int cgx_lmac_verify_fwi_version(struct cgx *cgx) 1533 { 1534 struct device *dev = &cgx->pdev->dev; 1535 int major_ver, minor_ver; 1536 u64 resp; 1537 int err; 1538 1539 if (!cgx->lmac_count) 1540 return 0; 1541 1542 err = cgx_fwi_read_version(&resp, cgx); 1543 if (err) 1544 return err; 1545 1546 major_ver = FIELD_GET(RESP_MAJOR_VER, resp); 1547 minor_ver = FIELD_GET(RESP_MINOR_VER, resp); 1548 dev_dbg(dev, "Firmware command interface version = %d.%d\n", 1549 major_ver, minor_ver); 1550 if (major_ver != CGX_FIRMWARE_MAJOR_VER) 1551 return -EIO; 1552 else 1553 return 0; 1554 } 1555 1556 static void cgx_lmac_linkup_work(struct work_struct *work) 1557 { 1558 struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work); 1559 struct device *dev = &cgx->pdev->dev; 1560 int i, err; 1561 1562 /* Do Link up for all the enabled lmacs */ 1563 for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { 1564 err = cgx_fwi_link_change(cgx, i, true); 1565 if (err) 1566 dev_info(dev, "cgx port %d:%d Link up command failed\n", 1567 cgx->cgx_id, i); 1568 } 1569 } 1570 1571 int cgx_lmac_linkup_start(void *cgxd) 1572 { 1573 struct cgx *cgx = cgxd; 1574 1575 if (!cgx) 1576 return -ENODEV; 1577 1578 queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); 1579 1580 return 0; 1581 } 1582 1583 int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr) 1584 { 1585 struct cgx *cgx = cgxd; 1586 u64 cfg; 1587 1588 if (!is_lmac_valid(cgx, lmac_id)) 1589 return -ENODEV; 1590 1591 /* Resetting PFC related CSRs */ 1592 cfg = 0xff; 1593 cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg); 1594 1595 if (pf_req_flr) 1596 cgx_lmac_internal_loopback(cgxd, lmac_id, false); 1597 return 0; 1598 } 1599 1600 static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac, 1601 int cnt, bool req_free) 1602 { 1603 struct mac_ops *mac_ops = cgx->mac_ops; 1604 u64 offset, ena_bit; 1605 unsigned int irq; 1606 int err; 1607 1608 irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi + 1609 cnt * mac_ops->irq_offset); 1610 offset = mac_ops->int_set_reg; 1611 ena_bit = mac_ops->int_ena_bit; 1612 1613 if (req_free) { 1614 free_irq(irq, lmac); 1615 return 0; 1616 } 1617 1618 err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac); 1619 if (err) 1620 return err; 1621 1622 /* Enable interrupt */ 1623 cgx_write(cgx, lmac->lmac_id, offset, ena_bit); 1624 return 0; 1625 } 1626 1627 int cgx_get_nr_lmacs(void *cgxd) 1628 { 1629 struct cgx *cgx = cgxd; 1630 1631 return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL; 1632 } 1633 1634 u8 cgx_get_lmacid(void *cgxd, u8 lmac_index) 1635 { 1636 struct cgx *cgx = cgxd; 1637 1638 return cgx->lmac_idmap[lmac_index]->lmac_id; 1639 } 1640 1641 unsigned long cgx_get_lmac_bmap(void *cgxd) 1642 { 1643 struct cgx *cgx = cgxd; 1644 1645 return cgx->lmac_bmap; 1646 } 1647 1648 static int cgx_lmac_init(struct cgx *cgx) 
1649 { 1650 struct lmac *lmac; 1651 u64 lmac_list; 1652 int i, err; 1653 1654 /* lmac_list specifies which lmacs are enabled 1655 * when bit n is set to 1, LMAC[n] is enabled 1656 */ 1657 if (cgx->mac_ops->non_contiguous_serdes_lane) { 1658 if (is_dev_rpm2(cgx)) 1659 lmac_list = 1660 cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL; 1661 else 1662 lmac_list = 1663 cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL; 1664 } 1665 1666 if (cgx->lmac_count > cgx->max_lmac_per_mac) 1667 cgx->lmac_count = cgx->max_lmac_per_mac; 1668 1669 for (i = 0; i < cgx->lmac_count; i++) { 1670 lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL); 1671 if (!lmac) 1672 return -ENOMEM; 1673 lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); 1674 if (!lmac->name) { 1675 err = -ENOMEM; 1676 goto err_lmac_free; 1677 } 1678 sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); 1679 if (cgx->mac_ops->non_contiguous_serdes_lane) { 1680 lmac->lmac_id = __ffs64(lmac_list); 1681 lmac_list &= ~BIT_ULL(lmac->lmac_id); 1682 } else { 1683 lmac->lmac_id = i; 1684 } 1685 1686 lmac->cgx = cgx; 1687 lmac->mac_to_index_bmap.max = 1688 cgx->mac_ops->dmac_filter_count / 1689 cgx->lmac_count; 1690 1691 err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); 1692 if (err) 1693 goto err_name_free; 1694 1695 /* Reserve first entry for default MAC address */ 1696 set_bit(0, lmac->mac_to_index_bmap.bmap); 1697 1698 lmac->rx_fc_pfvf_bmap.max = 128; 1699 err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap); 1700 if (err) 1701 goto err_dmac_bmap_free; 1702 1703 lmac->tx_fc_pfvf_bmap.max = 128; 1704 err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap); 1705 if (err) 1706 goto err_rx_fc_bmap_free; 1707 1708 init_waitqueue_head(&lmac->wq_cmd_cmplt); 1709 mutex_init(&lmac->cmd_lock); 1710 spin_lock_init(&lmac->event_cb_lock); 1711 err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false); 1712 if (err) 1713 goto err_bitmap_free; 1714 1715 /* Add reference */ 1716 cgx->lmac_idmap[lmac->lmac_id] = lmac; 1717 set_bit(lmac->lmac_id, &cgx->lmac_bmap); 1718 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); 1719 lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); 1720 } 1721 1722 return cgx_lmac_verify_fwi_version(cgx); 1723 1724 err_bitmap_free: 1725 rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap); 1726 err_rx_fc_bmap_free: 1727 rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap); 1728 err_dmac_bmap_free: 1729 rvu_free_bitmap(&lmac->mac_to_index_bmap); 1730 err_name_free: 1731 kfree(lmac->name); 1732 err_lmac_free: 1733 kfree(lmac); 1734 return err; 1735 } 1736 1737 static int cgx_lmac_exit(struct cgx *cgx) 1738 { 1739 struct lmac *lmac; 1740 int i; 1741 1742 if (cgx->cgx_cmd_workq) { 1743 destroy_workqueue(cgx->cgx_cmd_workq); 1744 cgx->cgx_cmd_workq = NULL; 1745 } 1746 1747 /* Free all lmac related resources */ 1748 for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { 1749 lmac = cgx->lmac_idmap[i]; 1750 if (!lmac) 1751 continue; 1752 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); 1753 cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); 1754 kfree(lmac->mac_to_index_bmap.bmap); 1755 kfree(lmac->name); 1756 kfree(lmac); 1757 } 1758 1759 return 0; 1760 } 1761 1762 static void cgx_populate_features(struct cgx *cgx) 1763 { 1764 u64 cfg; 1765 1766 cfg = cgx_read(cgx, 0, CGX_CONST); 1767 cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); 1768 cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); 1769 1770 if (is_dev_rpm(cgx)) 1771 cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | 1772 RVU_LMAC_FEAT_FC | 
RVU_LMAC_FEAT_PTP); 1773 else 1774 cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | 1775 RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF); 1776 } 1777 1778 static u8 cgx_get_rxid_mapoffset(struct cgx *cgx) 1779 { 1780 if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM || 1781 is_dev_rpm2(cgx)) 1782 return 0x80; 1783 else 1784 return 0x60; 1785 } 1786 1787 static struct mac_ops cgx_mac_ops = { 1788 .name = "cgx", 1789 .csr_offset = 0, 1790 .lmac_offset = 18, 1791 .int_register = CGXX_CMRX_INT, 1792 .int_set_reg = CGXX_CMRX_INT_ENA_W1S, 1793 .irq_offset = 9, 1794 .int_ena_bit = FW_CGX_INT, 1795 .lmac_fwi = CGX_LMAC_FWI, 1796 .non_contiguous_serdes_lane = false, 1797 .rx_stats_cnt = 9, 1798 .tx_stats_cnt = 18, 1799 .dmac_filter_count = 32, 1800 .get_nr_lmacs = cgx_get_nr_lmacs, 1801 .get_lmac_type = cgx_get_lmac_type, 1802 .lmac_fifo_len = cgx_get_lmac_fifo_len, 1803 .mac_lmac_intl_lbk = cgx_lmac_internal_loopback, 1804 .mac_get_rx_stats = cgx_get_rx_stats, 1805 .mac_get_tx_stats = cgx_get_tx_stats, 1806 .get_fec_stats = cgx_get_fec_stats, 1807 .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding, 1808 .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, 1809 .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, 1810 .mac_pause_frm_config = cgx_lmac_pause_frm_config, 1811 .mac_enadis_ptp_config = cgx_lmac_ptp_config, 1812 .mac_rx_tx_enable = cgx_lmac_rx_tx_enable, 1813 .mac_tx_enable = cgx_lmac_tx_enable, 1814 .pfc_config = cgx_lmac_pfc_config, 1815 .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg, 1816 .mac_reset = cgx_lmac_reset, 1817 .mac_stats_reset = cgx_stats_reset, 1818 }; 1819 1820 static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1821 { 1822 struct device *dev = &pdev->dev; 1823 struct cgx *cgx; 1824 int err, nvec; 1825 1826 cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL); 1827 if (!cgx) 1828 return -ENOMEM; 1829 cgx->pdev = pdev; 1830 1831 pci_set_drvdata(pdev, cgx); 1832 1833 /* Use mac_ops to get MAC specific features */ 1834 if (is_dev_rpm(cgx)) 1835 cgx->mac_ops = rpm_get_mac_ops(cgx); 1836 else 1837 cgx->mac_ops = &cgx_mac_ops; 1838 1839 cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx); 1840 1841 err = pci_enable_device(pdev); 1842 if (err) { 1843 dev_err(dev, "Failed to enable PCI device\n"); 1844 pci_set_drvdata(pdev, NULL); 1845 return err; 1846 } 1847 1848 err = pci_request_regions(pdev, DRV_NAME); 1849 if (err) { 1850 dev_err(dev, "PCI request regions failed 0x%x\n", err); 1851 goto err_disable_device; 1852 } 1853 1854 /* MAP configuration registers */ 1855 cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); 1856 if (!cgx->reg_base) { 1857 dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n"); 1858 err = -ENOMEM; 1859 goto err_release_regions; 1860 } 1861 1862 cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); 1863 if (!cgx->lmac_count) { 1864 dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id); 1865 err = -EOPNOTSUPP; 1866 goto err_release_regions; 1867 } 1868 1869 nvec = pci_msix_vec_count(cgx->pdev); 1870 err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); 1871 if (err < 0 || err != nvec) { 1872 dev_err(dev, "Request for %d msix vectors failed, err %d\n", 1873 nvec, err); 1874 goto err_release_regions; 1875 } 1876 1877 cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) 1878 & CGX_ID_MASK; 1879 1880 /* init wq for processing linkup requests */ 1881 INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work); 1882 cgx->cgx_cmd_workq = 
	alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};
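/* Usage note (an assumption, not taken from this file): cgx_driver is left
 * non-static and no module_init()/module_pci_driver() is defined here, so
 * registration is expected to be done by the RVU AF core, for example:
 *
 *	err = pci_register_driver(&cgx_driver);
 *
 * with individual ports then looked up via cgx_get_cgxcnt_max() and
 * cgx_get_pdata().
 */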