// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <net/tcp.h>

#include "fbnic.h"
#include "fbnic_mac.h"
#include "fbnic_netdev.h"

static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
			      unsigned int cls, unsigned int readrq)
{
	u32 val = rd32(fbd, offset);

	/* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can
	 * use them when setting either the TDF_CTL or RNI_RBP registers.
	 */
	val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;

	val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
	       FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);

	wr32(fbd, offset, val);
}

static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
			   unsigned int cls, unsigned int mps)
{
	u32 val = rd32(fbd, offset);

	/* Currently all MPS masks are identical so just use the first one */
	val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);

	val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
	       FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);

	wr32(fbd, offset, val);
}

static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
{
	bool override_1k = false;
	int readrq, mps, cls;

	/* All of the values are based on being a power of 2 starting
	 * with 64 == 0. Therefore we can either divide by 64 in the
	 * case of constants, or just subtract 6 from the log2 of the value
	 * in order to get the value we will be programming into the
	 * registers.
	 */
	readrq = ilog2(fbd->readrq) - 6;
	if (readrq > 3)
		override_1k = true;
	readrq = clamp(readrq, 0, 3);

	mps = ilog2(fbd->mps) - 6;
	mps = clamp(mps, 0, 3);

	cls = ilog2(L1_CACHE_BYTES) - 6;
	cls = clamp(cls, 0, 3);

	/* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
	fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
	fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);

	/* Configure QM TNI TDE:
	 * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
	 *   buffer capacity to descriptors.
	 * - Max outstanding transactions to 128
	 */
	wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));

	fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
	fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
	fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
}
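
/* Example of the power-of-2 encoding used in fbnic_mac_init_axi(): a
 * 512B read request size encodes as ilog2(512) - 6 = 3, while a 1024B
 * request would encode as 4, which sets override_1k before the value is
 * clamped back down to the 3 (512B) register encoding.
 */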

static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
{
	u64 default_meta = FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, ETH_HLEN) |
			   FBNIC_TWD_FLAG_REQ_COMPLETION;
	u32 clock_freq;

	/* Configure default TWQ Metadata descriptor */
	wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_L,
	     lower_32_bits(default_meta));
	wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_H,
	     upper_32_bits(default_meta));

	/* Configure TSO behavior */
	wr32(fbd, FBNIC_QM_TQS_CTL0,
	     FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
			FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
	     FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
			FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));

	/* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
	wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);

	/* Configure MTU
	 * Due to a known HW issue we cannot set the MTU to within 16 octets
	 * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
	 * MTU + 1.
	 */
	wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
	wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
	     FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
			FBNIC_MAX_JUMBO_FRAME_SIZE + 1));

	clock_freq = FBNIC_CLOCK_FREQ;

	/* Be aggressive on the timings. We will have the interrupt
	 * threshold timer tick once every 1 usec and coalesce writes for
	 * up to 80 usecs.
	 */
	wr32(fbd, FBNIC_QM_TCQ_CTL0,
	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
			clock_freq / 1000000) |
	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
			clock_freq / 12500));

	/* We will have the interrupt threshold timer tick once every
	 * 1 usec and coalesce writes for up to 2 usecs.
	 */
	wr32(fbd, FBNIC_QM_RCQ_CTL0,
	     FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
			clock_freq / 1000000) |
	     FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
			clock_freq / 500000));

	/* Configure spacer control to 64 beats. */
	wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
	     FBNIC_FAB_AXI4_AR_SPACER_MASK |
	     FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
}
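
/* The coalescing values programmed in fbnic_mac_init_qm() above follow
 * directly from the clock frequency: clock_freq / 1000000 is the number
 * of cycles in 1 usec, clock_freq / 12500 is the number of cycles in
 * 80 usecs (1/12500 s), and clock_freq / 500000 is the number of cycles
 * in 2 usecs, matching the timings described in the comments.
 */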

#define FBNIC_DROP_EN_MASK	0x7d
#define FBNIC_PAUSE_EN_MASK	0x14
#define FBNIC_ECN_EN_MASK	0x10

struct fbnic_fifo_config {
	unsigned int addr;
	unsigned int size;
};

/* Rx FIFO Configuration
 * The table consists of 8 entries, of which only 4 are currently used.
 * The starting addr is in units of 64B and the size is in 2KB units.
 * The human-readable version of the table below:
 * Function			Addr	Size
 * ----------------------------------
 * Network to Host/BMC		384K	64K
 * Unused
 * Unused
 * Network to BMC		448K	32K
 * Network to Host		0	384K
 * Unused
 * BMC to Host			480K	32K
 * Unused
 */
static const struct fbnic_fifo_config fifo_config[] = {
	{ .addr = 0x1800, .size = 0x20 },	/* Network to Host/BMC */
	{ },					/* Unused */
	{ },					/* Unused */
	{ .addr = 0x1c00, .size = 0x10 },	/* Network to BMC */
	{ .addr = 0x0000, .size = 0xc0 },	/* Network to Host */
	{ },					/* Unused */
	{ .addr = 0x1e00, .size = 0x10 },	/* BMC to Host */
	{ }					/* Unused */
};
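
/* For example, the "Network to Host/BMC" entry above decodes as
 * 0x1800 * 64B = 384K for the starting address and 0x20 * 2KB = 64K
 * for the size, matching the human-readable table.
 */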

static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
{
	bool rx_enable;
	int i;

	rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
		       FBNIC_RPC_RMI_CONFIG_ENABLE);

	for (i = 0; i < 8; i++) {
		unsigned int size = fifo_config[i].size;

		/* If we are coming up on a system that already has the
		 * Rx data path enabled we don't need to reconfigure the
		 * FIFOs. Instead we can check to verify the values are
		 * large enough to meet our needs, and use the values to
		 * populate the flow control, ECN, and drop thresholds.
		 */
		if (rx_enable) {
			size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
					 rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
			if (size < fifo_config[i].size)
				dev_warn(fbd->dev,
					 "fifo%d size of %d smaller than expected value of %d\n",
					 i, size << 11,
					 fifo_config[i].size << 11);
		} else {
			/* Program RXB Cut-through */
			wr32(fbd, FBNIC_RXB_CT_SIZE(i),
			     FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
			     FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));

			/* The packet buffer size is in 2KB units while the
			 * packet buffer base address is in 64B units.
			 */
			wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
			     FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
					fifo_config[i].addr) |
			     FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));

			/* The granularity for the credits is 64B. This is
			 * based on RXB_PBUF_SIZE * 32 + 4.
			 */
			wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
			     FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
					size ? size * 32 + 4 : 0));
		}

		if (!size)
			continue;

		/* Pause is size of FIFO with 56KB skid to start/stop */
		wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
		     !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
				size * 32 - 0x380) |
		     FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));

		/* Enable drop when there is only room for one full jumbo
		 * frame left in the FIFO
		 */
		wr32(fbd, FBNIC_RXB_DROP_THLD(i),
		     !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
				size * 32 -
				FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
		     FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
				size * 32 -
				FBNIC_MAX_JUMBO_FRAME_SIZE / 64));

		/* Enable the ECN bit when 1/4 of the RXB is filled, with
		 * the threshold set to at least one full jumbo frame
		 */
		wr32(fbd, FBNIC_RXB_ECN_THLD(i),
		     !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
				max_t(unsigned int,
				      size * 32 / 4,
				      FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
		     FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
				max_t(unsigned int,
				      size * 32 / 4,
				      FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
	}

	/* For now only enable drop and ECN. We need to add driver/kernel
	 * interfaces for configuring pause.
	 */
	wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
	     FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
			FBNIC_DROP_EN_MASK) |
	     FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
			FBNIC_ECN_EN_MASK));

	/* Program INTF credits */
	wr32(fbd, FBNIC_RXB_INTF_CREDIT,
	     FBNIC_RXB_INTF_CREDIT_MASK0 |
	     FBNIC_RXB_INTF_CREDIT_MASK1 |
	     FBNIC_RXB_INTF_CREDIT_MASK2 |
	     FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));

	/* Configure calendar slots.
	 * Rx: 0 - 62	RDE 1st, BMC 2nd
	 *     63	BMC 1st, RDE 2nd
	 */
	for (i = 0; i < 16; i++) {
		u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;

		wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
	}

	/* Split the credits for the DRR up as follows:
	 *	Quantum0: 8000	Network to Host
	 *	Quantum1: 0	Not used
	 *	Quantum2: 80	BMC to Host
	 *	Quantum3: 0	Not used
	 *	Quantum4: 8000	Multicast to Host and BMC
	 */
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));

	/* Program RXB FCS Endian register */
	wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
}
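
/* Threshold arithmetic for the RXB programming above: the FIFO sizes
 * are in 2KB units, so size * 32 converts them into the 64B units used
 * by the threshold registers. The pause skid of 0x380 works out to
 * 0x380 * 64B = 56KB, and the drop/ECN margin of
 * FBNIC_MAX_JUMBO_FRAME_SIZE / 64 reserves room for one full jumbo
 * frame. The DWRR EXT registers appear to carry the upper bits of each
 * quantum, e.g. 0x1f:0x40 = 0x1f40 = 8000 for the Network to Host
 * quantum, matching the split documented above.
 */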

static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
{
	int i;

	wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);

	/* Configure Tx QM Credits */
	wr32(fbd, FBNIC_QM_TQS_CTL1,
	     FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
	     FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));

	/* Initialize internal Tx queues */
	wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
	wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
	wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));

	wr32(fbd, FBNIC_TCE_LSO_CTRL,
	     FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
							 TCPHDR_FIN) |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
							 TCPHDR_CWR |
							 TCPHDR_FIN) |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
	wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);

	wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
	     FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
			FBNIC_MAX_JUMBO_FRAME_SIZE) |
	     FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
			FBNIC_MAX_JUMBO_FRAME_SIZE));
	wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
	     FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
			FBNIC_MAX_JUMBO_FRAME_SIZE));

	/* Configure calendar slots.
	 * Tx: 0 - 62	TMI 1st, BMC 2nd
	 *     63	BMC 1st, TMI 2nd
	 */
	for (i = 0; i < 16; i++) {
		u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;

		wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
	}

	/* Configure DWRR */
	wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
	     FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
	wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
	wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
	     FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
	wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
	wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
	wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));

	/* Configure SOP protocol protection */
	wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));

	/* Conservative configuration of the MAC interface Start of Packet
	 * protection FIFO. This sets the minimum depth of the FIFO before
	 * we start sending packets to the MAC, measured in 64B units and
	 * up to 160 entries deep.
	 *
	 * For the ASIC the clock is fast enough that we will likely fill
	 * the SOP FIFO before the MAC can drain it. So just use a minimum
	 * value of 8.
	 */
	wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);

	wrfl(fbd);
	wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
				      FBNIC_TCE_TXB_CTRL_LOAD);
}
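
/* With the 64B units noted above, the TMI SOP protection value of 8
 * corresponds to a 512B minimum FIFO depth, well below the 160 entry
 * (10KB) maximum.
 */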

static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
{
	fbnic_mac_init_axi(fbd);
	fbnic_mac_init_qm(fbd);
	fbnic_mac_init_rxb(fbd);
	fbnic_mac_init_txb(fbd);
}

static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg,
				  struct fbnic_stat_counter *stat)
{
	u64 new_reg_value;

	new_reg_value = fbnic_stat_rd64(fbd, reg, 1);
	if (!reset)
		stat->value += new_reg_value - stat->u.old_reg_value_64;
	stat->u.old_reg_value_64 = new_reg_value;
	stat->reported = true;
}

#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \
	__fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat))
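
/* For reference, the wrapper above concatenates the CSR name into its
 * _L register define and passes the counter by address, so a call such
 * as
 *
 *	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
 *			    MAC_STAT_RX_BYTE_COUNT);
 *
 * expands to
 *
 *	__fbnic_mac_stat_rd64(fbd, reset, FBNIC_MAC_STAT_RX_BYTE_COUNT_L,
 *			      &(mac_stats->OctetsReceivedOK));
 */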

static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
{
	u32 rxb_pause_ctrl;

	/* Enable generation of pause frames if Tx pause is enabled */
	rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
	rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
	if (tx_pause)
		rxb_pause_ctrl |=
			FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
				   FBNIC_PAUSE_EN_MASK);
	wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
}

static int fbnic_mac_get_link_event(struct fbnic_dev *fbd)
{
	u32 intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);

	if (intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
		return FBNIC_LINK_EVENT_DOWN;

	return (intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
	       FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
}

static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
				       bool tx_pause, bool rx_pause)
{
	/* Enable MAC Promiscuous mode and Tx padding */
	u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
			     FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	/* Disable pause frames if not enabled */
	if (!tx_pause)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
	if (!rx_pause)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;

	/* Disable fault handling if no FEC is requested */
	if (fbn->fec == FBNIC_FEC_OFF)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;

	return command_config;
}

static bool fbnic_mac_get_link_status(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
	u32 pcs_status, lane_mask = ~0;

	pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
	if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
		return false;

	/* Define the expected lane mask for the status bits we need to check */
	switch (aui) {
	case FBNIC_AUI_100GAUI2:
		lane_mask = 0xf;
		break;
	case FBNIC_AUI_50GAUI1:
		lane_mask = 3;
		break;
	case FBNIC_AUI_LAUI2:
		switch (fec) {
		case FBNIC_FEC_OFF:
			lane_mask = 0x63;
			break;
		case FBNIC_FEC_RS:
			lane_mask = 5;
			break;
		case FBNIC_FEC_BASER:
			lane_mask = 0xf;
			break;
		}
		break;
	case FBNIC_AUI_25GAUI:
		lane_mask = 1;
		break;
	}

	/* Use an XOR to remove the bits we expect to see set */
	switch (fec) {
	case FBNIC_FEC_OFF:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
				       pcs_status);
		break;
	case FBNIC_FEC_RS:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
				       pcs_status);
		break;
	case FBNIC_FEC_BASER:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
				       rd32(fbd, FBNIC_SIG_PCS_OUT1));
		break;
	}

	/* If all expected bits cancelled out we have a lock on all lanes */
	return !lane_mask;
}
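
/* Lock-check example for fbnic_mac_get_link_status(): with
 * FBNIC_AUI_100GAUI2 and RS FEC the expected lane_mask is 0xf, so link
 * is only reported once all four bits of the AMPS_LOCK field are set
 * and the XOR cancels the mask down to zero.
 */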

static bool fbnic_pmd_update_state(struct fbnic_dev *fbd, bool signal_detect)
{
	/* Delay link up for 4 seconds to allow for link training.
	 * The state transitions for this are as follows:
	 *
	 * All states have the following two transitions in common:
	 *	Loss of signal -> FBNIC_PMD_INITIALIZE
	 *		The condition handled below (!signal)
	 *	Reconfiguration -> FBNIC_PMD_INITIALIZE
	 *		Occurs when mac_prepare starts a PHY reconfig
	 * FBNIC_PMD_TRAINING:
	 *	signal still detected && 4s have passed -> Report link up
	 *	When link is brought up in link_up -> FBNIC_PMD_SEND_DATA
	 * FBNIC_PMD_INITIALIZE:
	 *	signal detected -> FBNIC_PMD_TRAINING
	 */
	if (!signal_detect) {
		fbd->pmd_state = FBNIC_PMD_INITIALIZE;
		return false;
	}

	switch (fbd->pmd_state) {
	case FBNIC_PMD_TRAINING:
		return time_before(fbd->end_of_pmd_training, jiffies);
	case FBNIC_PMD_LINK_READY:
	case FBNIC_PMD_SEND_DATA:
		return true;
	}

	fbd->end_of_pmd_training = jiffies + 4 * HZ;

	/* Ensure end_of_pmd_training is visible before the state change */
	smp_wmb();

	fbd->pmd_state = FBNIC_PMD_TRAINING;

	return false;
}

static bool fbnic_mac_get_link(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
	bool link;

	/* Flush status bits to clear possible stale data; the bits should
	 * reset themselves back to 1 if link is truly up.
	 */
	wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
				      FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
				      FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
	wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
	wrfl(fbd);

	/* Clear interrupt state due to recent changes. */
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
	     FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);

	link = fbnic_mac_get_link_status(fbd, aui, fec);
	link = fbnic_pmd_update_state(fbd, link);

	/* Enable interrupt to only capture changes in link state */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
	     ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
	wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);

	return link;
}

void fbnic_mac_get_fw_settings(struct fbnic_dev *fbd, u8 *aui, u8 *fec)
{
	/* Retrieve default speed from FW */
	switch (fbd->fw_cap.link_speed) {
	case FBNIC_FW_LINK_MODE_25CR:
		*aui = FBNIC_AUI_25GAUI;
		break;
	case FBNIC_FW_LINK_MODE_50CR2:
		*aui = FBNIC_AUI_LAUI2;
		break;
	case FBNIC_FW_LINK_MODE_50CR:
		*aui = FBNIC_AUI_50GAUI1;
		*fec = FBNIC_FEC_RS;
		return;
	case FBNIC_FW_LINK_MODE_100CR2:
		*aui = FBNIC_AUI_100GAUI2;
		*fec = FBNIC_FEC_RS;
		return;
	default:
		*aui = FBNIC_AUI_UNKNOWN;
		return;
	}

	/* Update FEC to reflect the FW's current mode */
	switch (fbd->fw_cap.link_fec) {
	case FBNIC_FW_LINK_FEC_NONE:
		*fec = FBNIC_FEC_OFF;
		break;
	case FBNIC_FW_LINK_FEC_RS:
	default:
		*fec = FBNIC_FEC_RS;
		break;
	case FBNIC_FW_LINK_FEC_BASER:
		*fec = FBNIC_FEC_BASER;
		break;
	}
}

static void fbnic_mac_prepare(struct fbnic_dev *fbd, u8 aui, u8 fec)
{
	/* Mask and clear the PCS interrupt; it will be enabled by the
	 * link handler
	 */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);

	/* If we don't have link tear it all down and start over */
	if (!fbnic_mac_get_link_status(fbd, aui, fec))
		fbd->pmd_state = FBNIC_PMD_INITIALIZE;
}

static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
{
	u32 cmd_cfg, mac_ctrl;

	cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
	mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);

	mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_RX_CLK;

	wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
	wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}

static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
				   bool tx_pause, bool rx_pause)
{
	u32 cmd_cfg, mac_ctrl;

	fbnic_mac_tx_pause_config(fbd, tx_pause);

	cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
	mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);

	mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
	cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
		   FBNIC_MAC_COMMAND_CONFIG_TX_ENA;

	wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
	wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}

static void
fbnic_pcs_rsfec_stat_rd32(struct fbnic_dev *fbd, u32 reg, bool reset,
			  struct fbnic_stat_counter *stat)
{
	u32 pcs_rsfec_stat;

	/* The PCS/RSFEC counters are only 16b wide and are split across
	 * two adjacent 32b registers, each of which reads back as
	 * 0x0000xxxx. To make them usable as a full stat we shift the
	 * upper half into bits 31:16 of the lower half.
	 *
	 * Read ordering must be lower reg followed by upper reg.
	 */
	pcs_rsfec_stat = rd32(fbd, reg) & 0xffff;
	pcs_rsfec_stat |= rd32(fbd, reg + 1) << 16;

	/* RSFEC registers clear themselves upon being read so there is no
	 * need to store the old_reg_value.
	 */
	if (!reset)
		stat->value += pcs_rsfec_stat;
}
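
/* For example, if the lower register reads 0x0000abcd and the upper
 * register reads 0x00000012, fbnic_pcs_rsfec_stat_rd32() accumulates
 * a counter value of 0x0012abcd.
 */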

static void
fbnic_mac_get_fec_stats(struct fbnic_dev *fbd, bool reset,
			struct fbnic_fec_stats *s)
{
	fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_CCW_LO(0), reset,
				  &s->corrected_blocks);
	fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_NCCW_LO(0), reset,
				  &s->uncorrectable_blocks);
}

static void
fbnic_mac_get_pcs_stats(struct fbnic_dev *fbd, bool reset,
			struct fbnic_pcs_stats *s)
{
	int i;

	for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
		fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_PCS_SYMBLERR_LO(i), reset,
					  &s->SymbolErrorDuringCarrier.lanes[i]);
}

static void
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
			    struct fbnic_eth_mac_stats *mac_stats)
{
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
			    MAC_STAT_RX_BYTE_COUNT);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors,
			    MAC_STAT_RX_ALIGN_ERROR);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors,
			    MAC_STAT_RX_TOOLONG);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK,
			    MAC_STAT_RX_RECEIVED_OK);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors,
			    MAC_STAT_RX_PACKET_BAD_FCS);
	fbnic_mac_stat_rd64(fbd, reset,
			    mac_stats->FramesLostDueToIntMACRcvError,
			    MAC_STAT_RX_IFINERRORS);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK,
			    MAC_STAT_RX_MULTICAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK,
			    MAC_STAT_RX_BROADCAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK,
			    MAC_STAT_TX_BYTE_COUNT);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK,
			    MAC_STAT_TX_TRANSMITTED_OK);
	fbnic_mac_stat_rd64(fbd, reset,
			    mac_stats->FramesLostDueToIntMACXmitError,
			    MAC_STAT_TX_IFOUTERRORS);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK,
			    MAC_STAT_TX_MULTICAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK,
			    MAC_STAT_TX_BROADCAST);
}

static void
fbnic_mac_get_pause_stats(struct fbnic_dev *fbd, bool reset,
			  struct fbnic_pause_stats *pause_stats)
{
	fbnic_mac_stat_rd64(fbd, reset, pause_stats->tx_pause_frames,
			    MAC_STAT_TX_XOFF_STB);
	fbnic_mac_stat_rd64(fbd, reset, pause_stats->rx_pause_frames,
			    MAC_STAT_RX_XOFF_STB);
}

static void
fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
			     struct fbnic_eth_ctrl_stats *ctrl_stats)
{
	fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesReceived,
			    MAC_STAT_RX_CONTROL_FRAMES);
	fbnic_mac_stat_rd64(fbd, reset, ctrl_stats->MACControlFramesTransmitted,
			    MAC_STAT_TX_CONTROL_FRAMES);
}

static void
fbnic_mac_get_rmon_stats(struct fbnic_dev *fbd, bool reset,
			 struct fbnic_rmon_stats *rmon_stats)
{
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->undersize_pkts,
			    MAC_STAT_RX_UNDERSIZE);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->oversize_pkts,
			    MAC_STAT_RX_OVERSIZE);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->fragments,
			    MAC_STAT_RX_FRAGMENT);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->jabbers,
			    MAC_STAT_RX_JABBER);

	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[0],
			    MAC_STAT_RX_PACKET_64_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[1],
			    MAC_STAT_RX_PACKET_65_127_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[2],
			    MAC_STAT_RX_PACKET_128_255_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[3],
			    MAC_STAT_RX_PACKET_256_511_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[4],
			    MAC_STAT_RX_PACKET_512_1023_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[5],
			    MAC_STAT_RX_PACKET_1024_1518_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[6],
			    RPC_STAT_RX_PACKET_1519_2047_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[7],
			    RPC_STAT_RX_PACKET_2048_4095_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[8],
			    RPC_STAT_RX_PACKET_4096_8191_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[9],
			    RPC_STAT_RX_PACKET_8192_9216_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist[10],
			    RPC_STAT_RX_PACKET_9217_MAX_BYTES);

	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[0],
			    MAC_STAT_TX_PACKET_64_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[1],
			    MAC_STAT_TX_PACKET_65_127_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[2],
			    MAC_STAT_TX_PACKET_128_255_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[3],
			    MAC_STAT_TX_PACKET_256_511_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[4],
			    MAC_STAT_TX_PACKET_512_1023_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[5],
			    MAC_STAT_TX_PACKET_1024_1518_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[6],
			    TMI_STAT_TX_PACKET_1519_2047_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[7],
			    TMI_STAT_TX_PACKET_2048_4095_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[8],
			    TMI_STAT_TX_PACKET_4096_8191_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[9],
			    TMI_STAT_TX_PACKET_8192_9216_BYTES);
	fbnic_mac_stat_rd64(fbd, reset, rmon_stats->hist_tx[10],
			    TMI_STAT_TX_PACKET_9217_MAX_BYTES);
}

static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
				     long *val)
{
	struct fbnic_fw_completion *fw_cmpl;
	int err = 0, retries = 5;
	s32 *sensor;

	fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!fw_cmpl)
		return -ENOMEM;

	switch (id) {
	case FBNIC_SENSOR_TEMP:
		sensor = &fw_cmpl->u.tsene.millidegrees;
		break;
	case FBNIC_SENSOR_VOLTAGE:
		sensor = &fw_cmpl->u.tsene.millivolts;
		break;
	default:
		err = -EINVAL;
		goto exit_free;
	}

	err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
	if (err) {
		dev_err(fbd->dev,
			"Failed to transmit TSENE read msg, err %d\n",
			err);
		goto exit_free;
	}

	/* Allow 2 seconds for a reply; resend and retry up to 5 times */
	while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
		retries--;

		if (retries == 0) {
			dev_err(fbd->dev,
				"Timed out waiting for TSENE read\n");
			err = -ETIMEDOUT;
			goto exit_cleanup;
		}

		err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
		if (err) {
			dev_err(fbd->dev,
				"Failed to transmit TSENE read msg, err %d\n",
				err);
			goto exit_cleanup;
		}
	}

	/* Handle error returned by firmware */
	if (fw_cmpl->result) {
		err = fw_cmpl->result;
		dev_err(fbd->dev, "%s: Firmware returned error %d\n",
			__func__, err);
		goto exit_cleanup;
	}

	*val = *sensor;
exit_cleanup:
	fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
exit_free:
	fbnic_fw_put_cmpl(fw_cmpl);

	return err;
}

static const struct fbnic_mac fbnic_mac_asic = {
	.init_regs = fbnic_mac_init_regs,
	.get_link = fbnic_mac_get_link,
	.get_link_event = fbnic_mac_get_link_event,
	.prepare = fbnic_mac_prepare,
	.get_fec_stats = fbnic_mac_get_fec_stats,
	.get_pcs_stats = fbnic_mac_get_pcs_stats,
	.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
	.get_pause_stats = fbnic_mac_get_pause_stats,
	.get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
	.get_rmon_stats = fbnic_mac_get_rmon_stats,
	.link_down = fbnic_mac_link_down_asic,
	.link_up = fbnic_mac_link_up_asic,
	.get_sensor = fbnic_mac_get_sensor_asic,
};

/**
 * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
 * @fbd: Device pointer to device to initialize
 *
 * Return: zero on success, negative on failure
 *
 * Initialize the MAC function pointers and initialize the MAC of
 * the device.
 **/
int fbnic_mac_init(struct fbnic_dev *fbd)
{
	fbd->mac = &fbnic_mac_asic;

	fbd->mac->init_regs(fbd);

	return 0;
}