// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <net/tcp.h>

#include "fbnic.h"
#include "fbnic_mac.h"
#include "fbnic_netdev.h"

static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
			      unsigned int cls, unsigned int readrq)
{
	u32 val = rd32(fbd, offset);

	/* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can
	 * use them when setting either the TDF_CTL or RNI_RBP registers.
	 */
	val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;

	val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
	       FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);

	wr32(fbd, offset, val);
}

static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
			   unsigned int cls, unsigned int mps)
{
	u32 val = rd32(fbd, offset);

	/* Currently all MPS masks are identical so just use the first one */
	val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);

	val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
	       FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);

	wr32(fbd, offset, val);
}

static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
{
	bool override_1k = false;
	int readrq, mps, cls;

	/* All of the values are based on being a power of 2 starting
	 * with 64 == 0. Therefore we can either divide by 64 in the
	 * case of constants, or just subtract 6 from the log2 of the value
	 * in order to get the value we will be programming into the
	 * registers.
	 */
	readrq = ilog2(fbd->readrq) - 6;
	if (readrq > 3)
		override_1k = true;
	readrq = clamp(readrq, 0, 3);
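	/* For example, a 512B read request size encodes as
	 * ilog2(512) - 6 = 3, the largest value the field holds; a 1024B
	 * request would encode as 4, so override_1k is set instead and
	 * readrq is clamped back to 3.
	 */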

	mps = ilog2(fbd->mps) - 6;
	mps = clamp(mps, 0, 3);

	cls = ilog2(L1_CACHE_BYTES) - 6;
	cls = clamp(cls, 0, 3);

	/* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
	fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
	fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);

	/* Configure QM TNI TDE:
	 * - Max outstanding AXI beats to 704(768 - 64) - guarantees 8% of
	 *   buffer capacity to descriptors.
	 * - Max outstanding transactions to 128
	 */
	wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));

	fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
	fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
	fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);

	/* Enable XALI AR/AW outbound */
	wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
	     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
	wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
	     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
}

static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
{
	u32 clock_freq;

	/* Configure TSO behavior */
	wr32(fbd, FBNIC_QM_TQS_CTL0,
	     FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
			FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
	     FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
			FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));

	/* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
	wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);

	/* Configure MTU
	 * Due to a known HW issue we cannot set the MTU to within 16 octets
	 * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
	 * MTU + 1.
	 */
	wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
	wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
	     FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
			FBNIC_MAX_JUMBO_FRAME_SIZE + 1));

	clock_freq = FBNIC_CLOCK_FREQ;

	/* Be aggressive on the timings. We will have the interrupt
	 * threshold timer tick once every 1 usec and coalesce writes for
	 * up to 80 usecs.
	 */
	wr32(fbd, FBNIC_QM_TCQ_CTL0,
	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
			clock_freq / 1000000) |
	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
			clock_freq / 12500));

	/* We will have the interrupt threshold timer tick once every
	 * 1 usec and coalesce writes for up to 2 usecs.
	 */
	wr32(fbd, FBNIC_QM_RCQ_CTL0,
	     FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
			clock_freq / 1000000) |
	     FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
			clock_freq / 500000));

	/* Configure spacer control to 64 beats. */
	wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
	     FBNIC_FAB_AXI4_AR_SPACER_MASK |
	     FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
}

#define FBNIC_DROP_EN_MASK	0x7d
#define FBNIC_PAUSE_EN_MASK	0x14
#define FBNIC_ECN_EN_MASK	0x10

struct fbnic_fifo_config {
	unsigned int addr;
	unsigned int size;
};

/* Rx FIFO Configuration
 * The table consists of 8 entries, of which only 4 are currently used.
 * The starting addr is in units of 64B and the size is in 2KB units.
 * Below is the human-readable version of the table that follows:
 *  Function			Addr	Size
 *  ----------------------------------
 *  Network to Host/BMC		384K	64K
 *  Unused
 *  Unused
 *  Network to BMC		448K	32K
 *  Network to Host		0	384K
 *  Unused
 *  BMC to Host			480K	32K
 *  Unused
 */
static const struct fbnic_fifo_config fifo_config[] = {
	{ .addr = 0x1800, .size = 0x20 },	/* Network to Host/BMC */
	{ },					/* Unused */
	{ },					/* Unused */
	{ .addr = 0x1c00, .size = 0x10 },	/* Network to BMC */
	{ .addr = 0x0000, .size = 0xc0 },	/* Network to Host */
	{ },					/* Unused */
	{ .addr = 0x1e00, .size = 0x10 },	/* BMC to Host */
	{ }					/* Unused */
};
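/* The drop/pause/ECN enable masks defined above are per-FIFO bitmaps indexed
 * by the entries in this table: 0x7d covers FIFOs 0 and 2-6, 0x14 covers
 * FIFOs 2 and 4, and 0x10 covers FIFO 4 (Network to Host).
 */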

static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
{
	bool rx_enable;
	int i;

	rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
		       FBNIC_RPC_RMI_CONFIG_ENABLE);

	for (i = 0; i < 8; i++) {
		unsigned int size = fifo_config[i].size;

		/* If we are coming up on a system that already has the
		 * Rx data path enabled we don't need to reconfigure the
		 * FIFOs. Instead we can check to verify the values are
		 * large enough to meet our needs, and use the values to
		 * populate the flow control, ECN, and drop thresholds.
		 */
		if (rx_enable) {
			size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
					 rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
			if (size < fifo_config[i].size)
				dev_warn(fbd->dev,
					 "fifo%d size of %d smaller than expected value of %d\n",
					 i, size << 11,
					 fifo_config[i].size << 11);
		} else {
			/* Program RXB Cut-through */
			wr32(fbd, FBNIC_RXB_CT_SIZE(i),
			     FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
			     FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));

			/* The packet buffer size is programmed in 2KB units
			 * while the packet buffer base address uses a finer
			 * 64B granularity.
			 */
			wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
			     FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
					fifo_config[i].addr) |
			     FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));

			/* The granularity for the credits is 64B. This is
			 * based on RXB_PBUF_SIZE * 32 + 4.
			 */
			wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
			     FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
					size ? size * 32 + 4 : 0));
		}

		if (!size)
			continue;

		/* Pause is size of FIFO with 56KB skid to start/stop */
		wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
		     !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
				size * 32 - 0x380) |
		     FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));

		/* Enable Drop when only one packet is left in the FIFO */
		wr32(fbd, FBNIC_RXB_DROP_THLD(i),
		     !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
				size * 32 -
				FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
		     FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
				size * 32 -
				FBNIC_MAX_JUMBO_FRAME_SIZE / 64));

		/* Enable ECN marking when 1/4 of the RXB is filled, using at
		 * least one full jumbo frame as the minimum threshold.
		 */
		wr32(fbd, FBNIC_RXB_ECN_THLD(i),
		     !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
				max_t(unsigned int,
				      size * 32 / 4,
				      FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
		     FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
				max_t(unsigned int,
				      size * 32 / 4,
				      FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
	}

	/* For now only enable drop and ECN. We need to add driver/kernel
	 * interfaces for configuring pause.
	 */
	wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
	     FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
			FBNIC_DROP_EN_MASK) |
	     FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
			FBNIC_ECN_EN_MASK));

	/* Program INTF credits */
	wr32(fbd, FBNIC_RXB_INTF_CREDIT,
	     FBNIC_RXB_INTF_CREDIT_MASK0 |
	     FBNIC_RXB_INTF_CREDIT_MASK1 |
	     FBNIC_RXB_INTF_CREDIT_MASK2 |
	     FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));

	/* Configure calendar slots.
	 * Rx: 0 - 62	RDE 1st, BMC 2nd
	 *     63	BMC 1st, RDE 2nd
	 */
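	/* Each CLDR_PRIO_CFG register appears to hold four 8-bit calendar
	 * slots, so the 16 registers written below cover the 64 slots
	 * described above; the top byte of the last register (0x1e rather
	 * than 0x1b) flips the priority for slot 63.
	 */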
	for (i = 0; i < 16; i++) {
		u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;

		wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
	}

	/* Split the credits for the DRR up as follows:
	 * Quantum0: 8000	Network to Host
	 * Quantum1: 0		Not used
	 * Quantum2: 80		BMC to Host
	 * Quantum3: 0		Not used
	 * Quantum4: 8000	Multicast to Host and BMC
	 */
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));
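	/* The quantums appear to be split across the base and _EXT
	 * registers: 0x40 in the base with 0x1f in the matching _EXT gives
	 * 0x1f40 == 8000, while 0x50 == 80 for Quantum2, matching the
	 * table above.
	 */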

	/* Program RXB FCS Endian register */
	wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
}

static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
{
	int i;

	wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);

	/* Configure Tx QM Credits */
	wr32(fbd, FBNIC_QM_TQS_CTL1,
	     FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
	     FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));

	/* Initialize internal Tx queues */
	wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
	wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
	wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));

	wr32(fbd, FBNIC_TCE_LSO_CTRL,
	     FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
							 TCPHDR_FIN) |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
							 TCPHDR_CWR |
							 TCPHDR_FIN) |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
	wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);

	wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
	     FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
			FBNIC_MAX_JUMBO_FRAME_SIZE) |
	     FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
			FBNIC_MAX_JUMBO_FRAME_SIZE));
	wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
	     FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
			FBNIC_MAX_JUMBO_FRAME_SIZE));

	/* Configure calendar slots.
	 * Tx: 0 - 62	TMI 1st, BMC 2nd
	 *     63	BMC 1st, TMI 2nd
	 */
	for (i = 0; i < 16; i++) {
		u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;

		wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
	}

	/* Configure DWRR */
	wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
	     FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
	wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
	wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
	     FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
	wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
	wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
	wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));

	/* Configure SOP protocol protection */
	wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));

	/* Conservative configuration on MAC interface Start of Packet
	 * protection FIFO. This sets the minimum depth of the FIFO before
	 * we start sending packets to the MAC measured in 64B units and
	 * up to 160 entries deep.
	 *
	 * For the ASIC the clock is fast enough that we will likely fill
	 * the SOP FIFO before the MAC can drain it. So just use a minimum
	 * value of 8.
	 */
	wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);

	wrfl(fbd);
	wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
				      FBNIC_TCE_TXB_CTRL_LOAD);
}

static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
{
	fbnic_mac_init_axi(fbd);
	fbnic_mac_init_qm(fbd);
	fbnic_mac_init_rxb(fbd);
	fbnic_mac_init_txb(fbd);
}
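/* Read a 64-bit hardware statistics counter and accumulate the change since
 * the last read into the software counter; when reset is set, only snapshot
 * the current register value. The fbnic_mac_stat_rd64() wrapper below
 * appends _L to the CSR name, selecting what appears to be the low register
 * of the 64-bit counter pair.
 */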
static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg,
				  struct fbnic_stat_counter *stat)
{
	u64 new_reg_value;

	new_reg_value = fbnic_stat_rd64(fbd, reg, 1);
	if (!reset)
		stat->value += new_reg_value - stat->u.old_reg_value_64;
	stat->u.old_reg_value_64 = new_reg_value;
	stat->reported = true;
}

#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \
	__fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat))

static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
{
	u32 rxb_pause_ctrl;

	/* Enable generation of pause frames if Tx pause is enabled */
	rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
	rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
	if (tx_pause)
		rxb_pause_ctrl |=
			FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
				   FBNIC_PAUSE_EN_MASK);
	wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
}

static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
{
	u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);

	if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
		return FBNIC_LINK_EVENT_DOWN;

	return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
	       FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
}

static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
				       bool tx_pause, bool rx_pause)
{
	/* Enable MAC Promiscuous mode and Tx padding */
	u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
			     FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	/* Disable pause frames if not enabled */
	if (!tx_pause)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
	if (!rx_pause)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;

	/* Disable fault handling if no FEC is requested */
	if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;

	return command_config;
}

static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u32 pcs_status, lane_mask = ~0;

	pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
	if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
		return false;

	/* Define the expected lane mask for the status bits we need to check */
	switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
	case FBNIC_LINK_100R2:
		lane_mask = 0xf;
		break;
	case FBNIC_LINK_50R1:
		lane_mask = 3;
		break;
	case FBNIC_LINK_50R2:
		switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
		case FBNIC_FEC_OFF:
			lane_mask = 0x63;
			break;
		case FBNIC_FEC_RS:
			lane_mask = 5;
			break;
		case FBNIC_FEC_BASER:
			lane_mask = 0xf;
			break;
		}
		break;
	case FBNIC_LINK_25R1:
		lane_mask = 1;
		break;
	}

	/* Use an XOR to remove the bits we expect to see set */
	switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
	case FBNIC_FEC_OFF:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
				       pcs_status);
		break;
	case FBNIC_FEC_RS:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
				       pcs_status);
		break;
	case FBNIC_FEC_BASER:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
				       rd32(fbd, FBNIC_SIG_PCS_OUT1));
		break;
	}

	/* If all expected bits cancelled out, we have a lock on all lanes */
	return !lane_mask;
}

static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
{
	bool link;

	/* Flush status bits to clear possible stale data; the bits should
	 * reset themselves back to 1 if link is truly up.
	 */
	wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
				      FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
				      FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
	wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
	wrfl(fbd);

	/* Clear interrupt state due to recent changes. */
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
	     FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);

	link = fbnic_mac_get_pcs_link_status(fbd);

	/* Enable interrupt to only capture changes in link state */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
	     ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
	wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);

	return link;
}

static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u8 link_mode = fbn->link_mode;
	u8 fec = fbn->fec;

	/* Update FEC first to reflect the FW's current mode */
	if (fbn->fec & FBNIC_FEC_AUTO) {
		switch (fbd->fw_cap.link_fec) {
		case FBNIC_FW_LINK_FEC_NONE:
			fec = FBNIC_FEC_OFF;
			break;
		case FBNIC_FW_LINK_FEC_RS:
			fec = FBNIC_FEC_RS;
			break;
		case FBNIC_FW_LINK_FEC_BASER:
			fec = FBNIC_FEC_BASER;
			break;
		default:
			return;
		}

		fbn->fec = fec;
	}

	/* Do nothing if AUTO mode is not engaged */
	if (fbn->link_mode & FBNIC_LINK_AUTO) {
		switch (fbd->fw_cap.link_speed) {
		case FBNIC_FW_LINK_SPEED_25R1:
			link_mode = FBNIC_LINK_25R1;
			break;
		case FBNIC_FW_LINK_SPEED_50R2:
			link_mode = FBNIC_LINK_50R2;
			break;
		case FBNIC_FW_LINK_SPEED_50R1:
			link_mode = FBNIC_LINK_50R1;
			fec = FBNIC_FEC_RS;
			break;
		case FBNIC_FW_LINK_SPEED_100R2:
			link_mode = FBNIC_LINK_100R2;
			fec = FBNIC_FEC_RS;
			break;
		default:
			return;
		}

		fbn->link_mode = link_mode;
	}
}

static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
{
	/* Mask and clear the PCS interrupt, will be enabled by link handler */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);

	/* Pull in settings from FW */
	fbnic_pcs_get_fw_settings(fbd);

	return 0;
}

static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
{
	/* Mask and clear the PCS interrupt */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
}
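/* On link down the MAC Tx/Rx clock domains are held in reset and the Rx/Tx
 * enables are left clear in COMMAND_CONFIG; on link up the resets are
 * released and RX_ENA/TX_ENA are set along with the requested pause
 * configuration.
 */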
static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
{
	u32 cmd_cfg, mac_ctrl;

	cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
	mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);

	mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_RX_CLK;

	wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
	wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}

static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
				   bool tx_pause, bool rx_pause)
{
	u32 cmd_cfg, mac_ctrl;

	fbnic_mac_tx_pause_config(fbd, tx_pause);

	cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
	mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);

	mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
	cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
		   FBNIC_MAC_COMMAND_CONFIG_TX_ENA;

	wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
	wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}

static void
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
			    struct fbnic_eth_mac_stats *mac_stats)
{
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
			    MAC_STAT_RX_BYTE_COUNT);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors,
			    MAC_STAT_RX_ALIGN_ERROR);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors,
			    MAC_STAT_RX_TOOLONG);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK,
			    MAC_STAT_RX_RECEIVED_OK);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors,
			    MAC_STAT_RX_PACKET_BAD_FCS);
	fbnic_mac_stat_rd64(fbd, reset,
			    mac_stats->FramesLostDueToIntMACRcvError,
			    MAC_STAT_RX_IFINERRORS);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK,
			    MAC_STAT_RX_MULTICAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK,
			    MAC_STAT_RX_BROADCAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK,
			    MAC_STAT_TX_BYTE_COUNT);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK,
			    MAC_STAT_TX_TRANSMITTED_OK);
	fbnic_mac_stat_rd64(fbd, reset,
			    mac_stats->FramesLostDueToIntMACXmitError,
			    MAC_STAT_TX_IFOUTERRORS);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK,
			    MAC_STAT_TX_MULTICAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK,
			    MAC_STAT_TX_BROADCAST);
}

static const struct fbnic_mac fbnic_mac_asic = {
	.init_regs = fbnic_mac_init_regs,
	.pcs_enable = fbnic_pcs_enable_asic,
	.pcs_disable = fbnic_pcs_disable_asic,
	.pcs_get_link = fbnic_pcs_get_link_asic,
	.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
	.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
	.link_down = fbnic_mac_link_down_asic,
	.link_up = fbnic_mac_link_up_asic,
};

/**
 * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
 * @fbd: Device pointer to device to initialize
 *
 * Return: zero on success, negative on failure
 *
 * Initialize the MAC function pointers and initialize the MAC of
 * the device.
 **/
int fbnic_mac_init(struct fbnic_dev *fbd)
{
	fbd->mac = &fbnic_mac_asic;

	fbd->mac->init_regs(fbd);

	return 0;
}