// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/bitfield.h>
#include <net/tcp.h>

#include "fbnic.h"
#include "fbnic_mac.h"
#include "fbnic_netdev.h"

static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
			      unsigned int cls, unsigned int readrq)
{
	u32 val = rd32(fbd, offset);

	/* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can
	 * use them when setting either the TDF_CTL or RNI_RBP registers.
	 */
	val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;

	val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
	       FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);

	wr32(fbd, offset, val);
}

static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
			   unsigned int cls, unsigned int mps)
{
	u32 val = rd32(fbd, offset);

	/* Currently all MPS masks are identical so just use the first one */
	val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);

	val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
	       FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);

	wr32(fbd, offset, val);
}

static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
{
	bool override_1k = false;
	int readrq, mps, cls;

	/* All of the values are powers of 2, with 64 encoded as 0. Therefore
	 * we can either divide by 64 in the case of constants, or just
	 * subtract 6 from the log2 of the value, in order to get the value
	 * we will be programming into the registers.
	 */
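	/* Note: encodings 0-3 cover read request sizes of 64B through 512B.
	 * Anything larger cannot be encoded directly, so flag it here and
	 * set the MRRS_1K override when programming TDE_CTL below.
	 */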
	readrq = ilog2(fbd->readrq) - 6;
	if (readrq > 3)
		override_1k = true;
	readrq = clamp(readrq, 0, 3);

	mps = ilog2(fbd->mps) - 6;
	mps = clamp(mps, 0, 3);

	cls = ilog2(L1_CACHE_BYTES) - 6;
	cls = clamp(cls, 0, 3);

	/* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
	fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
	fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);

	/* Configure QM TNI TDE:
	 * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
	 *   buffer capacity to descriptors.
	 * - Max outstanding transactions to 128
	 */
	wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));

	fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
	fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
	fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
}

static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
{
	u32 clock_freq;

	/* Configure TSO behavior */
	wr32(fbd, FBNIC_QM_TQS_CTL0,
	     FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
			FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
	     FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
			FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));

	/* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
	wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);

	/* Configure MTU
	 * Due to a known HW issue we cannot set the MTU to within 16 octets
	 * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
	 * MTU + 1.
	 */
	wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
	wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
	     FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
			FBNIC_MAX_JUMBO_FRAME_SIZE + 1));

	clock_freq = FBNIC_CLOCK_FREQ;

	/* Be aggressive on the timings. We will have the interrupt
	 * threshold timer tick once every 1 usec and coalesce writes for
	 * up to 80 usecs.
	 */
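	/* With clock_freq in Hz, dividing by 10^6 gives the cycles in
	 * 1 usec and dividing by 12500 gives the cycles in 80 usec
	 * (1/12500 s).
	 */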
	wr32(fbd, FBNIC_QM_TCQ_CTL0,
	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
			clock_freq / 1000000) |
	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
			clock_freq / 12500));

	/* We will have the interrupt threshold timer tick once every
	 * 1 usec and coalesce writes for up to 2 usecs.
	 */
	wr32(fbd, FBNIC_QM_RCQ_CTL0,
	     FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
			clock_freq / 1000000) |
	     FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
			clock_freq / 500000));

	/* Configure spacer control to 64 beats. */
	wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
	     FBNIC_FAB_AXI4_AR_SPACER_MASK |
	     FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
}

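/* Per-FIFO enable bitmasks; bit i corresponds to entry i of the
 * fifo_config[] table below.
 */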
#define FBNIC_DROP_EN_MASK	0x7d
#define FBNIC_PAUSE_EN_MASK	0x14
#define FBNIC_ECN_EN_MASK	0x10

struct fbnic_fifo_config {
	unsigned int addr;
	unsigned int size;
};

/* Rx FIFO Configuration
 * The table consists of 8 entries, of which only 4 are currently used
 * The starting addr is in units of 64B and the size is in 2KB units
 * Here is the human readable version of the table defined below:
 * Function             Addr    Size
 * ----------------------------------
 * Network to Host/BMC  384K    64K
 * Unused
 * Unused
 * Network to BMC       448K    32K
 * Network to Host      0       384K
 * Unused
 * BMC to Host          480K    32K
 * Unused
 */
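/* For example, the first entry below starts at 0x1800 * 64B = 384KB and is
 * 0x20 * 2KB = 64KB in size, matching the first row of the table above.
 */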
static const struct fbnic_fifo_config fifo_config[] = {
	{ .addr = 0x1800, .size = 0x20 },	/* Network to Host/BMC */
	{ },					/* Unused */
	{ },					/* Unused */
	{ .addr = 0x1c00, .size = 0x10 },	/* Network to BMC */
	{ .addr = 0x0000, .size = 0xc0 },	/* Network to Host */
	{ },					/* Unused */
	{ .addr = 0x1e00, .size = 0x10 },	/* BMC to Host */
	{ }					/* Unused */
};

static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
{
	bool rx_enable;
	int i;

	rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
		       FBNIC_RPC_RMI_CONFIG_ENABLE);

	for (i = 0; i < 8; i++) {
		unsigned int size = fifo_config[i].size;

		/* If we are coming up on a system that already has the
		 * Rx data path enabled we don't need to reconfigure the
		 * FIFOs. Instead we can check to verify the values are
		 * large enough to meet our needs, and use the values to
		 * populate the flow control, ECN, and drop thresholds.
		 */
		if (rx_enable) {
			size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
					 rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
			if (size < fifo_config[i].size)
				dev_warn(fbd->dev,
					 "fifo%d size of %d smaller than expected value of %d\n",
					 i, size << 11,
					 fifo_config[i].size << 11);
		} else {
			/* Program RXB cut-through */
			wr32(fbd, FBNIC_RXB_CT_SIZE(i),
			     FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
			     FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));

			/* The packet buffer size has 2KB granularity while
			 * the packet buffer base address has only 64B
			 * granularity
			 */
			wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
			     FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
					fifo_config[i].addr) |
			     FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));

			/* The granularity for the credits is 64B. This is
			 * based on RXB_PBUF_SIZE * 32 + 4.
			 */
			wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
			     FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
					size ? size * 32 + 4 : 0));
		}

		if (!size)
			continue;

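		/* The pause, drop, and ECN thresholds below are in 64B
		 * units: size * 32 converts the 2KB-granular FIFO size,
		 * FBNIC_MAX_JUMBO_FRAME_SIZE / 64 is one jumbo frame, and
		 * the 0x380 pause skid works out to 56KB.
		 */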
		/* Pause is size of FIFO with 56KB skid to start/stop */
		wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
		     !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
				size * 32 - 0x380) |
		     FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));

		/* Enable Drop when only one packet is left in the FIFO */
		wr32(fbd, FBNIC_RXB_DROP_THLD(i),
		     !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
				size * 32 -
				FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
		     FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
				size * 32 -
				FBNIC_MAX_JUMBO_FRAME_SIZE / 64));

		/* Set the ECN bit once 1/4 of the RXB is filled, but no
		 * sooner than when at least one full jumbo frame is buffered
		 */
		wr32(fbd, FBNIC_RXB_ECN_THLD(i),
		     !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
		     FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
				max_t(unsigned int,
				      size * 32 / 4,
				      FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
		     FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
				max_t(unsigned int,
				      size * 32 / 4,
				      FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
	}

	/* For now only enable drop and ECN. We need to add driver/kernel
	 * interfaces for configuring pause.
	 */
	wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
	     FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
			FBNIC_DROP_EN_MASK) |
	     FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
			FBNIC_ECN_EN_MASK));

	/* Program INTF credits */
	wr32(fbd, FBNIC_RXB_INTF_CREDIT,
	     FBNIC_RXB_INTF_CREDIT_MASK0 |
	     FBNIC_RXB_INTF_CREDIT_MASK1 |
	     FBNIC_RXB_INTF_CREDIT_MASK2 |
	     FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));

	/* Configure calendar slots.
	 * Rx: 0 - 62	RDE 1st, BMC 2nd
	 *     63	BMC 1st, RDE 2nd
	 */
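	/* Each 32-bit register covers four slots, so the 16 writes below
	 * program all 64 slots; only the last register carries the
	 * alternate priority value for slot 63.
	 */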
	for (i = 0; i < 16; i++) {
		u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;

		wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
	}

	/* Split the credits for the DRR up as follows:
	 * Quantum0: 8000	Network to Host
	 * Quantum1: 0		Not used
	 * Quantum2: 80		BMC to Host
	 * Quantum3: 0		Not used
	 * Quantum4: 8000	Multicast to Host and BMC
	 */
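	/* The EXT registers supply the upper bits of each quantum, so
	 * quantum0 and quantum4 come out to 0x1f40 (8000) and quantum2 to
	 * 0x50 (80), matching the split described above.
	 */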
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
	wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
	     FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));

	/* Program RXB FCS Endian register */
	wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
}

static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
{
	int i;

	wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);

	/* Configure Tx QM Credits */
	wr32(fbd, FBNIC_QM_TQS_CTL1,
	     FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
	     FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));

	/* Initialize internal Tx queues */
	wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
	wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
	wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
	     FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));

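	/* LSO TCP flag handling: keep FIN and PSH only on the final
	 * segment and CWR only on the first, with IP IDs generated in
	 * incrementing mode.
	 */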
	wr32(fbd, FBNIC_TCE_LSO_CTRL,
	     FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
							 TCPHDR_FIN) |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
							 TCPHDR_CWR |
							 TCPHDR_FIN) |
	     FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
	wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);

	wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
	     FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
			FBNIC_MAX_JUMBO_FRAME_SIZE) |
	     FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
			FBNIC_MAX_JUMBO_FRAME_SIZE));
	wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
	     FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
			FBNIC_MAX_JUMBO_FRAME_SIZE));

	/* Configure calendar slots.
	 * Tx: 0 - 62	TMI 1st, BMC 2nd
	 *     63	BMC 1st, TMI 2nd
	 */
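	/* As on the Rx side, each register programs four slots and only
	 * the final write carries the alternate value for slot 63.
	 */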
	for (i = 0; i < 16; i++) {
		u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;

		wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
	}

	/* Configure DWRR */
	wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
	     FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
	wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
	wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
	wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
	     FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
	wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
	wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
	wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
	     FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));

	/* Configure SOP protocol protection */
	wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
	     FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));

	/* Conservative configuration on MAC interface Start of Packet
	 * protection FIFO. This sets the minimum depth of the FIFO before
	 * we start sending packets to the MAC measured in 64B units and
	 * up to 160 entries deep.
	 *
	 * For the ASIC the clock is fast enough that we will likely fill
	 * the SOP FIFO before the MAC can drain it. So just use a minimum
	 * value of 8.
	 */
	wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);

	wrfl(fbd);
	wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
				      FBNIC_TCE_TXB_CTRL_LOAD);
}

static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
{
	fbnic_mac_init_axi(fbd);
	fbnic_mac_init_qm(fbd);
	fbnic_mac_init_rxb(fbd);
	fbnic_mac_init_txb(fbd);
}

static void __fbnic_mac_stat_rd64(struct fbnic_dev *fbd, bool reset, u32 reg,
				  struct fbnic_stat_counter *stat)
{
	u64 new_reg_value;

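	/* Accumulate the delta since the previous read into the running
	 * counter; when a reset is requested, only re-baseline the saved
	 * register value without adding to the total.
	 */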
	new_reg_value = fbnic_stat_rd64(fbd, reg, 1);
	if (!reset)
		stat->value += new_reg_value - stat->u.old_reg_value_64;
	stat->u.old_reg_value_64 = new_reg_value;
	stat->reported = true;
}

#define fbnic_mac_stat_rd64(fbd, reset, __stat, __CSR) \
	__fbnic_mac_stat_rd64(fbd, reset, FBNIC_##__CSR##_L, &(__stat))

static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
{
	u32 rxb_pause_ctrl;

	/* Enable generation of pause frames if Tx pause is enabled */
	rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
	rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
	if (tx_pause)
		rxb_pause_ctrl |=
			FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
				   FBNIC_PAUSE_EN_MASK);
	wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
}

static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
{
	u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);

	if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
		return FBNIC_LINK_EVENT_DOWN;

	return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
	       FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
}

static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
				       bool tx_pause, bool rx_pause)
{
	/* Enable MAC Promiscuous mode and Tx padding */
	u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
			     FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	/* Disable pause frames if not enabled */
	if (!tx_pause)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
	if (!rx_pause)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;

	/* Disable fault handling if no FEC is requested */
	if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
		command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;

	return command_config;
}

static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u32 pcs_status, lane_mask = ~0;

	pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
	if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
		return false;

	/* Define the expected lane mask for the status bits we need to check */
	switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
	case FBNIC_LINK_100R2:
		lane_mask = 0xf;
		break;
	case FBNIC_LINK_50R1:
		lane_mask = 3;
		break;
	case FBNIC_LINK_50R2:
		switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
		case FBNIC_FEC_OFF:
			lane_mask = 0x63;
			break;
		case FBNIC_FEC_RS:
			lane_mask = 5;
			break;
		case FBNIC_FEC_BASER:
			lane_mask = 0xf;
			break;
		}
		break;
	case FBNIC_LINK_25R1:
		lane_mask = 1;
		break;
	}

	/* Use an XOR to remove the bits we expect to see set */
	switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
	case FBNIC_FEC_OFF:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
				       pcs_status);
		break;
	case FBNIC_FEC_RS:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
				       pcs_status);
		break;
	case FBNIC_FEC_BASER:
		lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
				       rd32(fbd, FBNIC_SIG_PCS_OUT1));
		break;
	}

	/* If every expected bit cancelled out, we have lock on all lanes */
	return !lane_mask;
}

static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
{
	bool link;

	/* Flush status bits to clear possible stale data; the bits should
	 * reset themselves back to 1 if the link is truly up
	 */
	wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
				      FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
				      FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
	wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
	wrfl(fbd);

	/* Clear interrupt state due to recent changes. */
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
	     FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);

	link = fbnic_mac_get_pcs_link_status(fbd);

	/* Enable interrupt to only capture changes in link state */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
	     ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
	wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);

	return link;
}

static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);
	u8 link_mode = fbn->link_mode;
	u8 fec = fbn->fec;

	/* Update FEC first to reflect the FW's current mode */
	if (fbn->fec & FBNIC_FEC_AUTO) {
		switch (fbd->fw_cap.link_fec) {
		case FBNIC_FW_LINK_FEC_NONE:
			fec = FBNIC_FEC_OFF;
			break;
		case FBNIC_FW_LINK_FEC_RS:
			fec = FBNIC_FEC_RS;
			break;
		case FBNIC_FW_LINK_FEC_BASER:
			fec = FBNIC_FEC_BASER;
			break;
		default:
			return;
		}

		fbn->fec = fec;
	}

	/* Do nothing if AUTO mode is not engaged */
	if (fbn->link_mode & FBNIC_LINK_AUTO) {
		switch (fbd->fw_cap.link_speed) {
		case FBNIC_FW_LINK_SPEED_25R1:
			link_mode = FBNIC_LINK_25R1;
			break;
		case FBNIC_FW_LINK_SPEED_50R2:
			link_mode = FBNIC_LINK_50R2;
			break;
		case FBNIC_FW_LINK_SPEED_50R1:
			link_mode = FBNIC_LINK_50R1;
			fec = FBNIC_FEC_RS;
			break;
		case FBNIC_FW_LINK_SPEED_100R2:
			link_mode = FBNIC_LINK_100R2;
			fec = FBNIC_FEC_RS;
			break;
		default:
			return;
		}

		fbn->link_mode = link_mode;
	}
}

static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
{
	/* Mask and clear the PCS interrupt, will be enabled by link handler */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);

	/* Pull in settings from FW */
	fbnic_pcs_get_fw_settings(fbd);

	return 0;
}

static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
{
	/* Mask and clear the PCS interrupt */
	wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
	wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
}

static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
{
	u32 cmd_cfg, mac_ctrl;

	cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
	mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);

	mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
		    FBNIC_SIG_MAC_IN0_RESET_RX_CLK;

	wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
	wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}

static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
				   bool tx_pause, bool rx_pause)
{
	u32 cmd_cfg, mac_ctrl;

	fbnic_mac_tx_pause_config(fbd, tx_pause);

	cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
	mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);

	mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
		      FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
	cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
		   FBNIC_MAC_COMMAND_CONFIG_TX_ENA;

	wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
	wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
}

static void
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
			    struct fbnic_eth_mac_stats *mac_stats)
{
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsReceivedOK,
			    MAC_STAT_RX_BYTE_COUNT);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->AlignmentErrors,
			    MAC_STAT_RX_ALIGN_ERROR);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameTooLongErrors,
			    MAC_STAT_RX_TOOLONG);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesReceivedOK,
			    MAC_STAT_RX_RECEIVED_OK);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FrameCheckSequenceErrors,
			    MAC_STAT_RX_PACKET_BAD_FCS);
	fbnic_mac_stat_rd64(fbd, reset,
			    mac_stats->FramesLostDueToIntMACRcvError,
			    MAC_STAT_RX_IFINERRORS);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesReceivedOK,
			    MAC_STAT_RX_MULTICAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesReceivedOK,
			    MAC_STAT_RX_BROADCAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->OctetsTransmittedOK,
			    MAC_STAT_TX_BYTE_COUNT);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->FramesTransmittedOK,
			    MAC_STAT_TX_TRANSMITTED_OK);
	fbnic_mac_stat_rd64(fbd, reset,
			    mac_stats->FramesLostDueToIntMACXmitError,
			    MAC_STAT_TX_IFOUTERRORS);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->MulticastFramesXmittedOK,
			    MAC_STAT_TX_MULTICAST);
	fbnic_mac_stat_rd64(fbd, reset, mac_stats->BroadcastFramesXmittedOK,
			    MAC_STAT_TX_BROADCAST);
}

static int fbnic_mac_get_sensor_asic(struct fbnic_dev *fbd, int id,
				     long *val)
{
	struct fbnic_fw_completion *fw_cmpl;
	int err = 0, retries = 5;
	s32 *sensor;

	fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!fw_cmpl)
		return -ENOMEM;

	switch (id) {
	case FBNIC_SENSOR_TEMP:
		sensor = &fw_cmpl->u.tsene.millidegrees;
		break;
	case FBNIC_SENSOR_VOLTAGE:
		sensor = &fw_cmpl->u.tsene.millivolts;
		break;
	default:
		err = -EINVAL;
		goto exit_free;
	}

	err = fbnic_fw_xmit_tsene_read_msg(fbd, fw_cmpl);
	if (err) {
		dev_err(fbd->dev,
			"Failed to transmit TSENE read msg, err %d\n",
			err);
		goto exit_free;
	}

	/* Allow 2 seconds for a reply; resend and retry up to 5 times */
	while (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
		retries--;

		if (retries == 0) {
			dev_err(fbd->dev,
				"Timed out waiting for TSENE read\n");
			err = -ETIMEDOUT;
			goto exit_cleanup;
		}

		err = fbnic_fw_xmit_tsene_read_msg(fbd, NULL);
		if (err) {
			dev_err(fbd->dev,
				"Failed to transmit TSENE read msg, err %d\n",
				err);
			goto exit_cleanup;
		}
	}

	/* Handle error returned by firmware */
	if (fw_cmpl->result) {
		err = fw_cmpl->result;
		dev_err(fbd->dev, "%s: Firmware returned error %d\n",
			__func__, err);
		goto exit_cleanup;
	}

	*val = *sensor;
exit_cleanup:
	fbnic_fw_clear_cmpl(fbd, fw_cmpl);
exit_free:
	fbnic_fw_put_cmpl(fw_cmpl);

	return err;
}

static const struct fbnic_mac fbnic_mac_asic = {
	.init_regs = fbnic_mac_init_regs,
	.pcs_enable = fbnic_pcs_enable_asic,
	.pcs_disable = fbnic_pcs_disable_asic,
	.pcs_get_link = fbnic_pcs_get_link_asic,
	.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
	.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
	.link_down = fbnic_mac_link_down_asic,
	.link_up = fbnic_mac_link_up_asic,
	.get_sensor = fbnic_mac_get_sensor_asic,
};

/**
 * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
 * @fbd: Device pointer to device to initialize
 *
 * Return: zero on success, negative on failure
 *
 * Initializes the MAC function pointers and the MAC of the device.
 **/
int fbnic_mac_init(struct fbnic_dev *fbd)
{
	fbd->mac = &fbnic_mac_asic;

	fbd->mac->init_regs(fbd);

	return 0;
}