/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2017 - 2023 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_type.h"
#include "icrdma_hw.h"

void disable_prefetch(struct irdma_hw *hw);

void disable_tx_spad(struct irdma_hw *hw);

void rdpu_ackreqpmthresh(struct irdma_hw *hw);

static u32 icrdma_regs[IRDMA_MAX_REGS] = {
        PFPE_CQPTAIL,
        PFPE_CQPDB,
        PFPE_CCQPSTATUS,
        PFPE_CCQPHIGH,
        PFPE_CCQPLOW,
        PFPE_CQARM,
        PFPE_CQACK,
        PFPE_AEQALLOC,
        PFPE_CQPERRCODES,
        PFPE_WQEALLOC,
        GLINT_DYN_CTL(0),
        ICRDMA_DB_ADDR_OFFSET,

        GLPCI_LBARCTRL,
        GLPE_CPUSTATUS0,
        GLPE_CPUSTATUS1,
        GLPE_CPUSTATUS2,
        PFINT_AEQCTL,
        GLINT_CEQCTL(0),
        VSIQF_PE_CTL1(0),
        PFHMC_PDINV,
        GLHMC_VFPDINV(0),
        GLPE_CRITERR,
        GLINT_RATE(0),
};

static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
        ICRDMA_CCQPSTATUS_CCQP_DONE,
        ICRDMA_CCQPSTATUS_CCQP_ERR,
        ICRDMA_CQPSQ_STAG_PDID,
        ICRDMA_CQPSQ_CQ_CEQID,
        ICRDMA_CQPSQ_CQ_CQID,
        ICRDMA_COMMIT_FPM_CQCNT,
        ICRDMA_CQPSQ_UPESD_HMCFNID,
};

static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
        ICRDMA_CCQPSTATUS_CCQP_DONE_S,
        ICRDMA_CCQPSTATUS_CCQP_ERR_S,
        ICRDMA_CQPSQ_STAG_PDID_S,
        ICRDMA_CQPSQ_CQ_CEQID_S,
        ICRDMA_CQPSQ_CQ_CQID_S,
        ICRDMA_COMMIT_FPM_CQCNT_S,
        ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};

/**
 * icrdma_ena_irq - Enable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void
icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
        u32 val;
        u32 interval = 0;

        if (dev->ceq_itr && dev->aeq->msix_idx != idx)
                interval = dev->ceq_itr >> 1;   /* 2 usec units */
        val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, IRDMA_IDX_ITR0) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, true) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, true);
        writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}

/**
 * icrdma_disable_irq - Disable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void
icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
        writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}

/**
 * icrdma_cfg_ceq - Configure CEQ interrupt
 * @dev: pointer to the device structure
 * @ceq_id: Completion Event Queue ID
 * @idx: vector index
 * @enable: true to enable the CEQ interrupt cause, false to disable it
 */
static void
icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
               bool enable)
{
        u32 reg_val;

        reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA : 0;
        reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
                   IRDMA_GLINT_CEQCTL_ITR_INDX;

        writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}

static const struct irdma_irq_ops icrdma_irq_ops = {
        .irdma_cfg_aeq = irdma_cfg_aeq,
        .irdma_cfg_ceq = icrdma_cfg_ceq,
        .irdma_dis_irq = icrdma_disable_irq,
        .irdma_en_irq = icrdma_ena_irq,
};

static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
        [IRDMA_HW_STAT_INDEX_RXVLANERR] = {0, 32, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = {8, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = {16, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = {24, 32, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = {24, 0, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = {32, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = {40, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = {48, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = {56, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = {64, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = {72, 32, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = {72, 0, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = {80, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = {88, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = {96, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = {104, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = {112, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = {120, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = {128, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = {136, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = {144, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = {152, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = {160, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = {168, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = {176, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = {184, 32, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = {184, 0, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = {192, 32, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = {200, 32, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = {200, 0, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_TCPTXSEG] = {208, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = {216, 32, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = {224, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = {232, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMARXWRS] = {240, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMARXRDS] = {248, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = {256, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMATXWRS] = {264, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMATXRDS] = {272, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = {280, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMAVBND] = {288, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMAVINV] = {296, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = {304, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = {312, 32, IRDMA_MAX_STATS_16},
        [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = {312, 0, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = {320, 0, IRDMA_MAX_STATS_32},
};

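/**
 * icrdma_init_hw - Set up register addresses, masks, shifts and HW attributes
 * @dev: pointer to the device structure
 */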
void
icrdma_init_hw(struct irdma_sc_dev *dev)
{
        int i;
        u8 IOMEM *hw_addr;

        for (i = 0; i < IRDMA_MAX_REGS; ++i) {
                hw_addr = dev->hw->hw_addr;

                if (i == IRDMA_DB_ADDR_OFFSET)
                        hw_addr = NULL;

                dev->hw_regs[i] = (u32 IOMEM *)(hw_addr + icrdma_regs[i]);
        }

        for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
                dev->hw_shifts[i] = icrdma_shifts[i];

        for (i = 0; i < IRDMA_MAX_MASKS; ++i)
                dev->hw_masks[i] = icrdma_masks[i];

        dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
        dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
        dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
        dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
        dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
        dev->irq_ops = &icrdma_irq_ops;
        dev->hw_stats_map = icrdma_hw_stat_map;
        dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
        dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
        dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
        dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
        dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
        dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;

        dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
        dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
        dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
        dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
        disable_tx_spad(dev->hw);
        disable_prefetch(dev->hw);
        rdpu_ackreqpmthresh(dev->hw);
        dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RELAX_RQ_ORDER;
        dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
                                                IRDMA_FEATURE_CQ_RESIZE;
}

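/**
 * irdma_init_config_check - Initialize a flow control config check entry
 * @cc: pointer to the config check structure
 * @traffic_class: traffic class for this entry
 * @qs_handle: QoS queue set handle for this entry
 */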
void
irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class, u16 qs_handle)
{
        cc->config_ok = false;
        cc->traffic_class = traffic_class;
        cc->qs_handle = qs_handle;
        cc->lfc_set = 0;
        cc->pfc_set = 0;
}

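/**
 * irdma_is_lfc_set - Check if link-level flow control is enabled
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 *
 * Return: true if the RX pause, TX pause and RX GPP enable registers
 * all report link flow control enabled for this function.
 */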
static bool
irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
        u32 lfc = 1;
        u8 fn_id = vsi->dev->hmc_fn_id;

        lfc &= (rd32(vsi->dev->hw,
                     PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
        lfc &= (rd32(vsi->dev->hw,
                     PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
        lfc &= rd32(vsi->dev->hw,
                    PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * vsi->dev->hmc_fn_id);

        if (lfc)
                return true;
        return false;
}

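/**
 * irdma_check_tc_has_pfc - Check if PFC is enabled for a traffic class
 * @vsi: pointer to the VSI structure
 * @reg_offset: offset of the TC-to-PFC mapping register
 * @traffic_class: traffic class to check
 */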
static bool
irdma_check_tc_has_pfc(struct irdma_sc_vsi *vsi, u64 reg_offset, u16 traffic_class)
{
        u32 value, pfc = 0;
        u32 i;

        value = rd32(vsi->dev->hw, reg_offset);
        for (i = 0; i < 4; i++)
                pfc |= (value >> (8 * i + traffic_class)) & 0x1;

        if (pfc)
                return true;
        return false;
}

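/**
 * irdma_is_pfc_set - Check if priority flow control is enabled
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */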
static bool
irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
        u32 pause;
        u8 fn_id = vsi->dev->hmc_fn_id;

        pause = (rd32(vsi->dev->hw,
                      PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >>
                 cc->traffic_class) & BIT(0);
        pause &= (rd32(vsi->dev->hw,
                       PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >>
                  cc->traffic_class) & BIT(0);

        return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
               pause;
}

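/**
 * irdma_is_config_ok - Check if flow control is configured for this entry
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 *
 * Return: true if either link or priority flow control is enabled
 */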
bool
irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
        cc->lfc_set = irdma_is_lfc_set(cc, vsi);
        cc->pfc_set = irdma_is_pfc_set(cc, vsi);

        cc->config_ok = cc->lfc_set || cc->pfc_set;

        return cc->config_ok;
}

#define IRDMA_RCV_WND_NO_FC     65536
#define IRDMA_RCV_WND_FC        65536

#define IRDMA_CWND_NO_FC        0x1
#define IRDMA_CWND_FC           0x18

#define IRDMA_RTOMIN_NO_FC      0x5
#define IRDMA_RTOMIN_FC         0x32

#define IRDMA_ACKCREDS_NO_FC    0x02
#define IRDMA_ACKCREDS_FC       0x06

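/**
 * irdma_check_flow_ctrl - Log the flow control state once per traffic class
 * @vsi: pointer to the VSI structure
 * @user_prio: user priority being checked
 * @traffic_class: traffic class mapped to this user priority
 */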
static void
irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
{
        struct irdma_config_check *cfg_chk = &vsi->cfg_check[user_prio];

        if (!irdma_is_config_ok(cfg_chk, vsi)) {
                if (vsi->tc_print_warning[traffic_class]) {
                        irdma_pr_info("INFO: Flow control is disabled for this traffic class (%d) on this vsi.\n", traffic_class);
                        vsi->tc_print_warning[traffic_class] = false;
                }
        } else {
                if (vsi->tc_print_warning[traffic_class]) {
                        irdma_pr_info("INFO: Flow control is enabled for this traffic class (%d) on this vsi.\n", traffic_class);
                        vsi->tc_print_warning[traffic_class] = false;
                }
        }
}

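/**
 * irdma_check_fc_for_tc_update - Re-check flow control after a TC update
 * @vsi: pointer to the VSI structure
 * @l2params: pointer to the L2 parameters with the new UP-to-TC mapping
 */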
void
irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
                             struct irdma_l2params *l2params)
{
        u8 i;

        for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
                vsi->tc_print_warning[i] = true;

        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
                u8 tc = l2params->up2tc[i];

                cfg_chk->traffic_class = tc;
                cfg_chk->qs_handle = vsi->qos[i].qs_handle;
                irdma_check_flow_ctrl(vsi, i, tc);
        }
}

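/**
 * irdma_check_fc_for_qp - Check flow control for the traffic class of a QP
 * @vsi: pointer to the VSI structure
 * @sc_qp: pointer to the QP structure
 */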
void
irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
{
        u8 i;

        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];

                irdma_init_config_check(cfg_chk,
                                        vsi->qos[i].traffic_class,
                                        vsi->qos[i].qs_handle);
                if (sc_qp->qs_handle == cfg_chk->qs_handle)
                        irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
        }
}

#define GLPE_WQMTXIDXADDR       0x50E000
#define GLPE_WQMTXIDXDATA       0x50E004

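/**
 * disable_prefetch - Disable WQM TX prefetch
 * @hw: pointer to the HW structure
 */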
void
disable_prefetch(struct irdma_hw *hw)
{
        u32 wqm_data;

        /* select WQM TX index 0x12 via the index/data register pair */
        wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
        irdma_mb();

        /* clear bit 0 to disable prefetch */
        wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
        wqm_data &= ~(1);
        wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}

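/**
 * disable_tx_spad - Disable the TX scratchpad
 * @hw: pointer to the HW structure
 */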
void
disable_tx_spad(struct irdma_hw *hw)
{
        u32 wqm_data;

        /* select WQM TX index 0x12 via the index/data register pair */
        wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
        irdma_mb();

        /* clear bit 3 to disable the TX scratchpad */
        wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
        wqm_data &= ~(1 << 3);
        wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}

#define GL_RDPU_CNTRL           0x52054
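/**
 * rdpu_ackreqpmthresh - Set the RDPU ACK request threshold
 * @hw: pointer to the HW structure
 */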
void
rdpu_ackreqpmthresh(struct irdma_hw *hw)
{
        u32 val;

        val = rd32(hw, GL_RDPU_CNTRL);
        /* replace the 6-bit threshold field at bits 15:10 with the value 3 */
        val &= ~(0x3f << 10);
        val |= (3 << 10);
        wr32(hw, GL_RDPU_CNTRL, val);
}