/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2017 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "osdep.h"
#include "irdma_type.h"
#include "icrdma_hw.h"

void disable_prefetch(struct irdma_hw *hw);

void disable_tx_spad(struct irdma_hw *hw);

void rdpu_ackreqpmthresh(struct irdma_hw *hw);

static u32 icrdma_regs[IRDMA_MAX_REGS] = {
        PFPE_CQPTAIL,
        PFPE_CQPDB,
        PFPE_CCQPSTATUS,
        PFPE_CCQPHIGH,
        PFPE_CCQPLOW,
        PFPE_CQARM,
        PFPE_CQACK,
        PFPE_AEQALLOC,
        PFPE_CQPERRCODES,
        PFPE_WQEALLOC,
        GLINT_DYN_CTL(0),
        ICRDMA_DB_ADDR_OFFSET,

        GLPCI_LBARCTRL,
        GLPE_CPUSTATUS0,
        GLPE_CPUSTATUS1,
        GLPE_CPUSTATUS2,
        PFINT_AEQCTL,
        GLINT_CEQCTL(0),
        VSIQF_PE_CTL1(0),
        PFHMC_PDINV,
        GLHMC_VFPDINV(0),
        GLPE_CRITERR,
        GLINT_RATE(0),
};

static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
        ICRDMA_CCQPSTATUS_CCQP_DONE,
        ICRDMA_CCQPSTATUS_CCQP_ERR,
        ICRDMA_CQPSQ_STAG_PDID,
        ICRDMA_CQPSQ_CQ_CEQID,
        ICRDMA_CQPSQ_CQ_CQID,
        ICRDMA_COMMIT_FPM_CQCNT,
        ICRDMA_CQPSQ_UPESD_HMCFNID,
};

static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
        ICRDMA_CCQPSTATUS_CCQP_DONE_S,
        ICRDMA_CCQPSTATUS_CCQP_ERR_S,
        ICRDMA_CQPSQ_STAG_PDID_S,
        ICRDMA_CQPSQ_CQ_CEQID_S,
        ICRDMA_CQPSQ_CQ_CQID_S,
        ICRDMA_COMMIT_FPM_CQCNT_S,
        ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};
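/*
 * The three tables above let the generation-agnostic irdma code drive
 * gen-2 (icrdma) hardware without compile-time knowledge of its register
 * layout: icrdma_init_hw() below copies them into dev->hw_regs,
 * dev->hw_masks and dev->hw_shifts. A minimal consumption sketch,
 * assuming the index names come from the enums in irdma_type.h
 * (illustrative only, not the actual common-code accessor):
 *
 *      val = readl(dev->hw_regs[IRDMA_CCQPSTATUS]);
 *      done = (val & dev->hw_masks[IRDMA_CCQPSTATUS_CCQP_DONE]) >>
 *             dev->hw_shifts[IRDMA_CCQPSTATUS_CCQP_DONE_S];
 */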
/**
 * icrdma_ena_irq - Enable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void
icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
        u32 val;
        u32 interval = 0;

        if (dev->ceq_itr && dev->aeq->msix_idx != idx)
                interval = dev->ceq_itr >> 1; /* 2 usec units */
        val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, IRDMA_IDX_ITR0) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, true) |
              FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, true);
        writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}

/**
 * icrdma_disable_irq - Disable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void
icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
        writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
}

/**
 * icrdma_cfg_ceq - Configure CEQ interrupt
 * @dev: pointer to the device structure
 * @ceq_id: Completion Event Queue ID
 * @idx: vector index
 * @enable: true to enable, false to disable
 */
static void
icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
               bool enable)
{
        u32 reg_val;

        reg_val = enable ? IRDMA_GLINT_CEQCTL_CAUSE_ENA : 0;
        reg_val |= (idx << IRDMA_GLINT_CEQCTL_MSIX_INDX_S) |
                   IRDMA_GLINT_CEQCTL_ITR_INDX;

        writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
}

static const struct irdma_irq_ops icrdma_irq_ops = {
        .irdma_cfg_aeq = irdma_cfg_aeq,
        .irdma_cfg_ceq = icrdma_cfg_ceq,
        .irdma_dis_irq = icrdma_disable_irq,
        .irdma_en_irq = icrdma_ena_irq,
};

/* { byte offset, bit offset, width mask } of each counter in the
 * gathered hardware statistics buffer
 */
static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
        [IRDMA_HW_STAT_INDEX_RXVLANERR] = {0, 32, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = {8, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = {16, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = {24, 32, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = {24, 0, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = {32, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = {40, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = {48, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = {56, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = {64, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = {72, 32, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = {72, 0, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = {80, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = {88, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = {96, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = {104, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = {112, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = {120, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = {128, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = {136, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = {144, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = {152, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = {160, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = {168, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = {176, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = {184, 32, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = {184, 0, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = {192, 32, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = {200, 32, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = {200, 0, IRDMA_MAX_STATS_24},
        [IRDMA_HW_STAT_INDEX_TCPTXSEG] = {208, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = {216, 32, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = {224, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = {232, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMARXWRS] = {240, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMARXRDS] = {248, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = {256, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMATXWRS] = {264, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMATXRDS] = {272, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = {280, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMAVBND] = {288, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RDMAVINV] = {296, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = {304, 0, IRDMA_MAX_STATS_48},
        [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = {312, 32, IRDMA_MAX_STATS_16},
        [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = {312, 0, IRDMA_MAX_STATS_32},
        [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = {320, 0, IRDMA_MAX_STATS_32},
};
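/*
 * A hedged sketch of how one counter would be extracted from the stats
 * buffer via the map above (the field names byteoff/bitoff/bitmask are
 * an assumption about struct irdma_hw_stat_map in irdma_type.h):
 *
 *      const struct irdma_hw_stat_map *m = &icrdma_hw_stat_map[i];
 *      u64 word = *(u64 *)(stats_buf + m->byteoff);
 *      u64 cnt = (word >> m->bitoff) & m->bitmask;
 */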
/**
 * icrdma_init_hw - initialize gen-2 device registers and attributes
 * @dev: pointer to the device structure
 */
void
icrdma_init_hw(struct irdma_sc_dev *dev)
{
        int i;
        u8 IOMEM *hw_addr;

        for (i = 0; i < IRDMA_MAX_REGS; ++i) {
                hw_addr = dev->hw->hw_addr;

                if (i == IRDMA_DB_ADDR_OFFSET)
                        hw_addr = NULL;

                dev->hw_regs[i] = (u32 IOMEM *)(hw_addr + icrdma_regs[i]);
        }
        dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
        dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;

        for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
                dev->hw_shifts[i] = icrdma_shifts[i];

        for (i = 0; i < IRDMA_MAX_MASKS; ++i)
                dev->hw_masks[i] = icrdma_masks[i];

        dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
        dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
        dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
        dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
        dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
        dev->irq_ops = &icrdma_irq_ops;
        dev->hw_stats_map = icrdma_hw_stat_map;
        dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
        dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
        dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
        dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
        dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;

        dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
        dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
        dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
        dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
        disable_tx_spad(dev->hw);
        disable_prefetch(dev->hw);
        rdpu_ackreqpmthresh(dev->hw);
        dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RELAX_RQ_ORDER;
        dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
                                                IRDMA_FEATURE_CQ_RESIZE;
}

/**
 * irdma_init_config_check - initialize a flow control config check entry
 * @cc: pointer to the config check structure
 * @traffic_class: traffic class this entry tracks
 * @qs_handle: QS handle this entry tracks
 */
void
irdma_init_config_check(struct irdma_config_check *cc, u8 traffic_class,
                        u16 qs_handle)
{
        cc->config_ok = false;
        cc->traffic_class = traffic_class;
        cc->qs_handle = qs_handle;
        cc->lfc_set = 0;
        cc->pfc_set = 0;
}

/**
 * irdma_is_lfc_set - check if link-level flow control is enabled
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */
static bool
irdma_is_lfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
        u32 lfc = 1;
        u8 fn_id = vsi->dev->hmc_fn_id;

        lfc &= (rd32(vsi->dev->hw,
                     PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
        lfc &= (rd32(vsi->dev->hw,
                     PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >> 8);
        lfc &= rd32(vsi->dev->hw,
                    PRTMAC_HSEC_CTL_RX_ENABLE_GPP_0 + 4 * fn_id);

        return lfc != 0;
}

/**
 * irdma_check_tc_has_pfc - check if a traffic class maps to a PFC priority
 * @vsi: pointer to the VSI structure
 * @reg_offset: offset of the TC-to-PFC mapping register
 * @traffic_class: traffic class to check
 */
static bool
irdma_check_tc_has_pfc(struct irdma_sc_vsi *vsi, u64 reg_offset,
                       u16 traffic_class)
{
        u32 value, pfc = 0;
        u32 i;

        value = rd32(vsi->dev->hw, reg_offset);
        /* OR together the TC's PFC bit from each of the four byte lanes */
        for (i = 0; i < 4; i++)
                pfc |= (value >> (8 * i + traffic_class)) & 0x1;

        return pfc != 0;
}
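/*
 * Note on irdma_is_pfc_set() below (inferred from the register usage, not
 * from a datasheet): the PRTMAC_HSEC_CTL_{RX,TX}_PAUSE_ENABLE_0 registers
 * are assumed to carry one priority-pause enable bit per traffic class in
 * their low bits, so shifting by cc->traffic_class and masking BIT(0)
 * tests one direction. PFC counts as set only when both directions are
 * enabled and GLDCB_TC2PFC maps the class to a PFC priority.
 */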
/**
 * irdma_is_pfc_set - check if priority flow control is enabled for the TC
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */
static bool
irdma_is_pfc_set(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
        u32 pause;
        u8 fn_id = vsi->dev->hmc_fn_id;

        pause = (rd32(vsi->dev->hw,
                      PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_0 + 4 * fn_id) >>
                 cc->traffic_class) & BIT(0);
        pause &= (rd32(vsi->dev->hw,
                       PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_0 + 4 * fn_id) >>
                  cc->traffic_class) & BIT(0);

        return irdma_check_tc_has_pfc(vsi, GLDCB_TC2PFC, cc->traffic_class) &&
               pause;
}

/**
 * irdma_is_config_ok - check if either LFC or PFC is enabled for the entry
 * @cc: pointer to the config check structure
 * @vsi: pointer to the VSI structure
 */
bool
irdma_is_config_ok(struct irdma_config_check *cc, struct irdma_sc_vsi *vsi)
{
        cc->lfc_set = irdma_is_lfc_set(cc, vsi);
        cc->pfc_set = irdma_is_pfc_set(cc, vsi);

        cc->config_ok = cc->lfc_set || cc->pfc_set;

        return cc->config_ok;
}

#define IRDMA_RCV_WND_NO_FC     65536
#define IRDMA_RCV_WND_FC        65536

#define IRDMA_CWND_NO_FC        0x1
#define IRDMA_CWND_FC           0x18

#define IRDMA_RTOMIN_NO_FC      0x5
#define IRDMA_RTOMIN_FC         0x32

#define IRDMA_ACKCREDS_NO_FC    0x02
#define IRDMA_ACKCREDS_FC       0x06

/**
 * irdma_check_flow_ctrl - log the flow control state once per traffic class
 * @vsi: pointer to the VSI structure
 * @user_prio: user priority being checked
 * @traffic_class: traffic class the priority maps to
 */
static void
irdma_check_flow_ctrl(struct irdma_sc_vsi *vsi, u8 user_prio, u8 traffic_class)
{
        struct irdma_config_check *cfg_chk = &vsi->cfg_check[user_prio];
        bool config_ok = irdma_is_config_ok(cfg_chk, vsi);

        if (vsi->tc_print_warning[traffic_class]) {
                irdma_pr_info("INFO: Flow control is %s for this traffic class (%d) on this vsi.\n",
                              config_ok ? "enabled" : "disabled",
                              traffic_class);
                vsi->tc_print_warning[traffic_class] = false;
        }
}

/**
 * irdma_check_fc_for_tc_update - re-check flow control after a TC map update
 * @vsi: pointer to the VSI structure
 * @l2params: pointer to the l2 parameters carrying the new UP-to-TC map
 */
void
irdma_check_fc_for_tc_update(struct irdma_sc_vsi *vsi,
                             struct irdma_l2params *l2params)
{
        u8 i;

        for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
                vsi->tc_print_warning[i] = true;

        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];
                u8 tc = l2params->up2tc[i];

                cfg_chk->traffic_class = tc;
                cfg_chk->qs_handle = vsi->qos[i].qs_handle;
                irdma_check_flow_ctrl(vsi, i, tc);
        }
}

/**
 * irdma_check_fc_for_qp - check flow control for the QP's QS handle
 * @vsi: pointer to the VSI structure
 * @sc_qp: pointer to the QP structure
 */
void
irdma_check_fc_for_qp(struct irdma_sc_vsi *vsi, struct irdma_sc_qp *sc_qp)
{
        u8 i;

        for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
                struct irdma_config_check *cfg_chk = &vsi->cfg_check[i];

                irdma_init_config_check(cfg_chk,
                                        vsi->qos[i].traffic_class,
                                        vsi->qos[i].qs_handle);
                if (sc_qp->qs_handle == cfg_chk->qs_handle)
                        irdma_check_flow_ctrl(vsi, i, cfg_chk->traffic_class);
        }
}

/* index/data pair for indirect access to the WQM TX config words */
#define GLPE_WQMTXIDXADDR       0x50E000
#define GLPE_WQMTXIDXDATA       0x50E004

/**
 * disable_prefetch - clear the prefetch enable bit in the WQM TX config
 * @hw: pointer to the HW structure
 */
void
disable_prefetch(struct irdma_hw *hw)
{
        u32 wqm_data;

        /* select WQM TX config word 0x12, then read-modify-write bit 0 */
        wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
        irdma_mb();

        wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
        wqm_data &= ~(1);
        wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}

/**
 * disable_tx_spad - clear the TX spad enable bit in the WQM TX config
 * @hw: pointer to the HW structure
 */
void
disable_tx_spad(struct irdma_hw *hw)
{
        u32 wqm_data;

        /* same config word as disable_prefetch(); here clear bit 3 */
        wr32(hw, GLPE_WQMTXIDXADDR, 0x12);
        irdma_mb();

        wqm_data = rd32(hw, GLPE_WQMTXIDXDATA);
        wqm_data &= ~(1 << 3);
        wr32(hw, GLPE_WQMTXIDXDATA, wqm_data);
}

#define GL_RDPU_CNTRL           0x52054

/**
 * rdpu_ackreqpmthresh - set the RDPU ack request threshold
 * @hw: pointer to the HW structure
 */
void
rdpu_ackreqpmthresh(struct irdma_hw *hw)
{
        u32 val;

        val = rd32(hw, GL_RDPU_CNTRL);
        /* replace the 6-bit field at bits [15:10] with the value 3 */
        val &= ~(0x3f << 10);
        val |= (3 << 10);
        wr32(hw, GL_RDPU_CNTRL, val);
}
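/*
 * The two WQM tweaks above share one indirect access pattern; a minimal
 * sketch of a common helper (hypothetical, not part of this driver):
 *
 *      static void wqm_tx_clear_bit(struct irdma_hw *hw, u32 word, u32 bit)
 *      {
 *              wr32(hw, GLPE_WQMTXIDXADDR, word);
 *              irdma_mb();
 *              wr32(hw, GLPE_WQMTXIDXDATA,
 *                   rd32(hw, GLPE_WQMTXIDXDATA) & ~BIT(bit));
 *      }
 */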