/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_tlv.h"

const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
						     0, 0, 0, 0, 0, 0, 0, 0 } };

/* Device */

static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
	u16 pcie_ctl2 = 0;

	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
		return false;

	pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
	return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}

static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
				     char *fw_ver)
{
	struct creq_query_version_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_query_version req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_VERSION,
				 sizeof(req));

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return;
	fw_ver[0] = resp.fw_maj;
	fw_ver[1] = resp.fw_minor;
	fw_ver[2] = resp.fw_bld;
	fw_ver[3] = resp.fw_rsvd;
}

int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
	struct creq_query_func_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct creq_query_func_resp_sb *sb;
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct bnxt_qplib_chip_ctx *cctx;
	struct cmdq_query_func req = {};
	u8 *tqm_alloc;
	int i, rc;
	u32 temp;

	cctx = rcfw->res->cctx;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_FUNC,
				 sizeof(req));

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	/* Extract the context from the side buffer */
	attr->max_qp = le32_to_cpu(sb->max_qp);
	/* max_qp value reported by FW doesn't include the QP1 */
	attr->max_qp += 1;
	attr->max_qp_rd_atom =
		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
	attr->max_qp_init_rd_atom =
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
		/*
		 * 128 WQEs need to be reserved for the HW (8916). Prevent
		 * reporting the max number on legacy devices
		 */
		attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
	}

	/* Adjust max_qp_wqes for variable wqe */
	if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;

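	/*
	 * Max SGEs per WQE: variable-size WQE mode honours the FW-reported
	 * limit (capped at BNXT_VAR_MAX_SGE); static WQE mode is fixed at 6.
	 */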
	attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
			    min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
		attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
	attr->max_cq_sges = attr->max_qp_sges;
	attr->max_mr = le32_to_cpu(sb->max_mr);
	attr->max_mw = le32_to_cpu(sb->max_mw);

	attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
	attr->max_pd = 64 * 1024;
	attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
	attr->max_ah = le32_to_cpu(sb->max_ah);

	attr->max_srq = le16_to_cpu(sb->max_srq);
	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
	attr->max_srq_sges = sb->max_srq_sge;
	attr->max_pkey = 1;
	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
		attr->l2_db_size = (sb->l2_db_space_size + 1) *
				   (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * Read the max gid supported by HW.
	 * For each GID entry in the HW table, the driver consumes 2
	 * GID entries in the kernel GID table. So max_gid reported
	 * to the stack can be up to twice the value reported by the HW,
	 * up to 256 gids.
	 */
	attr->max_sgid = le32_to_cpu(sb->max_gid);
	attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
	attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);

	if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
		attr->max_srq += le16_to_cpu(sb->max_srq_ext);

	bnxt_qplib_query_version(rcfw, attr->fw_ver);

	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
		tqm_alloc = (u8 *)&temp;
		attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
		attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

	if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
		attr->max_dpi = le32_to_cpu(sb->max_dpi);

	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx)
{
	struct creq_set_func_resources_resp resp = {};
	struct cmdq_set_func_resources req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
				 sizeof(req));

	req.number_of_qp = cpu_to_le32(ctx->qpc_count);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
	req.number_of_srq = cpu_to_le32(ctx->srqc_count);
	req.number_of_cq = cpu_to_le32(ctx->cq_count);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		dev_err(&res->pdev->dev, "Failed to set function resources\n");
	}
	return rc;
}

/* SGID */
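/*
 * The sgid_tbl is the driver's shadow of the HW GID table: tbl[] holds the
 * GID/vlan_id pairs and hw_id[] the table index assigned by FW when an
 * entry is added.
 */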
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
			struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
			struct bnxt_qplib_gid *gid)
{
	if (index >= sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"Index %d exceeded SGID table max (%d)\n",
			index, sgid_tbl->max);
		return -EINVAL;
	}
	memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
	return 0;
}

int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int index;

	/* Do we need a sgid_lock here? */
	if (!sgid_tbl->active) {
		dev_err(&res->pdev->dev, "SGID table has no active entries\n");
		return -ENOMEM;
	}
	for (index = 0; index < sgid_tbl->max; index++) {
		if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
		    vlan_id == sgid_tbl->tbl[index].vlan_id)
			break;
	}
	if (index == sgid_tbl->max) {
		dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
		return 0;
	}
	/* Remove GID from the SGID table */
	if (update) {
		struct creq_delete_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_delete_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
					 CMDQ_BASE_OPCODE_DELETE_GID,
					 sizeof(req));
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
			dev_err(&res->pdev->dev,
				"GID entry contains an invalid HW id\n");
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
	sgid_tbl->tbl[index].vlan_id = 0xFFFF;
	sgid_tbl->vlan[index] = 0;
	sgid_tbl->active--;
	dev_dbg(&res->pdev->dev,
		"SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
		index, sgid_tbl->hw_id[index], sgid_tbl->active);
	sgid_tbl->hw_id[index] = (u16)-1;

	/* unlock */
	return 0;
}

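/*
 * Add a GID entry: reject duplicates, pick the first free (zero) slot and,
 * when 'update' is set, program the entry into HW and record the HW table
 * index returned in resp.xid.
 */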
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, const u8 *smac,
			u16 vlan_id, bool update, u32 *index)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx;

	/* Do we need a sgid_lock here? */
	if (sgid_tbl->active == sgid_tbl->max) {
		dev_err(&res->pdev->dev, "SGID table is full\n");
		return -ENOMEM;
	}
	free_idx = sgid_tbl->max;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
		    sgid_tbl->tbl[i].vlan_id == vlan_id) {
			dev_dbg(&res->pdev->dev,
				"SGID entry already exists in entry %d!\n", i);
			*index = i;
			return -EALREADY;
		} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
				   sizeof(bnxt_qplib_gid_zero)) &&
			   free_idx == sgid_tbl->max) {
			free_idx = i;
		}
	}
	if (free_idx == sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"SGID table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	if (update) {
		struct creq_add_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_add_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
					 CMDQ_BASE_OPCODE_ADD_GID,
					 sizeof(req));

		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
		req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
		req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
		/*
		 * The driver should ensure that all RoCE traffic is always
		 * VLAN tagged if RoCE traffic is running on a non-zero VLAN
		 * ID or on a non-zero priority.
		 */
		if ((vlan_id != 0xFFFF) || res->prio) {
			if (vlan_id != 0xFFFF)
				req.vlan = cpu_to_le16
				(vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
			req.vlan |= cpu_to_le16
					(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					 CMDQ_ADD_GID_VLAN_VLAN_EN);
		}

		/* MAC in network format */
		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}
	/* Add GID to the sgid_tbl */
	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
	sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
	sgid_tbl->active++;
	if (vlan_id != 0xFFFF)
		sgid_tbl->vlan[free_idx] = 1;

	dev_dbg(&res->pdev->dev,
		"SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
		free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

	*index = free_idx;
	/* unlock */
	return 0;
}

int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   const u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_gid req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_GID,
				 sizeof(req));

	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		req.vlan |= cpu_to_le16
			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			 CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_ah req = {};
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_AH,
				 sizeof(req));

	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
	req.dgid[0] = cpu_to_le32(temp32[0]);
	req.dgid[1] = cpu_to_le32(temp32[1]);
	req.dgid[2] = cpu_to_le32(temp32[2]);
	req.dgid[3] = cpu_to_le32(temp32[3]);

	req.type = ah->nw_type;
	req.hop_limit = ah->hop_limit;
	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
						   CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
						  CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
	req.pd_id = cpu_to_le32(ah->pd->id);
	req.traffic_class = ah->traffic_class;

	/* MAC in network format */
	memcpy(temp16, ah->dmac, 6);
	req.dest_mac[0] = cpu_to_le16(temp16[0]);
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	return 0;
}

int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			  bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_ah req = {};
	int rc;

	/* Clean up the AH table in the device */
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_AH,
				 sizeof(req));

	req.ah_cid = cpu_to_le32(ah->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

/* MRW */
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct creq_deallocate_key_resp resp = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deallocate_key req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	if (mrw->lkey == 0xFFFFFFFF) {
		dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
		return 0;
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
				 sizeof(req));

	req.mrw_flags = mrw->type;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		req.key = cpu_to_le32(mrw->rkey);
	else
		req.key = cpu_to_le32(mrw->lkey);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	/* Free the qplib's MRW memory */
	if (mrw->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mrw->hwq);

	return 0;
}

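/*
 * Allocate an MR/MW handle in FW. Type 2 memory windows and fast-register
 * PMRs request a consumer-owned key; the xid returned by FW becomes the
 * rkey for window types and the lkey otherwise.
 */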
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_allocate_mrw_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_allocate_mrw req = {};
	unsigned long tmp;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_ALLOCATE_MRW,
				 sizeof(req));

	req.pd_id = cpu_to_le32(mrw->pd->id);
	req.mrw_flags = mrw->type;
	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
	     mrw->access_flags & BNXT_QPLIB_FR_PMR) ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
		req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
	tmp = (unsigned long)mrw;
	req.mrw_handle = cpu_to_le64(tmp);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		mrw->rkey = le32_to_cpu(resp.xid);
	else
		mrw->lkey = le32_to_cpu(resp.xid);
	return 0;
}

int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_deregister_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_deregister_mr req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEREGISTER_MR,
				 sizeof(req));

	req.lkey = cpu_to_le32(mrw->lkey);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	/* Free the qplib's MR memory */
	if (mrw->hwq.max_elements) {
		mrw->va = 0;
		mrw->total_size = 0;
		bnxt_qplib_free_hwq(res, &mrw->hwq);
	}

	return 0;
}

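/*
 * Register an MR with FW: build the PBL hierarchy for the buffer pages when
 * num_pbls is non-zero, then encode the PBL level, buffer page size and PBL
 * page size (as log2 values) into the REGISTER_MR command.
 */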
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_register_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_register_mr req = {};
	int pages, rc;
	u32 pg_size;
	u16 level;

	if (num_pbls) {
		pages = roundup_pow_of_two(num_pbls);
		/* Allocate memory for the non-leaf pages to store buf ptrs.
		 * Non-leaf pages always use the system PAGE_SIZE.
		 */
		/* Free the hwq if it already exists, must be a rereg */
		if (mr->hwq.max_elements)
			bnxt_qplib_free_hwq(res, &mr->hwq);
		hwq_attr.res = res;
		hwq_attr.depth = pages;
		hwq_attr.stride = sizeof(dma_addr_t);
		hwq_attr.type = HWQ_TYPE_MR;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.sginfo->umem = umem;
		hwq_attr.sginfo->npages = pages;
		hwq_attr.sginfo->pgsize = buf_pg_size;
		hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
		if (rc) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR memory allocation failed\n");
			return -ENOMEM;
		}
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_REGISTER_MR,
				 sizeof(req));

	/* Configure the request */
	if (mr->hwq.level == PBL_LVL_MAX) {
		/* No PBL provided, just use system PAGE_SIZE */
		level = 0;
		req.pbl = 0;
		pg_size = PAGE_SIZE;
	} else {
		level = mr->hwq.level;
		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	}
	pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
			       ((ilog2(pg_size) <<
				 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
					     CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
					    CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
	req.access = (mr->access_flags & BNXT_QPLIB_MR_ACCESS_MASK);
	req.va = cpu_to_le64(mr->va);
	req.key = cpu_to_le32(mr->lkey);
	if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
		req.key = cpu_to_le32(mr->pd->id);
	req.flags = cpu_to_le16(mr->flags);
	req.mr_size = cpu_to_le64(mr->total_size);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
		mr->lkey = le32_to_cpu(resp.xid);
		mr->rkey = mr->lkey;
	}

	return 0;

fail:
	if (mr->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mr->hwq);
	return rc;
}

int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl,
					int max_pg_ptrs)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int pg_ptrs, pages, rc;

	/* Re-calculate the max to fit the HWQ allocation model */
	pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
	pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
	if (!pages)
		pages++;

	if (pages > MAX_PBL_LVL_1_PGS)
		return -ENOMEM;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.nopte = true;

	hwq_attr.res = res;
	hwq_attr.depth = pg_ptrs;
	hwq_attr.stride = PAGE_SIZE;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
	if (!rc)
		frpl->max_pg_ptrs = pg_ptrs;

	return rc;
}

int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_frpl *frpl)
{
	bnxt_qplib_free_hwq(res, &frpl->hwq);
	return 0;
}

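/*
 * Query the per-function RoCE protocol counters. FW returns the counter
 * block in a DMA side buffer which is copied field by field into the
 * caller's stats structure.
 */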
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
			      struct bnxt_qplib_roce_stats *stats)
{
	struct creq_query_roce_stats_resp resp = {};
	struct creq_query_roce_stats_resp_sb *sb;
	struct cmdq_query_roce_stats req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
	stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
	stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
	stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
	stats->missing_resp = le64_to_cpu(sb->missing_resp);
	stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
	stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
	stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
	stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
	stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
	stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
	stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
	stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
	stats->dup_req = le64_to_cpu(sb->dup_req);
	stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
	stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
	stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
	stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
	stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
	stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
	stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
	stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
	stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
	stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
	stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
	stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
	stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
	stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
	stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
	stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
	stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
	stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
	stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
	stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
	stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
	stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
	stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
	stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
	stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
	if (!rcfw->init_oos_stats) {
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
		rcfw->init_oos_stats = 1;
	} else {
		stats->res_oos_drop_count +=
				(le64_to_cpu(sb->res_oos_drop_count) -
				 rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
	}

bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
			 struct bnxt_qplib_ext_stat *estat)
{
	struct creq_query_roce_stats_ext_resp resp = {};
	struct creq_query_roce_stats_ext_resp_sb *sb;
	struct cmdq_query_roce_stats_ext req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	sb = sbuf.sb;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
	if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
		req.function_id =
			cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
				    (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
	else
		req.function_id = cpu_to_le32(fid);
	req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
	estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
	estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
	estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
	estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
	estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
	estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
	estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
	estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
	estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
	estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
	estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
	estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
	estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
	estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
	estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
	estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
	estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
	estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
	estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
	estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);

bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
				    struct bnxt_qplib_cc_param_ext *cc_ext)
{
	ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
	cc_ext->ext_mask = 0;
	ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
	ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
	ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
	ext_req->tr_update_mode = cc_ext->tr_update_mode;
	ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
	ext_req->fr_num_rtts = cc_ext->fr_rtt;
	ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
	ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
	ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
	ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
	ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
	ext_req->actual_cr_factor = cc_ext->cr_factor;
	ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
	ext_req->cp_bias_en = cc_ext->cp_bias_en;
	ext_req->cp_bias = cc_ext->cp_bias;
	ext_req->cnp_ecn = cc_ext->cnp_ecn;
	ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
	ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
	ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
	ext_req->cr_width = cc_ext->cr_width;
	ext_req->quota_period_min = cc_ext->min_quota;
	ext_req->quota_period_max = cc_ext->max_quota;
	ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
	ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
	ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
	ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
	ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
	ext_req->red_div = cc_ext->red_div;
	ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
	ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
	ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
	ext_req->use_rate_table = cc_ext->low_rate_en;
	ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
	ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
	ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
	ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
	ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
	ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
	ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
	ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
}

int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_cc_param *cc_param)
{
	struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
	struct creq_modify_roce_cc_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_roce_cc *req;
	int req_size;
	void *cmd;
	int rc;

	/* Prepare the older base command */
	req = &tlv_req.base_req;
	cmd = req;
	req_size = sizeof(*req);
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
				 sizeof(*req));
	req->modify_mask = cpu_to_le32(cc_param->mask);
	req->enable_cc = cc_param->enable;
	req->g = cc_param->g;
	req->num_phases_per_state = cc_param->nph_per_state;
	req->time_per_phase = cc_param->time_pph;
	req->pkts_per_phase = cc_param->pkts_pph;
	req->init_cr = cpu_to_le16(cc_param->init_cr);
	req->init_tr = cpu_to_le16(cc_param->init_tr);
	req->tos_dscp_tos_ecn = (cc_param->tos_dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
				(cc_param->tos_ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
	req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
	req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
	req->rtt = cpu_to_le16(cc_param->rtt);
	req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
	req->cc_mode = cc_param->cc_mode;
	req->inactivity_th = cpu_to_le16(cc_param->inact_th);

	/* For chip gen P5 onwards fill extended cmd and header */
	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
		struct roce_tlv *hdr;
		u32 payload;
		u32 chunks;

		cmd = &tlv_req;
		req_size = sizeof(tlv_req);
		/* Prepare primary tlv header */
		hdr = &tlv_req.tlv_hdr;
		chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
		payload = sizeof(struct cmdq_modify_roce_cc);
		__roce_1st_tlv_prep(hdr, chunks, payload, true);
		/* Prepare secondary tlv header */
		hdr = (struct roce_tlv *)&tlv_req.ext_req;
		payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
			  sizeof(struct roce_tlv);
		__roce_ext_tlv_prep(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
		bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
	return rc;
}

int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 res_type,
			    u32 xid, u32 resp_size, void *resp_va)
{
	struct creq_read_context resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_read_context req = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	sbuf.size = resp_size;
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_READ_CONTEXT, sizeof(req));
	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
	req.resp_size = resp_size / BNXT_QPLIB_CMDQE_UNITS;

	req.xid = cpu_to_le32(xid);
	req.type = res_type;

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto free_mem;

	memcpy(resp_va, sbuf.sb, resp_size);
free_mem:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
				    struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
{
	cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
	cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
	cc_ext->init_cp = le16_to_cpu(sb->init_cp);
	cc_ext->tr_update_mode = sb->tr_update_mode;
	cc_ext->tr_update_cyls = sb->tr_update_cycles;
	cc_ext->fr_rtt = sb->fr_num_rtts;
	cc_ext->ai_rate_incr = sb->ai_rate_increase;
	cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
	cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
	cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
	cc_ext->bw_avg_weight = sb->bw_avg_weight;
	cc_ext->cr_factor = sb->actual_cr_factor;
	cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
	cc_ext->cp_bias_en = sb->cp_bias_en;
	cc_ext->cp_bias = sb->cp_bias;
	cc_ext->cnp_ecn = sb->cnp_ecn;
	cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
	cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
	cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
	cc_ext->cr_width = sb->cr_width;
	cc_ext->min_quota = sb->quota_period_min;
	cc_ext->max_quota = sb->quota_period_max;
	cc_ext->abs_max_quota = sb->quota_period_abs_max;
	cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
	cc_ext->cr_prob_fac = sb->cr_prob_factor;
	cc_ext->tr_prob_fac = sb->tr_prob_factor;
	cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
	cc_ext->red_div = sb->red_div;
	cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
	cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
	cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
	cc_ext->low_rate_en = sb->use_rate_table;
	cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
	cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
	cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
	cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
	cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
	cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
	cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
	cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
	cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
}

int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
			      struct bnxt_qplib_cc_param *cc_param)
{
	struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_roce_cc_resp resp = {};
	struct creq_query_roce_cc_resp_sb *sb;
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_query_roce_cc req = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	size_t resp_size;
	int rc;

	/* Query the parameters from chip */
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
				 sizeof(req));
	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
		resp_size = sizeof(*ext_sb);
	else
		resp_size = sizeof(*sb);

	sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
	if (rc)
		goto out;

	ext_sb = sbuf.sb;
	sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
		(struct creq_query_roce_cc_resp_sb *)ext_sb;

	cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
	cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
			     CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
			     CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
	cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
			      CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
			      CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
	cc_param->alt_tos_dscp = sb->alt_tos_dscp;
	cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;

	cc_param->g = sb->g;
	cc_param->nph_per_state = sb->num_phases_per_state;
	cc_param->init_cr = le16_to_cpu(sb->init_cr);
	cc_param->init_tr = le16_to_cpu(sb->init_tr);
	cc_param->cc_mode = sb->cc_mode;
	cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
	cc_param->rtt = le16_to_cpu(sb->rtt);
	cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
	cc_param->time_pph = sb->time_per_phase;
	cc_param->pkts_pph = sb->pkts_per_phase;
	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
		bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
		cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
	}
out:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
	return rc;
}