/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File : ecore_roce.c
 */
#include <sys/cdefs.h>
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_sp_commands.h"
#include "ecore_cxt.h"
#include "ecore_rdma.h"
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_hw.h"
#include "ecore_mcp.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_int.h"
#include "pcics_reg_driver.h"
#include "ecore_iro.h"
#include "ecore_gtt_reg_addr.h"
#ifndef LINUX_REMOVE
#include "ecore_tcp_ip.h"
#endif

#ifdef _NTDDK_
#pragma warning(push)
#pragma warning(disable : 28167)
#pragma warning(disable : 28123)
#pragma warning(disable : 28182)
#pragma warning(disable : 6011)
#endif

static void ecore_roce_free_icid(struct ecore_hwfn *p_hwfn, u16 icid);

static enum _ecore_status_t
ecore_roce_async_event(struct ecore_hwfn *p_hwfn,
		       u8 fw_event_code,
		       u16 OSAL_UNUSED echo,
		       union event_ring_data *data,
		       u8 OSAL_UNUSED fw_return_code)
{
	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid = (u16)OSAL_LE32_TO_CPU(
			data->rdma_data.rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in ecore_roce_sp_destroy_qp.
		 */
		ecore_roce_free_icid(p_hwfn, icid);
	} else
		p_hwfn->p_rdma_info->events.affiliated_event(
			p_hwfn->p_rdma_info->events.context,
			fw_event_code,
			(void *)&data->rdma_data.async_handle);

	return ECORE_SUCCESS;
}

#ifdef CONFIG_DCQCN
static enum _ecore_status_t ecore_roce_start_rl(
	struct ecore_hwfn *p_hwfn,
	struct ecore_roce_dcqcn_params *dcqcn_params)
{
	struct ecore_rl_update_params params;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");
	OSAL_MEMSET(&params, 0, sizeof(params));

	params.rl_id_first = (u8)RESC_START(p_hwfn, ECORE_RL);
	params.rl_id_last = RESC_START(p_hwfn, ECORE_RL) +
		ecore_init_qm_get_num_pf_rls(p_hwfn);
	params.dcqcn_update_param_flg = 1;
	params.rl_init_flg = 1;
	params.rl_start_flg = 1;
	params.rl_stop_flg = 0;
	params.rl_dc_qcn_flg = 1;

	params.rl_bc_rate = dcqcn_params->rl_bc_rate;
	params.rl_max_rate = dcqcn_params->rl_max_rate;
	params.rl_r_ai = dcqcn_params->rl_r_ai;
	params.rl_r_hai = dcqcn_params->rl_r_hai;
	params.dcqcn_gd = dcqcn_params->dcqcn_gd;
	params.dcqcn_k_us = dcqcn_params->dcqcn_k_us;
	params.dcqcn_timeuot_us = dcqcn_params->dcqcn_timeout_us;

	return ecore_sp_rl_update(p_hwfn, &params);
}

enum _ecore_status_t ecore_roce_stop_rl(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rl_update_params params;

	if (!p_hwfn->p_rdma_info->roce.dcqcn_reaction_point)
		return ECORE_SUCCESS;

	OSAL_MEMSET(&params, 0, sizeof(params));
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "\n");

	params.rl_id_first = (u8)RESC_START(p_hwfn, ECORE_RL);
	params.rl_id_last = RESC_START(p_hwfn, ECORE_RL) +
		ecore_init_qm_get_num_pf_rls(p_hwfn);
	params.rl_stop_flg = 1;

	return ecore_sp_rl_update(p_hwfn, &params);
}

#define NIG_REG_ROCE_DUPLICATE_TO_HOST_BTH 2
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_ECN 1

enum _ecore_status_t ecore_roce_dcqcn_cfg(
	struct ecore_hwfn *p_hwfn,
	struct ecore_roce_dcqcn_params *params,
	struct roce_init_func_ramrod_data *p_ramrod,
	struct ecore_ptt *p_ptt)
{
	u32 val = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_hwfn->pf_params.rdma_pf_params.enable_dcqcn ||
	    p_hwfn->p_rdma_info->proto == PROTOCOLID_IWARP)
		return rc;

	p_hwfn->p_rdma_info->roce.dcqcn_enabled = 0;
	if (params->notification_point) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Configuring dcqcn notification point: timeout = 0x%x\n",
			   params->cnp_send_timeout);
		p_ramrod->roce.cnp_send_timeout = params->cnp_send_timeout;
		p_hwfn->p_rdma_info->roce.dcqcn_enabled = 1;
		/* Configure NIG to duplicate to host and storm when:
		 *  - ECN == 2'b11 (notification point)
		 */
		val |= 1 << NIG_REG_ROCE_DUPLICATE_TO_HOST_ECN;
	}

	if (params->reaction_point) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
			   "Configuring dcqcn reaction point\n");
		p_hwfn->p_rdma_info->roce.dcqcn_enabled = 1;
		p_hwfn->p_rdma_info->roce.dcqcn_reaction_point = 1;
		/* Configure NIG to duplicate to host and storm when:
		 *  - BTH opcode equals bth_hdr_flow_ctrl_opcode_2
		 *    (reaction point)
		 */
		val |= 1 << NIG_REG_ROCE_DUPLICATE_TO_HOST_BTH;

		rc = ecore_roce_start_rl(p_hwfn, params);
	}

	if (rc)
		return rc;

	p_ramrod->roce.cnp_dscp = params->cnp_dscp;
	p_ramrod->roce.cnp_vlan_priority = params->cnp_vlan_priority;

	ecore_wr(p_hwfn,
		 p_ptt,
		 NIG_REG_ROCE_DUPLICATE_TO_HOST,
		 val);

	return rc;
}
#endif

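/* Tear-down counterpart of ecore_roce_setup(): wait (up to 20 x 100ms) for
 * pending asynchronous destroy-QP completions, which release their bits in
 * the cid bitmap, and then unregister the RoCE async event callback.
 */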
enum _ecore_status_t ecore_roce_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_bmap *cid_map = &p_hwfn->p_rdma_info->cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the
	 * user after the synchronous part. The asynchronous part may
	 * take a little longer. We delay for a short while if an
	 * async destroy QP is still expected. Beyond the added delay
	 * we clear the bitmap anyway.
	 */
	while (OSAL_BITMAP_WEIGHT(cid_map->bitmap, cid_map->max_count)) {
		OSAL_MSLEEP(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, false,
				  "cid bitmap wait timed out\n");
			break;
		}
	}

	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);

	return ECORE_SUCCESS;
}

static void ecore_rdma_copy_gids(struct ecore_rdma_qp *qp, __le32 *src_gid,
				 __le32 *dst_gid) {
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		OSAL_MEMSET(src_gid, 0, sizeof(union ecore_gid));
		OSAL_MEMSET(dst_gid, 0, sizeof(union ecore_gid));
		src_gid[3] = OSAL_CPU_TO_LE32(qp->sgid.ipv4_addr);
		dst_gid[3] = OSAL_CPU_TO_LE32(qp->dgid.ipv4_addr);
	} else {
		/* RoCE, and RoCE v2 - IPv6: GIDs and IPv6 addresses coincide in
		 * location and size
		 */
		for (i = 0; i < OSAL_ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = OSAL_CPU_TO_LE32(qp->sgid.dwords[i]);
			dst_gid[i] = OSAL_CPU_TO_LE32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor ecore_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	enum roce_flavor flavor;

	switch (roce_mode) {
	case ROCE_V1:
		flavor = PLAIN_ROCE;
		break;
	case ROCE_V2_IPV4:
		flavor = RROCE_IPV4;
		break;
	case ROCE_V2_IPV6:
		flavor = (enum roce_flavor)ROCE_V2_IPV6;
		break;
	default:
		flavor = (enum roce_flavor)MAX_ROCE_MODE;
		break;
	}
	return flavor;
}

#if 0
static void ecore_roce_free_cid_pair(struct ecore_hwfn *p_hwfn, u16 cid)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, cid);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, cid + 1);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}
#endif

static void ecore_roce_free_qp(struct ecore_hwfn *p_hwfn, u16 qp_idx)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->qp_map, qp_idx);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}

#define ECORE_ROCE_CREATE_QP_ATTEMPTS	(20)
#define ECORE_ROCE_CREATE_QP_MSLEEP	(10)

static enum _ecore_status_t ecore_roce_wait_free_cids(struct ecore_hwfn *p_hwfn, u32 qp_idx)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	bool cids_free = false;
	u32 icid, iter = 0;
	int req, resp;

	icid = ECORE_ROCE_QP_TO_ICID(qp_idx);

	/* Make sure that the cids that were used by the QP index are free.
	 * This is necessary because the destroy flow returns to the user before
	 * the device finishes clean up.
	 * It can happen in the following flows:
	 * (1) ib_destroy_qp followed by an ib_create_qp
	 * (2) ib_modify_qp to RESET followed (not immediately), by an
	 *     ib_modify_qp to RTR
	 */

	do {
		OSAL_SPIN_LOCK(&p_rdma_info->lock);
		resp = ecore_bmap_test_id(p_hwfn, &p_rdma_info->cid_map, icid);
		req = ecore_bmap_test_id(p_hwfn, &p_rdma_info->cid_map, icid + 1);
		if (!resp && !req)
			cids_free = true;

		OSAL_SPIN_UNLOCK(&p_rdma_info->lock);

		if (!cids_free) {
			OSAL_MSLEEP(ECORE_ROCE_CREATE_QP_MSLEEP);
			iter++;
		}
	} while (!cids_free && iter < ECORE_ROCE_CREATE_QP_ATTEMPTS);

	if (!cids_free) {
		DP_ERR(p_hwfn->p_dev,
		       "responder and/or requester CIDs are still in use. resp=%d, req=%d\n",
		       resp, req);
		return ECORE_AGAIN;
	}

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_roce_alloc_qp_idx(
		struct ecore_hwfn *p_hwfn, u16 *qp_idx16)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, icid, cid, qp_idx;
	enum _ecore_status_t rc;

	OSAL_SPIN_LOCK(&p_rdma_info->lock);
	rc = ecore_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->qp_map, &qp_idx);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "failed to allocate qp\n");
		OSAL_SPIN_UNLOCK(&p_rdma_info->lock);
		return rc;
	}

	OSAL_SPIN_UNLOCK(&p_rdma_info->lock);

	/* Verify that the cid bits of this qp index are clear */
	rc = ecore_roce_wait_free_cids(p_hwfn, qp_idx);
	if (rc) {
		rc = ECORE_UNKNOWN_ERROR;
		goto err;
	}

	/* Allocate a DMA-able context for an ILT page, if not existing, for the
	 * associated iids.
	 * Note: If second allocation fails there's no need to free the first as
	 * it will be used in the future.
	 */
	icid = ECORE_ROCE_QP_TO_ICID(qp_idx);
	start_cid = ecore_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = start_cid + icid;

	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, cid);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, cid + 1);
	if (rc != ECORE_SUCCESS)
		goto err;

	/* qp index is under 2^16 */
	*qp_idx16 = (u16)qp_idx;

	return ECORE_SUCCESS;

err:
	ecore_roce_free_qp(p_hwfn, (u16)qp_idx);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);

	return rc;
}

static void ecore_roce_set_cid(struct ecore_hwfn *p_hwfn,
			       u32 cid)
{
	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
	ecore_bmap_set_id(p_hwfn,
			  &p_hwfn->p_rdma_info->cid_map,
			  cid);
	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}

static enum _ecore_status_t ecore_roce_sp_create_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct ecore_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u32 cid_start;
	u16 fw_srq_id;
	bool is_xrc;

	if (!qp->has_resp)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "qp_idx = %08x\n", qp->qp_idx);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					  &qp->irq_phys_addr,
					  RDMA_RING_PAGE_SIZE);
	if (!qp->irq) {
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = ecore_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
		  roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG,
		  qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
		  ecore_rdma_is_xrc_qp(qp));

	/* TBD: future use only
	 * #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK
	 * #define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT
	 */
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird =
		qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->dst_qp_id = OSAL_CPU_TO_LE32(qp->dest_qp);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	p_ramrod->initial_psn = OSAL_CPU_TO_LE32(qp->rq_psn);
	p_ramrod->pd = OSAL_CPU_TO_LE16(qp->pd);
	p_ramrod->rq_num_pages = OSAL_CPU_TO_LE16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
	p_ramrod->cq_cid = OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
	p_ramrod->xrc_domain = OSAL_CPU_TO_LE16(qp->xrcd_id);

#ifdef CONFIG_DCQCN
	/* when dcqcn is enabled physical queues are determined according to qp id */
	if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
		regular_latency_queue =
			ecore_get_cm_pq_idx_rl(p_hwfn,
					       (qp->icid >> 1) %
					       ROCE_DCQCN_RP_MAX_QPS);
	else
#endif
		regular_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	low_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);

	p_ramrod->regular_latency_phy_queue = OSAL_CPU_TO_LE16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue = OSAL_CPU_TO_LE16(low_latency_queue);
	p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);

	ecore_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	ecore_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = OSAL_CPU_TO_LE16(qp->vlan_id);
	is_xrc = ecore_rdma_is_xrc_qp(qp);
	fw_srq_id = ecore_rdma_get_fw_srq_id(p_hwfn, qp->srq_id, is_xrc);
	p_ramrod->srq_id.srq_idx = OSAL_CPU_TO_LE16(fw_srq_id);
	p_ramrod->srq_id.opaque_fid = OSAL_CPU_TO_LE16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d regular physical queue = 0x%x, low latency physical queue 0x%x\n",
		   rc, regular_latency_queue, low_latency_queue);

	if (rc != ECORE_SUCCESS)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod.resp = 0;

	cid_start = ecore_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
	ecore_roce_set_cid(p_hwfn, qp->icid - cid_start);

	return rc;

err:
	DP_NOTICE(p_hwfn, false, "create responder - failed, rc = %d\n", rc);
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->irq,
			       qp->irq_phys_addr,
			       qp->irq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	return rc;
}

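/* The requester ramrod mirrors the responder one, but runs on the second CID
 * of the pair (qp->icid + 1) and describes the SQ/ORQ rather than the RQ/IRQ.
 */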
static enum _ecore_status_t ecore_roce_sp_create_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct ecore_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u32 cid_start;

	if (!qp->has_req)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					  &qp->orq_phys_addr,
					  RDMA_RING_PAGE_SIZE);
	if (!qp->orq)
	{
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_RAMROD_CREATE_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = ecore_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
		  roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
		  qp->signal_all);

	/* TBD:
	 * future use only
	 * #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK
	 * #define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT
	 */
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
		  qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
		  ecore_rdma_is_xrc_qp(qp));

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->dst_qp_id = OSAL_CPU_TO_LE32(qp->dest_qp);
	p_ramrod->ack_timeout_val = OSAL_CPU_TO_LE32(qp->ack_timeout);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	p_ramrod->initial_psn = OSAL_CPU_TO_LE32(qp->sq_psn);
	p_ramrod->pd = OSAL_CPU_TO_LE16(qp->pd);
	p_ramrod->sq_num_pages = OSAL_CPU_TO_LE16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo =
		OSAL_CPU_TO_LE32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
	p_ramrod->cq_cid = OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
					    qp->sq_cq_id);

#ifdef CONFIG_DCQCN
	/* when dcqcn is enabled physical queues are determined according to qp id */
	if (p_hwfn->p_rdma_info->roce.dcqcn_enabled)
		regular_latency_queue =
			ecore_get_cm_pq_idx_rl(p_hwfn,
					       (qp->icid >> 1) %
					       ROCE_DCQCN_RP_MAX_QPS);
	else
#endif
		regular_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	low_latency_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);

	p_ramrod->regular_latency_phy_queue = OSAL_CPU_TO_LE16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue = OSAL_CPU_TO_LE16(low_latency_queue);
	p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);

	ecore_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	ecore_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = OSAL_CPU_TO_LE16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rc = %d\n", rc);

	if (rc != ECORE_SUCCESS)
		goto err;

	qp->req_offloaded = true;
	qp->cq_prod.req = 0;

	cid_start = ecore_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
	ecore_roce_set_cid(p_hwfn, qp->icid + 1 - cid_start);

	return rc;

err:
	DP_NOTICE(p_hwfn, false, "Create requester - failed, rc = %d\n", rc);
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->orq,
			       qp->orq_phys_addr,
			       qp->orq_num_pages *
			       RDMA_RING_PAGE_SIZE);
	return rc;
}

static enum _ecore_status_t ecore_roce_sp_modify_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	bool move_to_err,
	u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	if (!qp->has_resp)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return ECORE_SUCCESS;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_EVENT_MODIFY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
	{
		DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	/* TBD: future use only
	 * #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK
	 * #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT
	 */

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static enum _ecore_status_t ecore_roce_sp_modify_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	bool move_to_sqd,
	bool move_to_err,
	u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	if (!qp->has_req)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return ECORE_SUCCESS;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_EVENT_MODIFY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, false, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
		  move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
		  move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, ECORE_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	/* TBD: future use only
	 * #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK
	 * #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT
	 */

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
		  qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = OSAL_CPU_TO_LE16(qp->pkey);
	p_ramrod->flow_label = OSAL_CPU_TO_LE32(qp->flow_label);
	p_ramrod->ack_timeout_val = OSAL_CPU_TO_LE32(qp->ack_timeout);
	p_ramrod->mtu = OSAL_CPU_TO_LE16(qp->mtu);
	ecore_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

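/* The destroy ramrods report their results (CQ producer, memory-window
 * counters) through a small DMA-able output buffer whose address is passed
 * inside the ramrod; the IRQ/ORQ rings are freed only once the ramrod has
 * completed, since firmware may still be accessing them until then.
 */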
static enum _ecore_status_t ecore_roce_sp_destroy_qp_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	u32 *num_invalidated_mw,
	u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum _ecore_status_t rc;

	if (!qp->has_resp) {
		*num_invalidated_mw = 0;
		*cq_prod = 0;
		return ECORE_SUCCESS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	*num_invalidated_mw = 0;

	if (!qp->resp_offloaded) {
		*cq_prod = qp->cq_prod.resp;
		return ECORE_SUCCESS;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ROCE_RAMROD_DESTROY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
		&ramrod_res_phys, sizeof(*p_ramrod_res));

	if (!p_ramrod_res)
	{
		rc = ECORE_NOMEM;
		DP_NOTICE(p_hwfn, false,
			  "ecore destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	*num_invalidated_mw
		= OSAL_LE32_TO_CPU(p_ramrod_res->num_invalidated_mw);
	*cq_prod = OSAL_LE32_TO_CPU(p_ramrod_res->cq_prod);
	qp->cq_prod.resp = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->irq,
			       qp->irq_phys_addr,
			       qp->irq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

	/* "fall through" */

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));

	return rc;
}

static enum _ecore_status_t ecore_roce_sp_destroy_qp_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	u32 *num_bound_mw,
	u32 *cq_prod)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum _ecore_status_t rc;

	if (!qp->has_req) {
		*num_bound_mw = 0;
		*cq_prod = 0;
		return ECORE_SUCCESS;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded) {
		*cq_prod = qp->cq_prod.req;
		return ECORE_SUCCESS;
	}

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &ramrod_res_phys,
					sizeof(*p_ramrod_res));
	if (!p_ramrod_res)
	{
		DP_NOTICE(p_hwfn, false,
			  "ecore destroy requester failed: cannot allocate memory (ramrod)\n");
		return ECORE_NOMEM;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	*num_bound_mw = OSAL_LE32_TO_CPU(p_ramrod_res->num_bound_mw);
	*cq_prod = OSAL_LE32_TO_CPU(p_ramrod_res->cq_prod);
	qp->cq_prod.req = *cq_prod;

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
			       qp->orq,
			       qp->orq_phys_addr,
			       qp->orq_num_pages *
			       RDMA_RING_PAGE_SIZE);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

	/* "fall through" */

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_ramrod_res, ramrod_res_phys,
			       sizeof(*p_ramrod_res));

	return rc;
}

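/* Query is likewise split per direction: the responder ramrod returns the RQ
 * PSN and an error flag, while the requester ramrod returns the SQ PSN plus
 * error and SQ-draining flags, each via its own DMA-able output structure.
 */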
static OSAL_INLINE enum _ecore_status_t ecore_roce_sp_query_responder(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct ecore_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool error_flag;

	if (!qp->resp_offloaded) {
		/* Don't send query qp for the responder */
		out_params->rq_psn = qp->rq_psn;

		return ECORE_SUCCESS;
	}

	/* Send a query responder ramrod to the FW */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &resp_ramrod_res_phys,
					sizeof(*p_resp_ramrod_res));
	if (!p_resp_ramrod_res)
	{
		DP_NOTICE(p_hwfn, false,
			  "ecore query qp failed: cannot allocate memory (ramrod)\n");
		return ECORE_NOMEM;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->rq_psn = OSAL_LE32_TO_CPU(p_resp_ramrod_res->psn);
	error_flag = GET_FIELD(
			OSAL_LE32_TO_CPU(p_resp_ramrod_res->err_flag),
			ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
	if (error_flag)
		qp->cur_state = ECORE_ROCE_QP_STATE_ERR;

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_resp_ramrod_res,
			       resp_ramrod_res_phys,
			       sizeof(*p_resp_ramrod_res));

	return rc;
}

static OSAL_INLINE enum _ecore_status_t ecore_roce_sp_query_requester(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_query_qp_out_params *out_params,
	bool *sq_draining)
{
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct ecore_sp_init_data init_data;
	dma_addr_t req_ramrod_res_phys;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool error_flag;

	if (!qp->req_offloaded)
	{
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		*sq_draining = 0;

		return ECORE_SUCCESS;
	}

	/* Send a query requester ramrod to the FW */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &req_ramrod_res_phys,
					sizeof(*p_req_ramrod_res));
	if (!p_req_ramrod_res)
	{
		DP_NOTICE(p_hwfn, false,
			  "ecore query qp failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return ECORE_NOMEM;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	out_params->sq_psn = OSAL_LE32_TO_CPU(p_req_ramrod_res->psn);
	error_flag = GET_FIELD(OSAL_LE32_TO_CPU(p_req_ramrod_res->flags),
			       ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	if (error_flag)
		qp->cur_state = ECORE_ROCE_QP_STATE_ERR;
	else
		*sq_draining = GET_FIELD(
			OSAL_LE32_TO_CPU(p_req_ramrod_res->flags),
			ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

err:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_req_ramrod_res,
			       req_ramrod_res_phys, sizeof(*p_req_ramrod_res));

	return rc;
}

enum _ecore_status_t ecore_roce_query_qp(
	struct ecore_hwfn *p_hwfn,
	struct ecore_rdma_qp *qp,
	struct ecore_rdma_query_qp_out_params *out_params)
{
	enum _ecore_status_t rc;

	rc = ecore_roce_sp_query_responder(p_hwfn, qp, out_params);
	if (rc)
		return rc;

	rc = ecore_roce_sp_query_requester(p_hwfn, qp, out_params,
					   &out_params->draining);
	if (rc)
		return rc;

	out_params->state = qp->cur_state;

	return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_roce_destroy_qp(struct ecore_hwfn *p_hwfn,
					   struct ecore_rdma_qp *qp,
					   struct ecore_rdma_destroy_qp_out_params *out_params)
{
	u32 cq_prod_resp = qp->cq_prod.resp, cq_prod_req = qp->cq_prod.req;
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	enum _ecore_status_t rc;

	/* Destroys the specified QP
	 * Note: if qp state != RESET/ERR/INIT then the upper driver first needs
	 * to call modify qp to move the qp to ERR state
	 */
	if ((qp->cur_state != ECORE_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != ECORE_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != ECORE_ROCE_QP_STATE_INIT))
	{
		DP_NOTICE(p_hwfn,
			  true,
			  "QP must be in error, reset or init state before destroying it\n");
		return ECORE_INVAL;
	}

	if (qp->cur_state != ECORE_ROCE_QP_STATE_RESET) {
		rc = ecore_roce_sp_destroy_qp_responder(p_hwfn,
							qp,
							&num_invalidated_mw,
							&cq_prod_resp);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Send destroy requester ramrod */
		rc = ecore_roce_sp_destroy_qp_requester(p_hwfn, qp,
							&num_bound_mw,
							&cq_prod_req);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* resp_offloaded was true, num_invalidated_mw is valid */
		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  true,
				  "number of invalidated memory windows is different from the number of bound ones\n");
			return ECORE_INVAL;
		}
	}

	ecore_roce_free_qp(p_hwfn, qp->qp_idx);

	out_params->rq_cq_prod = cq_prod_resp;
	out_params->sq_cq_prod = cq_prod_req;

	return ECORE_SUCCESS;
}

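/* UD QPs take a much simpler path than the RC QPs above: a single create or
 * destroy ramrod per QP, with no responder/requester split and no modify flow.
 */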
enum _ecore_status_t ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;

	if (!rdma_cxt) {
		DP_ERR(p_hwfn->p_dev,
		       "destroy ud qp failed due to NULL rdma_cxt\n");
		return ECORE_INVAL;
	}

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_UD_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err;

	ecore_roce_free_qp(p_hwfn, ECORE_ROCE_ICID_TO_QP(cid));

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "freed a ud qp with cid=%d\n", cid);

	return ECORE_SUCCESS;

err:
	DP_ERR(p_hwfn, "failed destroying a ud qp with cid=%d\n", cid);

	return rc;
}

enum _ecore_status_t ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	enum _ecore_status_t rc;
	u16 icid, qp_idx;

	if (!rdma_cxt || !out_params) {
		DP_ERR(p_hwfn->p_dev,
		       "ecore roce create ud qp failed due to NULL entry (rdma_cxt=%p, out=%p)\n",
		       rdma_cxt, out_params);
		return ECORE_INVAL;
	}

	rc = ecore_roce_alloc_qp_idx(p_hwfn, &qp_idx);
	if (rc != ECORE_SUCCESS)
		goto err;

	icid = ECORE_ROCE_QP_TO_ICID(qp_idx);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	rc = ecore_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_UD_QP,
				   PROTOCOLID_ROCE, &init_data);
	if (rc != ECORE_SUCCESS)
		goto err1;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		goto err1;

	out_params->icid = icid;
	out_params->qp_id = ((0xFF << 16) | icid);

	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "created a ud qp with icid=%d\n",
		   icid);

	return ECORE_SUCCESS;

err1:
	ecore_roce_free_qp(p_hwfn, qp_idx);

err:
	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "failed creating a ud qp\n");

	return rc;
}

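/* ecore_roce_modify_qp() translates an IB-style QP state transition into the
 * ramrod sequence the firmware expects: create responder on INIT/RESET->RTR,
 * create requester on RTR->RTS, modify ramrods for the remaining legal
 * transitions, and destroy ramrods when moving back to RESET.
 */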
enum _ecore_status_t
ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn,
		     struct ecore_rdma_qp *qp,
		     enum ecore_roce_qp_state prev_state,
		     struct ecore_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == ECORE_ROCE_QP_STATE_INIT) ||
	     (prev_state == ECORE_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == ECORE_ROCE_QP_STATE_RTR))
	{
		/* Init->RTR or Reset->RTR */

		/* Verify that the cid bits of this qp index are clear */
		rc = ecore_roce_wait_free_cids(p_hwfn, qp->qp_idx);
		if (rc)
			return rc;

		rc = ecore_roce_sp_create_responder(p_hwfn, qp);
		return rc;

	} else if ((prev_state == ECORE_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_RTS))
	{
		/* RTR-> RTS */
		rc = ecore_roce_sp_create_requester(p_hwfn, qp);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Send modify responder ramrod */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		return rc;

	} else if ((prev_state == ECORE_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_RTS))
	{
		/* RTS->RTS */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
						    params->modify_flags);
		return rc;

	} else if ((prev_state == ECORE_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_SQD))
	{
		/* RTS->SQD */
		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, true, false,
						    params->modify_flags);
		return rc;

	} else if ((prev_state == ECORE_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_SQD))
	{
		/* SQD->SQD */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
						    params->modify_flags);
		return rc;

	} else if ((prev_state == ECORE_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == ECORE_ROCE_QP_STATE_RTS))
	{
		/* SQD->RTS */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, false,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, false,
						    params->modify_flags);

		return rc;
	} else if (qp->cur_state == ECORE_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = ecore_roce_sp_modify_responder(p_hwfn, qp, true,
						    params->modify_flags);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_modify_requester(p_hwfn, qp, false, true,
						    params->modify_flags);
		return rc;

	} else if (qp->cur_state == ECORE_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */

		/* Send destroy responder ramrod */
		rc = ecore_roce_sp_destroy_qp_responder(p_hwfn, qp,
							&num_invalidated_mw,
							&qp->cq_prod.resp);

		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_roce_sp_destroy_qp_requester(p_hwfn, qp,
							&num_bound_mw,
							&qp->cq_prod.req);

		if (rc != ECORE_SUCCESS)
			return rc;

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  true,
				  "number of invalidated memory windows is different from the number of bound ones\n");
			return ECORE_INVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ECORE_SUCCESS\n");
	}

	return rc;
}

static void ecore_roce_free_icid(struct ecore_hwfn *p_hwfn, u16 icid)
{
	struct ecore_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid;

	start_cid = ecore_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;

	OSAL_SPIN_LOCK(&p_rdma_info->lock);

	ecore_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);

	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
}

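/* DPM/EDPM (doorbell push mode) configuration: DORQ_REG_PF_DPM_ENABLE is
 * written with 0 whenever either the DCBx or the doorbell-BAR constraint
 * below is set for this PF, and with 1 otherwise.
 */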
static void ecore_rdma_dpm_conf(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (ECORE_MSG_DCB | ECORE_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

/* This function disables EDPM due to DCBx considerations */
void ecore_roce_dpm_dcbx(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u8 val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = (ecore_rdma_allocated_qps(p_hwfn)) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	ecore_rdma_dpm_conf(p_hwfn, p_ptt);
}

/* This function disables EDPM due to doorbell bar considerations */
void ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	ecore_rdma_dpm_conf(p_hwfn, p_ptt);
}

enum _ecore_status_t ecore_roce_setup(struct ecore_hwfn *p_hwfn)
{
	return ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					   ecore_roce_async_event);
}

#ifdef _NTDDK_
#pragma warning(pop)
#endif