1 /* 2 * Copyright (c) 2018-2019 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File : ecore_iwarp.c 30 */ 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "bcm_osal.h" 35 #include "ecore.h" 36 #include "ecore_status.h" 37 #include "ecore_sp_commands.h" 38 #include "ecore_cxt.h" 39 #include "ecore_rdma.h" 40 #include "reg_addr.h" 41 #include "ecore_hw.h" 42 #include "ecore_hsi_iwarp.h" 43 #include "ecore_ll2.h" 44 #include "ecore_ooo.h" 45 #ifndef LINUX_REMOVE 46 #include "ecore_tcp_ip.h" 47 #endif 48 49 #ifdef _NTDDK_ 50 #pragma warning(push) 51 #pragma warning(disable : 28123) 52 #pragma warning(disable : 28167) 53 #endif 54 55 /* Default values used for MPA Rev 1 */ 56 #define ECORE_IWARP_ORD_DEFAULT 32 57 #define ECORE_IWARP_IRD_DEFAULT 32 58 59 #define ECORE_IWARP_MAX_FW_MSS 4120 60 61 struct mpa_v2_hdr { 62 __be16 ird; 63 __be16 ord; 64 }; 65 66 #define MPA_V2_PEER2PEER_MODEL 0x8000 67 #define MPA_V2_SEND_RTR 0x4000 /* on ird */ 68 #define MPA_V2_READ_RTR 0x4000 /* on ord */ 69 #define MPA_V2_WRITE_RTR 0x8000 70 #define MPA_V2_IRD_ORD_MASK 0x3FFF 71 72 #define MPA_REV2(_mpa_rev) (_mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) 73 74 #define ECORE_IWARP_INVALID_TCP_CID 0xffffffff 75 /* How many times fin will be sent before FW aborts and send RST */ 76 #define ECORE_IWARP_MAX_FIN_RT_DEFAULT 2 77 #define ECORE_IWARP_RCV_WND_SIZE_MIN (0xffff) 78 /* INTERNAL: These numbers are derived from BRB buffer sizes to obtain optimal performance */ 79 #define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS (200*1024) 80 #define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS (100*1024) 81 #define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS (150*1024) 82 #define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS (90*1024) 83 #define ECORE_IWARP_MAX_WND_SCALE (14) 84 /* Timestamp header is the length of the timestamp option (10): 85 * kind:8 bit, length:8 bit, timestamp:32 bit, ack: 32bit 86 * rounded up to a multiple of 4 87 */ 88 #define TIMESTAMP_HEADER_SIZE (12) 89 90 static enum _ecore_status_t 91 ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn, 92 u8 fw_event_code, 93 u16 OSAL_UNUSED echo, 94 union event_ring_data *data, 95 u8 fw_return_code); 96 97 static enum _ecore_status_t 98 ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn, 99 struct ecore_iwarp_listener *listener); 100 101 static OSAL_INLINE struct ecore_iwarp_fpdu * 102 ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid); 103 104 /* Override devinfo with iWARP specific values */ 105 void 106 ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn) 107 { 108 struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev; 109 110 dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; 111 dev->max_qp = OSAL_MIN_T(u64, 112 IWARP_MAX_QPS, 113 p_hwfn->p_rdma_info->num_qps) - 114 ECORE_IWARP_PREALLOC_CNT; 115 116 dev->max_cq = dev->max_qp; 117 118 dev->max_qp_resp_rd_atomic_resc = ECORE_IWARP_IRD_DEFAULT; 119 dev->max_qp_req_rd_atomic_resc = ECORE_IWARP_ORD_DEFAULT; 120 } 121 122 enum _ecore_status_t 123 ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) 124 { 125 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; 126 ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); 127 p_hwfn->b_rdma_enabled_in_prs = true; 128 129 return 0; 130 } 131 132 void 133 ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn, 134 struct iwarp_init_func_ramrod_data *p_ramrod) 135 { 136 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 137 "ooo handle = %d\n", 138 p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle); 139 140 p_ramrod->iwarp.ll2_ooo_q_index = 141 p_hwfn->hw_info.resc_start[ECORE_LL2_QUEUE] + 142 p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; 143 144 p_ramrod->tcp.max_fin_rt = ECORE_IWARP_MAX_FIN_RT_DEFAULT; 145 return; 146 } 147 148 static enum _ecore_status_t 149 ecore_iwarp_alloc_cid(struct ecore_hwfn *p_hwfn, u32 *cid) 150 { 151 enum _ecore_status_t rc; 152 153 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock); 154 155 rc = ecore_rdma_bmap_alloc_id(p_hwfn, 156 &p_hwfn->p_rdma_info->cid_map, 157 cid); 158 159 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock); 160 *cid += ecore_cxt_get_proto_cid_start(p_hwfn, 161 p_hwfn->p_rdma_info->proto); 162 if (rc != ECORE_SUCCESS) { 163 DP_NOTICE(p_hwfn, false, "Failed in allocating iwarp cid\n"); 164 return rc; 165 } 166 167 rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *cid); 168 169 if (rc != ECORE_SUCCESS) { 170 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock); 171 *cid -= ecore_cxt_get_proto_cid_start(p_hwfn, 172 p_hwfn->p_rdma_info->proto); 173 174 ecore_bmap_release_id(p_hwfn, 175 &p_hwfn->p_rdma_info->cid_map, 176 *cid); 177 178 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock); 179 } 180 181 return rc; 182 } 183 184 static void 185 ecore_iwarp_set_tcp_cid(struct ecore_hwfn *p_hwfn, u32 cid) 186 { 187 cid -= ecore_cxt_get_proto_cid_start(p_hwfn, 188 p_hwfn->p_rdma_info->proto); 189 190 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock); 191 ecore_bmap_set_id(p_hwfn, 192 &p_hwfn->p_rdma_info->tcp_cid_map, 193 cid); 194 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock); 195 } 196 197 /* This function allocates a cid for passive tcp ( called from syn receive) 198 * the reason it's separate from the regular cid allocation is because it 199 * is assured that these cids already have ilt alloacted. They are preallocated 200 * to ensure that we won't need to allocate memory during syn processing 201 */ 202 static enum _ecore_status_t 203 ecore_iwarp_alloc_tcp_cid(struct ecore_hwfn *p_hwfn, u32 *cid) 204 { 205 enum _ecore_status_t rc; 206 207 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock); 208 209 rc = ecore_rdma_bmap_alloc_id(p_hwfn, 210 &p_hwfn->p_rdma_info->tcp_cid_map, 211 cid); 212 213 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock); 214 215 *cid += ecore_cxt_get_proto_cid_start(p_hwfn, 216 p_hwfn->p_rdma_info->proto); 217 if (rc != ECORE_SUCCESS) { 218 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 219 "can't allocate iwarp tcp cid max-count=%d\n", 220 p_hwfn->p_rdma_info->tcp_cid_map.max_count); 221 222 *cid = ECORE_IWARP_INVALID_TCP_CID; 223 } 224 225 return rc; 226 } 227 228 /* We have two cid maps, one for tcp which should be used only from passive 229 * syn processing and replacing a pre-allocated ep in the list. the second 230 * for active tcp and for QPs. 231 */ 232 static void ecore_iwarp_cid_cleaned(struct ecore_hwfn *p_hwfn, u32 cid) 233 { 234 cid -= ecore_cxt_get_proto_cid_start(p_hwfn, 235 p_hwfn->p_rdma_info->proto); 236 237 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock); 238 239 if (cid < ECORE_IWARP_PREALLOC_CNT) { 240 ecore_bmap_release_id(p_hwfn, 241 &p_hwfn->p_rdma_info->tcp_cid_map, 242 cid); 243 } else { 244 ecore_bmap_release_id(p_hwfn, 245 &p_hwfn->p_rdma_info->cid_map, 246 cid); 247 } 248 249 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock); 250 } 251 252 enum _ecore_status_t 253 ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn, 254 struct ecore_rdma_qp *qp, 255 struct ecore_rdma_create_qp_out_params *out_params) 256 { 257 struct iwarp_create_qp_ramrod_data *p_ramrod; 258 struct ecore_sp_init_data init_data; 259 struct ecore_spq_entry *p_ent; 260 enum _ecore_status_t rc; 261 u16 physical_queue; 262 u32 cid; 263 264 qp->shared_queue = 265 OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 266 &qp->shared_queue_phys_addr, 267 IWARP_SHARED_QUEUE_PAGE_SIZE); 268 if (!qp->shared_queue) { 269 DP_NOTICE(p_hwfn, false, 270 "ecore iwarp create qp failed: cannot allocate memory (shared queue).\n"); 271 return ECORE_NOMEM; 272 } else { 273 out_params->sq_pbl_virt = (u8 *)qp->shared_queue + 274 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; 275 out_params->sq_pbl_phys = qp->shared_queue_phys_addr + 276 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; 277 out_params->rq_pbl_virt = (u8 *)qp->shared_queue + 278 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; 279 out_params->rq_pbl_phys = qp->shared_queue_phys_addr + 280 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; 281 } 282 283 rc = ecore_iwarp_alloc_cid(p_hwfn, &cid); 284 if (rc != ECORE_SUCCESS) 285 goto err1; 286 287 qp->icid = (u16)cid; 288 289 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 290 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 291 init_data.cid = qp->icid; 292 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 293 294 rc = ecore_sp_init_request(p_hwfn, &p_ent, 295 IWARP_RAMROD_CMD_ID_CREATE_QP, 296 PROTOCOLID_IWARP, &init_data); 297 if (rc != ECORE_SUCCESS) 298 return rc; 299 300 p_ramrod = &p_ent->ramrod.iwarp_create_qp; 301 302 SET_FIELD(p_ramrod->flags, 303 IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, 304 qp->fmr_and_reserved_lkey); 305 306 SET_FIELD(p_ramrod->flags, 307 IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, 308 qp->signal_all); 309 310 SET_FIELD(p_ramrod->flags, 311 IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 312 qp->incoming_rdma_read_en); 313 314 SET_FIELD(p_ramrod->flags, 315 IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 316 qp->incoming_rdma_write_en); 317 318 SET_FIELD(p_ramrod->flags, 319 IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, 320 qp->incoming_atomic_en); 321 322 SET_FIELD(p_ramrod->flags, 323 IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, 324 qp->use_srq); 325 326 p_ramrod->pd = qp->pd; 327 p_ramrod->sq_num_pages = qp->sq_num_pages; 328 p_ramrod->rq_num_pages = qp->rq_num_pages; 329 330 p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi); 331 p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo); 332 333 p_ramrod->cq_cid_for_sq = 334 OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) | 335 qp->sq_cq_id); 336 p_ramrod->cq_cid_for_rq = 337 OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) | 338 qp->rq_cq_id); 339 340 p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi); 341 342 physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 343 p_ramrod->physical_q0 = OSAL_CPU_TO_LE16(physical_queue); 344 physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); 345 p_ramrod->physical_q1 = OSAL_CPU_TO_LE16(physical_queue); 346 347 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 348 349 if (rc != ECORE_SUCCESS) 350 goto err1; 351 352 return rc; 353 354 err1: 355 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 356 qp->shared_queue, 357 qp->shared_queue_phys_addr, 358 IWARP_SHARED_QUEUE_PAGE_SIZE); 359 360 return rc; 361 } 362 363 static enum _ecore_status_t 364 ecore_iwarp_modify_fw(struct ecore_hwfn *p_hwfn, 365 struct ecore_rdma_qp *qp) 366 { 367 struct iwarp_modify_qp_ramrod_data *p_ramrod; 368 struct ecore_sp_init_data init_data; 369 struct ecore_spq_entry *p_ent; 370 enum _ecore_status_t rc; 371 372 /* Get SPQ entry */ 373 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 374 init_data.cid = qp->icid; 375 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 376 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 377 378 rc = ecore_sp_init_request(p_hwfn, &p_ent, 379 IWARP_RAMROD_CMD_ID_MODIFY_QP, 380 p_hwfn->p_rdma_info->proto, 381 &init_data); 382 if (rc != ECORE_SUCCESS) 383 return rc; 384 385 p_ramrod = &p_ent->ramrod.iwarp_modify_qp; 386 SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 387 0x1); 388 if (qp->iwarp_state == ECORE_IWARP_QP_STATE_CLOSING) 389 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; 390 else 391 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; 392 393 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 394 395 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)rc=%d\n", 396 qp->icid, rc); 397 398 return rc; 399 } 400 401 enum ecore_iwarp_qp_state 402 ecore_roce2iwarp_state(enum ecore_roce_qp_state state) 403 { 404 switch (state) { 405 case ECORE_ROCE_QP_STATE_RESET: 406 case ECORE_ROCE_QP_STATE_INIT: 407 case ECORE_ROCE_QP_STATE_RTR: 408 return ECORE_IWARP_QP_STATE_IDLE; 409 case ECORE_ROCE_QP_STATE_RTS: 410 return ECORE_IWARP_QP_STATE_RTS; 411 case ECORE_ROCE_QP_STATE_SQD: 412 return ECORE_IWARP_QP_STATE_CLOSING; 413 case ECORE_ROCE_QP_STATE_ERR: 414 return ECORE_IWARP_QP_STATE_ERROR; 415 case ECORE_ROCE_QP_STATE_SQE: 416 return ECORE_IWARP_QP_STATE_TERMINATE; 417 } 418 return ECORE_IWARP_QP_STATE_ERROR; 419 } 420 421 static enum ecore_roce_qp_state 422 ecore_iwarp2roce_state(enum ecore_iwarp_qp_state state) 423 { 424 switch (state) { 425 case ECORE_IWARP_QP_STATE_IDLE: 426 return ECORE_ROCE_QP_STATE_INIT; 427 case ECORE_IWARP_QP_STATE_RTS: 428 return ECORE_ROCE_QP_STATE_RTS; 429 case ECORE_IWARP_QP_STATE_TERMINATE: 430 return ECORE_ROCE_QP_STATE_SQE; 431 case ECORE_IWARP_QP_STATE_CLOSING: 432 return ECORE_ROCE_QP_STATE_SQD; 433 case ECORE_IWARP_QP_STATE_ERROR: 434 return ECORE_ROCE_QP_STATE_ERR; 435 } 436 return ECORE_ROCE_QP_STATE_ERR; 437 } 438 439 const char *iwarp_state_names[] = { 440 "IDLE", 441 "RTS", 442 "TERMINATE", 443 "CLOSING", 444 "ERROR", 445 }; 446 447 enum _ecore_status_t 448 ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn, 449 struct ecore_rdma_qp *qp, 450 enum ecore_iwarp_qp_state new_state, 451 bool internal) 452 { 453 enum ecore_iwarp_qp_state prev_iw_state; 454 enum _ecore_status_t rc = 0; 455 bool modify_fw = false; 456 457 /* modify QP can be called from upper-layer or as a result of async 458 * RST/FIN... therefore need to protect 459 */ 460 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock); 461 prev_iw_state = qp->iwarp_state; 462 463 if (prev_iw_state == new_state) { 464 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock); 465 return ECORE_SUCCESS; 466 } 467 468 switch (prev_iw_state) { 469 case ECORE_IWARP_QP_STATE_IDLE: 470 switch (new_state) { 471 case ECORE_IWARP_QP_STATE_RTS: 472 qp->iwarp_state = ECORE_IWARP_QP_STATE_RTS; 473 break; 474 case ECORE_IWARP_QP_STATE_ERROR: 475 qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR; 476 if (!internal) 477 modify_fw = true; 478 break; 479 default: 480 break; 481 } 482 break; 483 case ECORE_IWARP_QP_STATE_RTS: 484 switch (new_state) { 485 case ECORE_IWARP_QP_STATE_CLOSING: 486 if (!internal) 487 modify_fw = true; 488 489 qp->iwarp_state = ECORE_IWARP_QP_STATE_CLOSING; 490 break; 491 case ECORE_IWARP_QP_STATE_ERROR: 492 if (!internal) 493 modify_fw = true; 494 qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR; 495 break; 496 default: 497 break; 498 } 499 break; 500 case ECORE_IWARP_QP_STATE_ERROR: 501 switch (new_state) { 502 case ECORE_IWARP_QP_STATE_IDLE: 503 /* TODO: destroy flow -> need to destroy EP&QP */ 504 qp->iwarp_state = new_state; 505 break; 506 case ECORE_IWARP_QP_STATE_CLOSING: 507 /* could happen due to race... do nothing.... */ 508 break; 509 default: 510 rc = ECORE_INVAL; 511 } 512 break; 513 case ECORE_IWARP_QP_STATE_TERMINATE: 514 case ECORE_IWARP_QP_STATE_CLOSING: 515 qp->iwarp_state = new_state; 516 break; 517 default: 518 break; 519 } 520 521 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) %s --> %s %s\n", 522 qp->icid, 523 iwarp_state_names[prev_iw_state], 524 iwarp_state_names[qp->iwarp_state], 525 internal ? "internal" : " "); 526 527 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock); 528 529 if (modify_fw) 530 ecore_iwarp_modify_fw(p_hwfn, qp); 531 532 return rc; 533 } 534 535 enum _ecore_status_t 536 ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn, 537 struct ecore_rdma_qp *qp) 538 { 539 struct ecore_sp_init_data init_data; 540 struct ecore_spq_entry *p_ent; 541 enum _ecore_status_t rc; 542 543 /* Get SPQ entry */ 544 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 545 init_data.cid = qp->icid; 546 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 547 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 548 549 rc = ecore_sp_init_request(p_hwfn, &p_ent, 550 IWARP_RAMROD_CMD_ID_DESTROY_QP, 551 p_hwfn->p_rdma_info->proto, 552 &init_data); 553 if (rc != ECORE_SUCCESS) 554 return rc; 555 556 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 557 558 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); 559 560 return rc; 561 } 562 563 static void ecore_iwarp_destroy_ep(struct ecore_hwfn *p_hwfn, 564 struct ecore_iwarp_ep *ep, 565 bool remove_from_active_list) 566 { 567 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 568 ep->ep_buffer_virt, 569 ep->ep_buffer_phys, 570 sizeof(*ep->ep_buffer_virt)); 571 572 if (remove_from_active_list) { 573 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 574 575 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, 576 &p_hwfn->p_rdma_info->iwarp.ep_list); 577 578 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 579 } 580 581 if (ep->qp) 582 ep->qp->ep = OSAL_NULL; 583 584 OSAL_FREE(p_hwfn->p_dev, ep); 585 } 586 587 enum _ecore_status_t 588 ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn, 589 struct ecore_rdma_qp *qp) 590 { 591 enum _ecore_status_t rc = ECORE_SUCCESS; 592 struct ecore_iwarp_ep *ep = qp->ep; 593 struct ecore_iwarp_fpdu *fpdu; 594 int wait_count = 0; 595 596 fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, qp->icid); 597 if (fpdu && fpdu->incomplete_bytes) 598 DP_NOTICE(p_hwfn, false, 599 "Pending Partial fpdu with incomplete bytes=%d\n", 600 fpdu->incomplete_bytes); 601 602 if (qp->iwarp_state != ECORE_IWARP_QP_STATE_ERROR) { 603 604 rc = ecore_iwarp_modify_qp(p_hwfn, qp, 605 ECORE_IWARP_QP_STATE_ERROR, 606 false); 607 608 if (rc != ECORE_SUCCESS) 609 return rc; 610 } 611 612 /* Make sure ep is closed before returning and freeing memory. */ 613 if (ep) { 614 while (ep->state != ECORE_IWARP_EP_CLOSED) { 615 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 616 "Waiting for ep->state to be closed...state=%x\n", 617 ep->state); 618 619 OSAL_MSLEEP(100); 620 if (wait_count++ > 200) { 621 DP_NOTICE(p_hwfn, false, "ep state close timeout state=%x\n", 622 ep->state); 623 break; 624 } 625 } 626 627 ecore_iwarp_destroy_ep(p_hwfn, ep, false); 628 } 629 630 rc = ecore_iwarp_fw_destroy(p_hwfn, qp); 631 632 if (qp->shared_queue) 633 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 634 qp->shared_queue, 635 qp->shared_queue_phys_addr, 636 IWARP_SHARED_QUEUE_PAGE_SIZE); 637 638 return rc; 639 } 640 641 static enum _ecore_status_t 642 ecore_iwarp_create_ep(struct ecore_hwfn *p_hwfn, 643 struct ecore_iwarp_ep **ep_out) 644 { 645 struct ecore_iwarp_ep *ep; 646 enum _ecore_status_t rc; 647 648 ep = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*ep)); 649 if (!ep) { 650 DP_NOTICE(p_hwfn, false, 651 "ecore create ep failed: cannot allocate memory (ep). rc = %d\n", 652 ECORE_NOMEM); 653 return ECORE_NOMEM; 654 } 655 656 ep->state = ECORE_IWARP_EP_INIT; 657 658 /* ep_buffer is allocated once and is structured as follows: 659 * [MAX_PRIV_DATA_LEN][MAX_PRIV_DATA_LEN][union async_output] 660 * We could have allocated this in three calls but since all together 661 * it is less than a page, we do one allocation and initialize pointers 662 * accordingly 663 */ 664 ep->ep_buffer_virt = OSAL_DMA_ALLOC_COHERENT( 665 p_hwfn->p_dev, 666 &ep->ep_buffer_phys, 667 sizeof(*ep->ep_buffer_virt)); 668 669 if (!ep->ep_buffer_virt) { 670 DP_NOTICE(p_hwfn, false, 671 "ecore create ep failed: cannot allocate memory (ulp buffer). rc = %d\n", 672 ECORE_NOMEM); 673 rc = ECORE_NOMEM; 674 goto err; 675 } 676 677 ep->sig = 0xdeadbeef; 678 679 *ep_out = ep; 680 681 return ECORE_SUCCESS; 682 683 err: 684 OSAL_FREE(p_hwfn->p_dev, ep); 685 return rc; 686 } 687 688 static void 689 ecore_iwarp_print_tcp_ramrod(struct ecore_hwfn *p_hwfn, 690 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) 691 { 692 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, ">>> PRINT TCP RAMROD\n"); 693 694 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_mac=%x %x %x\n", 695 p_tcp_ramrod->tcp.local_mac_addr_lo, 696 p_tcp_ramrod->tcp.local_mac_addr_mid, 697 p_tcp_ramrod->tcp.local_mac_addr_hi); 698 699 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_mac=%x %x %x\n", 700 p_tcp_ramrod->tcp.remote_mac_addr_lo, 701 p_tcp_ramrod->tcp.remote_mac_addr_mid, 702 p_tcp_ramrod->tcp.remote_mac_addr_hi); 703 704 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan_id=%x\n", 705 p_tcp_ramrod->tcp.vlan_id); 706 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flags=%x\n", 707 p_tcp_ramrod->tcp.flags); 708 709 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version=%x\n", 710 p_tcp_ramrod->tcp.ip_version); 711 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip=%x.%x.%x.%x\n", 712 p_tcp_ramrod->tcp.local_ip[0], 713 p_tcp_ramrod->tcp.local_ip[1], 714 p_tcp_ramrod->tcp.local_ip[2], 715 p_tcp_ramrod->tcp.local_ip[3]); 716 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip=%x.%x.%x.%x\n", 717 p_tcp_ramrod->tcp.remote_ip[0], 718 p_tcp_ramrod->tcp.remote_ip[1], 719 p_tcp_ramrod->tcp.remote_ip[2], 720 p_tcp_ramrod->tcp.remote_ip[3]); 721 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flow_label=%x\n", 722 p_tcp_ramrod->tcp.flow_label); 723 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ttl=%x\n", 724 p_tcp_ramrod->tcp.ttl); 725 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "tos_or_tc=%x\n", 726 p_tcp_ramrod->tcp.tos_or_tc); 727 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port=%x\n", 728 p_tcp_ramrod->tcp.local_port); 729 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port=%x\n", 730 p_tcp_ramrod->tcp.remote_port); 731 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "mss=%x\n", 732 p_tcp_ramrod->tcp.mss); 733 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rcv_wnd_scale=%x\n", 734 p_tcp_ramrod->tcp.rcv_wnd_scale); 735 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "connect_mode=%x\n", 736 p_tcp_ramrod->tcp.connect_mode); 737 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_ip_payload_length=%x\n", 738 p_tcp_ramrod->tcp.syn_ip_payload_length); 739 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_lo=%x\n", 740 p_tcp_ramrod->tcp.syn_phy_addr_lo); 741 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_hi=%x\n", 742 p_tcp_ramrod->tcp.syn_phy_addr_hi); 743 744 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "<<<f PRINT TCP RAMROD\n"); 745 } 746 747 /* Default values for tcp option2 */ 748 #define ECORE_IWARP_DEF_MAX_RT_TIME (0) 749 #define ECORE_IWARP_DEF_CWND_FACTOR (4) 750 #define ECORE_IWARP_DEF_KA_MAX_PROBE_CNT (5) 751 #define ECORE_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ 752 #define ECORE_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ 753 754 static enum _ecore_status_t 755 ecore_iwarp_tcp_offload(struct ecore_hwfn *p_hwfn, 756 struct ecore_iwarp_ep *ep) 757 { 758 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 759 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; 760 struct ecore_sp_init_data init_data; 761 struct ecore_spq_entry *p_ent; 762 dma_addr_t async_output_phys; 763 dma_addr_t in_pdata_phys; 764 enum _ecore_status_t rc; 765 u16 physical_q; 766 u8 tcp_flags; 767 int i; 768 769 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 770 init_data.cid = ep->tcp_cid; 771 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 772 773 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 774 init_data.comp_mode = ECORE_SPQ_MODE_CB; 775 } else { 776 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 777 } 778 779 rc = ecore_sp_init_request(p_hwfn, &p_ent, 780 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, 781 PROTOCOLID_IWARP, &init_data); 782 if (rc != ECORE_SUCCESS) 783 return rc; 784 785 p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; 786 787 /* Point to the "second half" of the ulp buffer */ 788 in_pdata_phys = ep->ep_buffer_phys + 789 OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata); 790 p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.hi = 791 DMA_HI_LE(in_pdata_phys); 792 p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.lo = 793 DMA_LO_LE(in_pdata_phys); 794 p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = 795 OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata)); 796 797 async_output_phys = ep->ep_buffer_phys + 798 OFFSETOF(struct ecore_iwarp_ep_memory, async_output); 799 800 p_tcp_ramrod->iwarp.async_eqe_output_buf.hi = 801 DMA_HI_LE(async_output_phys); 802 p_tcp_ramrod->iwarp.async_eqe_output_buf.lo = 803 DMA_LO_LE(async_output_phys); 804 p_tcp_ramrod->iwarp.handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep)); 805 p_tcp_ramrod->iwarp.handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep)); 806 807 physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 808 p_tcp_ramrod->iwarp.physical_q0 = OSAL_CPU_TO_LE16(physical_q); 809 physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); 810 p_tcp_ramrod->iwarp.physical_q1 = OSAL_CPU_TO_LE16(physical_q); 811 p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev; 812 813 ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.remote_mac_addr_hi, 814 &p_tcp_ramrod->tcp.remote_mac_addr_mid, 815 &p_tcp_ramrod->tcp.remote_mac_addr_lo, 816 ep->remote_mac_addr); 817 ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.local_mac_addr_hi, 818 &p_tcp_ramrod->tcp.local_mac_addr_mid, 819 &p_tcp_ramrod->tcp.local_mac_addr_lo, 820 ep->local_mac_addr); 821 822 p_tcp_ramrod->tcp.vlan_id = OSAL_CPU_TO_LE16(ep->cm_info.vlan); 823 824 tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; 825 p_tcp_ramrod->tcp.flags = 0; 826 SET_FIELD(p_tcp_ramrod->tcp.flags, 827 TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 828 !!(tcp_flags & ECORE_IWARP_TS_EN)); 829 830 SET_FIELD(p_tcp_ramrod->tcp.flags, 831 TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 832 !!(tcp_flags & ECORE_IWARP_DA_EN)); 833 834 p_tcp_ramrod->tcp.ip_version = ep->cm_info.ip_version; 835 836 for (i = 0; i < 4; i++) { 837 p_tcp_ramrod->tcp.remote_ip[i] = 838 OSAL_CPU_TO_LE32(ep->cm_info.remote_ip[i]); 839 p_tcp_ramrod->tcp.local_ip[i] = 840 OSAL_CPU_TO_LE32(ep->cm_info.local_ip[i]); 841 } 842 843 p_tcp_ramrod->tcp.remote_port = 844 OSAL_CPU_TO_LE16(ep->cm_info.remote_port); 845 p_tcp_ramrod->tcp.local_port = OSAL_CPU_TO_LE16(ep->cm_info.local_port); 846 p_tcp_ramrod->tcp.mss = OSAL_CPU_TO_LE16(ep->mss); 847 p_tcp_ramrod->tcp.flow_label = 0; 848 p_tcp_ramrod->tcp.ttl = 0x40; 849 p_tcp_ramrod->tcp.tos_or_tc = 0; 850 851 p_tcp_ramrod->tcp.max_rt_time = ECORE_IWARP_DEF_MAX_RT_TIME; 852 p_tcp_ramrod->tcp.cwnd = ECORE_IWARP_DEF_CWND_FACTOR * p_tcp_ramrod->tcp.mss; 853 p_tcp_ramrod->tcp.ka_max_probe_cnt = ECORE_IWARP_DEF_KA_MAX_PROBE_CNT; 854 p_tcp_ramrod->tcp.ka_timeout = ECORE_IWARP_DEF_KA_TIMEOUT; 855 p_tcp_ramrod->tcp.ka_interval = ECORE_IWARP_DEF_KA_INTERVAL; 856 857 p_tcp_ramrod->tcp.rcv_wnd_scale = 858 (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; 859 p_tcp_ramrod->tcp.connect_mode = ep->connect_mode; 860 861 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 862 p_tcp_ramrod->tcp.syn_ip_payload_length = 863 OSAL_CPU_TO_LE16(ep->syn_ip_payload_length); 864 p_tcp_ramrod->tcp.syn_phy_addr_hi = 865 DMA_HI_LE(ep->syn_phy_addr); 866 p_tcp_ramrod->tcp.syn_phy_addr_lo = 867 DMA_LO_LE(ep->syn_phy_addr); 868 } 869 870 ecore_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod); 871 872 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 873 874 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 875 "EP(0x%x) Offload completed rc=%d\n" , ep->tcp_cid, rc); 876 877 return rc; 878 } 879 880 /* This function should be called after IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE 881 * is received. it will be called from the dpc context. 882 */ 883 static enum _ecore_status_t 884 ecore_iwarp_mpa_offload(struct ecore_hwfn *p_hwfn, 885 struct ecore_iwarp_ep *ep) 886 { 887 struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; 888 struct ecore_iwarp_info *iwarp_info; 889 struct ecore_sp_init_data init_data; 890 struct ecore_spq_entry *p_ent; 891 dma_addr_t async_output_phys; 892 dma_addr_t out_pdata_phys; 893 dma_addr_t in_pdata_phys; 894 struct ecore_rdma_qp *qp; 895 bool reject; 896 enum _ecore_status_t rc; 897 898 if (!ep) 899 return ECORE_INVAL; 900 901 qp = ep->qp; 902 reject = (qp == OSAL_NULL); 903 904 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 905 init_data.cid = reject ? ep->tcp_cid : qp->icid; 906 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 907 908 if (ep->connect_mode == TCP_CONNECT_ACTIVE || !ep->event_cb) 909 init_data.comp_mode = ECORE_SPQ_MODE_CB; 910 else 911 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 912 913 rc = ecore_sp_init_request(p_hwfn, &p_ent, 914 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, 915 PROTOCOLID_IWARP, &init_data); 916 917 if (rc != ECORE_SUCCESS) 918 return rc; 919 920 p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; 921 out_pdata_phys = ep->ep_buffer_phys + 922 OFFSETOF(struct ecore_iwarp_ep_memory, out_pdata); 923 p_mpa_ramrod->common.outgoing_ulp_buffer.addr.hi = 924 DMA_HI_LE(out_pdata_phys); 925 p_mpa_ramrod->common.outgoing_ulp_buffer.addr.lo = 926 DMA_LO_LE(out_pdata_phys); 927 p_mpa_ramrod->common.outgoing_ulp_buffer.len = 928 ep->cm_info.private_data_len; 929 p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; 930 931 p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord; 932 p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird; 933 934 p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; 935 936 in_pdata_phys = ep->ep_buffer_phys + 937 OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata); 938 p_mpa_ramrod->tcp_connect_side = ep->connect_mode; 939 p_mpa_ramrod->incoming_ulp_buffer.addr.hi = 940 DMA_HI_LE(in_pdata_phys); 941 p_mpa_ramrod->incoming_ulp_buffer.addr.lo = 942 DMA_LO_LE(in_pdata_phys); 943 p_mpa_ramrod->incoming_ulp_buffer.len = 944 OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata)); 945 async_output_phys = ep->ep_buffer_phys + 946 OFFSETOF(struct ecore_iwarp_ep_memory, async_output); 947 p_mpa_ramrod->async_eqe_output_buf.hi = 948 DMA_HI_LE(async_output_phys); 949 p_mpa_ramrod->async_eqe_output_buf.lo = 950 DMA_LO_LE(async_output_phys); 951 p_mpa_ramrod->handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep)); 952 p_mpa_ramrod->handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep)); 953 954 if (!reject) { 955 p_mpa_ramrod->shared_queue_addr.hi = 956 DMA_HI_LE(qp->shared_queue_phys_addr); 957 p_mpa_ramrod->shared_queue_addr.lo = 958 DMA_LO_LE(qp->shared_queue_phys_addr); 959 960 p_mpa_ramrod->stats_counter_id = 961 RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) + 962 qp->stats_queue; 963 } else { 964 p_mpa_ramrod->common.reject = 1; 965 } 966 967 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 968 p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; 969 p_mpa_ramrod->mode = ep->mpa_rev; 970 SET_FIELD(p_mpa_ramrod->rtr_pref, 971 IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, 972 ep->rtr_type); 973 974 ep->state = ECORE_IWARP_EP_MPA_OFFLOADED; 975 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 976 if (!reject) 977 ep->cid = qp->icid; /* Now they're migrated. */ 978 979 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 980 "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", 981 reject ? 0xffff : qp->icid, ep->tcp_cid, rc, ep->cm_info.ird, 982 ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); 983 return rc; 984 } 985 986 static void 987 ecore_iwarp_mpa_received(struct ecore_hwfn *p_hwfn, 988 struct ecore_iwarp_ep *ep) 989 { 990 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 991 struct ecore_iwarp_cm_event_params params; 992 struct mpa_v2_hdr *mpa_v2_params; 993 union async_output *async_data; 994 u16 mpa_ord, mpa_ird; 995 u8 mpa_hdr_size = 0; 996 u8 mpa_rev; 997 998 async_data = &ep->ep_buffer_virt->async_output; 999 1000 mpa_rev = async_data->mpa_request.mpa_handshake_mode; 1001 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1002 "private_data_len=%x handshake_mode=%x private_data=(%x)\n", 1003 async_data->mpa_request.ulp_data_len, 1004 mpa_rev, 1005 *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata))); 1006 1007 if (ep->listener->state > ECORE_IWARP_LISTENER_STATE_UNPAUSE) { 1008 /* MPA reject initiated by ecore */ 1009 OSAL_MEMSET(&ep->cm_info, 0, sizeof(ep->cm_info)); 1010 ep->event_cb = OSAL_NULL; 1011 ecore_iwarp_mpa_offload(p_hwfn, ep); 1012 return; 1013 } 1014 1015 if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { 1016 if (iwarp_info->mpa_rev == MPA_NEGOTIATION_TYPE_BASIC) { 1017 DP_ERR(p_hwfn, "MPA_NEGOTIATE Received MPA rev 2 on driver supporting only MPA rev 1\n"); 1018 /* MPA_REV2 ToDo: close the tcp connection. */ 1019 return; 1020 } 1021 1022 /* Read ord/ird values from private data buffer */ 1023 mpa_v2_params = 1024 (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); 1025 mpa_hdr_size = sizeof(*mpa_v2_params); 1026 1027 mpa_ord = ntohs(mpa_v2_params->ord); 1028 mpa_ird = ntohs(mpa_v2_params->ird); 1029 1030 /* Temprary store in cm_info incoming ord/ird requested, later 1031 * replace with negotiated value during accept 1032 */ 1033 ep->cm_info.ord = (u8)OSAL_MIN_T(u16, 1034 (mpa_ord & MPA_V2_IRD_ORD_MASK), 1035 ECORE_IWARP_ORD_DEFAULT); 1036 1037 ep->cm_info.ird = (u8)OSAL_MIN_T(u16, 1038 (mpa_ird & MPA_V2_IRD_ORD_MASK), 1039 ECORE_IWARP_IRD_DEFAULT); 1040 1041 /* Peer2Peer negotiation */ 1042 ep->rtr_type = MPA_RTR_TYPE_NONE; 1043 if (mpa_ird & MPA_V2_PEER2PEER_MODEL) { 1044 if (mpa_ord & MPA_V2_WRITE_RTR) 1045 ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; 1046 1047 if (mpa_ord & MPA_V2_READ_RTR) 1048 ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; 1049 1050 if (mpa_ird & MPA_V2_SEND_RTR) 1051 ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; 1052 1053 ep->rtr_type &= iwarp_info->rtr_type; 1054 /* if we're left with no match send our capabilities */ 1055 if (ep->rtr_type == MPA_RTR_TYPE_NONE) 1056 ep->rtr_type = iwarp_info->rtr_type; 1057 1058 /* prioritize write over send and read */ 1059 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) 1060 ep->rtr_type = MPA_RTR_TYPE_ZERO_WRITE; 1061 } 1062 1063 ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; 1064 } else { 1065 ep->cm_info.ord = ECORE_IWARP_ORD_DEFAULT; 1066 ep->cm_info.ird = ECORE_IWARP_IRD_DEFAULT; 1067 ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; 1068 } 1069 1070 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1071 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", 1072 mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, 1073 async_data->mpa_request.ulp_data_len, 1074 mpa_hdr_size); 1075 1076 /* Strip mpa v2 hdr from private data before sending to upper layer */ 1077 ep->cm_info.private_data = 1078 ep->ep_buffer_virt->in_pdata + mpa_hdr_size; 1079 1080 ep->cm_info.private_data_len = 1081 async_data->mpa_request.ulp_data_len - mpa_hdr_size; 1082 1083 params.event = ECORE_IWARP_EVENT_MPA_REQUEST; 1084 params.cm_info = &ep->cm_info; 1085 params.ep_context = ep; 1086 params.status = ECORE_SUCCESS; 1087 1088 ep->state = ECORE_IWARP_EP_MPA_REQ_RCVD; 1089 ep->event_cb(ep->cb_context, ¶ms); 1090 } 1091 1092 static void 1093 ecore_iwarp_move_to_ep_list(struct ecore_hwfn *p_hwfn, 1094 osal_list_t *list, struct ecore_iwarp_ep *ep) 1095 { 1096 OSAL_SPIN_LOCK(&ep->listener->lock); 1097 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, &ep->listener->ep_list); 1098 OSAL_SPIN_UNLOCK(&ep->listener->lock); 1099 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1100 OSAL_LIST_PUSH_TAIL(&ep->list_entry, list); 1101 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1102 } 1103 1104 static void 1105 ecore_iwarp_return_ep(struct ecore_hwfn *p_hwfn, 1106 struct ecore_iwarp_ep *ep) 1107 { 1108 ep->state = ECORE_IWARP_EP_INIT; 1109 if (ep->qp) 1110 ep->qp->ep = OSAL_NULL; 1111 ep->qp = OSAL_NULL; 1112 OSAL_MEMSET(&ep->cm_info, 0, sizeof(ep->cm_info)); 1113 1114 if (ep->tcp_cid == ECORE_IWARP_INVALID_TCP_CID) { 1115 /* We don't care about the return code, it's ok if tcp_cid 1116 * remains invalid...in this case we'll defer allocation 1117 */ 1118 ecore_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); 1119 } 1120 1121 ecore_iwarp_move_to_ep_list(p_hwfn, 1122 &p_hwfn->p_rdma_info->iwarp.ep_free_list, 1123 ep); 1124 } 1125 1126 static void 1127 ecore_iwarp_parse_private_data(struct ecore_hwfn *p_hwfn, 1128 struct ecore_iwarp_ep *ep) 1129 { 1130 struct mpa_v2_hdr *mpa_v2_params; 1131 union async_output *async_data; 1132 u16 mpa_ird, mpa_ord; 1133 u8 mpa_data_size = 0; 1134 1135 if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { 1136 mpa_v2_params = (struct mpa_v2_hdr *) 1137 ((u8 *)ep->ep_buffer_virt->in_pdata); 1138 mpa_data_size = sizeof(*mpa_v2_params); 1139 mpa_ird = ntohs(mpa_v2_params->ird); 1140 mpa_ord = ntohs(mpa_v2_params->ord); 1141 1142 ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); 1143 ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); 1144 } /* else: Ord / Ird already configured */ 1145 1146 async_data = &ep->ep_buffer_virt->async_output; 1147 1148 ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; 1149 ep->cm_info.private_data_len = 1150 async_data->mpa_response.ulp_data_len - mpa_data_size; 1151 } 1152 1153 static void 1154 ecore_iwarp_mpa_reply_arrived(struct ecore_hwfn *p_hwfn, 1155 struct ecore_iwarp_ep *ep) 1156 { 1157 struct ecore_iwarp_cm_event_params params; 1158 1159 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 1160 DP_NOTICE(p_hwfn, true, "MPA reply event not expected on passive side!\n"); 1161 return; 1162 } 1163 1164 params.event = ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY; 1165 1166 ecore_iwarp_parse_private_data(p_hwfn, ep); 1167 1168 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1169 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", 1170 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); 1171 1172 params.cm_info = &ep->cm_info; 1173 params.ep_context = ep; 1174 params.status = ECORE_SUCCESS; 1175 1176 ep->mpa_reply_processed = true; 1177 1178 ep->event_cb(ep->cb_context, ¶ms); 1179 } 1180 1181 #define ECORE_IWARP_CONNECT_MODE_STRING(ep) \ 1182 (ep->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active" 1183 1184 /* Called as a result of the event: 1185 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE 1186 */ 1187 static void 1188 ecore_iwarp_mpa_complete(struct ecore_hwfn *p_hwfn, 1189 struct ecore_iwarp_ep *ep, 1190 u8 fw_return_code) 1191 { 1192 struct ecore_iwarp_cm_event_params params; 1193 1194 if (ep->connect_mode == TCP_CONNECT_ACTIVE) 1195 params.event = ECORE_IWARP_EVENT_ACTIVE_COMPLETE; 1196 else 1197 params.event = ECORE_IWARP_EVENT_PASSIVE_COMPLETE; 1198 1199 if (ep->connect_mode == TCP_CONNECT_ACTIVE && 1200 !ep->mpa_reply_processed) { 1201 ecore_iwarp_parse_private_data(p_hwfn, ep); 1202 } 1203 1204 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1205 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", 1206 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); 1207 1208 params.cm_info = &ep->cm_info; 1209 1210 params.ep_context = ep; 1211 1212 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && 1213 (ep->state != ECORE_IWARP_EP_MPA_OFFLOADED)) { 1214 /* This is a FW bug. Shouldn't get complete without offload */ 1215 DP_NOTICE(p_hwfn, false, "%s(0x%x) ERROR: Got MPA complete without MPA offload fw_return_code=%d ep->state=%d\n", 1216 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid, 1217 fw_return_code, ep->state); 1218 ep->state = ECORE_IWARP_EP_CLOSED; 1219 return; 1220 } 1221 1222 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && 1223 (ep->state == ECORE_IWARP_EP_ABORTING)) 1224 return; 1225 1226 ep->state = ECORE_IWARP_EP_CLOSED; 1227 1228 switch (fw_return_code) { 1229 case RDMA_RETURN_OK: 1230 ep->qp->max_rd_atomic_req = ep->cm_info.ord; 1231 ep->qp->max_rd_atomic_resp = ep->cm_info.ird; 1232 ecore_iwarp_modify_qp(p_hwfn, ep->qp, 1233 ECORE_IWARP_QP_STATE_RTS, 1234 1); 1235 ep->state = ECORE_IWARP_EP_ESTABLISHED; 1236 params.status = ECORE_SUCCESS; 1237 break; 1238 case IWARP_CONN_ERROR_MPA_TIMEOUT: 1239 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA timeout\n", 1240 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1241 params.status = ECORE_TIMEOUT; 1242 break; 1243 case IWARP_CONN_ERROR_MPA_ERROR_REJECT: 1244 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Reject\n", 1245 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1246 params.status = ECORE_CONN_REFUSED; 1247 break; 1248 case IWARP_CONN_ERROR_MPA_RST: 1249 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", 1250 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid, 1251 ep->tcp_cid); 1252 params.status = ECORE_CONN_RESET; 1253 break; 1254 case IWARP_CONN_ERROR_MPA_FIN: 1255 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA received FIN\n", 1256 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1257 params.status = ECORE_CONN_REFUSED; 1258 break; 1259 case IWARP_CONN_ERROR_MPA_INSUF_IRD: 1260 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA insufficient ird\n", 1261 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1262 params.status = ECORE_CONN_REFUSED; 1263 break; 1264 case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: 1265 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA RTR MISMATCH\n", 1266 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1267 params.status = ECORE_CONN_REFUSED; 1268 break; 1269 case IWARP_CONN_ERROR_MPA_INVALID_PACKET: 1270 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Invalid Packet\n", 1271 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1272 params.status = ECORE_CONN_REFUSED; 1273 break; 1274 case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: 1275 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Local Error\n", 1276 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1277 params.status = ECORE_CONN_REFUSED; 1278 break; 1279 case IWARP_CONN_ERROR_MPA_TERMINATE: 1280 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA TERMINATE\n", 1281 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid); 1282 params.status = ECORE_CONN_REFUSED; 1283 break; 1284 default: 1285 params.status = ECORE_CONN_RESET; 1286 break; 1287 } 1288 1289 if (ep->event_cb) 1290 ep->event_cb(ep->cb_context, ¶ms); 1291 1292 /* on passive side, if there is no associated QP (REJECT) we need to 1293 * return the ep to the pool, otherwise we wait for QP to release it. 1294 * Since we add an element in accept instead of this one. in anycase 1295 * we need to remove it from the ep_list (active connections)... 1296 */ 1297 if (fw_return_code != RDMA_RETURN_OK) { 1298 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID; 1299 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && 1300 (ep->qp == OSAL_NULL)) { /* Rejected */ 1301 ecore_iwarp_return_ep(p_hwfn, ep); 1302 } else { 1303 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1304 OSAL_LIST_REMOVE_ENTRY( 1305 &ep->list_entry, 1306 &p_hwfn->p_rdma_info->iwarp.ep_list); 1307 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1308 } 1309 } 1310 } 1311 1312 static void 1313 ecore_iwarp_mpa_v2_set_private(struct ecore_hwfn *p_hwfn, 1314 struct ecore_iwarp_ep *ep, 1315 u8 *mpa_data_size) 1316 { 1317 struct mpa_v2_hdr *mpa_v2_params; 1318 u16 mpa_ird, mpa_ord; 1319 1320 *mpa_data_size = 0; 1321 if (MPA_REV2(ep->mpa_rev)) { 1322 mpa_v2_params = 1323 (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; 1324 *mpa_data_size = sizeof(*mpa_v2_params); 1325 1326 mpa_ird = (u16)ep->cm_info.ird; 1327 mpa_ord = (u16)ep->cm_info.ord; 1328 1329 if (ep->rtr_type != MPA_RTR_TYPE_NONE) { 1330 mpa_ird |= MPA_V2_PEER2PEER_MODEL; 1331 1332 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) 1333 mpa_ird |= MPA_V2_SEND_RTR; 1334 1335 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) 1336 mpa_ord |= MPA_V2_WRITE_RTR; 1337 1338 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) 1339 mpa_ord |= MPA_V2_READ_RTR; 1340 } 1341 1342 mpa_v2_params->ird = htons(mpa_ird); 1343 mpa_v2_params->ord = htons(mpa_ord); 1344 1345 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1346 "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", 1347 mpa_v2_params->ird, 1348 mpa_v2_params->ord, 1349 *((u32 *)mpa_v2_params), 1350 mpa_ord & MPA_V2_IRD_ORD_MASK, 1351 mpa_ird & MPA_V2_IRD_ORD_MASK, 1352 !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), 1353 !!(mpa_ird & MPA_V2_SEND_RTR), 1354 !!(mpa_ord & MPA_V2_WRITE_RTR), 1355 !!(mpa_ord & MPA_V2_READ_RTR)); 1356 } 1357 } 1358 1359 enum _ecore_status_t 1360 ecore_iwarp_connect(void *rdma_cxt, 1361 struct ecore_iwarp_connect_in *iparams, 1362 struct ecore_iwarp_connect_out *oparams) 1363 { 1364 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 1365 struct ecore_iwarp_info *iwarp_info; 1366 struct ecore_iwarp_ep *ep; 1367 enum _ecore_status_t rc; 1368 u8 mpa_data_size = 0; 1369 u8 ts_hdr_size = 0; 1370 u32 cid; 1371 1372 if ((iparams->cm_info.ord > ECORE_IWARP_ORD_DEFAULT) || 1373 (iparams->cm_info.ird > ECORE_IWARP_IRD_DEFAULT)) { 1374 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1375 "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", 1376 iparams->qp->icid, iparams->cm_info.ord, 1377 iparams->cm_info.ird); 1378 1379 return ECORE_INVAL; 1380 } 1381 1382 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1383 1384 /* Allocate ep object */ 1385 rc = ecore_iwarp_alloc_cid(p_hwfn, &cid); 1386 if (rc != ECORE_SUCCESS) 1387 return rc; 1388 1389 if (iparams->qp->ep == OSAL_NULL) { 1390 rc = ecore_iwarp_create_ep(p_hwfn, &ep); 1391 if (rc != ECORE_SUCCESS) 1392 return rc; 1393 } else { 1394 ep = iparams->qp->ep; 1395 DP_ERR(p_hwfn, "Note re-use of QP for different connect\n"); 1396 ep->state = ECORE_IWARP_EP_INIT; 1397 } 1398 1399 ep->tcp_cid = cid; 1400 1401 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1402 OSAL_LIST_PUSH_TAIL(&ep->list_entry, 1403 &p_hwfn->p_rdma_info->iwarp.ep_list); 1404 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1405 1406 ep->qp = iparams->qp; 1407 ep->qp->ep = ep; 1408 OSAL_MEMCPY(ep->remote_mac_addr, 1409 iparams->remote_mac_addr, 1410 ETH_ALEN); 1411 OSAL_MEMCPY(ep->local_mac_addr, 1412 iparams->local_mac_addr, 1413 ETH_ALEN); 1414 OSAL_MEMCPY(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); 1415 1416 ep->cm_info.ord = iparams->cm_info.ord; 1417 ep->cm_info.ird = iparams->cm_info.ird; 1418 1419 ep->rtr_type = iwarp_info->rtr_type; 1420 if (iwarp_info->peer2peer == 0) 1421 ep->rtr_type = MPA_RTR_TYPE_NONE; 1422 1423 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && 1424 (ep->cm_info.ord == 0)) 1425 ep->cm_info.ord = 1; 1426 1427 ep->mpa_rev = iwarp_info->mpa_rev; 1428 1429 ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1430 1431 ep->cm_info.private_data = (u8 *)ep->ep_buffer_virt->out_pdata; 1432 ep->cm_info.private_data_len = 1433 iparams->cm_info.private_data_len + mpa_data_size; 1434 1435 OSAL_MEMCPY((u8 *)(u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1436 iparams->cm_info.private_data, 1437 iparams->cm_info.private_data_len); 1438 1439 if (p_hwfn->p_rdma_info->iwarp.tcp_flags & ECORE_IWARP_TS_EN) 1440 ts_hdr_size = TIMESTAMP_HEADER_SIZE; 1441 1442 ep->mss = iparams->mss - ts_hdr_size; 1443 ep->mss = OSAL_MIN_T(u16, ECORE_IWARP_MAX_FW_MSS, ep->mss); 1444 1445 ep->event_cb = iparams->event_cb; 1446 ep->cb_context = iparams->cb_context; 1447 ep->connect_mode = TCP_CONNECT_ACTIVE; 1448 1449 oparams->ep_context = ep; 1450 1451 rc = ecore_iwarp_tcp_offload(p_hwfn, ep); 1452 1453 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n", 1454 iparams->qp->icid, ep->tcp_cid, rc); 1455 1456 if (rc != ECORE_SUCCESS) 1457 ecore_iwarp_destroy_ep(p_hwfn, ep, true); 1458 1459 return rc; 1460 } 1461 1462 static struct ecore_iwarp_ep * 1463 ecore_iwarp_get_free_ep(struct ecore_hwfn *p_hwfn) 1464 { 1465 struct ecore_iwarp_ep *ep = OSAL_NULL; 1466 enum _ecore_status_t rc; 1467 1468 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1469 1470 if (OSAL_LIST_IS_EMPTY(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { 1471 DP_ERR(p_hwfn, "Ep list is empty\n"); 1472 goto out; 1473 } 1474 1475 ep = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_rdma_info->iwarp.ep_free_list, 1476 struct ecore_iwarp_ep, 1477 list_entry); 1478 1479 /* in some cases we could have failed allocating a tcp cid when added 1480 * from accept / failure... retry now..this is not the common case. 1481 */ 1482 if (ep->tcp_cid == ECORE_IWARP_INVALID_TCP_CID) { 1483 rc = ecore_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); 1484 /* if we fail we could look for another entry with a valid 1485 * tcp_cid, but since we don't expect to reach this anyway 1486 * it's not worth the handling 1487 */ 1488 if (rc) { 1489 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID; 1490 ep = OSAL_NULL; 1491 goto out; 1492 } 1493 } 1494 1495 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, 1496 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 1497 1498 out: 1499 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1500 return ep; 1501 } 1502 1503 /* takes into account timer scan ~20 ms and interrupt/dpc overhead */ 1504 #define ECORE_IWARP_MAX_CID_CLEAN_TIME 100 1505 /* Technically we shouldn't reach this count with 100 ms iteration sleep */ 1506 #define ECORE_IWARP_MAX_NO_PROGRESS_CNT 5 1507 1508 /* This function waits for all the bits of a bmap to be cleared, as long as 1509 * there is progress ( i.e. the number of bits left to be cleared decreases ) 1510 * the function continues. 1511 */ 1512 static enum _ecore_status_t 1513 ecore_iwarp_wait_cid_map_cleared(struct ecore_hwfn *p_hwfn, 1514 struct ecore_bmap *bmap) 1515 { 1516 int prev_weight = 0; 1517 int wait_count = 0; 1518 int weight = 0; 1519 1520 weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count); 1521 prev_weight = weight; 1522 1523 while (weight) { 1524 OSAL_MSLEEP(ECORE_IWARP_MAX_CID_CLEAN_TIME); 1525 1526 weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count); 1527 1528 if (prev_weight == weight) { 1529 wait_count++; 1530 } else { 1531 prev_weight = weight; 1532 wait_count = 0; 1533 } 1534 1535 if (wait_count > ECORE_IWARP_MAX_NO_PROGRESS_CNT) { 1536 DP_NOTICE(p_hwfn, false, 1537 "%s bitmap wait timed out (%d cids pending)\n", 1538 bmap->name, weight); 1539 return ECORE_TIMEOUT; 1540 } 1541 } 1542 return ECORE_SUCCESS; 1543 } 1544 1545 static enum _ecore_status_t 1546 ecore_iwarp_wait_for_all_cids(struct ecore_hwfn *p_hwfn) 1547 { 1548 enum _ecore_status_t rc; 1549 int i; 1550 1551 rc = ecore_iwarp_wait_cid_map_cleared( 1552 p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map); 1553 if (rc) 1554 return rc; 1555 1556 /* Now free the tcp cids from the main cid map */ 1557 for (i = 0; i < ECORE_IWARP_PREALLOC_CNT; i++) { 1558 ecore_bmap_release_id(p_hwfn, 1559 &p_hwfn->p_rdma_info->cid_map, 1560 i); 1561 } 1562 1563 /* Now wait for all cids to be completed */ 1564 rc = ecore_iwarp_wait_cid_map_cleared( 1565 p_hwfn, &p_hwfn->p_rdma_info->cid_map); 1566 1567 return rc; 1568 } 1569 1570 static void 1571 ecore_iwarp_free_prealloc_ep(struct ecore_hwfn *p_hwfn) 1572 { 1573 struct ecore_iwarp_ep *ep; 1574 u32 cid; 1575 1576 while (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { 1577 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1578 1579 ep = OSAL_LIST_FIRST_ENTRY( 1580 &p_hwfn->p_rdma_info->iwarp.ep_free_list, 1581 struct ecore_iwarp_ep, list_entry); 1582 1583 if (ep == OSAL_NULL) { 1584 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1585 break; 1586 } 1587 1588 #ifdef _NTDDK_ 1589 #pragma warning(suppress : 6011) 1590 #endif 1591 OSAL_LIST_REMOVE_ENTRY( 1592 &ep->list_entry, 1593 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 1594 1595 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1596 1597 if (ep->tcp_cid != ECORE_IWARP_INVALID_TCP_CID) { 1598 cid = ep->tcp_cid - ecore_cxt_get_proto_cid_start( 1599 p_hwfn, p_hwfn->p_rdma_info->proto); 1600 1601 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock); 1602 1603 ecore_bmap_release_id(p_hwfn, 1604 &p_hwfn->p_rdma_info->tcp_cid_map, 1605 cid); 1606 1607 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock); 1608 } 1609 1610 ecore_iwarp_destroy_ep(p_hwfn, ep, false); 1611 } 1612 } 1613 1614 static enum _ecore_status_t 1615 ecore_iwarp_prealloc_ep(struct ecore_hwfn *p_hwfn, bool init) 1616 { 1617 struct ecore_iwarp_ep *ep; 1618 int rc = ECORE_SUCCESS; 1619 u32 cid; 1620 int count; 1621 int i; 1622 1623 if (init) 1624 count = ECORE_IWARP_PREALLOC_CNT; 1625 else 1626 count = 1; 1627 1628 for (i = 0; i < count; i++) { 1629 rc = ecore_iwarp_create_ep(p_hwfn, &ep); 1630 if (rc != ECORE_SUCCESS) 1631 return rc; 1632 1633 /* During initialization we allocate from the main pool, 1634 * afterwards we allocate only from the tcp_cid. 1635 */ 1636 if (init) { 1637 rc = ecore_iwarp_alloc_cid(p_hwfn, &cid); 1638 if (rc != ECORE_SUCCESS) 1639 goto err; 1640 ecore_iwarp_set_tcp_cid(p_hwfn, cid); 1641 } else { 1642 /* We don't care about the return code, it's ok if 1643 * tcp_cid remains invalid...in this case we'll 1644 * defer allocation 1645 */ 1646 ecore_iwarp_alloc_tcp_cid(p_hwfn, &cid); 1647 } 1648 1649 ep->tcp_cid = cid; 1650 1651 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1652 OSAL_LIST_PUSH_TAIL(&ep->list_entry, 1653 &p_hwfn->p_rdma_info->iwarp.ep_free_list); 1654 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1655 } 1656 1657 return rc; 1658 1659 err: 1660 ecore_iwarp_destroy_ep(p_hwfn, ep, false); 1661 1662 return rc; 1663 } 1664 1665 enum _ecore_status_t 1666 ecore_iwarp_alloc(struct ecore_hwfn *p_hwfn) 1667 { 1668 enum _ecore_status_t rc; 1669 1670 #ifdef CONFIG_ECORE_LOCK_ALLOC 1671 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->p_rdma_info->iwarp.iw_lock); 1672 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->p_rdma_info->iwarp.qp_lock); 1673 #endif 1674 OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1675 OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.qp_lock); 1676 1677 /* Allocate bitmap for tcp cid. These are used by passive side 1678 * to ensure it can allocate a tcp cid during dpc that was 1679 * pre-acquired and doesn't require dynamic allocation of ilt 1680 */ 1681 rc = ecore_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1682 ECORE_IWARP_PREALLOC_CNT, 1683 "TCP_CID"); 1684 if (rc != ECORE_SUCCESS) { 1685 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1686 "Failed to allocate tcp cid, rc = %d\n", 1687 rc); 1688 return rc; 1689 } 1690 1691 OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.ep_free_list); 1692 //DAVIDS OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.iw_lock); 1693 rc = ecore_iwarp_prealloc_ep(p_hwfn, true); 1694 if (rc != ECORE_SUCCESS) { 1695 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1696 "ecore_iwarp_prealloc_ep failed, rc = %d\n", 1697 rc); 1698 return rc; 1699 } 1700 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1701 "ecore_iwarp_prealloc_ep success, rc = %d\n", 1702 rc); 1703 1704 return ecore_ooo_alloc(p_hwfn); 1705 } 1706 1707 void 1708 ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn) 1709 { 1710 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 1711 1712 #ifdef CONFIG_ECORE_LOCK_ALLOC 1713 OSAL_SPIN_LOCK_DEALLOC(iwarp_info->iw_lock); 1714 OSAL_SPIN_LOCK_DEALLOC(iwarp_info->qp_lock); 1715 #endif 1716 ecore_ooo_free(p_hwfn); 1717 if (iwarp_info->partial_fpdus) 1718 OSAL_FREE(p_hwfn->p_dev, iwarp_info->partial_fpdus); 1719 if (iwarp_info->mpa_bufs) 1720 OSAL_FREE(p_hwfn->p_dev, iwarp_info->mpa_bufs); 1721 if (iwarp_info->mpa_intermediate_buf) 1722 OSAL_FREE(p_hwfn->p_dev, iwarp_info->mpa_intermediate_buf); 1723 1724 ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); 1725 } 1726 1727 1728 enum _ecore_status_t 1729 ecore_iwarp_accept(void *rdma_cxt, 1730 struct ecore_iwarp_accept_in *iparams) 1731 { 1732 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 1733 struct ecore_iwarp_ep *ep; 1734 u8 mpa_data_size = 0; 1735 enum _ecore_status_t rc; 1736 1737 ep = (struct ecore_iwarp_ep *)iparams->ep_context; 1738 if (!ep) { 1739 DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); 1740 return ECORE_INVAL; 1741 } 1742 1743 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", 1744 iparams->qp->icid, ep->tcp_cid); 1745 1746 if ((iparams->ord > ECORE_IWARP_ORD_DEFAULT) || 1747 (iparams->ird > ECORE_IWARP_IRD_DEFAULT)) { 1748 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1749 "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", 1750 iparams->qp->icid, ep->tcp_cid, 1751 iparams->ord, iparams->ord); 1752 return ECORE_INVAL; 1753 } 1754 1755 /* We could reach qp->ep != OSAL NULL if we do accept on the same qp */ 1756 if (iparams->qp->ep == OSAL_NULL) { 1757 /* We need to add a replacement for the ep to the free list */ 1758 ecore_iwarp_prealloc_ep(p_hwfn, false); 1759 } else { 1760 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 1761 "Note re-use of QP for different connect\n"); 1762 /* Return the old ep to the free_pool */ 1763 ecore_iwarp_return_ep(p_hwfn, iparams->qp->ep); 1764 } 1765 1766 ecore_iwarp_move_to_ep_list(p_hwfn, 1767 &p_hwfn->p_rdma_info->iwarp.ep_list, 1768 ep); 1769 ep->listener = OSAL_NULL; 1770 ep->cb_context = iparams->cb_context; 1771 ep->qp = iparams->qp; 1772 ep->qp->ep = ep; 1773 1774 if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { 1775 /* Negotiate ord/ird: if upperlayer requested ord larger than 1776 * ird advertised by remote, we need to decrease our ord 1777 * to match remote ord 1778 */ 1779 if (iparams->ord > ep->cm_info.ird) { 1780 iparams->ord = ep->cm_info.ird; 1781 } 1782 1783 /* For chelsio compatability, if rtr_zero read is requested 1784 * we can't set ird to zero 1785 */ 1786 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && 1787 (iparams->ird == 0)) 1788 iparams->ird = 1; 1789 } 1790 1791 /* Update cm_info ord/ird to be negotiated values */ 1792 ep->cm_info.ord = iparams->ord; 1793 ep->cm_info.ird = iparams->ird; 1794 1795 ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1796 1797 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1798 ep->cm_info.private_data_len = 1799 iparams->private_data_len + mpa_data_size; 1800 1801 OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1802 iparams->private_data, 1803 iparams->private_data_len); 1804 1805 if (ep->state == ECORE_IWARP_EP_CLOSED) { 1806 DP_NOTICE(p_hwfn, false, 1807 "(0x%x) Accept called on EP in CLOSED state\n", 1808 ep->tcp_cid); 1809 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID; 1810 ecore_iwarp_return_ep(p_hwfn, ep); 1811 return ECORE_CONN_RESET; 1812 } 1813 1814 rc = ecore_iwarp_mpa_offload(p_hwfn, ep); 1815 if (rc) { 1816 ecore_iwarp_modify_qp(p_hwfn, 1817 iparams->qp, 1818 ECORE_IWARP_QP_STATE_ERROR, 1819 1); 1820 } 1821 1822 return rc; 1823 } 1824 1825 enum _ecore_status_t 1826 ecore_iwarp_reject(void *rdma_cxt, 1827 struct ecore_iwarp_reject_in *iparams) 1828 { 1829 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 1830 struct ecore_iwarp_ep *ep; 1831 u8 mpa_data_size = 0; 1832 enum _ecore_status_t rc; 1833 1834 ep = (struct ecore_iwarp_ep *)iparams->ep_context; 1835 if (!ep) { 1836 DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n"); 1837 return ECORE_INVAL; 1838 } 1839 1840 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); 1841 1842 ep->cb_context = iparams->cb_context; 1843 ep->qp = OSAL_NULL; 1844 1845 ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); 1846 1847 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; 1848 ep->cm_info.private_data_len = 1849 iparams->private_data_len + mpa_data_size; 1850 1851 OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, 1852 iparams->private_data, 1853 iparams->private_data_len); 1854 1855 if (ep->state == ECORE_IWARP_EP_CLOSED) { 1856 DP_NOTICE(p_hwfn, false, 1857 "(0x%x) Reject called on EP in CLOSED state\n", 1858 ep->tcp_cid); 1859 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID; 1860 ecore_iwarp_return_ep(p_hwfn, ep); 1861 return ECORE_CONN_RESET; 1862 } 1863 1864 rc = ecore_iwarp_mpa_offload(p_hwfn, ep); 1865 return rc; 1866 } 1867 1868 static void 1869 ecore_iwarp_print_cm_info(struct ecore_hwfn *p_hwfn, 1870 struct ecore_iwarp_cm_info *cm_info) 1871 { 1872 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version = %d\n", 1873 cm_info->ip_version); 1874 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip %x.%x.%x.%x\n", 1875 cm_info->remote_ip[0], 1876 cm_info->remote_ip[1], 1877 cm_info->remote_ip[2], 1878 cm_info->remote_ip[3]); 1879 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip %x.%x.%x.%x\n", 1880 cm_info->local_ip[0], 1881 cm_info->local_ip[1], 1882 cm_info->local_ip[2], 1883 cm_info->local_ip[3]); 1884 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port = %x\n", 1885 cm_info->remote_port); 1886 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port = %x\n", 1887 cm_info->local_port); 1888 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan = %x\n", 1889 cm_info->vlan); 1890 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "private_data_len = %x\n", 1891 cm_info->private_data_len); 1892 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ord = %d\n", 1893 cm_info->ord); 1894 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ird = %d\n", 1895 cm_info->ird); 1896 } 1897 1898 static int 1899 ecore_iwarp_ll2_post_rx(struct ecore_hwfn *p_hwfn, 1900 struct ecore_iwarp_ll2_buff *buf, 1901 u8 handle) 1902 { 1903 enum _ecore_status_t rc; 1904 1905 rc = ecore_ll2_post_rx_buffer( 1906 p_hwfn, 1907 handle, 1908 buf->data_phys_addr, 1909 (u16)buf->buff_size, 1910 buf, 1); 1911 1912 if (rc) { 1913 DP_NOTICE(p_hwfn, false, 1914 "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n", 1915 rc, handle); 1916 OSAL_DMA_FREE_COHERENT( 1917 p_hwfn->p_dev, 1918 buf->data, 1919 buf->data_phys_addr, 1920 buf->buff_size); 1921 OSAL_FREE(p_hwfn->p_dev, buf); 1922 } 1923 1924 return rc; 1925 } 1926 1927 static bool 1928 ecore_iwarp_ep_exists(struct ecore_hwfn *p_hwfn, 1929 struct ecore_iwarp_listener *listener, 1930 struct ecore_iwarp_cm_info *cm_info) 1931 { 1932 struct ecore_iwarp_ep *ep = OSAL_NULL; 1933 bool found = false; 1934 1935 OSAL_SPIN_LOCK(&listener->lock); 1936 OSAL_LIST_FOR_EACH_ENTRY(ep, &listener->ep_list, 1937 list_entry, struct ecore_iwarp_ep) { 1938 if ((ep->cm_info.local_port == cm_info->local_port) && 1939 (ep->cm_info.remote_port == cm_info->remote_port) && 1940 (ep->cm_info.vlan == cm_info->vlan) && 1941 !OSAL_MEMCMP(&(ep->cm_info.local_ip), cm_info->local_ip, 1942 sizeof(cm_info->local_ip)) && 1943 !OSAL_MEMCMP(&(ep->cm_info.remote_ip), cm_info->remote_ip, 1944 sizeof(cm_info->remote_ip))) { 1945 found = true; 1946 break; 1947 } 1948 } 1949 1950 OSAL_SPIN_UNLOCK(&listener->lock); 1951 1952 if (found) { 1953 DP_NOTICE(p_hwfn, false, "SYN received on active connection - dropping\n"); 1954 ecore_iwarp_print_cm_info(p_hwfn, cm_info); 1955 1956 return true; 1957 } 1958 1959 return false; 1960 } 1961 1962 static struct ecore_iwarp_listener * 1963 ecore_iwarp_get_listener(struct ecore_hwfn *p_hwfn, 1964 struct ecore_iwarp_cm_info *cm_info) 1965 { 1966 struct ecore_iwarp_listener *listener = OSAL_NULL; 1967 static const u32 ip_zero[4] = {0, 0, 0, 0}; 1968 bool found = false; 1969 1970 ecore_iwarp_print_cm_info(p_hwfn, cm_info); 1971 1972 OSAL_LIST_FOR_EACH_ENTRY(listener, 1973 &p_hwfn->p_rdma_info->iwarp.listen_list, 1974 list_entry, struct ecore_iwarp_listener) { 1975 1976 if (listener->port == cm_info->local_port) { 1977 /* Any IP (i.e. 0.0.0.0 ) will be treated as any vlan */ 1978 if (!OSAL_MEMCMP(listener->ip_addr, 1979 ip_zero, 1980 sizeof(ip_zero))) { 1981 found = true; 1982 break; 1983 } 1984 1985 /* If not any IP -> check vlan as well */ 1986 if (!OSAL_MEMCMP(listener->ip_addr, 1987 cm_info->local_ip, 1988 sizeof(cm_info->local_ip)) && 1989 1990 (listener->vlan == cm_info->vlan)) { 1991 found = true; 1992 break; 1993 } 1994 } 1995 } 1996 1997 if (found && listener->state == ECORE_IWARP_LISTENER_STATE_ACTIVE) { 1998 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener found = %p\n", 1999 listener); 2000 return listener; 2001 } 2002 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener not found\n"); 2003 return OSAL_NULL; 2004 } 2005 2006 static enum _ecore_status_t 2007 ecore_iwarp_parse_rx_pkt(struct ecore_hwfn *p_hwfn, 2008 struct ecore_iwarp_cm_info *cm_info, 2009 void *buf, 2010 u8 *remote_mac_addr, 2011 u8 *local_mac_addr, 2012 int *payload_len, 2013 int *tcp_start_offset) 2014 { 2015 struct ecore_vlan_ethhdr *vethh; 2016 struct ecore_ethhdr *ethh; 2017 struct ecore_iphdr *iph; 2018 struct ecore_ipv6hdr *ip6h; 2019 struct ecore_tcphdr *tcph; 2020 bool vlan_valid = false; 2021 int eth_hlen, ip_hlen; 2022 u16 eth_type; 2023 int i; 2024 2025 ethh = (struct ecore_ethhdr *)buf; 2026 eth_type = ntohs(ethh->h_proto); 2027 if (eth_type == ETH_P_8021Q) { 2028 vlan_valid = true; 2029 vethh = (struct ecore_vlan_ethhdr *)ethh; 2030 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; 2031 eth_type = ntohs(vethh->h_vlan_encapsulated_proto); 2032 } 2033 2034 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); 2035 2036 OSAL_MEMCPY(remote_mac_addr, 2037 ethh->h_source, 2038 ETH_ALEN); 2039 2040 OSAL_MEMCPY(local_mac_addr, 2041 ethh->h_dest, 2042 ETH_ALEN); 2043 2044 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_type =%d Source mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n", 2045 eth_type, ethh->h_source[0], ethh->h_source[1], 2046 ethh->h_source[2], ethh->h_source[3], 2047 ethh->h_source[4], ethh->h_source[5]); 2048 2049 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_hlen=%d destination mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n", 2050 eth_hlen, ethh->h_dest[0], ethh->h_dest[1], 2051 ethh->h_dest[2], ethh->h_dest[3], 2052 ethh->h_dest[4], ethh->h_dest[5]); 2053 2054 iph = (struct ecore_iphdr *)((u8 *)(ethh) + eth_hlen); 2055 2056 if (eth_type == ETH_P_IP) { 2057 if (iph->protocol != IPPROTO_TCP) { 2058 DP_NOTICE(p_hwfn, false, 2059 "Unexpected ip protocol on ll2 %x\n", 2060 iph->protocol); 2061 return ECORE_INVAL; 2062 } 2063 2064 cm_info->local_ip[0] = ntohl(iph->daddr); 2065 cm_info->remote_ip[0] = ntohl(iph->saddr); 2066 cm_info->ip_version = (enum ecore_tcp_ip_version)TCP_IPV4; 2067 2068 ip_hlen = (iph->ihl)*sizeof(u32); 2069 *payload_len = ntohs(iph->tot_len) - ip_hlen; 2070 2071 } else if (eth_type == ETH_P_IPV6) { 2072 ip6h = (struct ecore_ipv6hdr *)iph; 2073 2074 if (ip6h->nexthdr != IPPROTO_TCP) { 2075 DP_NOTICE(p_hwfn, false, 2076 "Unexpected ip protocol on ll2 %x\n", 2077 iph->protocol); 2078 return ECORE_INVAL; 2079 } 2080 2081 for (i = 0; i < 4; i++) { 2082 cm_info->local_ip[i] = 2083 ntohl(ip6h->daddr.in6_u.u6_addr32[i]); 2084 cm_info->remote_ip[i] = 2085 ntohl(ip6h->saddr.in6_u.u6_addr32[i]); 2086 } 2087 cm_info->ip_version = (enum ecore_tcp_ip_version)TCP_IPV6; 2088 2089 ip_hlen = sizeof(*ip6h); 2090 *payload_len = ntohs(ip6h->payload_len); 2091 } else { 2092 DP_NOTICE(p_hwfn, false, 2093 "Unexpected ethertype on ll2 %x\n", eth_type); 2094 return ECORE_INVAL; 2095 } 2096 2097 tcph = (struct ecore_tcphdr *)((u8 *)iph + ip_hlen); 2098 2099 if (!tcph->syn) { 2100 DP_NOTICE(p_hwfn, false, 2101 "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", 2102 iph->ihl, tcph->source, tcph->dest); 2103 return ECORE_INVAL; 2104 } 2105 2106 cm_info->local_port = ntohs(tcph->dest); 2107 cm_info->remote_port = ntohs(tcph->source); 2108 2109 ecore_iwarp_print_cm_info(p_hwfn, cm_info); 2110 2111 *tcp_start_offset = eth_hlen + ip_hlen; 2112 2113 return ECORE_SUCCESS; 2114 } 2115 2116 static struct ecore_iwarp_fpdu * 2117 ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid) 2118 { 2119 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2120 struct ecore_iwarp_fpdu *partial_fpdu; 2121 u32 idx = cid - ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP); 2122 2123 if (idx >= iwarp_info->max_num_partial_fpdus) { 2124 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid, 2125 iwarp_info->max_num_partial_fpdus); 2126 return OSAL_NULL; 2127 } 2128 2129 partial_fpdu = &iwarp_info->partial_fpdus[idx]; 2130 2131 return partial_fpdu; 2132 } 2133 2134 enum ecore_iwarp_mpa_pkt_type { 2135 ECORE_IWARP_MPA_PKT_PACKED, 2136 ECORE_IWARP_MPA_PKT_PARTIAL, 2137 ECORE_IWARP_MPA_PKT_UNALIGNED 2138 }; 2139 2140 #define ECORE_IWARP_INVALID_FPDU_LENGTH 0xffff 2141 #define ECORE_IWARP_MPA_FPDU_LENGTH_SIZE (2) 2142 #define ECORE_IWARP_MPA_CRC32_DIGEST_SIZE (4) 2143 2144 /* Pad to multiple of 4 */ 2145 #define ECORE_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) (((data_len) + 3) & ~3) 2146 2147 #define ECORE_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ 2148 (ECORE_IWARP_PDU_DATA_LEN_WITH_PAD(_mpa_len + \ 2149 ECORE_IWARP_MPA_FPDU_LENGTH_SIZE) + \ 2150 ECORE_IWARP_MPA_CRC32_DIGEST_SIZE) 2151 2152 /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ 2153 #define ECORE_IWARP_MAX_BDS_PER_FPDU 3 2154 2155 char *pkt_type_str[] = { 2156 "ECORE_IWARP_MPA_PKT_PACKED", 2157 "ECORE_IWARP_MPA_PKT_PARTIAL", 2158 "ECORE_IWARP_MPA_PKT_UNALIGNED" 2159 }; 2160 2161 static enum _ecore_status_t 2162 ecore_iwarp_recycle_pkt(struct ecore_hwfn *p_hwfn, 2163 struct ecore_iwarp_fpdu *fpdu, 2164 struct ecore_iwarp_ll2_buff *buf); 2165 2166 static enum ecore_iwarp_mpa_pkt_type 2167 ecore_iwarp_mpa_classify(struct ecore_hwfn *p_hwfn, 2168 struct ecore_iwarp_fpdu *fpdu, 2169 u16 tcp_payload_len, 2170 u8 *mpa_data) 2171 2172 { 2173 enum ecore_iwarp_mpa_pkt_type pkt_type; 2174 u16 mpa_len; 2175 2176 if (fpdu->incomplete_bytes) { 2177 pkt_type = ECORE_IWARP_MPA_PKT_UNALIGNED; 2178 goto out; 2179 } 2180 2181 /* special case of one byte remaining... */ 2182 if (tcp_payload_len == 1) { 2183 /* lower byte will be read next packet */ 2184 fpdu->fpdu_length = *mpa_data << 8; 2185 pkt_type = ECORE_IWARP_MPA_PKT_PARTIAL; 2186 goto out; 2187 } 2188 2189 mpa_len = ntohs(*((u16 *)(mpa_data))); 2190 fpdu->fpdu_length = ECORE_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 2191 2192 if (fpdu->fpdu_length <= tcp_payload_len) 2193 pkt_type = ECORE_IWARP_MPA_PKT_PACKED; 2194 else 2195 pkt_type = ECORE_IWARP_MPA_PKT_PARTIAL; 2196 2197 out: 2198 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2199 "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n", 2200 pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); 2201 2202 return pkt_type; 2203 } 2204 2205 static void 2206 ecore_iwarp_init_fpdu(struct ecore_iwarp_ll2_buff *buf, 2207 struct ecore_iwarp_fpdu *fpdu, 2208 struct unaligned_opaque_data *pkt_data, 2209 u16 tcp_payload_size, u8 placement_offset) 2210 { 2211 fpdu->mpa_buf = buf; 2212 fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; 2213 fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; 2214 2215 fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; 2216 fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; 2217 2218 if (tcp_payload_size == 1) 2219 fpdu->incomplete_bytes = ECORE_IWARP_INVALID_FPDU_LENGTH; 2220 else if (tcp_payload_size < fpdu->fpdu_length) 2221 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; 2222 else 2223 fpdu->incomplete_bytes = 0; /* complete fpdu */ 2224 2225 fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; 2226 } 2227 2228 static enum _ecore_status_t 2229 ecore_iwarp_copy_fpdu(struct ecore_hwfn *p_hwfn, 2230 struct ecore_iwarp_fpdu *fpdu, 2231 struct unaligned_opaque_data *pkt_data, 2232 struct ecore_iwarp_ll2_buff *buf, 2233 u16 tcp_payload_size) 2234 2235 { 2236 u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; 2237 enum _ecore_status_t rc; 2238 2239 /* need to copy the data from the partial packet stored in fpdu 2240 * to the new buf, for this we also need to move the data currently 2241 * placed on the buf. The assumption is that the buffer is big enough 2242 * since fpdu_length <= mss, we use an intermediate buffer since 2243 * we may need to copy the new data to an overlapping location 2244 */ 2245 if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { 2246 DP_ERR(p_hwfn, 2247 "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", 2248 buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, 2249 fpdu->incomplete_bytes); 2250 return ECORE_INVAL; 2251 } 2252 2253 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2254 "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", 2255 fpdu->mpa_frag_virt, fpdu->mpa_frag_len, 2256 (u8 *)(buf->data) + pkt_data->first_mpa_offset, 2257 tcp_payload_size); 2258 2259 OSAL_MEMCPY(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); 2260 OSAL_MEMCPY(tmp_buf + fpdu->mpa_frag_len, 2261 (u8 *)(buf->data) + pkt_data->first_mpa_offset, 2262 tcp_payload_size); 2263 2264 rc = ecore_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); 2265 if (rc) 2266 return rc; 2267 2268 /* If we managed to post the buffer copy the data to the new buffer 2269 * o/w this will occur in the next round... 2270 */ 2271 OSAL_MEMCPY((u8 *)(buf->data), tmp_buf, 2272 fpdu->mpa_frag_len + tcp_payload_size); 2273 2274 fpdu->mpa_buf = buf; 2275 /* fpdu->pkt_hdr remains as is */ 2276 /* fpdu->mpa_frag is overriden with new buf */ 2277 fpdu->mpa_frag = buf->data_phys_addr; 2278 fpdu->mpa_frag_virt = buf->data; 2279 fpdu->mpa_frag_len += tcp_payload_size; 2280 2281 fpdu->incomplete_bytes -= tcp_payload_size; 2282 2283 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2284 "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", 2285 buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, 2286 fpdu->incomplete_bytes); 2287 2288 return 0; 2289 } 2290 2291 static void 2292 ecore_iwarp_update_fpdu_length(struct ecore_hwfn *p_hwfn, 2293 struct ecore_iwarp_fpdu *fpdu, 2294 u8 *mpa_data) 2295 { 2296 u16 mpa_len; 2297 2298 /* Update incomplete packets if needed */ 2299 if (fpdu->incomplete_bytes == ECORE_IWARP_INVALID_FPDU_LENGTH) { 2300 mpa_len = fpdu->fpdu_length | *mpa_data; 2301 fpdu->fpdu_length = ECORE_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 2302 fpdu->mpa_frag_len = fpdu->fpdu_length; 2303 /* one byte of hdr */ 2304 fpdu->incomplete_bytes = fpdu->fpdu_length - 1; 2305 DP_VERBOSE(p_hwfn, 2306 ECORE_MSG_RDMA, 2307 "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n", 2308 mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes); 2309 } 2310 } 2311 2312 #define ECORE_IWARP_IS_RIGHT_EDGE(_curr_pkt) \ 2313 (GET_FIELD(_curr_pkt->flags, \ 2314 UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE)) 2315 2316 /* This function is used to recycle a buffer using the ll2 drop option. It 2317 * uses the mechanism to ensure that all buffers posted to tx before this one 2318 * were completed. The buffer sent here will be sent as a cookie in the tx 2319 * completion function and can then be reposted to rx chain when done. The flow 2320 * that requires this is the flow where a FPDU splits over more than 3 tcp 2321 * segments. In this case the driver needs to re-post a rx buffer instead of 2322 * the one received, but driver can't simply repost a buffer it copied from 2323 * as there is a case where the buffer was originally a packed FPDU, and is 2324 * partially posted to FW. Driver needs to ensure FW is done with it. 2325 */ 2326 static enum _ecore_status_t 2327 ecore_iwarp_recycle_pkt(struct ecore_hwfn *p_hwfn, 2328 struct ecore_iwarp_fpdu *fpdu, 2329 struct ecore_iwarp_ll2_buff *buf) 2330 { 2331 struct ecore_ll2_tx_pkt_info tx_pkt; 2332 enum _ecore_status_t rc; 2333 u8 ll2_handle; 2334 2335 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt)); 2336 tx_pkt.num_of_bds = 1; 2337 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_DROP; 2338 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 2339 tx_pkt.first_frag = fpdu->pkt_hdr; 2340 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2341 buf->piggy_buf = OSAL_NULL; 2342 tx_pkt.cookie = buf; 2343 2344 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2345 2346 rc = ecore_ll2_prepare_tx_packet(p_hwfn, 2347 ll2_handle, 2348 &tx_pkt, true); 2349 2350 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2351 "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n", 2352 (long unsigned int)tx_pkt.first_frag, 2353 tx_pkt.first_frag_len, buf, rc); 2354 2355 if (rc) 2356 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2357 "Can't drop packet rc=%d\n", rc); 2358 2359 return rc; 2360 } 2361 2362 static enum _ecore_status_t 2363 ecore_iwarp_win_right_edge(struct ecore_hwfn *p_hwfn, 2364 struct ecore_iwarp_fpdu *fpdu) 2365 { 2366 struct ecore_ll2_tx_pkt_info tx_pkt; 2367 enum _ecore_status_t rc; 2368 u8 ll2_handle; 2369 2370 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt)); 2371 tx_pkt.num_of_bds = 1; 2372 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB; 2373 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 2374 2375 tx_pkt.first_frag = fpdu->pkt_hdr; 2376 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2377 tx_pkt.enable_ip_cksum = true; 2378 tx_pkt.enable_l4_cksum = true; 2379 tx_pkt.calc_ip_len = true; 2380 /* vlan overload with enum iwarp_ll2_tx_queues */ 2381 tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; 2382 2383 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2384 2385 rc = ecore_ll2_prepare_tx_packet(p_hwfn, 2386 ll2_handle, 2387 &tx_pkt, true); 2388 2389 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2390 "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n", 2391 tx_pkt.num_of_bds, (long unsigned int)tx_pkt.first_frag, 2392 tx_pkt.first_frag_len, rc); 2393 2394 if (rc) 2395 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2396 "Can't send right edge rc=%d\n", rc); 2397 2398 return rc; 2399 } 2400 2401 static enum _ecore_status_t 2402 ecore_iwarp_send_fpdu(struct ecore_hwfn *p_hwfn, 2403 struct ecore_iwarp_fpdu *fpdu, 2404 struct unaligned_opaque_data *curr_pkt, 2405 struct ecore_iwarp_ll2_buff *buf, 2406 u16 tcp_payload_size, 2407 enum ecore_iwarp_mpa_pkt_type pkt_type) 2408 { 2409 struct ecore_ll2_tx_pkt_info tx_pkt; 2410 enum _ecore_status_t rc; 2411 u8 ll2_handle; 2412 2413 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt)); 2414 2415 tx_pkt.num_of_bds = (pkt_type == ECORE_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; 2416 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB; 2417 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; 2418 2419 /* Send the mpa_buf only with the last fpdu (in case of packed) */ 2420 if ((pkt_type == ECORE_IWARP_MPA_PKT_UNALIGNED) || 2421 (tcp_payload_size <= fpdu->fpdu_length)) 2422 tx_pkt.cookie = fpdu->mpa_buf; 2423 2424 tx_pkt.first_frag = fpdu->pkt_hdr; 2425 tx_pkt.first_frag_len = fpdu->pkt_hdr_size; 2426 tx_pkt.enable_ip_cksum = true; 2427 tx_pkt.enable_l4_cksum = true; 2428 tx_pkt.calc_ip_len = true; 2429 /* vlan overload with enum iwarp_ll2_tx_queues */ 2430 tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; 2431 2432 /* special case of unaligned packet and not packed, need to send 2433 * both buffers as cookie to release. 2434 */ 2435 if (tcp_payload_size == fpdu->incomplete_bytes) { 2436 fpdu->mpa_buf->piggy_buf = buf; 2437 } 2438 2439 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; 2440 2441 rc = ecore_ll2_prepare_tx_packet(p_hwfn, 2442 ll2_handle, 2443 &tx_pkt, true); 2444 if (rc) 2445 goto err; 2446 2447 rc = ecore_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, 2448 fpdu->mpa_frag, 2449 fpdu->mpa_frag_len); 2450 if (rc) 2451 goto err; 2452 2453 if (fpdu->incomplete_bytes) { 2454 rc = ecore_ll2_set_fragment_of_tx_packet( 2455 p_hwfn, ll2_handle, 2456 buf->data_phys_addr + curr_pkt->first_mpa_offset, 2457 fpdu->incomplete_bytes); 2458 2459 if (rc) 2460 goto err; 2461 } 2462 2463 err: 2464 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2465 "MPA_ALIGN: Sent FPDU num_bds=%d [%lx, 0x%x], [0x%lx, 0x%x], [0x%lx, 0x%x] (cookie %p) rc=%d\n", 2466 tx_pkt.num_of_bds, (long unsigned int)tx_pkt.first_frag, 2467 tx_pkt.first_frag_len, (long unsigned int)fpdu->mpa_frag, 2468 fpdu->mpa_frag_len, (long unsigned int)buf->data_phys_addr + 2469 curr_pkt->first_mpa_offset, fpdu->incomplete_bytes, 2470 tx_pkt.cookie, rc); 2471 2472 return rc; 2473 } 2474 2475 static void 2476 ecore_iwarp_mpa_get_data(struct ecore_hwfn *p_hwfn, 2477 struct unaligned_opaque_data *curr_pkt, 2478 u32 opaque_data0, u32 opaque_data1) 2479 { 2480 u64 opaque_data; 2481 2482 opaque_data = HILO_64(opaque_data1, opaque_data0); 2483 *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); 2484 2485 /* fix endianity */ 2486 curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset + 2487 OSAL_LE16_TO_CPU(curr_pkt->first_mpa_offset); 2488 curr_pkt->cid = OSAL_LE32_TO_CPU(curr_pkt->cid); 2489 2490 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2491 "OPAQUE0=0x%x OPAQUE1=0x%x first_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", 2492 opaque_data0, opaque_data1, curr_pkt->first_mpa_offset, 2493 curr_pkt->tcp_payload_offset, curr_pkt->flags, 2494 curr_pkt->cid); 2495 } 2496 2497 static void 2498 ecore_iwarp_mpa_print_tcp_seq(struct ecore_hwfn *p_hwfn, 2499 void *buf) 2500 { 2501 struct ecore_vlan_ethhdr *vethh; 2502 struct ecore_ethhdr *ethh; 2503 struct ecore_iphdr *iph; 2504 struct ecore_ipv6hdr *ip6h; 2505 struct ecore_tcphdr *tcph; 2506 bool vlan_valid = false; 2507 int eth_hlen, ip_hlen; 2508 u16 eth_type; 2509 2510 if ((p_hwfn->dp_level > ECORE_LEVEL_VERBOSE) || 2511 !(p_hwfn->dp_module & ECORE_MSG_RDMA)) 2512 return; 2513 2514 ethh = (struct ecore_ethhdr *)buf; 2515 eth_type = ntohs(ethh->h_proto); 2516 if (eth_type == ETH_P_8021Q) { 2517 vlan_valid = true; 2518 vethh = (struct ecore_vlan_ethhdr *)ethh; 2519 eth_type = ntohs(vethh->h_vlan_encapsulated_proto); 2520 } 2521 2522 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); 2523 2524 iph = (struct ecore_iphdr *)((u8 *)(ethh) + eth_hlen); 2525 2526 if (eth_type == ETH_P_IP) { 2527 ip_hlen = (iph->ihl)*sizeof(u32); 2528 } else if (eth_type == ETH_P_IPV6) { 2529 ip6h = (struct ecore_ipv6hdr *)iph; 2530 ip_hlen = sizeof(*ip6h); 2531 } else { 2532 DP_ERR(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); 2533 return; 2534 } 2535 2536 tcph = (struct ecore_tcphdr *)((u8 *)iph + ip_hlen); 2537 2538 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Processing MPA PKT: tcp_seq=0x%x tcp_ack_seq=0x%x\n", 2539 ntohl(tcph->seq), ntohl(tcph->ack_seq)); 2540 2541 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_type =%d Source mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n", 2542 eth_type, ethh->h_source[0], ethh->h_source[1], 2543 ethh->h_source[2], ethh->h_source[3], 2544 ethh->h_source[4], ethh->h_source[5]); 2545 2546 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_hlen=%d destination mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n", 2547 eth_hlen, ethh->h_dest[0], ethh->h_dest[1], 2548 ethh->h_dest[2], ethh->h_dest[3], 2549 ethh->h_dest[4], ethh->h_dest[5]); 2550 2551 return; 2552 } 2553 2554 /* This function is called when an unaligned or incomplete MPA packet arrives 2555 * driver needs to align the packet, perhaps using previous data and send 2556 * it down to FW once it is aligned. 2557 */ 2558 static enum _ecore_status_t 2559 ecore_iwarp_process_mpa_pkt(struct ecore_hwfn *p_hwfn, 2560 struct ecore_iwarp_ll2_mpa_buf *mpa_buf) 2561 { 2562 struct ecore_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; 2563 enum ecore_iwarp_mpa_pkt_type pkt_type; 2564 struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; 2565 struct ecore_iwarp_fpdu *fpdu; 2566 u8 *mpa_data; 2567 enum _ecore_status_t rc = ECORE_SUCCESS; 2568 2569 ecore_iwarp_mpa_print_tcp_seq( 2570 p_hwfn, (u8 *)(buf->data) + mpa_buf->placement_offset); 2571 2572 fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff); 2573 if (!fpdu) {/* something corrupt with cid, post rx back */ 2574 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", 2575 curr_pkt->cid); 2576 rc = ecore_iwarp_ll2_post_rx( 2577 p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); 2578 2579 if (rc) { /* not much we can do here except log and free */ 2580 DP_ERR(p_hwfn, "Post rx buffer failed\n"); 2581 2582 /* we don't expect any failures from rx, not even 2583 * busy since we allocate #bufs=#descs 2584 */ 2585 rc = ECORE_UNKNOWN_ERROR; 2586 } 2587 return rc; 2588 } 2589 2590 do { 2591 mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset); 2592 2593 pkt_type = ecore_iwarp_mpa_classify(p_hwfn, fpdu, 2594 mpa_buf->tcp_payload_len, 2595 mpa_data); 2596 2597 switch (pkt_type) { 2598 case ECORE_IWARP_MPA_PKT_PARTIAL: 2599 ecore_iwarp_init_fpdu(buf, fpdu, 2600 curr_pkt, 2601 mpa_buf->tcp_payload_len, 2602 mpa_buf->placement_offset); 2603 2604 if (!ECORE_IWARP_IS_RIGHT_EDGE(curr_pkt)) { 2605 mpa_buf->tcp_payload_len = 0; 2606 break; 2607 } 2608 2609 rc = ecore_iwarp_win_right_edge(p_hwfn, fpdu); 2610 2611 if (rc) { 2612 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2613 "Can't send FPDU:reset rc=%d\n", rc); 2614 OSAL_MEM_ZERO(fpdu, sizeof(*fpdu)); 2615 break; 2616 } 2617 2618 mpa_buf->tcp_payload_len = 0; 2619 break; 2620 case ECORE_IWARP_MPA_PKT_PACKED: 2621 if (fpdu->fpdu_length == 8) { 2622 DP_ERR(p_hwfn, "SUSPICIOUS fpdu_length = 0x%x: assuming bug...aborting this packet...\n", 2623 fpdu->fpdu_length); 2624 mpa_buf->tcp_payload_len = 0; 2625 break; 2626 } 2627 2628 ecore_iwarp_init_fpdu(buf, fpdu, 2629 curr_pkt, 2630 mpa_buf->tcp_payload_len, 2631 mpa_buf->placement_offset); 2632 2633 rc = ecore_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, 2634 mpa_buf->tcp_payload_len, 2635 pkt_type); 2636 if (rc) { 2637 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2638 "Can't send FPDU:reset rc=%d\n", rc); 2639 OSAL_MEM_ZERO(fpdu, sizeof(*fpdu)); 2640 break; 2641 } 2642 mpa_buf->tcp_payload_len -= fpdu->fpdu_length; 2643 curr_pkt->first_mpa_offset += fpdu->fpdu_length; 2644 break; 2645 case ECORE_IWARP_MPA_PKT_UNALIGNED: 2646 ecore_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); 2647 if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) { 2648 /* special handling of fpdu split over more 2649 * than 2 segments 2650 */ 2651 if (ECORE_IWARP_IS_RIGHT_EDGE(curr_pkt)) { 2652 rc = ecore_iwarp_win_right_edge(p_hwfn, 2653 fpdu); 2654 /* packet will be re-processed later */ 2655 if (rc) 2656 return rc; 2657 } 2658 2659 rc = ecore_iwarp_copy_fpdu( 2660 p_hwfn, fpdu, curr_pkt, 2661 buf, mpa_buf->tcp_payload_len); 2662 2663 /* packet will be re-processed later */ 2664 if (rc) 2665 return rc; 2666 2667 mpa_buf->tcp_payload_len = 0; 2668 2669 break; 2670 } 2671 2672 rc = ecore_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, 2673 mpa_buf->tcp_payload_len, 2674 pkt_type); 2675 if (rc) { 2676 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2677 "Can't send FPDU:delay rc=%d\n", rc); 2678 /* don't reset fpdu -> we need it for next 2679 * classify 2680 */ 2681 break; 2682 } 2683 mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; 2684 curr_pkt->first_mpa_offset += fpdu->incomplete_bytes; 2685 /* The framed PDU was sent - no more incomplete bytes */ 2686 fpdu->incomplete_bytes = 0; 2687 break; 2688 } 2689 2690 } while (mpa_buf->tcp_payload_len && !rc); 2691 2692 return rc; 2693 } 2694 2695 static void 2696 ecore_iwarp_process_pending_pkts(struct ecore_hwfn *p_hwfn) 2697 { 2698 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2699 struct ecore_iwarp_ll2_mpa_buf *mpa_buf = OSAL_NULL; 2700 enum _ecore_status_t rc; 2701 2702 while (!OSAL_LIST_IS_EMPTY(&iwarp_info->mpa_buf_pending_list)) { 2703 mpa_buf = OSAL_LIST_FIRST_ENTRY( 2704 &iwarp_info->mpa_buf_pending_list, 2705 struct ecore_iwarp_ll2_mpa_buf, 2706 list_entry); 2707 2708 rc = ecore_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); 2709 2710 /* busy means break and continue processing later, don't 2711 * remove the buf from the pending list. 2712 */ 2713 if (rc == ECORE_BUSY) 2714 break; 2715 2716 #ifdef _NTDDK_ 2717 #pragma warning(suppress : 6011) 2718 #pragma warning(suppress : 28182) 2719 #endif 2720 OSAL_LIST_REMOVE_ENTRY( 2721 &mpa_buf->list_entry, 2722 &iwarp_info->mpa_buf_pending_list); 2723 2724 OSAL_LIST_PUSH_TAIL(&mpa_buf->list_entry, 2725 &iwarp_info->mpa_buf_list); 2726 2727 if (rc) { /* different error, don't continue */ 2728 DP_NOTICE(p_hwfn, false, "process pkts failed rc=%d\n", 2729 rc); 2730 break; 2731 } 2732 } 2733 } 2734 2735 static void 2736 ecore_iwarp_ll2_comp_mpa_pkt(void *cxt, 2737 struct ecore_ll2_comp_rx_data *data) 2738 { 2739 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 2740 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 2741 struct ecore_iwarp_ll2_mpa_buf *mpa_buf; 2742 2743 iwarp_info->unalign_rx_comp++; 2744 2745 mpa_buf = OSAL_LIST_FIRST_ENTRY(&iwarp_info->mpa_buf_list, 2746 struct ecore_iwarp_ll2_mpa_buf, 2747 list_entry); 2748 2749 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2750 "LL2 MPA CompRx buf=%p placement_offset=%d, payload_len=0x%x mpa_buf=%p\n", 2751 data->cookie, data->u.placement_offset, 2752 data->length.packet_length, mpa_buf); 2753 2754 if (!mpa_buf) { 2755 DP_ERR(p_hwfn, "no free mpa buf. this is a driver bug.\n"); 2756 return; 2757 } 2758 OSAL_LIST_REMOVE_ENTRY(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list); 2759 2760 ecore_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, 2761 data->opaque_data_0, data->opaque_data_1); 2762 2763 mpa_buf->tcp_payload_len = data->length.packet_length - 2764 mpa_buf->data.first_mpa_offset; 2765 mpa_buf->ll2_buf = (struct ecore_iwarp_ll2_buff *)data->cookie; 2766 mpa_buf->data.first_mpa_offset += data->u.placement_offset; 2767 mpa_buf->placement_offset = data->u.placement_offset; 2768 2769 OSAL_LIST_PUSH_TAIL(&mpa_buf->list_entry, 2770 &iwarp_info->mpa_buf_pending_list); 2771 2772 ecore_iwarp_process_pending_pkts(p_hwfn); 2773 } 2774 2775 static void 2776 ecore_iwarp_ll2_comp_syn_pkt(void *cxt, struct ecore_ll2_comp_rx_data *data) 2777 { 2778 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 2779 struct ecore_iwarp_ll2_buff *buf = 2780 (struct ecore_iwarp_ll2_buff *)data->cookie; 2781 struct ecore_iwarp_listener *listener; 2782 struct ecore_iwarp_cm_info cm_info; 2783 struct ecore_ll2_tx_pkt_info tx_pkt; 2784 u8 remote_mac_addr[ETH_ALEN]; 2785 u8 local_mac_addr[ETH_ALEN]; 2786 struct ecore_iwarp_ep *ep; 2787 enum _ecore_status_t rc; 2788 int tcp_start_offset; 2789 u8 ts_hdr_size = 0; 2790 int payload_len; 2791 u32 hdr_size; 2792 2793 OSAL_MEM_ZERO(&cm_info, sizeof(cm_info)); 2794 2795 /* Check if packet was received with errors... */ 2796 if (data->err_flags != 0) { 2797 DP_NOTICE(p_hwfn, false, "Error received on SYN packet: 0x%x\n", 2798 data->err_flags); 2799 goto err; 2800 } 2801 2802 if (GET_FIELD(data->parse_flags, 2803 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && 2804 GET_FIELD(data->parse_flags, 2805 PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { 2806 DP_NOTICE(p_hwfn, false, "Syn packet received with checksum error\n"); 2807 goto err; 2808 } 2809 2810 rc = ecore_iwarp_parse_rx_pkt( 2811 p_hwfn, &cm_info, (u8 *)(buf->data) + data->u.placement_offset, 2812 remote_mac_addr, local_mac_addr, &payload_len, 2813 &tcp_start_offset); 2814 if (rc) 2815 goto err; 2816 2817 /* Check if there is a listener for this 4-tuple */ 2818 listener = ecore_iwarp_get_listener(p_hwfn, &cm_info); 2819 if (!listener) { 2820 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2821 "SYN received on tuple not listened on parse_flags=%d packet len=%d\n", 2822 data->parse_flags, data->length.packet_length); 2823 2824 OSAL_MEMSET(&tx_pkt, 0, sizeof(tx_pkt)); 2825 tx_pkt.num_of_bds = 1; 2826 tx_pkt.bd_flags = 0; 2827 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; 2828 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB; 2829 tx_pkt.first_frag = buf->data_phys_addr + 2830 data->u.placement_offset; 2831 tx_pkt.first_frag_len = data->length.packet_length; 2832 tx_pkt.cookie = buf; 2833 2834 rc = ecore_ll2_prepare_tx_packet( 2835 p_hwfn, 2836 p_hwfn->p_rdma_info->iwarp.ll2_syn_handle, 2837 &tx_pkt, true); 2838 2839 if (rc) { 2840 DP_NOTICE(p_hwfn, false, 2841 "Can't post SYN back to chip rc=%d\n", rc); 2842 goto err; 2843 } 2844 return; 2845 } 2846 2847 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Received syn on listening port\n"); 2848 2849 /* For debugging purpose... */ 2850 if (listener->drop) 2851 goto err; 2852 2853 /* There may be an open ep on this connection if this is a syn 2854 * retrasnmit... need to make sure there isn't... 2855 */ 2856 if (ecore_iwarp_ep_exists(p_hwfn, listener, &cm_info)) 2857 goto err; 2858 2859 ep = ecore_iwarp_get_free_ep(p_hwfn); 2860 if (ep == OSAL_NULL) 2861 goto err; 2862 2863 OSAL_SPIN_LOCK(&listener->lock); 2864 OSAL_LIST_PUSH_TAIL(&ep->list_entry, &listener->ep_list); 2865 OSAL_SPIN_UNLOCK(&listener->lock); 2866 2867 OSAL_MEMCPY(ep->remote_mac_addr, 2868 remote_mac_addr, 2869 ETH_ALEN); 2870 OSAL_MEMCPY(ep->local_mac_addr, 2871 local_mac_addr, 2872 ETH_ALEN); 2873 2874 OSAL_MEMCPY(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); 2875 2876 if (p_hwfn->p_rdma_info->iwarp.tcp_flags & ECORE_IWARP_TS_EN) 2877 ts_hdr_size = TIMESTAMP_HEADER_SIZE; 2878 2879 hdr_size = ((cm_info.ip_version == ECORE_TCP_IPV4) ? 40 : 60) + 2880 ts_hdr_size; 2881 ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; 2882 ep->mss = OSAL_MIN_T(u16, ECORE_IWARP_MAX_FW_MSS, ep->mss); 2883 2884 ep->listener = listener; 2885 ep->event_cb = listener->event_cb; 2886 ep->cb_context = listener->cb_context; 2887 ep->connect_mode = TCP_CONNECT_PASSIVE; 2888 2889 ep->syn = buf; 2890 ep->syn_ip_payload_length = (u16)payload_len; 2891 ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + 2892 tcp_start_offset; 2893 2894 rc = ecore_iwarp_tcp_offload(p_hwfn, ep); 2895 if (rc != ECORE_SUCCESS) { 2896 ecore_iwarp_return_ep(p_hwfn, ep); 2897 goto err; 2898 } 2899 return; 2900 2901 err: 2902 ecore_iwarp_ll2_post_rx( 2903 p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_syn_handle); 2904 } 2905 2906 static void 2907 ecore_iwarp_ll2_rel_rx_pkt(void *cxt, 2908 u8 OSAL_UNUSED connection_handle, 2909 void *cookie, 2910 dma_addr_t OSAL_UNUSED rx_buf_addr, 2911 bool OSAL_UNUSED b_last_packet) 2912 { 2913 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 2914 struct ecore_iwarp_ll2_buff *buffer = 2915 (struct ecore_iwarp_ll2_buff *)cookie; 2916 2917 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 2918 buffer->data, 2919 buffer->data_phys_addr, 2920 buffer->buff_size); 2921 2922 OSAL_FREE(p_hwfn->p_dev, buffer); 2923 } 2924 2925 static void 2926 ecore_iwarp_ll2_comp_tx_pkt(void *cxt, 2927 u8 connection_handle, 2928 void *cookie, 2929 dma_addr_t OSAL_UNUSED first_frag_addr, 2930 bool OSAL_UNUSED b_last_fragment, 2931 bool OSAL_UNUSED b_last_packet) 2932 { 2933 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 2934 struct ecore_iwarp_ll2_buff *buffer = 2935 (struct ecore_iwarp_ll2_buff *)cookie; 2936 struct ecore_iwarp_ll2_buff *piggy; 2937 2938 if (!buffer) /* can happen in packed mpa unaligned... */ 2939 return; 2940 2941 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 2942 "LL2 CompTX buf=%p piggy_buf=%p handle=%d\n", 2943 buffer, buffer->piggy_buf, connection_handle); 2944 2945 /* we got a tx packet -> this was originally a rx packet... now we 2946 * can post it back... 2947 */ 2948 piggy = buffer->piggy_buf; 2949 if (piggy) { 2950 buffer->piggy_buf = OSAL_NULL; 2951 ecore_iwarp_ll2_post_rx(p_hwfn, piggy, 2952 connection_handle); 2953 } 2954 2955 ecore_iwarp_ll2_post_rx(p_hwfn, buffer, 2956 connection_handle); 2957 2958 if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) 2959 ecore_iwarp_process_pending_pkts(p_hwfn); 2960 2961 return; 2962 } 2963 2964 static void 2965 ecore_iwarp_ll2_rel_tx_pkt(void *cxt, 2966 u8 OSAL_UNUSED connection_handle, 2967 void *cookie, 2968 dma_addr_t OSAL_UNUSED first_frag_addr, 2969 bool OSAL_UNUSED b_last_fragment, 2970 bool OSAL_UNUSED b_last_packet) 2971 { 2972 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 2973 struct ecore_iwarp_ll2_buff *buffer = 2974 (struct ecore_iwarp_ll2_buff *)cookie; 2975 2976 if (!buffer) 2977 return; 2978 2979 if (buffer->piggy_buf) { 2980 OSAL_DMA_FREE_COHERENT( 2981 p_hwfn->p_dev, 2982 buffer->piggy_buf->data, 2983 buffer->piggy_buf->data_phys_addr, 2984 buffer->piggy_buf->buff_size); 2985 2986 OSAL_FREE(p_hwfn->p_dev, buffer->piggy_buf); 2987 } 2988 2989 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 2990 buffer->data, 2991 buffer->data_phys_addr, 2992 buffer->buff_size); 2993 2994 OSAL_FREE(p_hwfn->p_dev, buffer); 2995 return; 2996 } 2997 2998 /* Current known slowpath for iwarp ll2 is unalign flush. When this completion 2999 * is received, need to reset the FPDU. 3000 */ 3001 static void 3002 ecore_iwarp_ll2_slowpath(void *cxt, 3003 u8 OSAL_UNUSED connection_handle, 3004 u32 opaque_data_0, 3005 u32 opaque_data_1) 3006 { 3007 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 3008 struct unaligned_opaque_data unalign_data; 3009 struct ecore_iwarp_fpdu *fpdu; 3010 3011 ecore_iwarp_mpa_get_data(p_hwfn, &unalign_data, 3012 opaque_data_0, opaque_data_1); 3013 3014 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "(0x%x) Flush fpdu\n", 3015 unalign_data.cid); 3016 3017 fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid); 3018 if (fpdu) 3019 OSAL_MEM_ZERO(fpdu, sizeof(*fpdu)); 3020 } 3021 3022 static int 3023 ecore_iwarp_ll2_stop(struct ecore_hwfn *p_hwfn) 3024 { 3025 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; 3026 int rc = 0; 3027 3028 if (iwarp_info->ll2_syn_handle != ECORE_IWARP_HANDLE_INVAL) { 3029 3030 rc = ecore_ll2_terminate_connection(p_hwfn, 3031 iwarp_info->ll2_syn_handle); 3032 if (rc) 3033 DP_INFO(p_hwfn, "Failed to terminate syn connection\n"); 3034 3035 ecore_ll2_release_connection(p_hwfn, 3036 iwarp_info->ll2_syn_handle); 3037 iwarp_info->ll2_syn_handle = ECORE_IWARP_HANDLE_INVAL; 3038 } 3039 3040 if (iwarp_info->ll2_ooo_handle != ECORE_IWARP_HANDLE_INVAL) { 3041 rc = ecore_ll2_terminate_connection(p_hwfn, 3042 iwarp_info->ll2_ooo_handle); 3043 if (rc) 3044 DP_INFO(p_hwfn, "Failed to terminate ooo connection\n"); 3045 3046 ecore_ll2_release_connection(p_hwfn, 3047 iwarp_info->ll2_ooo_handle); 3048 iwarp_info->ll2_ooo_handle = ECORE_IWARP_HANDLE_INVAL; 3049 } 3050 3051 if (iwarp_info->ll2_mpa_handle != ECORE_IWARP_HANDLE_INVAL) { 3052 rc = ecore_ll2_terminate_connection(p_hwfn, 3053 iwarp_info->ll2_mpa_handle); 3054 if (rc) 3055 DP_INFO(p_hwfn, "Failed to terminate mpa connection\n"); 3056 3057 ecore_ll2_release_connection(p_hwfn, 3058 iwarp_info->ll2_mpa_handle); 3059 iwarp_info->ll2_mpa_handle = ECORE_IWARP_HANDLE_INVAL; 3060 } 3061 3062 ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0, 3063 p_hwfn->p_rdma_info->iwarp.mac_addr); 3064 3065 return rc; 3066 } 3067 3068 static int 3069 ecore_iwarp_ll2_alloc_buffers(struct ecore_hwfn *p_hwfn, 3070 int num_rx_bufs, 3071 int buff_size, 3072 u8 ll2_handle) 3073 { 3074 struct ecore_iwarp_ll2_buff *buffer; 3075 int rc = 0; 3076 int i; 3077 3078 for (i = 0; i < num_rx_bufs; i++) { 3079 buffer = OSAL_ZALLOC(p_hwfn->p_dev, 3080 GFP_KERNEL, sizeof(*buffer)); 3081 if (!buffer) { 3082 DP_INFO(p_hwfn, "Failed to allocate LL2 buffer desc\n"); 3083 break; 3084 } 3085 3086 buffer->data = 3087 OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 3088 &buffer->data_phys_addr, 3089 buff_size); 3090 3091 if (!buffer->data) { 3092 DP_INFO(p_hwfn, "Failed to allocate LL2 buffers\n"); 3093 OSAL_FREE(p_hwfn->p_dev, buffer); 3094 rc = ECORE_NOMEM; 3095 break; 3096 } 3097 3098 buffer->buff_size = buff_size; 3099 rc = ecore_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle); 3100 3101 if (rc) 3102 break; /* buffers will be deallocated by ecore_ll2 */ 3103 } 3104 return rc; 3105 } 3106 3107 #define ECORE_IWARP_CACHE_PADDING(size) \ 3108 (((size) + ETH_CACHE_LINE_SIZE - 1) & ~(ETH_CACHE_LINE_SIZE - 1)) 3109 3110 #define ECORE_IWARP_MAX_BUF_SIZE(mtu) \ 3111 ECORE_IWARP_CACHE_PADDING(mtu + ETH_HLEN + 2*VLAN_HLEN + 2 +\ 3112 ETH_CACHE_LINE_SIZE) 3113 3114 static int 3115 ecore_iwarp_ll2_start(struct ecore_hwfn *p_hwfn, 3116 struct ecore_rdma_start_in_params *params) 3117 { 3118 struct ecore_iwarp_info *iwarp_info; 3119 struct ecore_ll2_acquire_data data; 3120 struct ecore_ll2_cbs cbs; 3121 u32 mpa_buff_size; 3122 int rc = ECORE_SUCCESS; 3123 u16 n_ooo_bufs; 3124 int i; 3125 3126 iwarp_info = &p_hwfn->p_rdma_info->iwarp; 3127 iwarp_info->ll2_syn_handle = ECORE_IWARP_HANDLE_INVAL; 3128 iwarp_info->ll2_ooo_handle = ECORE_IWARP_HANDLE_INVAL; 3129 iwarp_info->ll2_mpa_handle = ECORE_IWARP_HANDLE_INVAL; 3130 3131 iwarp_info->max_mtu = params->max_mtu; 3132 3133 OSAL_MEMCPY(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr, 3134 ETH_ALEN); 3135 3136 rc = ecore_llh_add_mac_filter(p_hwfn->p_dev, 0, params->mac_addr); 3137 if (rc != ECORE_SUCCESS) 3138 return rc; 3139 3140 /* Start SYN connection */ 3141 cbs.rx_comp_cb = ecore_iwarp_ll2_comp_syn_pkt; 3142 cbs.rx_release_cb = ecore_iwarp_ll2_rel_rx_pkt; 3143 cbs.tx_comp_cb = ecore_iwarp_ll2_comp_tx_pkt; 3144 cbs.tx_release_cb = ecore_iwarp_ll2_rel_tx_pkt; 3145 cbs.cookie = p_hwfn; 3146 3147 OSAL_MEMSET(&data, 0, sizeof(data)); 3148 data.input.conn_type = ECORE_LL2_TYPE_IWARP; 3149 data.input.mtu = ECORE_IWARP_MAX_SYN_PKT_SIZE; 3150 data.input.rx_num_desc = ECORE_IWARP_LL2_SYN_RX_SIZE; 3151 data.input.tx_num_desc = ECORE_IWARP_LL2_SYN_TX_SIZE; 3152 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 3153 data.input.tx_tc = PKT_LB_TC; 3154 data.input.tx_dest = ECORE_LL2_TX_DEST_LB; 3155 data.p_connection_handle = &iwarp_info->ll2_syn_handle; 3156 data.cbs = &cbs; 3157 3158 rc = ecore_ll2_acquire_connection(p_hwfn, &data); 3159 if (rc) { 3160 DP_NOTICE(p_hwfn, false, "Failed to acquire LL2 connection\n"); 3161 ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0, params->mac_addr); 3162 return rc; 3163 } 3164 3165 rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle); 3166 if (rc) { 3167 DP_NOTICE(p_hwfn, false, 3168 "Failed to establish LL2 connection\n"); 3169 goto err; 3170 } 3171 3172 rc = ecore_iwarp_ll2_alloc_buffers(p_hwfn, 3173 ECORE_IWARP_LL2_SYN_RX_SIZE, 3174 ECORE_IWARP_MAX_SYN_PKT_SIZE, 3175 iwarp_info->ll2_syn_handle); 3176 if (rc) 3177 goto err; 3178 3179 /* Start OOO connection */ 3180 data.input.conn_type = ECORE_LL2_TYPE_OOO; 3181 data.input.mtu = params->max_mtu; 3182 3183 n_ooo_bufs = params->iwarp.ooo_num_rx_bufs; 3184 3185 if (n_ooo_bufs > ECORE_IWARP_LL2_OOO_MAX_RX_SIZE) 3186 n_ooo_bufs = ECORE_IWARP_LL2_OOO_MAX_RX_SIZE; 3187 3188 data.input.rx_num_desc = n_ooo_bufs; 3189 data.input.rx_num_ooo_buffers = n_ooo_bufs; 3190 3191 p_hwfn->p_rdma_info->iwarp.num_ooo_rx_bufs = data.input.rx_num_desc; 3192 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ 3193 data.input.tx_num_desc = ECORE_IWARP_LL2_OOO_DEF_TX_SIZE; 3194 data.p_connection_handle = &iwarp_info->ll2_ooo_handle; 3195 data.input.secondary_queue = true; 3196 3197 rc = ecore_ll2_acquire_connection(p_hwfn, &data); 3198 if (rc) 3199 goto err; 3200 3201 rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle); 3202 if (rc) 3203 goto err; 3204 3205 /* Start MPA connection */ 3206 cbs.rx_comp_cb = ecore_iwarp_ll2_comp_mpa_pkt; 3207 cbs.slowpath_cb = ecore_iwarp_ll2_slowpath; 3208 3209 OSAL_MEMSET(&data, 0, sizeof(data)); 3210 data.input.conn_type = ECORE_LL2_TYPE_IWARP; 3211 data.input.mtu = params->max_mtu; 3212 data.input.rx_num_desc = n_ooo_bufs * 2; 3213 /* we allocate the same amount for TX to reduce the chance we 3214 * run out of tx descriptors 3215 */ 3216 data.input.tx_num_desc = data.input.rx_num_desc; 3217 data.input.tx_max_bds_per_packet = ECORE_IWARP_MAX_BDS_PER_FPDU; 3218 data.p_connection_handle = &iwarp_info->ll2_mpa_handle; 3219 data.input.secondary_queue = true; 3220 data.cbs = &cbs; 3221 3222 rc = ecore_ll2_acquire_connection(p_hwfn, &data); 3223 if (rc) 3224 goto err; 3225 3226 rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle); 3227 if (rc) 3228 goto err; 3229 3230 mpa_buff_size = ECORE_IWARP_MAX_BUF_SIZE(params->max_mtu); 3231 rc = ecore_iwarp_ll2_alloc_buffers(p_hwfn, 3232 data.input.rx_num_desc, 3233 mpa_buff_size, 3234 iwarp_info->ll2_mpa_handle); 3235 if (rc) 3236 goto err; 3237 3238 iwarp_info->partial_fpdus = 3239 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 3240 sizeof(*iwarp_info->partial_fpdus) * 3241 (u16)p_hwfn->p_rdma_info->num_qps); 3242 3243 if (!iwarp_info->partial_fpdus) { 3244 DP_NOTICE(p_hwfn, false, 3245 "Failed to allocate ecore_iwarp_info(partial_fpdus)\n"); 3246 goto err; 3247 } 3248 3249 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; 3250 3251 /* The mpa_bufs array serves for pending RX packets received on the 3252 * mpa ll2 that don't have place on the tx ring and require later 3253 * processing. We can't fail on allocation of such a struct therefore 3254 * we allocate enough to take care of all rx packets 3255 */ 3256 iwarp_info->mpa_bufs = 3257 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 3258 sizeof(*iwarp_info->mpa_bufs) * 3259 data.input.rx_num_desc); 3260 3261 if (!iwarp_info->mpa_bufs) { 3262 DP_NOTICE(p_hwfn, false, 3263 "Failed to allocate mpa_bufs array mem_size=%d\n", 3264 (u32)(sizeof(*iwarp_info->mpa_bufs) * 3265 data.input.rx_num_desc)); 3266 goto err; 3267 } 3268 3269 iwarp_info->mpa_intermediate_buf = 3270 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, mpa_buff_size); 3271 if (!iwarp_info->mpa_intermediate_buf) { 3272 DP_NOTICE(p_hwfn, false, 3273 "Failed to allocate mpa_intermediate_buf mem_size=%d\n", 3274 mpa_buff_size); 3275 goto err; 3276 } 3277 3278 OSAL_LIST_INIT(&iwarp_info->mpa_buf_pending_list); 3279 OSAL_LIST_INIT(&iwarp_info->mpa_buf_list); 3280 for (i = 0; i < data.input.rx_num_desc; i++) { 3281 OSAL_LIST_PUSH_TAIL(&iwarp_info->mpa_bufs[i].list_entry, 3282 &iwarp_info->mpa_buf_list); 3283 } 3284 3285 return rc; 3286 3287 err: 3288 ecore_iwarp_ll2_stop(p_hwfn); 3289 3290 return rc; 3291 } 3292 3293 static void 3294 ecore_iwarp_set_defaults(struct ecore_hwfn *p_hwfn, 3295 struct ecore_rdma_start_in_params *params) 3296 { 3297 u32 rcv_wnd_size; 3298 u32 n_ooo_bufs; 3299 3300 /* rcv_wnd_size = 0: use defaults */ 3301 rcv_wnd_size = params->iwarp.rcv_wnd_size; 3302 if (!rcv_wnd_size) { 3303 if (ecore_device_num_ports(p_hwfn->p_dev) == 4) { 3304 rcv_wnd_size = ECORE_IS_AH(p_hwfn->p_dev) ? 3305 ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS : 3306 ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS; 3307 } else { 3308 rcv_wnd_size = ECORE_IS_AH(p_hwfn->p_dev) ? 3309 ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS : 3310 ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS; 3311 } 3312 params->iwarp.rcv_wnd_size = rcv_wnd_size; 3313 } 3314 3315 n_ooo_bufs = params->iwarp.ooo_num_rx_bufs; 3316 if (!n_ooo_bufs) { 3317 n_ooo_bufs = (u32)(((u64)ECORE_MAX_OOO * 3318 params->iwarp.rcv_wnd_size) / 3319 params->max_mtu); 3320 n_ooo_bufs = OSAL_MIN_T(u32, n_ooo_bufs, USHRT_MAX); 3321 params->iwarp.ooo_num_rx_bufs = (u16)n_ooo_bufs; 3322 } 3323 } 3324 3325 enum _ecore_status_t 3326 ecore_iwarp_setup(struct ecore_hwfn *p_hwfn, 3327 struct ecore_rdma_start_in_params *params) 3328 { 3329 enum _ecore_status_t rc = ECORE_SUCCESS; 3330 struct ecore_iwarp_info *iwarp_info; 3331 u32 rcv_wnd_size; 3332 3333 iwarp_info = &(p_hwfn->p_rdma_info->iwarp); 3334 3335 if (!params->iwarp.rcv_wnd_size || !params->iwarp.ooo_num_rx_bufs) 3336 ecore_iwarp_set_defaults(p_hwfn, params); 3337 3338 /* Scale 0 will set window of 0xFFFC (64K -4). 3339 * Scale x will set window of 0xFFFC << (x) 3340 * Therefore we subtract log2(64K) so that result is 0 3341 */ 3342 rcv_wnd_size = params->iwarp.rcv_wnd_size; 3343 if (rcv_wnd_size < ECORE_IWARP_RCV_WND_SIZE_MIN) 3344 rcv_wnd_size = ECORE_IWARP_RCV_WND_SIZE_MIN; 3345 3346 iwarp_info->rcv_wnd_scale = OSAL_MIN_T(u32, OSAL_LOG2(rcv_wnd_size) - 3347 OSAL_LOG2(ECORE_IWARP_RCV_WND_SIZE_MIN), ECORE_IWARP_MAX_WND_SCALE); 3348 iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; 3349 3350 iwarp_info->tcp_flags = params->iwarp.flags; 3351 iwarp_info->crc_needed = params->iwarp.crc_needed; 3352 switch (params->iwarp.mpa_rev) { 3353 case ECORE_MPA_REV1: 3354 iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; 3355 break; 3356 case ECORE_MPA_REV2: 3357 iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; 3358 break; 3359 } 3360 3361 iwarp_info->peer2peer = params->iwarp.mpa_peer2peer; 3362 iwarp_info->rtr_type = MPA_RTR_TYPE_NONE; 3363 3364 if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_SEND) 3365 iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; 3366 3367 if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_WRITE) 3368 iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; 3369 3370 if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_READ) 3371 iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_READ; 3372 3373 //DAVIDS OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.qp_lock); 3374 OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.ep_list); 3375 OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.listen_list); 3376 3377 ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, 3378 ecore_iwarp_async_event); 3379 ecore_ooo_setup(p_hwfn); 3380 3381 rc = ecore_iwarp_ll2_start(p_hwfn, params); 3382 3383 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3384 "MPA_REV = %d. peer2peer=%d rtr=%x\n", 3385 iwarp_info->mpa_rev, 3386 iwarp_info->peer2peer, 3387 iwarp_info->rtr_type); 3388 3389 return rc; 3390 } 3391 3392 enum _ecore_status_t 3393 ecore_iwarp_stop(struct ecore_hwfn *p_hwfn) 3394 { 3395 enum _ecore_status_t rc; 3396 3397 ecore_iwarp_free_prealloc_ep(p_hwfn); 3398 rc = ecore_iwarp_wait_for_all_cids(p_hwfn); 3399 if (rc != ECORE_SUCCESS) 3400 return rc; 3401 3402 ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP); 3403 3404 return ecore_iwarp_ll2_stop(p_hwfn); 3405 } 3406 3407 static void 3408 ecore_iwarp_qp_in_error(struct ecore_hwfn *p_hwfn, 3409 struct ecore_iwarp_ep *ep, 3410 u8 fw_return_code) 3411 { 3412 struct ecore_iwarp_cm_event_params params; 3413 3414 ecore_iwarp_modify_qp(p_hwfn, ep->qp, ECORE_IWARP_QP_STATE_ERROR, true); 3415 3416 params.event = ECORE_IWARP_EVENT_CLOSE; 3417 params.ep_context = ep; 3418 params.cm_info = &ep->cm_info; 3419 params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? 3420 ECORE_SUCCESS : ECORE_CONN_RESET; 3421 3422 ep->state = ECORE_IWARP_EP_CLOSED; 3423 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3424 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, 3425 &p_hwfn->p_rdma_info->iwarp.ep_list); 3426 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3427 3428 ep->event_cb(ep->cb_context, ¶ms); 3429 } 3430 3431 static void 3432 ecore_iwarp_exception_received(struct ecore_hwfn *p_hwfn, 3433 struct ecore_iwarp_ep *ep, 3434 int fw_ret_code) 3435 { 3436 struct ecore_iwarp_cm_event_params params; 3437 bool event_cb = false; 3438 3439 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", 3440 ep->cid, fw_ret_code); 3441 3442 switch (fw_ret_code) { 3443 case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: 3444 params.status = ECORE_SUCCESS; 3445 params.event = ECORE_IWARP_EVENT_DISCONNECT; 3446 event_cb = true; 3447 break; 3448 case IWARP_EXCEPTION_DETECTED_LLP_RESET: 3449 params.status = ECORE_CONN_RESET; 3450 params.event = ECORE_IWARP_EVENT_DISCONNECT; 3451 event_cb = true; 3452 break; 3453 case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: 3454 params.event = ECORE_IWARP_EVENT_RQ_EMPTY; 3455 event_cb = true; 3456 break; 3457 case IWARP_EXCEPTION_DETECTED_IRQ_FULL: 3458 params.event = ECORE_IWARP_EVENT_IRQ_FULL; 3459 event_cb = true; 3460 break; 3461 case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: 3462 params.event = ECORE_IWARP_EVENT_LLP_TIMEOUT; 3463 event_cb = true; 3464 break; 3465 case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: 3466 params.event = ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR; 3467 event_cb = true; 3468 break; 3469 case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: 3470 params.event = ECORE_IWARP_EVENT_CQ_OVERFLOW; 3471 event_cb = true; 3472 break; 3473 case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: 3474 params.event = ECORE_IWARP_EVENT_QP_CATASTROPHIC; 3475 event_cb = true; 3476 break; 3477 case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: 3478 params.event = ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR; 3479 event_cb = true; 3480 break; 3481 case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: 3482 params.event = ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR; 3483 event_cb = true; 3484 break; 3485 case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: 3486 params.event = ECORE_IWARP_EVENT_TERMINATE_RECEIVED; 3487 event_cb = true; 3488 break; 3489 default: 3490 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3491 "Unhandled exception received...\n"); 3492 break; 3493 } 3494 3495 if (event_cb) { 3496 params.ep_context = ep; 3497 params.cm_info = &ep->cm_info; 3498 ep->event_cb(ep->cb_context, ¶ms); 3499 } 3500 } 3501 3502 static void 3503 ecore_iwarp_tcp_connect_unsuccessful(struct ecore_hwfn *p_hwfn, 3504 struct ecore_iwarp_ep *ep, 3505 u8 fw_return_code) 3506 { 3507 struct ecore_iwarp_cm_event_params params; 3508 3509 OSAL_MEM_ZERO(¶ms, sizeof(params)); 3510 params.event = ECORE_IWARP_EVENT_ACTIVE_COMPLETE; 3511 params.ep_context = ep; 3512 params.cm_info = &ep->cm_info; 3513 ep->state = ECORE_IWARP_EP_CLOSED; 3514 3515 switch (fw_return_code) { 3516 case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: 3517 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3518 "%s(0x%x) TCP connect got invalid packet\n", 3519 ECORE_IWARP_CONNECT_MODE_STRING(ep), 3520 ep->tcp_cid); 3521 params.status = ECORE_CONN_RESET; 3522 break; 3523 case IWARP_CONN_ERROR_TCP_CONNECTION_RST: 3524 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3525 "%s(0x%x) TCP Connection Reset\n", 3526 ECORE_IWARP_CONNECT_MODE_STRING(ep), 3527 ep->tcp_cid); 3528 params.status = ECORE_CONN_RESET; 3529 break; 3530 case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: 3531 DP_NOTICE(p_hwfn, false, "%s(0x%x) TCP timeout\n", 3532 ECORE_IWARP_CONNECT_MODE_STRING(ep), 3533 ep->tcp_cid); 3534 params.status = ECORE_TIMEOUT; 3535 break; 3536 case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: 3537 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA not supported VER\n", 3538 ECORE_IWARP_CONNECT_MODE_STRING(ep), 3539 ep->tcp_cid); 3540 params.status = ECORE_CONN_REFUSED; 3541 break; 3542 case IWARP_CONN_ERROR_MPA_INVALID_PACKET: 3543 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Invalid Packet\n", 3544 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); 3545 params.status = ECORE_CONN_RESET; 3546 break; 3547 default: 3548 DP_ERR(p_hwfn, "%s(0x%x) Unexpected return code tcp connect: %d\n", 3549 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid, 3550 fw_return_code); 3551 params.status = ECORE_CONN_RESET; 3552 break; 3553 } 3554 3555 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 3556 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID; 3557 ecore_iwarp_return_ep(p_hwfn, ep); 3558 } else { 3559 ep->event_cb(ep->cb_context, ¶ms); 3560 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3561 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, 3562 &p_hwfn->p_rdma_info->iwarp.ep_list); 3563 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3564 } 3565 } 3566 3567 static void 3568 ecore_iwarp_connect_complete(struct ecore_hwfn *p_hwfn, 3569 struct ecore_iwarp_ep *ep, 3570 u8 fw_return_code) 3571 { 3572 if (ep->connect_mode == TCP_CONNECT_PASSIVE) { 3573 /* Done with the SYN packet, post back to ll2 rx */ 3574 ecore_iwarp_ll2_post_rx( 3575 p_hwfn, ep->syn, 3576 p_hwfn->p_rdma_info->iwarp.ll2_syn_handle); 3577 3578 ep->syn = OSAL_NULL; 3579 3580 if (ep->state == ECORE_IWARP_EP_ABORTING) 3581 return; 3582 3583 /* If connect failed - upper layer doesn't know about it */ 3584 if (fw_return_code == RDMA_RETURN_OK) 3585 ecore_iwarp_mpa_received(p_hwfn, ep); 3586 else 3587 ecore_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, 3588 fw_return_code); 3589 3590 } else { 3591 if (fw_return_code == RDMA_RETURN_OK) 3592 ecore_iwarp_mpa_offload(p_hwfn, ep); 3593 else 3594 ecore_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, 3595 fw_return_code); 3596 } 3597 } 3598 3599 static OSAL_INLINE bool 3600 ecore_iwarp_check_ep_ok(struct ecore_hwfn *p_hwfn, 3601 struct ecore_iwarp_ep *ep) 3602 { 3603 if (ep == OSAL_NULL) { 3604 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); 3605 return false; 3606 } 3607 3608 if (ep->sig != 0xdeadbeef) { 3609 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); 3610 return false; 3611 } 3612 3613 return true; 3614 } 3615 3616 static enum _ecore_status_t 3617 ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn, 3618 u8 fw_event_code, 3619 u16 OSAL_UNUSED echo, 3620 union event_ring_data *data, 3621 u8 fw_return_code) 3622 { 3623 struct regpair *fw_handle = &data->rdma_data.async_handle; 3624 struct ecore_iwarp_ep *ep = OSAL_NULL; 3625 u16 cid; 3626 3627 ep = (struct ecore_iwarp_ep *)(osal_uintptr_t)HILO_64(fw_handle->hi, 3628 fw_handle->lo); 3629 3630 switch (fw_event_code) { 3631 /* Async completion after TCP 3-way handshake */ 3632 case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: 3633 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep)) 3634 return ECORE_INVAL; 3635 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3636 "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", 3637 ep->tcp_cid, fw_return_code); 3638 ecore_iwarp_connect_complete(p_hwfn, ep, fw_return_code); 3639 break; 3640 case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: 3641 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep)) 3642 return ECORE_INVAL; 3643 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3644 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", 3645 ep->cid, fw_return_code); 3646 ecore_iwarp_exception_received(p_hwfn, ep, fw_return_code); 3647 break; 3648 /* Async completion for Close Connection ramrod */ 3649 case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: 3650 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep)) 3651 return ECORE_INVAL; 3652 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3653 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", 3654 ep->cid, fw_return_code); 3655 ecore_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); 3656 break; 3657 /* Async event for active side only */ 3658 case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: 3659 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep)) 3660 return ECORE_INVAL; 3661 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3662 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", 3663 ep->cid, fw_return_code); 3664 ecore_iwarp_mpa_reply_arrived(p_hwfn, ep); 3665 break; 3666 /* MPA Negotiations completed */ 3667 case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: 3668 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep)) 3669 return ECORE_INVAL; 3670 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3671 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", 3672 ep->cid, fw_return_code); 3673 ecore_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); 3674 break; 3675 case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: 3676 cid = (u16)OSAL_LE32_TO_CPU(fw_handle->lo); 3677 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3678 "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", 3679 cid); 3680 ecore_iwarp_cid_cleaned(p_hwfn, cid); 3681 3682 break; 3683 case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: 3684 DP_NOTICE(p_hwfn, false, 3685 "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); 3686 3687 p_hwfn->p_rdma_info->events.affiliated_event( 3688 p_hwfn->p_rdma_info->events.context, 3689 ECORE_IWARP_EVENT_CQ_OVERFLOW, 3690 (void *)fw_handle); 3691 break; 3692 default: 3693 DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", 3694 fw_event_code); 3695 return ECORE_INVAL; 3696 } 3697 return ECORE_SUCCESS; 3698 } 3699 3700 enum _ecore_status_t 3701 ecore_iwarp_create_listen(void *rdma_cxt, 3702 struct ecore_iwarp_listen_in *iparams, 3703 struct ecore_iwarp_listen_out *oparams) 3704 { 3705 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 3706 struct ecore_iwarp_listener *listener; 3707 3708 listener = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*listener)); 3709 3710 if (!listener) { 3711 DP_NOTICE(p_hwfn, 3712 false, 3713 "ecore iwarp create listener failed: cannot allocate memory (listener). rc = %d\n", 3714 ECORE_NOMEM); 3715 return ECORE_NOMEM; 3716 } 3717 listener->ip_version = iparams->ip_version; 3718 OSAL_MEMCPY(listener->ip_addr, 3719 iparams->ip_addr, 3720 sizeof(listener->ip_addr)); 3721 listener->port = iparams->port; 3722 listener->vlan = iparams->vlan; 3723 3724 listener->event_cb = iparams->event_cb; 3725 listener->cb_context = iparams->cb_context; 3726 listener->max_backlog = iparams->max_backlog; 3727 listener->state = ECORE_IWARP_LISTENER_STATE_ACTIVE; 3728 oparams->handle = listener; 3729 3730 OSAL_SPIN_LOCK_INIT(&listener->lock); 3731 OSAL_LIST_INIT(&listener->ep_list); 3732 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3733 OSAL_LIST_PUSH_TAIL(&listener->list_entry, 3734 &p_hwfn->p_rdma_info->iwarp.listen_list); 3735 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock); 3736 3737 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", 3738 listener->event_cb, 3739 listener, 3740 listener->ip_addr[0], 3741 listener->ip_addr[1], 3742 listener->ip_addr[2], 3743 listener->ip_addr[3], 3744 listener->port, 3745 listener->vlan); 3746 3747 return ECORE_SUCCESS; 3748 } 3749 3750 static void 3751 ecore_iwarp_pause_complete(struct ecore_iwarp_listener *listener) 3752 { 3753 struct ecore_iwarp_cm_event_params params; 3754 3755 if (listener->state == ECORE_IWARP_LISTENER_STATE_UNPAUSE) 3756 listener->state = ECORE_IWARP_LISTENER_STATE_ACTIVE; 3757 3758 params.event = ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP; 3759 listener->event_cb(listener->cb_context, ¶ms); 3760 } 3761 3762 static void 3763 ecore_iwarp_tcp_abort_comp(struct ecore_hwfn *p_hwfn, void *cookie, 3764 union event_ring_data OSAL_UNUSED *data, 3765 u8 OSAL_UNUSED fw_return_code) 3766 { 3767 struct ecore_iwarp_ep *ep = (struct ecore_iwarp_ep *)cookie; 3768 struct ecore_iwarp_listener *listener = ep->listener; 3769 3770 ecore_iwarp_return_ep(p_hwfn, ep); 3771 3772 if (OSAL_LIST_IS_EMPTY(&listener->ep_list)) 3773 listener->done = true; 3774 } 3775 3776 static void 3777 ecore_iwarp_abort_inflight_connections(struct ecore_hwfn *p_hwfn, 3778 struct ecore_iwarp_listener *listener) 3779 { 3780 struct ecore_spq_entry *p_ent = OSAL_NULL; 3781 struct ecore_iwarp_ep *ep = OSAL_NULL; 3782 struct ecore_sp_init_data init_data; 3783 struct ecore_spq_comp_cb comp_data; 3784 enum _ecore_status_t rc; 3785 3786 /* remove listener from list before destroying listener */ 3787 OSAL_LIST_REMOVE_ENTRY(&listener->list_entry, 3788 &p_hwfn->p_rdma_info->iwarp.listen_list); 3789 if (OSAL_LIST_IS_EMPTY(&listener->ep_list)) { 3790 listener->done = true; 3791 return; 3792 } 3793 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 3794 init_data.p_comp_data = &comp_data; 3795 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 3796 init_data.comp_mode = ECORE_SPQ_MODE_CB; 3797 init_data.p_comp_data->function = ecore_iwarp_tcp_abort_comp; 3798 3799 OSAL_LIST_FOR_EACH_ENTRY(ep, &listener->ep_list, 3800 list_entry, struct ecore_iwarp_ep) { 3801 ep->state = ECORE_IWARP_EP_ABORTING; 3802 init_data.p_comp_data->cookie = ep; 3803 init_data.cid = ep->tcp_cid; 3804 rc = ecore_sp_init_request(p_hwfn, &p_ent, 3805 IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD, 3806 PROTOCOLID_IWARP, 3807 &init_data); 3808 if (rc == ECORE_SUCCESS) 3809 ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 3810 } 3811 } 3812 3813 static void 3814 ecore_iwarp_listener_state_transition(struct ecore_hwfn *p_hwfn, void *cookie, 3815 union event_ring_data OSAL_UNUSED *data, 3816 u8 OSAL_UNUSED fw_return_code) 3817 { 3818 struct ecore_iwarp_listener *listener = (struct ecore_iwarp_listener *)cookie; 3819 3820 switch (listener->state) { 3821 case ECORE_IWARP_LISTENER_STATE_PAUSE: 3822 case ECORE_IWARP_LISTENER_STATE_UNPAUSE: 3823 ecore_iwarp_pause_complete(listener); 3824 break; 3825 case ECORE_IWARP_LISTENER_STATE_DESTROYING: 3826 ecore_iwarp_abort_inflight_connections(p_hwfn, listener); 3827 break; 3828 default: 3829 break; 3830 } 3831 } 3832 3833 static enum _ecore_status_t 3834 ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn, 3835 struct ecore_iwarp_listener *listener) 3836 { 3837 struct ecore_spq_entry *p_ent = OSAL_NULL; 3838 struct ecore_spq_comp_cb comp_data; 3839 struct ecore_sp_init_data init_data; 3840 enum _ecore_status_t rc; 3841 3842 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 3843 init_data.p_comp_data = &comp_data; 3844 init_data.cid = ecore_spq_get_cid(p_hwfn); 3845 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 3846 init_data.comp_mode = ECORE_SPQ_MODE_CB; 3847 init_data.p_comp_data->function = ecore_iwarp_listener_state_transition; 3848 init_data.p_comp_data->cookie = listener; 3849 rc = ecore_sp_init_request(p_hwfn, &p_ent, 3850 COMMON_RAMROD_EMPTY, 3851 PROTOCOLID_COMMON, 3852 &init_data); 3853 if (rc != ECORE_SUCCESS) 3854 return rc; 3855 3856 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 3857 if (rc != ECORE_SUCCESS) 3858 return rc; 3859 3860 return rc; 3861 } 3862 3863 enum _ecore_status_t 3864 ecore_iwarp_pause_listen(void *rdma_cxt, void *handle, 3865 bool pause, bool comp) 3866 { 3867 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 3868 struct ecore_iwarp_listener *listener = 3869 (struct ecore_iwarp_listener *)handle; 3870 enum _ecore_status_t rc; 3871 3872 listener->state = pause ? 3873 ECORE_IWARP_LISTENER_STATE_PAUSE : 3874 ECORE_IWARP_LISTENER_STATE_UNPAUSE; 3875 if (!comp) 3876 return ECORE_SUCCESS; 3877 3878 rc = ecore_iwarp_empty_ramrod(p_hwfn, listener); 3879 if (rc != ECORE_SUCCESS) 3880 return rc; 3881 3882 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener=%p, state=%d\n", 3883 listener, listener->state); 3884 3885 return ECORE_PENDING; 3886 } 3887 3888 enum _ecore_status_t 3889 ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle) 3890 { 3891 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 3892 struct ecore_iwarp_listener *listener = 3893 (struct ecore_iwarp_listener *)handle; 3894 enum _ecore_status_t rc; 3895 int wait_count = 0; 3896 3897 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "handle=%p\n", handle); 3898 3899 listener->state = ECORE_IWARP_LISTENER_STATE_DESTROYING; 3900 rc = ecore_iwarp_empty_ramrod(p_hwfn, listener); 3901 if (rc != ECORE_SUCCESS) 3902 return rc; 3903 3904 while (!listener->done) { 3905 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, 3906 "Waiting for ep list to be empty...\n"); 3907 OSAL_MSLEEP(100); 3908 if (wait_count++ > 200) { 3909 DP_NOTICE(p_hwfn, false, "ep list close timeout\n"); 3910 break; 3911 } 3912 } 3913 3914 OSAL_FREE(p_hwfn->p_dev, listener); 3915 3916 return ECORE_SUCCESS; 3917 } 3918 3919 enum _ecore_status_t 3920 ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams) 3921 { 3922 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt; 3923 struct ecore_sp_init_data init_data; 3924 struct ecore_spq_entry *p_ent; 3925 struct ecore_rdma_qp *qp; 3926 struct ecore_iwarp_ep *ep; 3927 enum _ecore_status_t rc; 3928 3929 ep = (struct ecore_iwarp_ep *)iparams->ep_context; 3930 if (!ep) { 3931 DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); 3932 return ECORE_INVAL; 3933 } 3934 3935 qp = ep->qp; 3936 3937 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", 3938 qp->icid, ep->tcp_cid); 3939 3940 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 3941 init_data.cid = qp->icid; 3942 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 3943 init_data.comp_mode = ECORE_SPQ_MODE_CB; 3944 3945 rc = ecore_sp_init_request(p_hwfn, &p_ent, 3946 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, 3947 PROTOCOLID_IWARP, &init_data); 3948 3949 if (rc != ECORE_SUCCESS) 3950 return rc; 3951 3952 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 3953 3954 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ecore_iwarp_send_rtr, rc = 0x%x\n", 3955 rc); 3956 3957 return rc; 3958 } 3959 3960 enum _ecore_status_t 3961 ecore_iwarp_query_qp(struct ecore_rdma_qp *qp, 3962 struct ecore_rdma_query_qp_out_params *out_params) 3963 { 3964 out_params->state = ecore_iwarp2roce_state(qp->iwarp_state); 3965 return ECORE_SUCCESS; 3966 } 3967 3968 #ifdef _NTDDK_ 3969 #pragma warning(pop) 3970 #endif 3971