1 /*
2 * Copyright (c) 2018-2019 Cavium, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 /*
29 * File : ecore_iwarp.c
30 */
31 #include <sys/cdefs.h>
32 #include "bcm_osal.h"
33 #include "ecore.h"
34 #include "ecore_status.h"
35 #include "ecore_sp_commands.h"
36 #include "ecore_cxt.h"
37 #include "ecore_rdma.h"
38 #include "reg_addr.h"
39 #include "ecore_hw.h"
40 #include "ecore_hsi_iwarp.h"
41 #include "ecore_ll2.h"
42 #include "ecore_ooo.h"
43 #ifndef LINUX_REMOVE
44 #include "ecore_tcp_ip.h"
45 #endif
46
47 #ifdef _NTDDK_
48 #pragma warning(push)
49 #pragma warning(disable : 28123)
50 #pragma warning(disable : 28167)
51 #endif
52
53 /* Default values used for MPA Rev 1 */
54 #define ECORE_IWARP_ORD_DEFAULT 32
55 #define ECORE_IWARP_IRD_DEFAULT 32
56
57 #define ECORE_IWARP_MAX_FW_MSS 4120
58
59 struct mpa_v2_hdr {
60 __be16 ird;
61 __be16 ord;
62 };
63
64 #define MPA_V2_PEER2PEER_MODEL 0x8000
65 #define MPA_V2_SEND_RTR 0x4000 /* on ird */
66 #define MPA_V2_READ_RTR 0x4000 /* on ord */
67 #define MPA_V2_WRITE_RTR 0x8000
68 #define MPA_V2_IRD_ORD_MASK 0x3FFF
69
70 #define MPA_REV2(_mpa_rev) (_mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED)
71
72 #define ECORE_IWARP_INVALID_TCP_CID 0xffffffff
/* How many times a FIN will be sent before the FW aborts and sends an RST */
74 #define ECORE_IWARP_MAX_FIN_RT_DEFAULT 2
75 #define ECORE_IWARP_RCV_WND_SIZE_MIN (0xffff)
76 /* INTERNAL: These numbers are derived from BRB buffer sizes to obtain optimal performance */
77 #define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS (200*1024)
78 #define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS (100*1024)
79 #define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS (150*1024)
80 #define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS (90*1024)
81 #define ECORE_IWARP_MAX_WND_SCALE (14)
/* Timestamp header is the length of the timestamp option (10 bytes):
* kind: 8 bits, length: 8 bits, timestamp value: 32 bits, echo reply: 32 bits,
* rounded up to a multiple of 4.
*/
86 #define TIMESTAMP_HEADER_SIZE (12)
87
88 static enum _ecore_status_t
89 ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
90 u8 fw_event_code,
91 u16 OSAL_UNUSED echo,
92 union event_ring_data *data,
93 u8 fw_return_code);
94
95 static enum _ecore_status_t
96 ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn,
97 struct ecore_iwarp_listener *listener);
98
99 static OSAL_INLINE struct ecore_iwarp_fpdu *
100 ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid);
101
102 /* Override devinfo with iWARP specific values */
103 void
ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn)
105 {
106 struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
107
108 dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
109 dev->max_qp = OSAL_MIN_T(u64,
110 IWARP_MAX_QPS,
111 p_hwfn->p_rdma_info->num_qps) -
112 ECORE_IWARP_PREALLOC_CNT;
113
114 dev->max_cq = dev->max_qp;
115
116 dev->max_qp_resp_rd_atomic_resc = ECORE_IWARP_IRD_DEFAULT;
117 dev->max_qp_req_rd_atomic_resc = ECORE_IWARP_ORD_DEFAULT;
118 }
119
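/* Enable TCP searching in the parser (PRS) so offloaded iWARP connections are
* recognized by the hardware, and mark RDMA as enabled in PRS.
*/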
120 enum _ecore_status_t
ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
122 {
123 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
124 ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
125 p_hwfn->b_rdma_enabled_in_prs = true;
126
127 return 0;
128 }
129
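/* Fill the iWARP-specific fields of the function-init ramrod: the LL2
* out-of-order (OOO) queue index and the TCP max FIN retransmit count.
*/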
130 void
ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
133 {
134 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
135 "ooo handle = %d\n",
136 p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle);
137
138 p_ramrod->iwarp.ll2_ooo_q_index =
139 p_hwfn->hw_info.resc_start[ECORE_LL2_QUEUE] +
140 p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
141
142 p_ramrod->tcp.max_fin_rt = ECORE_IWARP_MAX_FIN_RT_DEFAULT;
143 return;
144 }
145
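/* Allocate a cid from the main cid_map and dynamically allocate ILT memory
* for its context; if the ILT allocation fails the cid is released back to
* the bitmap.
*/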
146 static enum _ecore_status_t
ecore_iwarp_alloc_cid(struct ecore_hwfn *p_hwfn, u32 *cid)
148 {
149 enum _ecore_status_t rc;
150
151 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
152
153 rc = ecore_rdma_bmap_alloc_id(p_hwfn,
154 &p_hwfn->p_rdma_info->cid_map,
155 cid);
156
157 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
158 *cid += ecore_cxt_get_proto_cid_start(p_hwfn,
159 p_hwfn->p_rdma_info->proto);
160 if (rc != ECORE_SUCCESS) {
161 DP_NOTICE(p_hwfn, false, "Failed in allocating iwarp cid\n");
162 return rc;
163 }
164
165 rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *cid);
166
167 if (rc != ECORE_SUCCESS) {
168 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
169 *cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
170 p_hwfn->p_rdma_info->proto);
171
172 ecore_bmap_release_id(p_hwfn,
173 &p_hwfn->p_rdma_info->cid_map,
174 *cid);
175
176 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
177 }
178
179 return rc;
180 }
181
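/* Mark a cid as used in the tcp_cid_map. The bitmap holds values relative to
* the protocol's cid start, so that offset is subtracted first.
*/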
182 static void
ecore_iwarp_set_tcp_cid(struct ecore_hwfn *p_hwfn, u32 cid)
184 {
185 cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
186 p_hwfn->p_rdma_info->proto);
187
188 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
189 ecore_bmap_set_id(p_hwfn,
190 &p_hwfn->p_rdma_info->tcp_cid_map,
191 cid);
192 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
193 }
194
/* This function allocates a cid for passive tcp (called from syn receive).
* The reason it is separate from the regular cid allocation is that these
* cids are assured to already have ILT allocated. They are preallocated to
* ensure that no memory allocation is needed during syn processing.
*/
200 static enum _ecore_status_t
ecore_iwarp_alloc_tcp_cid(struct ecore_hwfn *p_hwfn, u32 *cid)
202 {
203 enum _ecore_status_t rc;
204
205 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
206
207 rc = ecore_rdma_bmap_alloc_id(p_hwfn,
208 &p_hwfn->p_rdma_info->tcp_cid_map,
209 cid);
210
211 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
212
213 *cid += ecore_cxt_get_proto_cid_start(p_hwfn,
214 p_hwfn->p_rdma_info->proto);
215 if (rc != ECORE_SUCCESS) {
216 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
217 "can't allocate iwarp tcp cid max-count=%d\n",
218 p_hwfn->p_rdma_info->tcp_cid_map.max_count);
219
220 *cid = ECORE_IWARP_INVALID_TCP_CID;
221 }
222
223 return rc;
224 }
225
/* We have two cid maps: one for tcp, which should be used only for passive
* syn processing and for replacing a pre-allocated ep in the list, and a
* second one for active tcp connections and for QPs.
*/
static void ecore_iwarp_cid_cleaned(struct ecore_hwfn *p_hwfn, u32 cid)
231 {
232 cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
233 p_hwfn->p_rdma_info->proto);
234
235 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
236
237 if (cid < ECORE_IWARP_PREALLOC_CNT) {
238 ecore_bmap_release_id(p_hwfn,
239 &p_hwfn->p_rdma_info->tcp_cid_map,
240 cid);
241 } else {
242 ecore_bmap_release_id(p_hwfn,
243 &p_hwfn->p_rdma_info->cid_map,
244 cid);
245 }
246
247 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
248 }
249
250 enum _ecore_status_t
ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
struct ecore_rdma_create_qp_out_params *out_params)
254 {
255 struct iwarp_create_qp_ramrod_data *p_ramrod;
256 struct ecore_sp_init_data init_data;
257 struct ecore_spq_entry *p_ent;
258 enum _ecore_status_t rc;
259 u16 physical_queue;
260 u32 cid;
261
262 qp->shared_queue =
263 OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
264 &qp->shared_queue_phys_addr,
265 IWARP_SHARED_QUEUE_PAGE_SIZE);
266 if (!qp->shared_queue) {
267 DP_NOTICE(p_hwfn, false,
268 "ecore iwarp create qp failed: cannot allocate memory (shared queue).\n");
269 return ECORE_NOMEM;
270 } else {
271 out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
272 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
273 out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
274 IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
275 out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
276 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
277 out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
278 IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
279 }
280
281 rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
282 if (rc != ECORE_SUCCESS)
283 goto err1;
284
285 qp->icid = (u16)cid;
286
287 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
288 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
289 init_data.cid = qp->icid;
290 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
291
292 rc = ecore_sp_init_request(p_hwfn, &p_ent,
293 IWARP_RAMROD_CMD_ID_CREATE_QP,
294 PROTOCOLID_IWARP, &init_data);
295 if (rc != ECORE_SUCCESS)
296 return rc;
297
298 p_ramrod = &p_ent->ramrod.iwarp_create_qp;
299
300 SET_FIELD(p_ramrod->flags,
301 IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
302 qp->fmr_and_reserved_lkey);
303
304 SET_FIELD(p_ramrod->flags,
305 IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP,
306 qp->signal_all);
307
308 SET_FIELD(p_ramrod->flags,
309 IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
310 qp->incoming_rdma_read_en);
311
312 SET_FIELD(p_ramrod->flags,
313 IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
314 qp->incoming_rdma_write_en);
315
316 SET_FIELD(p_ramrod->flags,
317 IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
318 qp->incoming_atomic_en);
319
320 SET_FIELD(p_ramrod->flags,
321 IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG,
322 qp->use_srq);
323
324 p_ramrod->pd = qp->pd;
325 p_ramrod->sq_num_pages = qp->sq_num_pages;
326 p_ramrod->rq_num_pages = qp->rq_num_pages;
327
328 p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
329 p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
330
331 p_ramrod->cq_cid_for_sq =
332 OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
333 qp->sq_cq_id);
334 p_ramrod->cq_cid_for_rq =
335 OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
336 qp->rq_cq_id);
337
338 p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);
339
340 physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
341 p_ramrod->physical_q0 = OSAL_CPU_TO_LE16(physical_queue);
342 physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
343 p_ramrod->physical_q1 = OSAL_CPU_TO_LE16(physical_queue);
344
345 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
346
347 if (rc != ECORE_SUCCESS)
348 goto err1;
349
350 return rc;
351
352 err1:
353 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
354 qp->shared_queue,
355 qp->shared_queue_phys_addr,
356 IWARP_SHARED_QUEUE_PAGE_SIZE);
357
358 return rc;
359 }
360
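/* Post a MODIFY_QP ramrod that transitions the FW QP state to CLOSING or
* ERROR, depending on the driver's iwarp_state.
*/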
361 static enum _ecore_status_t
ecore_iwarp_modify_fw(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp)
364 {
365 struct iwarp_modify_qp_ramrod_data *p_ramrod;
366 struct ecore_sp_init_data init_data;
367 struct ecore_spq_entry *p_ent;
368 enum _ecore_status_t rc;
369
370 /* Get SPQ entry */
371 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
372 init_data.cid = qp->icid;
373 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
374 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
375
376 rc = ecore_sp_init_request(p_hwfn, &p_ent,
377 IWARP_RAMROD_CMD_ID_MODIFY_QP,
378 p_hwfn->p_rdma_info->proto,
379 &init_data);
380 if (rc != ECORE_SUCCESS)
381 return rc;
382
383 p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
384 SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
385 0x1);
386 if (qp->iwarp_state == ECORE_IWARP_QP_STATE_CLOSING)
387 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
388 else
389 p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
390
391 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
392
393 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)rc=%d\n",
394 qp->icid, rc);
395
396 return rc;
397 }
398
399 enum ecore_iwarp_qp_state
ecore_roce2iwarp_state(enum ecore_roce_qp_state state)
401 {
402 switch (state) {
403 case ECORE_ROCE_QP_STATE_RESET:
404 case ECORE_ROCE_QP_STATE_INIT:
405 case ECORE_ROCE_QP_STATE_RTR:
406 return ECORE_IWARP_QP_STATE_IDLE;
407 case ECORE_ROCE_QP_STATE_RTS:
408 return ECORE_IWARP_QP_STATE_RTS;
409 case ECORE_ROCE_QP_STATE_SQD:
410 return ECORE_IWARP_QP_STATE_CLOSING;
411 case ECORE_ROCE_QP_STATE_ERR:
412 return ECORE_IWARP_QP_STATE_ERROR;
413 case ECORE_ROCE_QP_STATE_SQE:
414 return ECORE_IWARP_QP_STATE_TERMINATE;
415 }
416 return ECORE_IWARP_QP_STATE_ERROR;
417 }
418
419 static enum ecore_roce_qp_state
ecore_iwarp2roce_state(enum ecore_iwarp_qp_state state)
421 {
422 switch (state) {
423 case ECORE_IWARP_QP_STATE_IDLE:
424 return ECORE_ROCE_QP_STATE_INIT;
425 case ECORE_IWARP_QP_STATE_RTS:
426 return ECORE_ROCE_QP_STATE_RTS;
427 case ECORE_IWARP_QP_STATE_TERMINATE:
428 return ECORE_ROCE_QP_STATE_SQE;
429 case ECORE_IWARP_QP_STATE_CLOSING:
430 return ECORE_ROCE_QP_STATE_SQD;
431 case ECORE_IWARP_QP_STATE_ERROR:
432 return ECORE_ROCE_QP_STATE_ERR;
433 }
434 return ECORE_ROCE_QP_STATE_ERR;
435 }
436
437 const char *iwarp_state_names[] = {
438 "IDLE",
439 "RTS",
440 "TERMINATE",
441 "CLOSING",
442 "ERROR",
443 };
444
445 enum _ecore_status_t
ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
enum ecore_iwarp_qp_state new_state,
bool internal)
450 {
451 enum ecore_iwarp_qp_state prev_iw_state;
452 enum _ecore_status_t rc = 0;
453 bool modify_fw = false;
454
/* Modify QP can be called from the upper layer or as a result of an async
* RST/FIN, therefore the state transition needs to be protected by a lock.
*/
458 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
459 prev_iw_state = qp->iwarp_state;
460
461 if (prev_iw_state == new_state) {
462 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
463 return ECORE_SUCCESS;
464 }
465
466 switch (prev_iw_state) {
467 case ECORE_IWARP_QP_STATE_IDLE:
468 switch (new_state) {
469 case ECORE_IWARP_QP_STATE_RTS:
470 qp->iwarp_state = ECORE_IWARP_QP_STATE_RTS;
471 break;
472 case ECORE_IWARP_QP_STATE_ERROR:
473 qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR;
474 if (!internal)
475 modify_fw = true;
476 break;
477 default:
478 break;
479 }
480 break;
481 case ECORE_IWARP_QP_STATE_RTS:
482 switch (new_state) {
483 case ECORE_IWARP_QP_STATE_CLOSING:
484 if (!internal)
485 modify_fw = true;
486
487 qp->iwarp_state = ECORE_IWARP_QP_STATE_CLOSING;
488 break;
489 case ECORE_IWARP_QP_STATE_ERROR:
490 if (!internal)
491 modify_fw = true;
492 qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR;
493 break;
494 default:
495 break;
496 }
497 break;
498 case ECORE_IWARP_QP_STATE_ERROR:
499 switch (new_state) {
500 case ECORE_IWARP_QP_STATE_IDLE:
501 /* TODO: destroy flow -> need to destroy EP&QP */
502 qp->iwarp_state = new_state;
503 break;
504 case ECORE_IWARP_QP_STATE_CLOSING:
505 /* could happen due to race... do nothing.... */
506 break;
507 default:
508 rc = ECORE_INVAL;
509 }
510 break;
511 case ECORE_IWARP_QP_STATE_TERMINATE:
512 case ECORE_IWARP_QP_STATE_CLOSING:
513 qp->iwarp_state = new_state;
514 break;
515 default:
516 break;
517 }
518
519 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) %s --> %s %s\n",
520 qp->icid,
521 iwarp_state_names[prev_iw_state],
522 iwarp_state_names[qp->iwarp_state],
523 internal ? "internal" : " ");
524
525 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
526
527 if (modify_fw)
528 ecore_iwarp_modify_fw(p_hwfn, qp);
529
530 return rc;
531 }
532
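/* Post a DESTROY_QP ramrod for the given QP and block until the FW completes
* it (EBLOCK mode).
*/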
533 enum _ecore_status_t
ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp)
536 {
537 struct ecore_sp_init_data init_data;
538 struct ecore_spq_entry *p_ent;
539 enum _ecore_status_t rc;
540
541 /* Get SPQ entry */
542 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
543 init_data.cid = qp->icid;
544 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
545 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
546
547 rc = ecore_sp_init_request(p_hwfn, &p_ent,
548 IWARP_RAMROD_CMD_ID_DESTROY_QP,
549 p_hwfn->p_rdma_info->proto,
550 &init_data);
551 if (rc != ECORE_SUCCESS)
552 return rc;
553
554 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
555
556 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
557
558 return rc;
559 }
560
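/* Free an endpoint: release its DMA ep_buffer, optionally unlink it from the
* active ep_list, detach it from its QP and free the ep object itself.
*/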
static void ecore_iwarp_destroy_ep(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep,
bool remove_from_active_list)
564 {
565 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
566 ep->ep_buffer_virt,
567 ep->ep_buffer_phys,
568 sizeof(*ep->ep_buffer_virt));
569
570 if (remove_from_active_list) {
571 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
572
573 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
574 &p_hwfn->p_rdma_info->iwarp.ep_list);
575
576 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
577 }
578
579 if (ep->qp)
580 ep->qp->ep = OSAL_NULL;
581
582 OSAL_FREE(p_hwfn->p_dev, ep);
583 }
584
585 enum _ecore_status_t
ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp)
588 {
589 enum _ecore_status_t rc = ECORE_SUCCESS;
590 struct ecore_iwarp_ep *ep = qp->ep;
591 struct ecore_iwarp_fpdu *fpdu;
592 int wait_count = 0;
593
594 fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, qp->icid);
595 if (fpdu && fpdu->incomplete_bytes)
596 DP_NOTICE(p_hwfn, false,
597 "Pending Partial fpdu with incomplete bytes=%d\n",
598 fpdu->incomplete_bytes);
599
600 if (qp->iwarp_state != ECORE_IWARP_QP_STATE_ERROR) {
601 rc = ecore_iwarp_modify_qp(p_hwfn, qp,
602 ECORE_IWARP_QP_STATE_ERROR,
603 false);
604
605 if (rc != ECORE_SUCCESS)
606 return rc;
607 }
608
609 /* Make sure ep is closed before returning and freeing memory. */
610 if (ep) {
611 while (ep->state != ECORE_IWARP_EP_CLOSED) {
612 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
613 "Waiting for ep->state to be closed...state=%x\n",
614 ep->state);
615
616 OSAL_MSLEEP(100);
617 if (wait_count++ > 200) {
618 DP_NOTICE(p_hwfn, false, "ep state close timeout state=%x\n",
619 ep->state);
620 break;
621 }
622 }
623
624 ecore_iwarp_destroy_ep(p_hwfn, ep, false);
625 }
626
627 rc = ecore_iwarp_fw_destroy(p_hwfn, qp);
628
629 if (qp->shared_queue)
630 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
631 qp->shared_queue,
632 qp->shared_queue_phys_addr,
633 IWARP_SHARED_QUEUE_PAGE_SIZE);
634
635 return rc;
636 }
637
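/* Allocate an endpoint object together with its DMA-coherent ep_buffer,
* which holds the incoming/outgoing private data and the FW async output.
*/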
638 static enum _ecore_status_t
ecore_iwarp_create_ep(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep **ep_out)
641 {
642 struct ecore_iwarp_ep *ep;
643 enum _ecore_status_t rc;
644
645 ep = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*ep));
646 if (!ep) {
647 DP_NOTICE(p_hwfn, false,
648 "ecore create ep failed: cannot allocate memory (ep). rc = %d\n",
649 ECORE_NOMEM);
650 return ECORE_NOMEM;
651 }
652
653 ep->state = ECORE_IWARP_EP_INIT;
654
/* ep_buffer is allocated once and is structured as follows:
* [MAX_PRIV_DATA_LEN][MAX_PRIV_DATA_LEN][union async_output]
* We could have allocated this in three calls, but since altogether it is
* less than a page, we do one allocation and initialize the pointers
* accordingly.
*/
661 ep->ep_buffer_virt = OSAL_DMA_ALLOC_COHERENT(
662 p_hwfn->p_dev,
663 &ep->ep_buffer_phys,
664 sizeof(*ep->ep_buffer_virt));
665
666 if (!ep->ep_buffer_virt) {
667 DP_NOTICE(p_hwfn, false,
668 "ecore create ep failed: cannot allocate memory (ulp buffer). rc = %d\n",
669 ECORE_NOMEM);
670 rc = ECORE_NOMEM;
671 goto err;
672 }
673
674 ep->sig = 0xdeadbeef;
675
676 *ep_out = ep;
677
678 return ECORE_SUCCESS;
679
680 err:
681 OSAL_FREE(p_hwfn->p_dev, ep);
682 return rc;
683 }
684
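/* Debug helper: dump the contents of a TCP offload ramrod at ECORE_MSG_RDMA
* verbosity.
*/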
685 static void
ecore_iwarp_print_tcp_ramrod(struct ecore_hwfn *p_hwfn,
struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
688 {
689 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, ">>> PRINT TCP RAMROD\n");
690
691 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_mac=%x %x %x\n",
692 p_tcp_ramrod->tcp.local_mac_addr_lo,
693 p_tcp_ramrod->tcp.local_mac_addr_mid,
694 p_tcp_ramrod->tcp.local_mac_addr_hi);
695
696 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_mac=%x %x %x\n",
697 p_tcp_ramrod->tcp.remote_mac_addr_lo,
698 p_tcp_ramrod->tcp.remote_mac_addr_mid,
699 p_tcp_ramrod->tcp.remote_mac_addr_hi);
700
701 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan_id=%x\n",
702 p_tcp_ramrod->tcp.vlan_id);
703 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flags=%x\n",
704 p_tcp_ramrod->tcp.flags);
705
706 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version=%x\n",
707 p_tcp_ramrod->tcp.ip_version);
708 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip=%x.%x.%x.%x\n",
709 p_tcp_ramrod->tcp.local_ip[0],
710 p_tcp_ramrod->tcp.local_ip[1],
711 p_tcp_ramrod->tcp.local_ip[2],
712 p_tcp_ramrod->tcp.local_ip[3]);
713 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip=%x.%x.%x.%x\n",
714 p_tcp_ramrod->tcp.remote_ip[0],
715 p_tcp_ramrod->tcp.remote_ip[1],
716 p_tcp_ramrod->tcp.remote_ip[2],
717 p_tcp_ramrod->tcp.remote_ip[3]);
718 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flow_label=%x\n",
719 p_tcp_ramrod->tcp.flow_label);
720 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ttl=%x\n",
721 p_tcp_ramrod->tcp.ttl);
722 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "tos_or_tc=%x\n",
723 p_tcp_ramrod->tcp.tos_or_tc);
724 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port=%x\n",
725 p_tcp_ramrod->tcp.local_port);
726 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port=%x\n",
727 p_tcp_ramrod->tcp.remote_port);
728 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "mss=%x\n",
729 p_tcp_ramrod->tcp.mss);
730 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rcv_wnd_scale=%x\n",
731 p_tcp_ramrod->tcp.rcv_wnd_scale);
732 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "connect_mode=%x\n",
733 p_tcp_ramrod->tcp.connect_mode);
734 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_ip_payload_length=%x\n",
735 p_tcp_ramrod->tcp.syn_ip_payload_length);
736 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_lo=%x\n",
737 p_tcp_ramrod->tcp.syn_phy_addr_lo);
738 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_hi=%x\n",
739 p_tcp_ramrod->tcp.syn_phy_addr_hi);
740
DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "<<< PRINT TCP RAMROD\n");
742 }
743
744 /* Default values for tcp option2 */
745 #define ECORE_IWARP_DEF_MAX_RT_TIME (0)
746 #define ECORE_IWARP_DEF_CWND_FACTOR (4)
747 #define ECORE_IWARP_DEF_KA_MAX_PROBE_CNT (5)
748 #define ECORE_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
749 #define ECORE_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
750
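/* Build and post the TCP_OFFLOAD ramrod which hands the TCP connection
* (MACs, IPs, ports, MSS, window scale and TCP options) over to the FW.
* Passive connections complete via callback, active ones block.
*/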
751 static enum _ecore_status_t
ecore_iwarp_tcp_offload(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep)
754 {
755 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
756 struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
757 struct ecore_sp_init_data init_data;
758 struct ecore_spq_entry *p_ent;
759 dma_addr_t async_output_phys;
760 dma_addr_t in_pdata_phys;
761 enum _ecore_status_t rc;
762 u16 physical_q;
763 u8 tcp_flags;
764 int i;
765
766 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
767 init_data.cid = ep->tcp_cid;
768 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
769
770 if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
771 init_data.comp_mode = ECORE_SPQ_MODE_CB;
772 } else {
773 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
774 }
775
776 rc = ecore_sp_init_request(p_hwfn, &p_ent,
777 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
778 PROTOCOLID_IWARP, &init_data);
779 if (rc != ECORE_SUCCESS)
780 return rc;
781
782 p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
783
784 /* Point to the "second half" of the ulp buffer */
785 in_pdata_phys = ep->ep_buffer_phys +
786 OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata);
787 p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.hi =
788 DMA_HI_LE(in_pdata_phys);
789 p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.lo =
790 DMA_LO_LE(in_pdata_phys);
791 p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
792 OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata));
793
794 async_output_phys = ep->ep_buffer_phys +
795 OFFSETOF(struct ecore_iwarp_ep_memory, async_output);
796
797 p_tcp_ramrod->iwarp.async_eqe_output_buf.hi =
798 DMA_HI_LE(async_output_phys);
799 p_tcp_ramrod->iwarp.async_eqe_output_buf.lo =
800 DMA_LO_LE(async_output_phys);
801 p_tcp_ramrod->iwarp.handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep));
802 p_tcp_ramrod->iwarp.handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep));
803
804 physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
805 p_tcp_ramrod->iwarp.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
806 physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
807 p_tcp_ramrod->iwarp.physical_q1 = OSAL_CPU_TO_LE16(physical_q);
808 p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
809
810 ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.remote_mac_addr_hi,
811 &p_tcp_ramrod->tcp.remote_mac_addr_mid,
812 &p_tcp_ramrod->tcp.remote_mac_addr_lo,
813 ep->remote_mac_addr);
814 ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.local_mac_addr_hi,
815 &p_tcp_ramrod->tcp.local_mac_addr_mid,
816 &p_tcp_ramrod->tcp.local_mac_addr_lo,
817 ep->local_mac_addr);
818
819 p_tcp_ramrod->tcp.vlan_id = OSAL_CPU_TO_LE16(ep->cm_info.vlan);
820
821 tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
822 p_tcp_ramrod->tcp.flags = 0;
823 SET_FIELD(p_tcp_ramrod->tcp.flags,
824 TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
825 !!(tcp_flags & ECORE_IWARP_TS_EN));
826
827 SET_FIELD(p_tcp_ramrod->tcp.flags,
828 TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
829 !!(tcp_flags & ECORE_IWARP_DA_EN));
830
831 p_tcp_ramrod->tcp.ip_version = ep->cm_info.ip_version;
832
833 for (i = 0; i < 4; i++) {
834 p_tcp_ramrod->tcp.remote_ip[i] =
835 OSAL_CPU_TO_LE32(ep->cm_info.remote_ip[i]);
836 p_tcp_ramrod->tcp.local_ip[i] =
837 OSAL_CPU_TO_LE32(ep->cm_info.local_ip[i]);
838 }
839
840 p_tcp_ramrod->tcp.remote_port =
841 OSAL_CPU_TO_LE16(ep->cm_info.remote_port);
842 p_tcp_ramrod->tcp.local_port = OSAL_CPU_TO_LE16(ep->cm_info.local_port);
843 p_tcp_ramrod->tcp.mss = OSAL_CPU_TO_LE16(ep->mss);
844 p_tcp_ramrod->tcp.flow_label = 0;
845 p_tcp_ramrod->tcp.ttl = 0x40;
846 p_tcp_ramrod->tcp.tos_or_tc = 0;
847
848 p_tcp_ramrod->tcp.max_rt_time = ECORE_IWARP_DEF_MAX_RT_TIME;
849 p_tcp_ramrod->tcp.cwnd = ECORE_IWARP_DEF_CWND_FACTOR * p_tcp_ramrod->tcp.mss;
850 p_tcp_ramrod->tcp.ka_max_probe_cnt = ECORE_IWARP_DEF_KA_MAX_PROBE_CNT;
851 p_tcp_ramrod->tcp.ka_timeout = ECORE_IWARP_DEF_KA_TIMEOUT;
852 p_tcp_ramrod->tcp.ka_interval = ECORE_IWARP_DEF_KA_INTERVAL;
853
854 p_tcp_ramrod->tcp.rcv_wnd_scale =
855 (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
856 p_tcp_ramrod->tcp.connect_mode = ep->connect_mode;
857
858 if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
859 p_tcp_ramrod->tcp.syn_ip_payload_length =
860 OSAL_CPU_TO_LE16(ep->syn_ip_payload_length);
861 p_tcp_ramrod->tcp.syn_phy_addr_hi =
862 DMA_HI_LE(ep->syn_phy_addr);
863 p_tcp_ramrod->tcp.syn_phy_addr_lo =
864 DMA_LO_LE(ep->syn_phy_addr);
865 }
866
867 ecore_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
868
869 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
870
871 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
872 "EP(0x%x) Offload completed rc=%d\n" , ep->tcp_cid, rc);
873
874 return rc;
875 }
876
/* This function should be called after IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE
* is received. It is called from the dpc context.
*/
880 static enum _ecore_status_t
ecore_iwarp_mpa_offload(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep)
883 {
884 struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
885 struct ecore_iwarp_info *iwarp_info;
886 struct ecore_sp_init_data init_data;
887 struct ecore_spq_entry *p_ent;
888 dma_addr_t async_output_phys;
889 dma_addr_t out_pdata_phys;
890 dma_addr_t in_pdata_phys;
891 struct ecore_rdma_qp *qp;
892 bool reject;
893 enum _ecore_status_t rc;
894
895 if (!ep)
896 return ECORE_INVAL;
897
898 qp = ep->qp;
899 reject = (qp == OSAL_NULL);
900
901 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
902 init_data.cid = reject ? ep->tcp_cid : qp->icid;
903 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
904
905 if (ep->connect_mode == TCP_CONNECT_ACTIVE || !ep->event_cb)
906 init_data.comp_mode = ECORE_SPQ_MODE_CB;
907 else
908 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
909
910 rc = ecore_sp_init_request(p_hwfn, &p_ent,
911 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
912 PROTOCOLID_IWARP, &init_data);
913
914 if (rc != ECORE_SUCCESS)
915 return rc;
916
917 p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
918 out_pdata_phys = ep->ep_buffer_phys +
919 OFFSETOF(struct ecore_iwarp_ep_memory, out_pdata);
920 p_mpa_ramrod->common.outgoing_ulp_buffer.addr.hi =
921 DMA_HI_LE(out_pdata_phys);
922 p_mpa_ramrod->common.outgoing_ulp_buffer.addr.lo =
923 DMA_LO_LE(out_pdata_phys);
924 p_mpa_ramrod->common.outgoing_ulp_buffer.len =
925 ep->cm_info.private_data_len;
926 p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
927
928 p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
929 p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
930
931 p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
932
933 in_pdata_phys = ep->ep_buffer_phys +
934 OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata);
935 p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
936 p_mpa_ramrod->incoming_ulp_buffer.addr.hi =
937 DMA_HI_LE(in_pdata_phys);
938 p_mpa_ramrod->incoming_ulp_buffer.addr.lo =
939 DMA_LO_LE(in_pdata_phys);
940 p_mpa_ramrod->incoming_ulp_buffer.len =
941 OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata));
942 async_output_phys = ep->ep_buffer_phys +
943 OFFSETOF(struct ecore_iwarp_ep_memory, async_output);
944 p_mpa_ramrod->async_eqe_output_buf.hi =
945 DMA_HI_LE(async_output_phys);
946 p_mpa_ramrod->async_eqe_output_buf.lo =
947 DMA_LO_LE(async_output_phys);
948 p_mpa_ramrod->handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep));
949 p_mpa_ramrod->handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep));
950
951 if (!reject) {
952 p_mpa_ramrod->shared_queue_addr.hi =
953 DMA_HI_LE(qp->shared_queue_phys_addr);
954 p_mpa_ramrod->shared_queue_addr.lo =
955 DMA_LO_LE(qp->shared_queue_phys_addr);
956
957 p_mpa_ramrod->stats_counter_id =
958 RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
959 qp->stats_queue;
960 } else {
961 p_mpa_ramrod->common.reject = 1;
962 }
963
964 iwarp_info = &p_hwfn->p_rdma_info->iwarp;
965 p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
966 p_mpa_ramrod->mode = ep->mpa_rev;
967 SET_FIELD(p_mpa_ramrod->rtr_pref,
968 IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED,
969 ep->rtr_type);
970
971 ep->state = ECORE_IWARP_EP_MPA_OFFLOADED;
972 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
973 if (!reject)
974 ep->cid = qp->icid; /* Now they're migrated. */
975
976 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
977 "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
978 reject ? 0xffff : qp->icid, ep->tcp_cid, rc, ep->cm_info.ird,
979 ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
980 return rc;
981 }
982
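/* Handle an incoming MPA request on the passive side: extract the MPA
* revision, ord/ird and RTR bits from the private data (MPA v2 only), strip
* the MPA v2 header and deliver ECORE_IWARP_EVENT_MPA_REQUEST to the upper
* layer.
*/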
983 static void
ecore_iwarp_mpa_received(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep)
986 {
987 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
988 struct ecore_iwarp_cm_event_params params;
989 struct mpa_v2_hdr *mpa_v2_params;
990 union async_output *async_data;
991 u16 mpa_ord, mpa_ird;
992 u8 mpa_hdr_size = 0;
993 u8 mpa_rev;
994
995 async_data = &ep->ep_buffer_virt->async_output;
996
997 mpa_rev = async_data->mpa_request.mpa_handshake_mode;
998 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
999 "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
1000 async_data->mpa_request.ulp_data_len,
1001 mpa_rev,
1002 *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata)));
1003
1004 if (ep->listener->state > ECORE_IWARP_LISTENER_STATE_UNPAUSE) {
1005 /* MPA reject initiated by ecore */
1006 OSAL_MEMSET(&ep->cm_info, 0, sizeof(ep->cm_info));
1007 ep->event_cb = OSAL_NULL;
1008 ecore_iwarp_mpa_offload(p_hwfn, ep);
1009 return;
1010 }
1011
1012 if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1013 if (iwarp_info->mpa_rev == MPA_NEGOTIATION_TYPE_BASIC) {
1014 DP_ERR(p_hwfn, "MPA_NEGOTIATE Received MPA rev 2 on driver supporting only MPA rev 1\n");
1015 /* MPA_REV2 ToDo: close the tcp connection. */
1016 return;
1017 }
1018
1019 /* Read ord/ird values from private data buffer */
1020 mpa_v2_params =
1021 (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
1022 mpa_hdr_size = sizeof(*mpa_v2_params);
1023
1024 mpa_ord = ntohs(mpa_v2_params->ord);
1025 mpa_ird = ntohs(mpa_v2_params->ird);
1026
/* Temporarily store the incoming ord/ird requested by the peer in cm_info;
* they are replaced with the negotiated values during accept.
*/
1030 ep->cm_info.ord = (u8)OSAL_MIN_T(u16,
1031 (mpa_ord & MPA_V2_IRD_ORD_MASK),
1032 ECORE_IWARP_ORD_DEFAULT);
1033
1034 ep->cm_info.ird = (u8)OSAL_MIN_T(u16,
1035 (mpa_ird & MPA_V2_IRD_ORD_MASK),
1036 ECORE_IWARP_IRD_DEFAULT);
1037
1038 /* Peer2Peer negotiation */
1039 ep->rtr_type = MPA_RTR_TYPE_NONE;
1040 if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
1041 if (mpa_ord & MPA_V2_WRITE_RTR)
1042 ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
1043
1044 if (mpa_ord & MPA_V2_READ_RTR)
1045 ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
1046
1047 if (mpa_ird & MPA_V2_SEND_RTR)
1048 ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
1049
1050 ep->rtr_type &= iwarp_info->rtr_type;
1051 /* if we're left with no match send our capabilities */
1052 if (ep->rtr_type == MPA_RTR_TYPE_NONE)
1053 ep->rtr_type = iwarp_info->rtr_type;
1054
1055 /* prioritize write over send and read */
1056 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1057 ep->rtr_type = MPA_RTR_TYPE_ZERO_WRITE;
1058 }
1059
1060 ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
1061 } else {
1062 ep->cm_info.ord = ECORE_IWARP_ORD_DEFAULT;
1063 ep->cm_info.ird = ECORE_IWARP_IRD_DEFAULT;
1064 ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
1065 }
1066
1067 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1068 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
1069 mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
1070 async_data->mpa_request.ulp_data_len,
1071 mpa_hdr_size);
1072
1073 /* Strip mpa v2 hdr from private data before sending to upper layer */
1074 ep->cm_info.private_data =
1075 ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
1076
1077 ep->cm_info.private_data_len =
1078 async_data->mpa_request.ulp_data_len - mpa_hdr_size;
1079
1080 params.event = ECORE_IWARP_EVENT_MPA_REQUEST;
1081 params.cm_info = &ep->cm_info;
1082 params.ep_context = ep;
1083 params.status = ECORE_SUCCESS;
1084
1085 ep->state = ECORE_IWARP_EP_MPA_REQ_RCVD;
1086 ep->event_cb(ep->cb_context, ¶ms);
1087 }
1088
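/* Move an ep from its listener's ep_list to the given iwarp list, taking
* the listener lock and the iwarp lock in turn.
*/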
1089 static void
ecore_iwarp_move_to_ep_list(struct ecore_hwfn *p_hwfn,
osal_list_t *list, struct ecore_iwarp_ep *ep)
1092 {
1093 OSAL_SPIN_LOCK(&ep->listener->lock);
1094 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry, &ep->listener->ep_list);
1095 OSAL_SPIN_UNLOCK(&ep->listener->lock);
1096 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1097 OSAL_LIST_PUSH_TAIL(&ep->list_entry, list);
1098 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1099 }
1100
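/* Reset an ep and return it to the free list; if its tcp cid was consumed,
* try to allocate a new one (failure is tolerated and handled later).
*/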
1101 static void
ecore_iwarp_return_ep(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep)
1104 {
1105 ep->state = ECORE_IWARP_EP_INIT;
1106 if (ep->qp)
1107 ep->qp->ep = OSAL_NULL;
1108 ep->qp = OSAL_NULL;
1109 OSAL_MEMSET(&ep->cm_info, 0, sizeof(ep->cm_info));
1110
1111 if (ep->tcp_cid == ECORE_IWARP_INVALID_TCP_CID) {
/* We don't care about the return code; it is ok if tcp_cid remains
* invalid - in that case we'll defer the allocation.
*/
1115 ecore_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1116 }
1117
1118 ecore_iwarp_move_to_ep_list(p_hwfn,
1119 &p_hwfn->p_rdma_info->iwarp.ep_free_list,
1120 ep);
1121 }
1122
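/* Extract the peer's private data from the MPA response and, for MPA v2,
* the ord/ird values carried in the MPA header.
*/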
1123 static void
ecore_iwarp_parse_private_data(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep)
1126 {
1127 struct mpa_v2_hdr *mpa_v2_params;
1128 union async_output *async_data;
1129 u16 mpa_ird, mpa_ord;
1130 u8 mpa_data_size = 0;
1131
1132 if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
1133 mpa_v2_params = (struct mpa_v2_hdr *)
1134 ((u8 *)ep->ep_buffer_virt->in_pdata);
1135 mpa_data_size = sizeof(*mpa_v2_params);
1136 mpa_ird = ntohs(mpa_v2_params->ird);
1137 mpa_ord = ntohs(mpa_v2_params->ord);
1138
1139 ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
1140 ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
1141 } /* else: Ord / Ird already configured */
1142
1143 async_data = &ep->ep_buffer_virt->async_output;
1144
1145 ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
1146 ep->cm_info.private_data_len =
1147 async_data->mpa_response.ulp_data_len - mpa_data_size;
1148 }
1149
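/* Active side only: an MPA reply arrived from the peer. Parse its private
* data and notify the upper layer with ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY.
*/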
1150 static void
ecore_iwarp_mpa_reply_arrived(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep)
1153 {
1154 struct ecore_iwarp_cm_event_params params;
1155
1156 if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
1157 DP_NOTICE(p_hwfn, true, "MPA reply event not expected on passive side!\n");
1158 return;
1159 }
1160
1161 params.event = ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY;
1162
1163 ecore_iwarp_parse_private_data(p_hwfn, ep);
1164
1165 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1166 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1167 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1168
1169 params.cm_info = &ep->cm_info;
1170 params.ep_context = ep;
1171 params.status = ECORE_SUCCESS;
1172
1173 ep->mpa_reply_processed = true;
1174
1175 ep->event_cb(ep->cb_context, ¶ms);
1176 }
1177
1178 #define ECORE_IWARP_CONNECT_MODE_STRING(ep) \
1179 (ep->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
1180
1181 /* Called as a result of the event:
1182 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1183 */
1184 static void
ecore_iwarp_mpa_complete(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep,
u8 fw_return_code)
1188 {
1189 struct ecore_iwarp_cm_event_params params;
1190
1191 if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1192 params.event = ECORE_IWARP_EVENT_ACTIVE_COMPLETE;
1193 else
1194 params.event = ECORE_IWARP_EVENT_PASSIVE_COMPLETE;
1195
1196 if (ep->connect_mode == TCP_CONNECT_ACTIVE &&
1197 !ep->mpa_reply_processed) {
1198 ecore_iwarp_parse_private_data(p_hwfn, ep);
1199 }
1200
1201 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1202 "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1203 ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1204
1205 params.cm_info = &ep->cm_info;
1206
1207 params.ep_context = ep;
1208
1209 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1210 (ep->state != ECORE_IWARP_EP_MPA_OFFLOADED)) {
1211 /* This is a FW bug. Shouldn't get complete without offload */
1212 DP_NOTICE(p_hwfn, false, "%s(0x%x) ERROR: Got MPA complete without MPA offload fw_return_code=%d ep->state=%d\n",
1213 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid,
1214 fw_return_code, ep->state);
1215 ep->state = ECORE_IWARP_EP_CLOSED;
1216 return;
1217 }
1218
1219 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1220 (ep->state == ECORE_IWARP_EP_ABORTING))
1221 return;
1222
1223 ep->state = ECORE_IWARP_EP_CLOSED;
1224
1225 switch (fw_return_code) {
1226 case RDMA_RETURN_OK:
1227 ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1228 ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1229 ecore_iwarp_modify_qp(p_hwfn, ep->qp,
1230 ECORE_IWARP_QP_STATE_RTS,
1231 1);
1232 ep->state = ECORE_IWARP_EP_ESTABLISHED;
1233 params.status = ECORE_SUCCESS;
1234 break;
1235 case IWARP_CONN_ERROR_MPA_TIMEOUT:
1236 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA timeout\n",
1237 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1238 params.status = ECORE_TIMEOUT;
1239 break;
1240 case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1241 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Reject\n",
1242 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1243 params.status = ECORE_CONN_REFUSED;
1244 break;
1245 case IWARP_CONN_ERROR_MPA_RST:
1246 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1247 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1248 ep->tcp_cid);
1249 params.status = ECORE_CONN_RESET;
1250 break;
1251 case IWARP_CONN_ERROR_MPA_FIN:
1252 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA received FIN\n",
1253 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1254 params.status = ECORE_CONN_REFUSED;
1255 break;
1256 case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1257 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA insufficient ird\n",
1258 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1259 params.status = ECORE_CONN_REFUSED;
1260 break;
1261 case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1262 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA RTR MISMATCH\n",
1263 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1264 params.status = ECORE_CONN_REFUSED;
1265 break;
1266 case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1267 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Invalid Packet\n",
1268 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1269 params.status = ECORE_CONN_REFUSED;
1270 break;
1271 case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1272 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Local Error\n",
1273 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1274 params.status = ECORE_CONN_REFUSED;
1275 break;
1276 case IWARP_CONN_ERROR_MPA_TERMINATE:
1277 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA TERMINATE\n",
1278 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1279 params.status = ECORE_CONN_REFUSED;
1280 break;
1281 default:
1282 params.status = ECORE_CONN_RESET;
1283 break;
1284 }
1285
1286 if (ep->event_cb)
1287 ep->event_cb(ep->cb_context, ¶ms);
1288
/* On the passive side, if there is no associated QP (REJECT) we need to
* return the ep to the pool; otherwise we wait for the QP to release it,
* since a replacement element is added in accept. In any case we need to
* remove it from the ep_list (active connections).
*/
1294 if (fw_return_code != RDMA_RETURN_OK) {
1295 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1296 if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1297 (ep->qp == OSAL_NULL)) { /* Rejected */
1298 ecore_iwarp_return_ep(p_hwfn, ep);
1299 } else {
1300 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1301 OSAL_LIST_REMOVE_ENTRY(
1302 &ep->list_entry,
1303 &p_hwfn->p_rdma_info->iwarp.ep_list);
1304 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1305 }
1306 }
1307 }
1308
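/* For MPA v2, build the outgoing MPA header (ird/ord plus peer-to-peer RTR
* bits) at the start of out_pdata. *mpa_data_size returns the header size so
* callers can append their private data right after it.
*/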
1309 static void
ecore_iwarp_mpa_v2_set_private(struct ecore_hwfn *p_hwfn,
struct ecore_iwarp_ep *ep,
u8 *mpa_data_size)
1313 {
1314 struct mpa_v2_hdr *mpa_v2_params;
1315 u16 mpa_ird, mpa_ord;
1316
1317 *mpa_data_size = 0;
1318 if (MPA_REV2(ep->mpa_rev)) {
1319 mpa_v2_params =
1320 (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1321 *mpa_data_size = sizeof(*mpa_v2_params);
1322
1323 mpa_ird = (u16)ep->cm_info.ird;
1324 mpa_ord = (u16)ep->cm_info.ord;
1325
1326 if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1327 mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1328
1329 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1330 mpa_ird |= MPA_V2_SEND_RTR;
1331
1332 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1333 mpa_ord |= MPA_V2_WRITE_RTR;
1334
1335 if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1336 mpa_ord |= MPA_V2_READ_RTR;
1337 }
1338
1339 mpa_v2_params->ird = htons(mpa_ird);
1340 mpa_v2_params->ord = htons(mpa_ord);
1341
1342 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1343 "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1344 mpa_v2_params->ird,
1345 mpa_v2_params->ord,
1346 *((u32 *)mpa_v2_params),
1347 mpa_ord & MPA_V2_IRD_ORD_MASK,
1348 mpa_ird & MPA_V2_IRD_ORD_MASK,
1349 !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1350 !!(mpa_ird & MPA_V2_SEND_RTR),
1351 !!(mpa_ord & MPA_V2_WRITE_RTR),
1352 !!(mpa_ord & MPA_V2_READ_RTR));
1353 }
1354 }
1355
1356 enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
struct ecore_iwarp_connect_in *iparams,
struct ecore_iwarp_connect_out *oparams)
1360 {
1361 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1362 struct ecore_iwarp_info *iwarp_info;
1363 struct ecore_iwarp_ep *ep;
1364 enum _ecore_status_t rc;
1365 u8 mpa_data_size = 0;
1366 u8 ts_hdr_size = 0;
1367 u32 cid;
1368
1369 if ((iparams->cm_info.ord > ECORE_IWARP_ORD_DEFAULT) ||
1370 (iparams->cm_info.ird > ECORE_IWARP_IRD_DEFAULT)) {
1371 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1372 "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1373 iparams->qp->icid, iparams->cm_info.ord,
1374 iparams->cm_info.ird);
1375
1376 return ECORE_INVAL;
1377 }
1378
1379 iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1380
/* Allocate a cid and an ep object */
1382 rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
1383 if (rc != ECORE_SUCCESS)
1384 return rc;
1385
1386 if (iparams->qp->ep == OSAL_NULL) {
1387 rc = ecore_iwarp_create_ep(p_hwfn, &ep);
1388 if (rc != ECORE_SUCCESS)
1389 return rc;
1390 } else {
1391 ep = iparams->qp->ep;
1392 DP_ERR(p_hwfn, "Note re-use of QP for different connect\n");
1393 ep->state = ECORE_IWARP_EP_INIT;
1394 }
1395
1396 ep->tcp_cid = cid;
1397
1398 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1399 OSAL_LIST_PUSH_TAIL(&ep->list_entry,
1400 &p_hwfn->p_rdma_info->iwarp.ep_list);
1401 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1402
1403 ep->qp = iparams->qp;
1404 ep->qp->ep = ep;
1405 OSAL_MEMCPY(ep->remote_mac_addr,
1406 iparams->remote_mac_addr,
1407 ETH_ALEN);
1408 OSAL_MEMCPY(ep->local_mac_addr,
1409 iparams->local_mac_addr,
1410 ETH_ALEN);
1411 OSAL_MEMCPY(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1412
1413 ep->cm_info.ord = iparams->cm_info.ord;
1414 ep->cm_info.ird = iparams->cm_info.ird;
1415
1416 ep->rtr_type = iwarp_info->rtr_type;
1417 if (iwarp_info->peer2peer == 0)
1418 ep->rtr_type = MPA_RTR_TYPE_NONE;
1419
1420 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1421 (ep->cm_info.ord == 0))
1422 ep->cm_info.ord = 1;
1423
1424 ep->mpa_rev = iwarp_info->mpa_rev;
1425
1426 ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1427
1428 ep->cm_info.private_data = (u8 *)ep->ep_buffer_virt->out_pdata;
1429 ep->cm_info.private_data_len =
1430 iparams->cm_info.private_data_len + mpa_data_size;
1431
OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1433 iparams->cm_info.private_data,
1434 iparams->cm_info.private_data_len);
1435
1436 if (p_hwfn->p_rdma_info->iwarp.tcp_flags & ECORE_IWARP_TS_EN)
1437 ts_hdr_size = TIMESTAMP_HEADER_SIZE;
1438
1439 ep->mss = iparams->mss - ts_hdr_size;
1440 ep->mss = OSAL_MIN_T(u16, ECORE_IWARP_MAX_FW_MSS, ep->mss);
1441
1442 ep->event_cb = iparams->event_cb;
1443 ep->cb_context = iparams->cb_context;
1444 ep->connect_mode = TCP_CONNECT_ACTIVE;
1445
1446 oparams->ep_context = ep;
1447
1448 rc = ecore_iwarp_tcp_offload(p_hwfn, ep);
1449
1450 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1451 iparams->qp->icid, ep->tcp_cid, rc);
1452
1453 if (rc != ECORE_SUCCESS)
1454 ecore_iwarp_destroy_ep(p_hwfn, ep, true);
1455
1456 return rc;
1457 }
1458
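/* Take an ep from the free list. If its tcp cid allocation was previously
* deferred, try to allocate one now; on failure no ep is returned.
*/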
1459 static struct ecore_iwarp_ep *
ecore_iwarp_get_free_ep(struct ecore_hwfn *p_hwfn)
1461 {
1462 struct ecore_iwarp_ep *ep = OSAL_NULL;
1463 enum _ecore_status_t rc;
1464
1465 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1466
1467 if (OSAL_LIST_IS_EMPTY(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1468 DP_ERR(p_hwfn, "Ep list is empty\n");
1469 goto out;
1470 }
1471
1472 ep = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1473 struct ecore_iwarp_ep,
1474 list_entry);
1475
/* In some cases allocating a tcp cid could have failed when the ep was
* added from accept / failure handling... retry now. This is not the
* common case.
*/
1479 if (ep->tcp_cid == ECORE_IWARP_INVALID_TCP_CID) {
1480 rc = ecore_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1481 /* if we fail we could look for another entry with a valid
1482 * tcp_cid, but since we don't expect to reach this anyway
1483 * it's not worth the handling
1484 */
1485 if (rc) {
1486 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1487 ep = OSAL_NULL;
1488 goto out;
1489 }
1490 }
1491
1492 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
1493 &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1494
1495 out:
1496 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1497 return ep;
1498 }
1499
1500 /* takes into account timer scan ~20 ms and interrupt/dpc overhead */
1501 #define ECORE_IWARP_MAX_CID_CLEAN_TIME 100
1502 /* Technically we shouldn't reach this count with 100 ms iteration sleep */
1503 #define ECORE_IWARP_MAX_NO_PROGRESS_CNT 5
1504
/* This function waits for all the bits of a bmap to be cleared. As long as
* there is progress (i.e. the number of bits left to be cleared decreases),
* the function keeps waiting.
*/
1509 static enum _ecore_status_t
ecore_iwarp_wait_cid_map_cleared(struct ecore_hwfn *p_hwfn,
struct ecore_bmap *bmap)
1512 {
1513 int prev_weight = 0;
1514 int wait_count = 0;
1515 int weight = 0;
1516
1517 weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
1518 prev_weight = weight;
1519
1520 while (weight) {
1521 OSAL_MSLEEP(ECORE_IWARP_MAX_CID_CLEAN_TIME);
1522
1523 weight = OSAL_BITMAP_WEIGHT(bmap->bitmap, bmap->max_count);
1524
1525 if (prev_weight == weight) {
1526 wait_count++;
1527 } else {
1528 prev_weight = weight;
1529 wait_count = 0;
1530 }
1531
1532 if (wait_count > ECORE_IWARP_MAX_NO_PROGRESS_CNT) {
1533 DP_NOTICE(p_hwfn, false,
1534 "%s bitmap wait timed out (%d cids pending)\n",
1535 bmap->name, weight);
1536 return ECORE_TIMEOUT;
1537 }
1538 }
1539 return ECORE_SUCCESS;
1540 }
1541
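/* Wait for the preallocated tcp cids to be cleaned, release their entries
* from the main cid map, and then wait for all remaining cids to complete.
*/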
1542 static enum _ecore_status_t
ecore_iwarp_wait_for_all_cids(struct ecore_hwfn *p_hwfn)
1544 {
1545 enum _ecore_status_t rc;
1546 int i;
1547
1548 rc = ecore_iwarp_wait_cid_map_cleared(
1549 p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map);
1550 if (rc)
1551 return rc;
1552
1553 /* Now free the tcp cids from the main cid map */
1554 for (i = 0; i < ECORE_IWARP_PREALLOC_CNT; i++) {
1555 ecore_bmap_release_id(p_hwfn,
1556 &p_hwfn->p_rdma_info->cid_map,
1557 i);
1558 }
1559
1560 /* Now wait for all cids to be completed */
1561 rc = ecore_iwarp_wait_cid_map_cleared(
1562 p_hwfn, &p_hwfn->p_rdma_info->cid_map);
1563
1564 return rc;
1565 }
1566
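/* Drain the ep free list: release each ep's tcp cid (if any) back to the
* tcp_cid_map and destroy the ep.
*/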
1567 static void
ecore_iwarp_free_prealloc_ep(struct ecore_hwfn *p_hwfn)
1569 {
1570 struct ecore_iwarp_ep *ep;
1571 u32 cid;
1572
1573 while (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1574 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1575
1576 ep = OSAL_LIST_FIRST_ENTRY(
1577 &p_hwfn->p_rdma_info->iwarp.ep_free_list,
1578 struct ecore_iwarp_ep, list_entry);
1579
1580 if (ep == OSAL_NULL) {
1581 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1582 break;
1583 }
1584
1585 #ifdef _NTDDK_
1586 #pragma warning(suppress : 6011)
1587 #endif
1588 OSAL_LIST_REMOVE_ENTRY(
1589 &ep->list_entry,
1590 &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1591
1592 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1593
1594 if (ep->tcp_cid != ECORE_IWARP_INVALID_TCP_CID) {
1595 cid = ep->tcp_cid - ecore_cxt_get_proto_cid_start(
1596 p_hwfn, p_hwfn->p_rdma_info->proto);
1597
1598 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
1599
1600 ecore_bmap_release_id(p_hwfn,
1601 &p_hwfn->p_rdma_info->tcp_cid_map,
1602 cid);
1603
1604 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
1605 }
1606
1607 ecore_iwarp_destroy_ep(p_hwfn, ep, false);
1608 }
1609 }
1610
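/* Allocate ep objects for the free list: ECORE_IWARP_PREALLOC_CNT entries at
* init time, or a single replacement entry afterwards.
*/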
1611 static enum _ecore_status_t
ecore_iwarp_prealloc_ep(struct ecore_hwfn *p_hwfn, bool init)
1613 {
1614 struct ecore_iwarp_ep *ep;
1615 int rc = ECORE_SUCCESS;
1616 u32 cid;
1617 int count;
1618 int i;
1619
1620 if (init)
1621 count = ECORE_IWARP_PREALLOC_CNT;
1622 else
1623 count = 1;
1624
1625 for (i = 0; i < count; i++) {
1626 rc = ecore_iwarp_create_ep(p_hwfn, &ep);
1627 if (rc != ECORE_SUCCESS)
1628 return rc;
1629
/* During initialization we allocate from the main pool; afterwards we
* allocate only from the tcp_cid map.
*/
1633 if (init) {
1634 rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
1635 if (rc != ECORE_SUCCESS)
1636 goto err;
1637 ecore_iwarp_set_tcp_cid(p_hwfn, cid);
1638 } else {
/* We don't care about the return code; it is ok if tcp_cid remains
* invalid - in that case we'll defer the allocation.
*/
1643 ecore_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1644 }
1645
1646 ep->tcp_cid = cid;
1647
1648 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1649 OSAL_LIST_PUSH_TAIL(&ep->list_entry,
1650 &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1651 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1652 }
1653
1654 return rc;
1655
1656 err:
1657 ecore_iwarp_destroy_ep(p_hwfn, ep, false);
1658
1659 return rc;
1660 }
1661
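/* Allocate iWARP-specific resources: spinlocks, the preallocated tcp cid
* bitmap, the ep free list and the OOO buffer infrastructure.
*/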
1662 enum _ecore_status_t
ecore_iwarp_alloc(struct ecore_hwfn *p_hwfn)
1664 {
1665 enum _ecore_status_t rc;
1666
1667 #ifdef CONFIG_ECORE_LOCK_ALLOC
1668 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->p_rdma_info->iwarp.iw_lock);
1669 OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->p_rdma_info->iwarp.qp_lock);
1670 #endif
1671 OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1672 OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.qp_lock);
1673
/* Allocate a bitmap for tcp cids. These are used by the passive side to
* ensure it can, during the dpc, allocate a tcp cid that was pre-acquired
* and does not require dynamic ILT allocation.
*/
1678 rc = ecore_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1679 ECORE_IWARP_PREALLOC_CNT,
1680 "TCP_CID");
1681 if (rc != ECORE_SUCCESS) {
1682 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1683 "Failed to allocate tcp cid, rc = %d\n",
1684 rc);
1685 return rc;
1686 }
1687
1688 OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1690 rc = ecore_iwarp_prealloc_ep(p_hwfn, true);
1691 if (rc != ECORE_SUCCESS) {
1692 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1693 "ecore_iwarp_prealloc_ep failed, rc = %d\n",
1694 rc);
1695 return rc;
1696 }
1697 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1698 "ecore_iwarp_prealloc_ep success, rc = %d\n",
1699 rc);
1700
1701 return ecore_ooo_alloc(p_hwfn);
1702 }
1703
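/* Free the resources allocated by ecore_iwarp_alloc(): locks, OOO info, the
* MPA reassembly buffers and the tcp cid bitmap.
*/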
1704 void
ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn)
1706 {
1707 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1708
1709 #ifdef CONFIG_ECORE_LOCK_ALLOC
1710 OSAL_SPIN_LOCK_DEALLOC(iwarp_info->iw_lock);
1711 OSAL_SPIN_LOCK_DEALLOC(iwarp_info->qp_lock);
1712 #endif
1713 ecore_ooo_free(p_hwfn);
1714 if (iwarp_info->partial_fpdus)
1715 OSAL_FREE(p_hwfn->p_dev, iwarp_info->partial_fpdus);
1716 if (iwarp_info->mpa_bufs)
1717 OSAL_FREE(p_hwfn->p_dev, iwarp_info->mpa_bufs);
1718 if (iwarp_info->mpa_intermediate_buf)
1719 OSAL_FREE(p_hwfn->p_dev, iwarp_info->mpa_intermediate_buf);
1720
1721 ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1722 }
1723
1724 enum _ecore_status_t
1725 ecore_iwarp_accept(void *rdma_cxt,
1726 struct ecore_iwarp_accept_in *iparams)
1727 {
1728 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1729 struct ecore_iwarp_ep *ep;
1730 u8 mpa_data_size = 0;
1731 enum _ecore_status_t rc;
1732
1733 ep = (struct ecore_iwarp_ep *)iparams->ep_context;
1734 if (!ep) {
1735 		DP_ERR(p_hwfn, "EP context received in accept is NULL\n");
1736 return ECORE_INVAL;
1737 }
1738
1739 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1740 iparams->qp->icid, ep->tcp_cid);
1741
1742 if ((iparams->ord > ECORE_IWARP_ORD_DEFAULT) ||
1743 (iparams->ird > ECORE_IWARP_IRD_DEFAULT)) {
1744 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1745 "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1746 iparams->qp->icid, ep->tcp_cid,
1747 			   iparams->ord, iparams->ird);
1748 return ECORE_INVAL;
1749 }
1750
1751 	/* We may reach qp->ep != OSAL_NULL if accept is called again on the same qp */
1752 if (iparams->qp->ep == OSAL_NULL) {
1753 /* We need to add a replacement for the ep to the free list */
1754 ecore_iwarp_prealloc_ep(p_hwfn, false);
1755 } else {
1756 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
1757 "Note re-use of QP for different connect\n");
1758 /* Return the old ep to the free_pool */
1759 ecore_iwarp_return_ep(p_hwfn, iparams->qp->ep);
1760 }
1761
1762 ecore_iwarp_move_to_ep_list(p_hwfn,
1763 &p_hwfn->p_rdma_info->iwarp.ep_list,
1764 ep);
1765 ep->listener = OSAL_NULL;
1766 ep->cb_context = iparams->cb_context;
1767 ep->qp = iparams->qp;
1768 ep->qp->ep = ep;
1769
1770 if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1771 		/* Negotiate ord/ird: if the upper layer requested an ord larger
1772 		 * than the ird advertised by the remote, we need to decrease our
1773 		 * ord to match the remote ord.
1774 */
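		/* Illustrative example: if the upper layer asks for ord = 32 but the
		 * peer advertised ird = 8 in its MPA frame, ord is reduced to 8 below.
		 */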
1775 if (iparams->ord > ep->cm_info.ird) {
1776 iparams->ord = ep->cm_info.ird;
1777 }
1778
1779 		/* For Chelsio compatibility, if a zero-read RTR is requested
1780 		 * we can't set ird to zero.
1781 		 */
1782 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1783 (iparams->ird == 0))
1784 iparams->ird = 1;
1785 }
1786
1787 /* Update cm_info ord/ird to be negotiated values */
1788 ep->cm_info.ord = iparams->ord;
1789 ep->cm_info.ird = iparams->ird;
1790
1791 ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1792
1793 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1794 ep->cm_info.private_data_len =
1795 iparams->private_data_len + mpa_data_size;
1796
1797 OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1798 iparams->private_data,
1799 iparams->private_data_len);
1800
1801 if (ep->state == ECORE_IWARP_EP_CLOSED) {
1802 DP_NOTICE(p_hwfn, false,
1803 "(0x%x) Accept called on EP in CLOSED state\n",
1804 ep->tcp_cid);
1805 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1806 ecore_iwarp_return_ep(p_hwfn, ep);
1807 return ECORE_CONN_RESET;
1808 }
1809
1810 rc = ecore_iwarp_mpa_offload(p_hwfn, ep);
1811 if (rc) {
1812 ecore_iwarp_modify_qp(p_hwfn,
1813 iparams->qp,
1814 ECORE_IWARP_QP_STATE_ERROR,
1815 1);
1816 }
1817
1818 return rc;
1819 }
1820
1821 enum _ecore_status_t
1822 ecore_iwarp_reject(void *rdma_cxt,
1823 struct ecore_iwarp_reject_in *iparams)
1824 {
1825 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
1826 struct ecore_iwarp_ep *ep;
1827 u8 mpa_data_size = 0;
1828 enum _ecore_status_t rc;
1829
1830 ep = (struct ecore_iwarp_ep *)iparams->ep_context;
1831 if (!ep) {
1832 		DP_ERR(p_hwfn, "EP context received in reject is NULL\n");
1833 return ECORE_INVAL;
1834 }
1835
1836 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1837
1838 ep->cb_context = iparams->cb_context;
1839 ep->qp = OSAL_NULL;
1840
1841 ecore_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1842
1843 ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1844 ep->cm_info.private_data_len =
1845 iparams->private_data_len + mpa_data_size;
1846
1847 OSAL_MEMCPY((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1848 iparams->private_data,
1849 iparams->private_data_len);
1850
1851 if (ep->state == ECORE_IWARP_EP_CLOSED) {
1852 DP_NOTICE(p_hwfn, false,
1853 "(0x%x) Reject called on EP in CLOSED state\n",
1854 ep->tcp_cid);
1855 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
1856 ecore_iwarp_return_ep(p_hwfn, ep);
1857 return ECORE_CONN_RESET;
1858 }
1859
1860 rc = ecore_iwarp_mpa_offload(p_hwfn, ep);
1861 return rc;
1862 }
1863
1864 static void
1865 ecore_iwarp_print_cm_info(struct ecore_hwfn *p_hwfn,
1866 struct ecore_iwarp_cm_info *cm_info)
1867 {
1868 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version = %d\n",
1869 cm_info->ip_version);
1870 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip %x.%x.%x.%x\n",
1871 cm_info->remote_ip[0],
1872 cm_info->remote_ip[1],
1873 cm_info->remote_ip[2],
1874 cm_info->remote_ip[3]);
1875 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip %x.%x.%x.%x\n",
1876 cm_info->local_ip[0],
1877 cm_info->local_ip[1],
1878 cm_info->local_ip[2],
1879 cm_info->local_ip[3]);
1880 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port = %x\n",
1881 cm_info->remote_port);
1882 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port = %x\n",
1883 cm_info->local_port);
1884 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan = %x\n",
1885 cm_info->vlan);
1886 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "private_data_len = %x\n",
1887 cm_info->private_data_len);
1888 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ord = %d\n",
1889 cm_info->ord);
1890 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ird = %d\n",
1891 cm_info->ird);
1892 }
1893
1894 static int
1895 ecore_iwarp_ll2_post_rx(struct ecore_hwfn *p_hwfn,
1896 struct ecore_iwarp_ll2_buff *buf,
1897 u8 handle)
1898 {
1899 enum _ecore_status_t rc;
1900
1901 rc = ecore_ll2_post_rx_buffer(
1902 p_hwfn,
1903 handle,
1904 buf->data_phys_addr,
1905 (u16)buf->buff_size,
1906 buf, 1);
1907
1908 if (rc) {
1909 DP_NOTICE(p_hwfn, false,
1910 "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1911 rc, handle);
1912 OSAL_DMA_FREE_COHERENT(
1913 p_hwfn->p_dev,
1914 buf->data,
1915 buf->data_phys_addr,
1916 buf->buff_size);
1917 OSAL_FREE(p_hwfn->p_dev, buf);
1918 }
1919
1920 return rc;
1921 }
1922
1923 static bool
1924 ecore_iwarp_ep_exists(struct ecore_hwfn *p_hwfn,
1925 struct ecore_iwarp_listener *listener,
1926 struct ecore_iwarp_cm_info *cm_info)
1927 {
1928 struct ecore_iwarp_ep *ep = OSAL_NULL;
1929 bool found = false;
1930
1931 OSAL_SPIN_LOCK(&listener->lock);
1932 OSAL_LIST_FOR_EACH_ENTRY(ep, &listener->ep_list,
1933 list_entry, struct ecore_iwarp_ep) {
1934 if ((ep->cm_info.local_port == cm_info->local_port) &&
1935 (ep->cm_info.remote_port == cm_info->remote_port) &&
1936 (ep->cm_info.vlan == cm_info->vlan) &&
1937 !OSAL_MEMCMP(&(ep->cm_info.local_ip), cm_info->local_ip,
1938 sizeof(cm_info->local_ip)) &&
1939 !OSAL_MEMCMP(&(ep->cm_info.remote_ip), cm_info->remote_ip,
1940 sizeof(cm_info->remote_ip))) {
1941 found = true;
1942 break;
1943 }
1944 }
1945
1946 OSAL_SPIN_UNLOCK(&listener->lock);
1947
1948 if (found) {
1949 DP_NOTICE(p_hwfn, false, "SYN received on active connection - dropping\n");
1950 ecore_iwarp_print_cm_info(p_hwfn, cm_info);
1951
1952 return true;
1953 }
1954
1955 return false;
1956 }
1957
1958 static struct ecore_iwarp_listener *
1959 ecore_iwarp_get_listener(struct ecore_hwfn *p_hwfn,
1960 struct ecore_iwarp_cm_info *cm_info)
1961 {
1962 struct ecore_iwarp_listener *listener = OSAL_NULL;
1963 static const u32 ip_zero[4] = {0, 0, 0, 0};
1964 bool found = false;
1965
1966 ecore_iwarp_print_cm_info(p_hwfn, cm_info);
1967
1968 OSAL_LIST_FOR_EACH_ENTRY(listener,
1969 &p_hwfn->p_rdma_info->iwarp.listen_list,
1970 list_entry, struct ecore_iwarp_listener) {
1971 if (listener->port == cm_info->local_port) {
1972 			/* A wildcard IP (0.0.0.0) matches any IP and any vlan */
1973 if (!OSAL_MEMCMP(listener->ip_addr,
1974 ip_zero,
1975 sizeof(ip_zero))) {
1976 found = true;
1977 break;
1978 }
1979
1980 /* If not any IP -> check vlan as well */
1981 if (!OSAL_MEMCMP(listener->ip_addr,
1982 cm_info->local_ip,
1983 sizeof(cm_info->local_ip)) &&
1984
1985 (listener->vlan == cm_info->vlan)) {
1986 found = true;
1987 break;
1988 }
1989 }
1990 }
1991
1992 if (found && listener->state == ECORE_IWARP_LISTENER_STATE_ACTIVE) {
1993 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener found = %p\n",
1994 listener);
1995 return listener;
1996 }
1997 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener not found\n");
1998 return OSAL_NULL;
1999 }
2000
2001 static enum _ecore_status_t
2002 ecore_iwarp_parse_rx_pkt(struct ecore_hwfn *p_hwfn,
2003 struct ecore_iwarp_cm_info *cm_info,
2004 void *buf,
2005 u8 *remote_mac_addr,
2006 u8 *local_mac_addr,
2007 int *payload_len,
2008 int *tcp_start_offset)
2009 {
2010 struct ecore_vlan_ethhdr *vethh;
2011 struct ecore_ethhdr *ethh;
2012 struct ecore_iphdr *iph;
2013 struct ecore_ipv6hdr *ip6h;
2014 struct ecore_tcphdr *tcph;
2015 bool vlan_valid = false;
2016 int eth_hlen, ip_hlen;
2017 u16 eth_type;
2018 int i;
2019
2020 ethh = (struct ecore_ethhdr *)buf;
2021 eth_type = ntohs(ethh->h_proto);
2022 if (eth_type == ETH_P_8021Q) {
2023 vlan_valid = true;
2024 vethh = (struct ecore_vlan_ethhdr *)ethh;
2025 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
2026 eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
2027 }
2028
2029 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
2030
2031 OSAL_MEMCPY(remote_mac_addr,
2032 ethh->h_source,
2033 ETH_ALEN);
2034
2035 OSAL_MEMCPY(local_mac_addr,
2036 ethh->h_dest,
2037 ETH_ALEN);
2038
2039 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_type =%d Source mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2040 eth_type, ethh->h_source[0], ethh->h_source[1],
2041 ethh->h_source[2], ethh->h_source[3],
2042 ethh->h_source[4], ethh->h_source[5]);
2043
2044 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_hlen=%d destination mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2045 eth_hlen, ethh->h_dest[0], ethh->h_dest[1],
2046 ethh->h_dest[2], ethh->h_dest[3],
2047 ethh->h_dest[4], ethh->h_dest[5]);
2048
2049 iph = (struct ecore_iphdr *)((u8 *)(ethh) + eth_hlen);
2050
2051 if (eth_type == ETH_P_IP) {
2052 if (iph->protocol != IPPROTO_TCP) {
2053 DP_NOTICE(p_hwfn, false,
2054 "Unexpected ip protocol on ll2 %x\n",
2055 iph->protocol);
2056 return ECORE_INVAL;
2057 }
2058
2059 cm_info->local_ip[0] = ntohl(iph->daddr);
2060 cm_info->remote_ip[0] = ntohl(iph->saddr);
2061 cm_info->ip_version = (enum ecore_tcp_ip_version)TCP_IPV4;
2062
2063 ip_hlen = (iph->ihl)*sizeof(u32);
2064 *payload_len = ntohs(iph->tot_len) - ip_hlen;
2065
2066 } else if (eth_type == ETH_P_IPV6) {
2067 ip6h = (struct ecore_ipv6hdr *)iph;
2068
2069 if (ip6h->nexthdr != IPPROTO_TCP) {
2070 DP_NOTICE(p_hwfn, false,
2071 "Unexpected ip protocol on ll2 %x\n",
2072 				  ip6h->nexthdr);
2073 return ECORE_INVAL;
2074 }
2075
2076 for (i = 0; i < 4; i++) {
2077 cm_info->local_ip[i] =
2078 ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
2079 cm_info->remote_ip[i] =
2080 ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
2081 }
2082 cm_info->ip_version = (enum ecore_tcp_ip_version)TCP_IPV6;
2083
2084 ip_hlen = sizeof(*ip6h);
2085 *payload_len = ntohs(ip6h->payload_len);
2086 } else {
2087 DP_NOTICE(p_hwfn, false,
2088 "Unexpected ethertype on ll2 %x\n", eth_type);
2089 return ECORE_INVAL;
2090 }
2091
2092 tcph = (struct ecore_tcphdr *)((u8 *)iph + ip_hlen);
2093
2094 if (!tcph->syn) {
2095 DP_NOTICE(p_hwfn, false,
2096 "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
2097 iph->ihl, tcph->source, tcph->dest);
2098 return ECORE_INVAL;
2099 }
2100
2101 cm_info->local_port = ntohs(tcph->dest);
2102 cm_info->remote_port = ntohs(tcph->source);
2103
2104 ecore_iwarp_print_cm_info(p_hwfn, cm_info);
2105
2106 *tcp_start_offset = eth_hlen + ip_hlen;
2107
2108 return ECORE_SUCCESS;
2109 }
2110
2111 static struct ecore_iwarp_fpdu *
2112 ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid)
2113 {
2114 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2115 struct ecore_iwarp_fpdu *partial_fpdu;
2116 u32 idx = cid - ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
2117
2118 if (idx >= iwarp_info->max_num_partial_fpdus) {
2119 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
2120 iwarp_info->max_num_partial_fpdus);
2121 return OSAL_NULL;
2122 }
2123
2124 partial_fpdu = &iwarp_info->partial_fpdus[idx];
2125
2126 return partial_fpdu;
2127 }
2128
2129 enum ecore_iwarp_mpa_pkt_type {
2130 ECORE_IWARP_MPA_PKT_PACKED,
2131 ECORE_IWARP_MPA_PKT_PARTIAL,
2132 ECORE_IWARP_MPA_PKT_UNALIGNED
2133 };
2134
2135 #define ECORE_IWARP_INVALID_FPDU_LENGTH 0xffff
2136 #define ECORE_IWARP_MPA_FPDU_LENGTH_SIZE (2)
2137 #define ECORE_IWARP_MPA_CRC32_DIGEST_SIZE (4)
2138
2139 /* Pad to multiple of 4 */
2140 #define ECORE_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) (((data_len) + 3) & ~3)
2141
2142 #define ECORE_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \
2143 (ECORE_IWARP_PDU_DATA_LEN_WITH_PAD(_mpa_len + \
2144 ECORE_IWARP_MPA_FPDU_LENGTH_SIZE) + \
2145 ECORE_IWARP_MPA_CRC32_DIGEST_SIZE)
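/* Illustrative example of the macros above: an MPA payload of mpa_len = 29
 * bytes gives 29 + 2 (MPA length field) = 31, padded up to 32, plus the
 * 4-byte CRC32 digest -> an on-the-wire FPDU of 36 bytes.
 */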
2146
2147 /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
2148 #define ECORE_IWARP_MAX_BDS_PER_FPDU 3
2149
2150 static const char *pkt_type_str[] = {
2151 "ECORE_IWARP_MPA_PKT_PACKED",
2152 "ECORE_IWARP_MPA_PKT_PARTIAL",
2153 "ECORE_IWARP_MPA_PKT_UNALIGNED"
2154 };
2155
2156 static enum _ecore_status_t
2157 ecore_iwarp_recycle_pkt(struct ecore_hwfn *p_hwfn,
2158 struct ecore_iwarp_fpdu *fpdu,
2159 struct ecore_iwarp_ll2_buff *buf);
2160
2161 static enum ecore_iwarp_mpa_pkt_type
2162 ecore_iwarp_mpa_classify(struct ecore_hwfn *p_hwfn,
2163 struct ecore_iwarp_fpdu *fpdu,
2164 u16 tcp_payload_len,
2165 u8 *mpa_data)
2166
2167 {
2168 enum ecore_iwarp_mpa_pkt_type pkt_type;
2169 u16 mpa_len;
2170
2171 if (fpdu->incomplete_bytes) {
2172 pkt_type = ECORE_IWARP_MPA_PKT_UNALIGNED;
2173 goto out;
2174 }
2175
2176 /* special case of one byte remaining... */
2177 if (tcp_payload_len == 1) {
2178 /* lower byte will be read next packet */
2179 fpdu->fpdu_length = *mpa_data << 8;
2180 pkt_type = ECORE_IWARP_MPA_PKT_PARTIAL;
2181 goto out;
2182 }
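	/* Illustrative example of the one-byte case above: if the first segment
	 * ends with only the high byte of the MPA length (say 0x01), fpdu_length
	 * is stored here as 0x0100; the low byte (say 0x2c) arrives with the next
	 * segment and is OR-ed in by ecore_iwarp_update_fpdu_length(), yielding
	 * mpa_len = 0x012c.
	 */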
2183
2184 mpa_len = ntohs(*((u16 *)(mpa_data)));
2185 fpdu->fpdu_length = ECORE_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
2186
2187 if (fpdu->fpdu_length <= tcp_payload_len)
2188 pkt_type = ECORE_IWARP_MPA_PKT_PACKED;
2189 else
2190 pkt_type = ECORE_IWARP_MPA_PKT_PARTIAL;
2191
2192 out:
2193 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2194 "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
2195 pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
2196
2197 return pkt_type;
2198 }
2199
2200 static void
2201 ecore_iwarp_init_fpdu(struct ecore_iwarp_ll2_buff *buf,
2202 struct ecore_iwarp_fpdu *fpdu,
2203 struct unaligned_opaque_data *pkt_data,
2204 u16 tcp_payload_size, u8 placement_offset)
2205 {
2206 fpdu->mpa_buf = buf;
2207 fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
2208 fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
2209
2210 fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
2211 fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
2212
2213 if (tcp_payload_size == 1)
2214 fpdu->incomplete_bytes = ECORE_IWARP_INVALID_FPDU_LENGTH;
2215 else if (tcp_payload_size < fpdu->fpdu_length)
2216 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
2217 else
2218 fpdu->incomplete_bytes = 0; /* complete fpdu */
2219
2220 fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
2221 }
2222
2223 static enum _ecore_status_t
2224 ecore_iwarp_copy_fpdu(struct ecore_hwfn *p_hwfn,
2225 struct ecore_iwarp_fpdu *fpdu,
2226 struct unaligned_opaque_data *pkt_data,
2227 struct ecore_iwarp_ll2_buff *buf,
2228 u16 tcp_payload_size)
2229
2230 {
2231 u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
2232 enum _ecore_status_t rc;
2233
2234 	/* We need to copy the data from the partial packet stored in fpdu
2235 	 * to the new buf; for this we also need to move the data currently
2236 	 * placed on the buf. The assumption is that the buffer is big enough,
2237 	 * since fpdu_length <= mss. We use an intermediate buffer because
2238 	 * we may need to copy the new data to an overlapping location.
2239 	 */
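	/* Illustrative example (numbers assumed): fpdu_length = 4096, the first
	 * segment carried mpa_frag_len = 1460 bytes, so incomplete_bytes = 2636.
	 * A following segment of tcp_payload_size = 1000 (< 2636) is merged here:
	 * both pieces are staged in mpa_intermediate_buf and copied back into
	 * buf->data, leaving incomplete_bytes = 1636 for the next segment.
	 */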
2240 if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
2241 DP_ERR(p_hwfn,
2242 "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
2243 buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
2244 fpdu->incomplete_bytes);
2245 return ECORE_INVAL;
2246 }
2247
2248 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2249 "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
2250 fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
2251 (u8 *)(buf->data) + pkt_data->first_mpa_offset,
2252 tcp_payload_size);
2253
2254 OSAL_MEMCPY(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
2255 OSAL_MEMCPY(tmp_buf + fpdu->mpa_frag_len,
2256 (u8 *)(buf->data) + pkt_data->first_mpa_offset,
2257 tcp_payload_size);
2258
2259 rc = ecore_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
2260 if (rc)
2261 return rc;
2262
2263 	/* If we managed to post the buffer, copy the data to the new buffer;
2264 	 * otherwise this will occur in the next round...
2265 	 */
2266 OSAL_MEMCPY((u8 *)(buf->data), tmp_buf,
2267 fpdu->mpa_frag_len + tcp_payload_size);
2268
2269 fpdu->mpa_buf = buf;
2270 /* fpdu->pkt_hdr remains as is */
2271 	/* fpdu->mpa_frag is overridden with the new buf */
2272 fpdu->mpa_frag = buf->data_phys_addr;
2273 fpdu->mpa_frag_virt = buf->data;
2274 fpdu->mpa_frag_len += tcp_payload_size;
2275
2276 fpdu->incomplete_bytes -= tcp_payload_size;
2277
2278 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2279 "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
2280 buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
2281 fpdu->incomplete_bytes);
2282
2283 return 0;
2284 }
2285
2286 static void
2287 ecore_iwarp_update_fpdu_length(struct ecore_hwfn *p_hwfn,
2288 struct ecore_iwarp_fpdu *fpdu,
2289 u8 *mpa_data)
2290 {
2291 u16 mpa_len;
2292
2293 /* Update incomplete packets if needed */
2294 if (fpdu->incomplete_bytes == ECORE_IWARP_INVALID_FPDU_LENGTH) {
2295 mpa_len = fpdu->fpdu_length | *mpa_data;
2296 fpdu->fpdu_length = ECORE_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
2297 fpdu->mpa_frag_len = fpdu->fpdu_length;
2298 /* one byte of hdr */
2299 fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
2300 DP_VERBOSE(p_hwfn,
2301 ECORE_MSG_RDMA,
2302 "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
2303 mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
2304 }
2305 }
2306
2307 #define ECORE_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
2308 (GET_FIELD(_curr_pkt->flags, \
2309 UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
2310
2311 /* This function is used to recycle a buffer using the ll2 drop option. The
2312  * mechanism ensures that all buffers posted to tx before this one were
2313  * completed. The buffer sent here is passed as the cookie to the tx
2314  * completion function and can then be reposted to the rx chain when done. The
2315  * flow that requires this is the one where an FPDU splits over more than 3 tcp
2316  * segments. In this case the driver needs to re-post a rx buffer instead of
2317  * the one received, but the driver can't simply repost the buffer it copied
2318  * from, as there is a case where that buffer was originally a packed FPDU and
2319  * is partially posted to FW. The driver needs to ensure FW is done with it.
2320  */
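/* In short (illustrative flow): the packet header is sent on the MPA ll2 tx
 * queue with ECORE_LL2_TX_DEST_DROP and the buffer as cookie; once the tx
 * completion for it arrives, ecore_iwarp_ll2_comp_tx_pkt() reposts the buffer
 * to the rx chain, guaranteeing FW no longer references it.
 */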
2321 static enum _ecore_status_t
2322 ecore_iwarp_recycle_pkt(struct ecore_hwfn *p_hwfn,
2323 struct ecore_iwarp_fpdu *fpdu,
2324 struct ecore_iwarp_ll2_buff *buf)
2325 {
2326 struct ecore_ll2_tx_pkt_info tx_pkt;
2327 enum _ecore_status_t rc;
2328 u8 ll2_handle;
2329
2330 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt));
2331 tx_pkt.num_of_bds = 1;
2332 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_DROP;
2333 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2334 tx_pkt.first_frag = fpdu->pkt_hdr;
2335 tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2336 buf->piggy_buf = OSAL_NULL;
2337 tx_pkt.cookie = buf;
2338
2339 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2340
2341 rc = ecore_ll2_prepare_tx_packet(p_hwfn,
2342 ll2_handle,
2343 &tx_pkt, true);
2344
2345 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2346 "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2347 (long unsigned int)tx_pkt.first_frag,
2348 tx_pkt.first_frag_len, buf, rc);
2349
2350 if (rc)
2351 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2352 "Can't drop packet rc=%d\n", rc);
2353
2354 return rc;
2355 }
2356
2357 static enum _ecore_status_t
2358 ecore_iwarp_win_right_edge(struct ecore_hwfn *p_hwfn,
2359 struct ecore_iwarp_fpdu *fpdu)
2360 {
2361 struct ecore_ll2_tx_pkt_info tx_pkt;
2362 enum _ecore_status_t rc;
2363 u8 ll2_handle;
2364
2365 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt));
2366 tx_pkt.num_of_bds = 1;
2367 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB;
2368 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2369
2370 tx_pkt.first_frag = fpdu->pkt_hdr;
2371 tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2372 tx_pkt.enable_ip_cksum = true;
2373 tx_pkt.enable_l4_cksum = true;
2374 tx_pkt.calc_ip_len = true;
2375 /* vlan overload with enum iwarp_ll2_tx_queues */
2376 tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2377
2378 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2379
2380 rc = ecore_ll2_prepare_tx_packet(p_hwfn,
2381 ll2_handle,
2382 &tx_pkt, true);
2383
2384 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2385 "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2386 tx_pkt.num_of_bds, (long unsigned int)tx_pkt.first_frag,
2387 tx_pkt.first_frag_len, rc);
2388
2389 if (rc)
2390 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2391 "Can't send right edge rc=%d\n", rc);
2392
2393 return rc;
2394 }
2395
2396 static enum _ecore_status_t
2397 ecore_iwarp_send_fpdu(struct ecore_hwfn *p_hwfn,
2398 struct ecore_iwarp_fpdu *fpdu,
2399 struct unaligned_opaque_data *curr_pkt,
2400 struct ecore_iwarp_ll2_buff *buf,
2401 u16 tcp_payload_size,
2402 enum ecore_iwarp_mpa_pkt_type pkt_type)
2403 {
2404 struct ecore_ll2_tx_pkt_info tx_pkt;
2405 enum _ecore_status_t rc;
2406 u8 ll2_handle;
2407
2408 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt));
2409
2410 tx_pkt.num_of_bds = (pkt_type == ECORE_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2411 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB;
2412 tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2413
2414 /* Send the mpa_buf only with the last fpdu (in case of packed) */
2415 if ((pkt_type == ECORE_IWARP_MPA_PKT_UNALIGNED) ||
2416 (tcp_payload_size <= fpdu->fpdu_length))
2417 tx_pkt.cookie = fpdu->mpa_buf;
2418
2419 tx_pkt.first_frag = fpdu->pkt_hdr;
2420 tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2421 tx_pkt.enable_ip_cksum = true;
2422 tx_pkt.enable_l4_cksum = true;
2423 tx_pkt.calc_ip_len = true;
2424 /* vlan overload with enum iwarp_ll2_tx_queues */
2425 tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2426
2427 /* special case of unaligned packet and not packed, need to send
2428 * both buffers as cookie to release.
2429 */
2430 if (tcp_payload_size == fpdu->incomplete_bytes) {
2431 fpdu->mpa_buf->piggy_buf = buf;
2432 }
2433
2434 ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2435
2436 rc = ecore_ll2_prepare_tx_packet(p_hwfn,
2437 ll2_handle,
2438 &tx_pkt, true);
2439 if (rc)
2440 goto err;
2441
2442 rc = ecore_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2443 fpdu->mpa_frag,
2444 fpdu->mpa_frag_len);
2445 if (rc)
2446 goto err;
2447
2448 if (fpdu->incomplete_bytes) {
2449 rc = ecore_ll2_set_fragment_of_tx_packet(
2450 p_hwfn, ll2_handle,
2451 buf->data_phys_addr + curr_pkt->first_mpa_offset,
2452 fpdu->incomplete_bytes);
2453
2454 if (rc)
2455 goto err;
2456 }
2457
2458 err:
2459 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2460 "MPA_ALIGN: Sent FPDU num_bds=%d [%lx, 0x%x], [0x%lx, 0x%x], [0x%lx, 0x%x] (cookie %p) rc=%d\n",
2461 tx_pkt.num_of_bds, (long unsigned int)tx_pkt.first_frag,
2462 tx_pkt.first_frag_len, (long unsigned int)fpdu->mpa_frag,
2463 fpdu->mpa_frag_len, (long unsigned int)buf->data_phys_addr +
2464 curr_pkt->first_mpa_offset, fpdu->incomplete_bytes,
2465 tx_pkt.cookie, rc);
2466
2467 return rc;
2468 }
2469
2470 static void
2471 ecore_iwarp_mpa_get_data(struct ecore_hwfn *p_hwfn,
2472 struct unaligned_opaque_data *curr_pkt,
2473 u32 opaque_data0, u32 opaque_data1)
2474 {
2475 u64 opaque_data;
2476
2477 opaque_data = HILO_64(opaque_data1, opaque_data0);
2478 *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2479
2480 	/* fix endianness */
2481 curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2482 OSAL_LE16_TO_CPU(curr_pkt->first_mpa_offset);
2483 curr_pkt->cid = OSAL_LE32_TO_CPU(curr_pkt->cid);
2484
2485 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2486 "OPAQUE0=0x%x OPAQUE1=0x%x first_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2487 opaque_data0, opaque_data1, curr_pkt->first_mpa_offset,
2488 curr_pkt->tcp_payload_offset, curr_pkt->flags,
2489 curr_pkt->cid);
2490 }
2491
2492 static void
2493 ecore_iwarp_mpa_print_tcp_seq(struct ecore_hwfn *p_hwfn,
2494 void *buf)
2495 {
2496 struct ecore_vlan_ethhdr *vethh;
2497 struct ecore_ethhdr *ethh;
2498 struct ecore_iphdr *iph;
2499 struct ecore_ipv6hdr *ip6h;
2500 struct ecore_tcphdr *tcph;
2501 bool vlan_valid = false;
2502 int eth_hlen, ip_hlen;
2503 u16 eth_type;
2504
2505 if ((p_hwfn->dp_level > ECORE_LEVEL_VERBOSE) ||
2506 !(p_hwfn->dp_module & ECORE_MSG_RDMA))
2507 return;
2508
2509 ethh = (struct ecore_ethhdr *)buf;
2510 eth_type = ntohs(ethh->h_proto);
2511 if (eth_type == ETH_P_8021Q) {
2512 vlan_valid = true;
2513 vethh = (struct ecore_vlan_ethhdr *)ethh;
2514 eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
2515 }
2516
2517 eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
2518
2519 iph = (struct ecore_iphdr *)((u8 *)(ethh) + eth_hlen);
2520
2521 if (eth_type == ETH_P_IP) {
2522 ip_hlen = (iph->ihl)*sizeof(u32);
2523 } else if (eth_type == ETH_P_IPV6) {
2524 ip6h = (struct ecore_ipv6hdr *)iph;
2525 ip_hlen = sizeof(*ip6h);
2526 } else {
2527 DP_ERR(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
2528 return;
2529 }
2530
2531 tcph = (struct ecore_tcphdr *)((u8 *)iph + ip_hlen);
2532
2533 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Processing MPA PKT: tcp_seq=0x%x tcp_ack_seq=0x%x\n",
2534 ntohl(tcph->seq), ntohl(tcph->ack_seq));
2535
2536 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_type =%d Source mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2537 eth_type, ethh->h_source[0], ethh->h_source[1],
2538 ethh->h_source[2], ethh->h_source[3],
2539 ethh->h_source[4], ethh->h_source[5]);
2540
2541 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "eth_hlen=%d destination mac: [0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]:[0x%x]\n",
2542 eth_hlen, ethh->h_dest[0], ethh->h_dest[1],
2543 ethh->h_dest[2], ethh->h_dest[3],
2544 ethh->h_dest[4], ethh->h_dest[5]);
2545
2546 return;
2547 }
2548
2549 /* This function is called when an unaligned or incomplete MPA packet arrives;
2550  * the driver needs to align the packet, possibly using previously received
2551  * data, and send it down to FW once it is aligned.
2552 */
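/* Note (illustrative): a single rx completion may carry several packed FPDUs;
 * the do/while loop below consumes them one by one, advancing first_mpa_offset
 * and shrinking tcp_payload_len until the buffer is drained or a partial /
 * unaligned FPDU defers further processing.
 */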
2553 static enum _ecore_status_t
2554 ecore_iwarp_process_mpa_pkt(struct ecore_hwfn *p_hwfn,
2555 struct ecore_iwarp_ll2_mpa_buf *mpa_buf)
2556 {
2557 struct ecore_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2558 enum ecore_iwarp_mpa_pkt_type pkt_type;
2559 struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2560 struct ecore_iwarp_fpdu *fpdu;
2561 u8 *mpa_data;
2562 enum _ecore_status_t rc = ECORE_SUCCESS;
2563
2564 ecore_iwarp_mpa_print_tcp_seq(
2565 p_hwfn, (u8 *)(buf->data) + mpa_buf->placement_offset);
2566
2567 fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2568 if (!fpdu) {/* something corrupt with cid, post rx back */
2569 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2570 curr_pkt->cid);
2571 rc = ecore_iwarp_ll2_post_rx(
2572 p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2573
2574 if (rc) { /* not much we can do here except log and free */
2575 DP_ERR(p_hwfn, "Post rx buffer failed\n");
2576
2577 /* we don't expect any failures from rx, not even
2578 * busy since we allocate #bufs=#descs
2579 */
2580 rc = ECORE_UNKNOWN_ERROR;
2581 }
2582 return rc;
2583 }
2584
2585 do {
2586 mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2587
2588 pkt_type = ecore_iwarp_mpa_classify(p_hwfn, fpdu,
2589 mpa_buf->tcp_payload_len,
2590 mpa_data);
2591
2592 switch (pkt_type) {
2593 case ECORE_IWARP_MPA_PKT_PARTIAL:
2594 ecore_iwarp_init_fpdu(buf, fpdu,
2595 curr_pkt,
2596 mpa_buf->tcp_payload_len,
2597 mpa_buf->placement_offset);
2598
2599 if (!ECORE_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2600 mpa_buf->tcp_payload_len = 0;
2601 break;
2602 }
2603
2604 rc = ecore_iwarp_win_right_edge(p_hwfn, fpdu);
2605
2606 if (rc) {
2607 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2608 "Can't send FPDU:reset rc=%d\n", rc);
2609 OSAL_MEM_ZERO(fpdu, sizeof(*fpdu));
2610 break;
2611 }
2612
2613 mpa_buf->tcp_payload_len = 0;
2614 break;
2615 case ECORE_IWARP_MPA_PKT_PACKED:
2616 if (fpdu->fpdu_length == 8) {
2617 DP_ERR(p_hwfn, "SUSPICIOUS fpdu_length = 0x%x: assuming bug...aborting this packet...\n",
2618 fpdu->fpdu_length);
2619 mpa_buf->tcp_payload_len = 0;
2620 break;
2621 }
2622
2623 ecore_iwarp_init_fpdu(buf, fpdu,
2624 curr_pkt,
2625 mpa_buf->tcp_payload_len,
2626 mpa_buf->placement_offset);
2627
2628 rc = ecore_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2629 mpa_buf->tcp_payload_len,
2630 pkt_type);
2631 if (rc) {
2632 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2633 "Can't send FPDU:reset rc=%d\n", rc);
2634 OSAL_MEM_ZERO(fpdu, sizeof(*fpdu));
2635 break;
2636 }
2637 mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2638 curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2639 break;
2640 case ECORE_IWARP_MPA_PKT_UNALIGNED:
2641 ecore_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2642 if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2643 /* special handling of fpdu split over more
2644 * than 2 segments
2645 */
2646 if (ECORE_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2647 rc = ecore_iwarp_win_right_edge(p_hwfn,
2648 fpdu);
2649 /* packet will be re-processed later */
2650 if (rc)
2651 return rc;
2652 }
2653
2654 rc = ecore_iwarp_copy_fpdu(
2655 p_hwfn, fpdu, curr_pkt,
2656 buf, mpa_buf->tcp_payload_len);
2657
2658 /* packet will be re-processed later */
2659 if (rc)
2660 return rc;
2661
2662 mpa_buf->tcp_payload_len = 0;
2663
2664 break;
2665 }
2666
2667 rc = ecore_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2668 mpa_buf->tcp_payload_len,
2669 pkt_type);
2670 if (rc) {
2671 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2672 "Can't send FPDU:delay rc=%d\n", rc);
2673 /* don't reset fpdu -> we need it for next
2674 * classify
2675 */
2676 break;
2677 }
2678 mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2679 curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2680 /* The framed PDU was sent - no more incomplete bytes */
2681 fpdu->incomplete_bytes = 0;
2682 break;
2683 }
2684
2685 } while (mpa_buf->tcp_payload_len && !rc);
2686
2687 return rc;
2688 }
2689
2690 static void
2691 ecore_iwarp_process_pending_pkts(struct ecore_hwfn *p_hwfn)
2692 {
2693 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2694 struct ecore_iwarp_ll2_mpa_buf *mpa_buf = OSAL_NULL;
2695 enum _ecore_status_t rc;
2696
2697 while (!OSAL_LIST_IS_EMPTY(&iwarp_info->mpa_buf_pending_list)) {
2698 mpa_buf = OSAL_LIST_FIRST_ENTRY(
2699 &iwarp_info->mpa_buf_pending_list,
2700 struct ecore_iwarp_ll2_mpa_buf,
2701 list_entry);
2702
2703 rc = ecore_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2704
2705 /* busy means break and continue processing later, don't
2706 * remove the buf from the pending list.
2707 */
2708 if (rc == ECORE_BUSY)
2709 break;
2710
2711 #ifdef _NTDDK_
2712 #pragma warning(suppress : 6011)
2713 #pragma warning(suppress : 28182)
2714 #endif
2715 OSAL_LIST_REMOVE_ENTRY(
2716 &mpa_buf->list_entry,
2717 &iwarp_info->mpa_buf_pending_list);
2718
2719 OSAL_LIST_PUSH_TAIL(&mpa_buf->list_entry,
2720 &iwarp_info->mpa_buf_list);
2721
2722 if (rc) { /* different error, don't continue */
2723 DP_NOTICE(p_hwfn, false, "process pkts failed rc=%d\n",
2724 rc);
2725 break;
2726 }
2727 }
2728 }
2729
2730 static void
2731 ecore_iwarp_ll2_comp_mpa_pkt(void *cxt,
2732 struct ecore_ll2_comp_rx_data *data)
2733 {
2734 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2735 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2736 struct ecore_iwarp_ll2_mpa_buf *mpa_buf;
2737
2738 iwarp_info->unalign_rx_comp++;
2739
2740 mpa_buf = OSAL_LIST_FIRST_ENTRY(&iwarp_info->mpa_buf_list,
2741 struct ecore_iwarp_ll2_mpa_buf,
2742 list_entry);
2743
2744 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2745 "LL2 MPA CompRx buf=%p placement_offset=%d, payload_len=0x%x mpa_buf=%p\n",
2746 data->cookie, data->u.placement_offset,
2747 data->length.packet_length, mpa_buf);
2748
2749 if (!mpa_buf) {
2750 DP_ERR(p_hwfn, "no free mpa buf. this is a driver bug.\n");
2751 return;
2752 }
2753 OSAL_LIST_REMOVE_ENTRY(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
2754
2755 ecore_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2756 data->opaque_data_0, data->opaque_data_1);
2757
2758 mpa_buf->tcp_payload_len = data->length.packet_length -
2759 mpa_buf->data.first_mpa_offset;
2760 mpa_buf->ll2_buf = (struct ecore_iwarp_ll2_buff *)data->cookie;
2761 mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2762 mpa_buf->placement_offset = data->u.placement_offset;
2763
2764 OSAL_LIST_PUSH_TAIL(&mpa_buf->list_entry,
2765 &iwarp_info->mpa_buf_pending_list);
2766
2767 ecore_iwarp_process_pending_pkts(p_hwfn);
2768 }
2769
2770 static void
2771 ecore_iwarp_ll2_comp_syn_pkt(void *cxt, struct ecore_ll2_comp_rx_data *data)
2772 {
2773 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2774 struct ecore_iwarp_ll2_buff *buf =
2775 (struct ecore_iwarp_ll2_buff *)data->cookie;
2776 struct ecore_iwarp_listener *listener;
2777 struct ecore_iwarp_cm_info cm_info;
2778 struct ecore_ll2_tx_pkt_info tx_pkt;
2779 u8 remote_mac_addr[ETH_ALEN];
2780 u8 local_mac_addr[ETH_ALEN];
2781 struct ecore_iwarp_ep *ep;
2782 enum _ecore_status_t rc;
2783 int tcp_start_offset;
2784 u8 ts_hdr_size = 0;
2785 int payload_len;
2786 u32 hdr_size;
2787
2788 OSAL_MEM_ZERO(&cm_info, sizeof(cm_info));
2789
2790 /* Check if packet was received with errors... */
2791 if (data->err_flags != 0) {
2792 DP_NOTICE(p_hwfn, false, "Error received on SYN packet: 0x%x\n",
2793 data->err_flags);
2794 goto err;
2795 }
2796
2797 if (GET_FIELD(data->parse_flags,
2798 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2799 GET_FIELD(data->parse_flags,
2800 PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2801 DP_NOTICE(p_hwfn, false, "Syn packet received with checksum error\n");
2802 goto err;
2803 }
2804
2805 rc = ecore_iwarp_parse_rx_pkt(
2806 p_hwfn, &cm_info, (u8 *)(buf->data) + data->u.placement_offset,
2807 remote_mac_addr, local_mac_addr, &payload_len,
2808 &tcp_start_offset);
2809 if (rc)
2810 goto err;
2811
2812 /* Check if there is a listener for this 4-tuple */
2813 listener = ecore_iwarp_get_listener(p_hwfn, &cm_info);
2814 if (!listener) {
2815 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2816 "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2817 data->parse_flags, data->length.packet_length);
2818
2819 OSAL_MEMSET(&tx_pkt, 0, sizeof(tx_pkt));
2820 tx_pkt.num_of_bds = 1;
2821 tx_pkt.bd_flags = 0;
2822 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2823 tx_pkt.tx_dest = ECORE_LL2_TX_DEST_LB;
2824 tx_pkt.first_frag = buf->data_phys_addr +
2825 data->u.placement_offset;
2826 tx_pkt.first_frag_len = data->length.packet_length;
2827 tx_pkt.cookie = buf;
2828
2829 rc = ecore_ll2_prepare_tx_packet(
2830 p_hwfn,
2831 p_hwfn->p_rdma_info->iwarp.ll2_syn_handle,
2832 &tx_pkt, true);
2833
2834 if (rc) {
2835 DP_NOTICE(p_hwfn, false,
2836 "Can't post SYN back to chip rc=%d\n", rc);
2837 goto err;
2838 }
2839 return;
2840 }
2841
2842 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "Received syn on listening port\n");
2843
2844 	/* For debugging purposes... */
2845 if (listener->drop)
2846 goto err;
2847
2848 /* There may be an open ep on this connection if this is a syn
2849 	 * retransmit... need to make sure there isn't...
2850 */
2851 if (ecore_iwarp_ep_exists(p_hwfn, listener, &cm_info))
2852 goto err;
2853
2854 ep = ecore_iwarp_get_free_ep(p_hwfn);
2855 if (ep == OSAL_NULL)
2856 goto err;
2857
2858 OSAL_SPIN_LOCK(&listener->lock);
2859 OSAL_LIST_PUSH_TAIL(&ep->list_entry, &listener->ep_list);
2860 OSAL_SPIN_UNLOCK(&listener->lock);
2861
2862 OSAL_MEMCPY(ep->remote_mac_addr,
2863 remote_mac_addr,
2864 ETH_ALEN);
2865 OSAL_MEMCPY(ep->local_mac_addr,
2866 local_mac_addr,
2867 ETH_ALEN);
2868
2869 OSAL_MEMCPY(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2870
2871 if (p_hwfn->p_rdma_info->iwarp.tcp_flags & ECORE_IWARP_TS_EN)
2872 ts_hdr_size = TIMESTAMP_HEADER_SIZE;
2873
2874 hdr_size = ((cm_info.ip_version == ECORE_TCP_IPV4) ? 40 : 60) +
2875 ts_hdr_size;
2876 ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2877 ep->mss = OSAL_MIN_T(u16, ECORE_IWARP_MAX_FW_MSS, ep->mss);
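	/* Illustrative mss example (assuming timestamps enabled and max_mtu = 1500):
	 * IPv4 -> hdr_size = 40 + 12 = 52, so mss = 1500 - 52 = 1448, which is
	 * below ECORE_IWARP_MAX_FW_MSS (4120) and is therefore used as-is.
	 */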
2878
2879 ep->listener = listener;
2880 ep->event_cb = listener->event_cb;
2881 ep->cb_context = listener->cb_context;
2882 ep->connect_mode = TCP_CONNECT_PASSIVE;
2883
2884 ep->syn = buf;
2885 ep->syn_ip_payload_length = (u16)payload_len;
2886 ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2887 tcp_start_offset;
2888
2889 rc = ecore_iwarp_tcp_offload(p_hwfn, ep);
2890 if (rc != ECORE_SUCCESS) {
2891 ecore_iwarp_return_ep(p_hwfn, ep);
2892 goto err;
2893 }
2894 return;
2895
2896 err:
2897 ecore_iwarp_ll2_post_rx(
2898 p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
2899 }
2900
2901 static void
2902 ecore_iwarp_ll2_rel_rx_pkt(void *cxt,
2903 u8 OSAL_UNUSED connection_handle,
2904 void *cookie,
2905 dma_addr_t OSAL_UNUSED rx_buf_addr,
2906 bool OSAL_UNUSED b_last_packet)
2907 {
2908 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2909 struct ecore_iwarp_ll2_buff *buffer =
2910 (struct ecore_iwarp_ll2_buff *)cookie;
2911
2912 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2913 buffer->data,
2914 buffer->data_phys_addr,
2915 buffer->buff_size);
2916
2917 OSAL_FREE(p_hwfn->p_dev, buffer);
2918 }
2919
2920 static void
2921 ecore_iwarp_ll2_comp_tx_pkt(void *cxt,
2922 u8 connection_handle,
2923 void *cookie,
2924 dma_addr_t OSAL_UNUSED first_frag_addr,
2925 bool OSAL_UNUSED b_last_fragment,
2926 bool OSAL_UNUSED b_last_packet)
2927 {
2928 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2929 struct ecore_iwarp_ll2_buff *buffer =
2930 (struct ecore_iwarp_ll2_buff *)cookie;
2931 struct ecore_iwarp_ll2_buff *piggy;
2932
2933 if (!buffer) /* can happen in packed mpa unaligned... */
2934 return;
2935
2936 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
2937 "LL2 CompTX buf=%p piggy_buf=%p handle=%d\n",
2938 buffer, buffer->piggy_buf, connection_handle);
2939
2940 /* we got a tx packet -> this was originally a rx packet... now we
2941 * can post it back...
2942 */
2943 piggy = buffer->piggy_buf;
2944 if (piggy) {
2945 buffer->piggy_buf = OSAL_NULL;
2946 ecore_iwarp_ll2_post_rx(p_hwfn, piggy,
2947 connection_handle);
2948 }
2949
2950 ecore_iwarp_ll2_post_rx(p_hwfn, buffer,
2951 connection_handle);
2952
2953 if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2954 ecore_iwarp_process_pending_pkts(p_hwfn);
2955
2956 return;
2957 }
2958
2959 static void
2960 ecore_iwarp_ll2_rel_tx_pkt(void *cxt,
2961 u8 OSAL_UNUSED connection_handle,
2962 void *cookie,
2963 dma_addr_t OSAL_UNUSED first_frag_addr,
2964 bool OSAL_UNUSED b_last_fragment,
2965 bool OSAL_UNUSED b_last_packet)
2966 {
2967 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
2968 struct ecore_iwarp_ll2_buff *buffer =
2969 (struct ecore_iwarp_ll2_buff *)cookie;
2970
2971 if (!buffer)
2972 return;
2973
2974 if (buffer->piggy_buf) {
2975 OSAL_DMA_FREE_COHERENT(
2976 p_hwfn->p_dev,
2977 buffer->piggy_buf->data,
2978 buffer->piggy_buf->data_phys_addr,
2979 buffer->piggy_buf->buff_size);
2980
2981 OSAL_FREE(p_hwfn->p_dev, buffer->piggy_buf);
2982 }
2983
2984 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2985 buffer->data,
2986 buffer->data_phys_addr,
2987 buffer->buff_size);
2988
2989 OSAL_FREE(p_hwfn->p_dev, buffer);
2990 return;
2991 }
2992
2993 /* The currently known slowpath for iwarp ll2 is the unaligned flush. When this
2994  * completion is received, the FPDU needs to be reset.
2995  */
2996 static void
2997 ecore_iwarp_ll2_slowpath(void *cxt,
2998 u8 OSAL_UNUSED connection_handle,
2999 u32 opaque_data_0,
3000 u32 opaque_data_1)
3001 {
3002 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt;
3003 struct unaligned_opaque_data unalign_data;
3004 struct ecore_iwarp_fpdu *fpdu;
3005
3006 ecore_iwarp_mpa_get_data(p_hwfn, &unalign_data,
3007 opaque_data_0, opaque_data_1);
3008
3009 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "(0x%x) Flush fpdu\n",
3010 unalign_data.cid);
3011
3012 fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
3013 if (fpdu)
3014 OSAL_MEM_ZERO(fpdu, sizeof(*fpdu));
3015 }
3016
3017 static int
3018 ecore_iwarp_ll2_stop(struct ecore_hwfn *p_hwfn)
3019 {
3020 struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
3021 int rc = 0;
3022
3023 if (iwarp_info->ll2_syn_handle != ECORE_IWARP_HANDLE_INVAL) {
3024 rc = ecore_ll2_terminate_connection(p_hwfn,
3025 iwarp_info->ll2_syn_handle);
3026 if (rc)
3027 DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
3028
3029 ecore_ll2_release_connection(p_hwfn,
3030 iwarp_info->ll2_syn_handle);
3031 iwarp_info->ll2_syn_handle = ECORE_IWARP_HANDLE_INVAL;
3032 }
3033
3034 if (iwarp_info->ll2_ooo_handle != ECORE_IWARP_HANDLE_INVAL) {
3035 rc = ecore_ll2_terminate_connection(p_hwfn,
3036 iwarp_info->ll2_ooo_handle);
3037 if (rc)
3038 DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
3039
3040 ecore_ll2_release_connection(p_hwfn,
3041 iwarp_info->ll2_ooo_handle);
3042 iwarp_info->ll2_ooo_handle = ECORE_IWARP_HANDLE_INVAL;
3043 }
3044
3045 if (iwarp_info->ll2_mpa_handle != ECORE_IWARP_HANDLE_INVAL) {
3046 rc = ecore_ll2_terminate_connection(p_hwfn,
3047 iwarp_info->ll2_mpa_handle);
3048 if (rc)
3049 DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
3050
3051 ecore_ll2_release_connection(p_hwfn,
3052 iwarp_info->ll2_mpa_handle);
3053 iwarp_info->ll2_mpa_handle = ECORE_IWARP_HANDLE_INVAL;
3054 }
3055
3056 ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0,
3057 p_hwfn->p_rdma_info->iwarp.mac_addr);
3058
3059 return rc;
3060 }
3061
3062 static int
3063 ecore_iwarp_ll2_alloc_buffers(struct ecore_hwfn *p_hwfn,
3064 int num_rx_bufs,
3065 int buff_size,
3066 u8 ll2_handle)
3067 {
3068 struct ecore_iwarp_ll2_buff *buffer;
3069 int rc = 0;
3070 int i;
3071
3072 for (i = 0; i < num_rx_bufs; i++) {
3073 buffer = OSAL_ZALLOC(p_hwfn->p_dev,
3074 GFP_KERNEL, sizeof(*buffer));
3075 if (!buffer) {
3076 DP_INFO(p_hwfn, "Failed to allocate LL2 buffer desc\n");
3077 break;
3078 }
3079
3080 buffer->data =
3081 OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
3082 &buffer->data_phys_addr,
3083 buff_size);
3084
3085 if (!buffer->data) {
3086 DP_INFO(p_hwfn, "Failed to allocate LL2 buffers\n");
3087 OSAL_FREE(p_hwfn->p_dev, buffer);
3088 rc = ECORE_NOMEM;
3089 break;
3090 }
3091
3092 buffer->buff_size = buff_size;
3093 rc = ecore_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
3094
3095 if (rc)
3096 break; /* buffers will be deallocated by ecore_ll2 */
3097 }
3098 return rc;
3099 }
3100
3101 #define ECORE_IWARP_CACHE_PADDING(size) \
3102 (((size) + ETH_CACHE_LINE_SIZE - 1) & ~(ETH_CACHE_LINE_SIZE - 1))
3103
3104 #define ECORE_IWARP_MAX_BUF_SIZE(mtu) \
3105 ECORE_IWARP_CACHE_PADDING(mtu + ETH_HLEN + 2*VLAN_HLEN + 2 +\
3106 ETH_CACHE_LINE_SIZE)
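/* Illustrative example (assuming ETH_CACHE_LINE_SIZE = 64 and VLAN_HLEN = 4):
 * for mtu = 1500 the raw size is 1500 + 14 + 8 + 2 + 64 = 1588, which
 * ECORE_IWARP_CACHE_PADDING() rounds up to 1600 bytes per MPA rx buffer.
 */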
3107
3108 static int
3109 ecore_iwarp_ll2_start(struct ecore_hwfn *p_hwfn,
3110 struct ecore_rdma_start_in_params *params)
3111 {
3112 struct ecore_iwarp_info *iwarp_info;
3113 struct ecore_ll2_acquire_data data;
3114 struct ecore_ll2_cbs cbs;
3115 u32 mpa_buff_size;
3116 int rc = ECORE_SUCCESS;
3117 u16 n_ooo_bufs;
3118 int i;
3119
3120 iwarp_info = &p_hwfn->p_rdma_info->iwarp;
3121 iwarp_info->ll2_syn_handle = ECORE_IWARP_HANDLE_INVAL;
3122 iwarp_info->ll2_ooo_handle = ECORE_IWARP_HANDLE_INVAL;
3123 iwarp_info->ll2_mpa_handle = ECORE_IWARP_HANDLE_INVAL;
3124
3125 iwarp_info->max_mtu = params->max_mtu;
3126
3127 OSAL_MEMCPY(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr,
3128 ETH_ALEN);
3129
3130 rc = ecore_llh_add_mac_filter(p_hwfn->p_dev, 0, params->mac_addr);
3131 if (rc != ECORE_SUCCESS)
3132 return rc;
3133
3134 /* Start SYN connection */
3135 cbs.rx_comp_cb = ecore_iwarp_ll2_comp_syn_pkt;
3136 cbs.rx_release_cb = ecore_iwarp_ll2_rel_rx_pkt;
3137 cbs.tx_comp_cb = ecore_iwarp_ll2_comp_tx_pkt;
3138 cbs.tx_release_cb = ecore_iwarp_ll2_rel_tx_pkt;
3139 cbs.cookie = p_hwfn;
3140
3141 OSAL_MEMSET(&data, 0, sizeof(data));
3142 data.input.conn_type = ECORE_LL2_TYPE_IWARP;
3143 data.input.mtu = ECORE_IWARP_MAX_SYN_PKT_SIZE;
3144 data.input.rx_num_desc = ECORE_IWARP_LL2_SYN_RX_SIZE;
3145 data.input.tx_num_desc = ECORE_IWARP_LL2_SYN_TX_SIZE;
3146 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
3147 data.input.tx_tc = PKT_LB_TC;
3148 data.input.tx_dest = ECORE_LL2_TX_DEST_LB;
3149 data.p_connection_handle = &iwarp_info->ll2_syn_handle;
3150 data.cbs = &cbs;
3151
3152 rc = ecore_ll2_acquire_connection(p_hwfn, &data);
3153 if (rc) {
3154 DP_NOTICE(p_hwfn, false, "Failed to acquire LL2 connection\n");
3155 ecore_llh_remove_mac_filter(p_hwfn->p_dev, 0, params->mac_addr);
3156 return rc;
3157 }
3158
3159 rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
3160 if (rc) {
3161 DP_NOTICE(p_hwfn, false,
3162 "Failed to establish LL2 connection\n");
3163 goto err;
3164 }
3165
3166 rc = ecore_iwarp_ll2_alloc_buffers(p_hwfn,
3167 ECORE_IWARP_LL2_SYN_RX_SIZE,
3168 ECORE_IWARP_MAX_SYN_PKT_SIZE,
3169 iwarp_info->ll2_syn_handle);
3170 if (rc)
3171 goto err;
3172
3173 /* Start OOO connection */
3174 data.input.conn_type = ECORE_LL2_TYPE_OOO;
3175 data.input.mtu = params->max_mtu;
3176
3177 n_ooo_bufs = params->iwarp.ooo_num_rx_bufs;
3178
3179 if (n_ooo_bufs > ECORE_IWARP_LL2_OOO_MAX_RX_SIZE)
3180 n_ooo_bufs = ECORE_IWARP_LL2_OOO_MAX_RX_SIZE;
3181
3182 data.input.rx_num_desc = n_ooo_bufs;
3183 data.input.rx_num_ooo_buffers = n_ooo_bufs;
3184
3185 p_hwfn->p_rdma_info->iwarp.num_ooo_rx_bufs = data.input.rx_num_desc;
3186 data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
3187 data.input.tx_num_desc = ECORE_IWARP_LL2_OOO_DEF_TX_SIZE;
3188 data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
3189 data.input.secondary_queue = true;
3190
3191 rc = ecore_ll2_acquire_connection(p_hwfn, &data);
3192 if (rc)
3193 goto err;
3194
3195 rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
3196 if (rc)
3197 goto err;
3198
3199 /* Start MPA connection */
3200 cbs.rx_comp_cb = ecore_iwarp_ll2_comp_mpa_pkt;
3201 cbs.slowpath_cb = ecore_iwarp_ll2_slowpath;
3202
3203 OSAL_MEMSET(&data, 0, sizeof(data));
3204 data.input.conn_type = ECORE_LL2_TYPE_IWARP;
3205 data.input.mtu = params->max_mtu;
3206 data.input.rx_num_desc = n_ooo_bufs * 2;
3207 /* we allocate the same amount for TX to reduce the chance we
3208 * run out of tx descriptors
3209 */
3210 data.input.tx_num_desc = data.input.rx_num_desc;
3211 data.input.tx_max_bds_per_packet = ECORE_IWARP_MAX_BDS_PER_FPDU;
3212 data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
3213 data.input.secondary_queue = true;
3214 data.cbs = &cbs;
3215
3216 rc = ecore_ll2_acquire_connection(p_hwfn, &data);
3217 if (rc)
3218 goto err;
3219
3220 rc = ecore_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
3221 if (rc)
3222 goto err;
3223
3224 mpa_buff_size = ECORE_IWARP_MAX_BUF_SIZE(params->max_mtu);
3225 rc = ecore_iwarp_ll2_alloc_buffers(p_hwfn,
3226 data.input.rx_num_desc,
3227 mpa_buff_size,
3228 iwarp_info->ll2_mpa_handle);
3229 if (rc)
3230 goto err;
3231
3232 iwarp_info->partial_fpdus =
3233 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
3234 sizeof(*iwarp_info->partial_fpdus) *
3235 (u16)p_hwfn->p_rdma_info->num_qps);
3236
3237 if (!iwarp_info->partial_fpdus) {
3238 DP_NOTICE(p_hwfn, false,
3239 "Failed to allocate ecore_iwarp_info(partial_fpdus)\n");
3240 goto err;
3241 }
3242
3243 iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
3244
3245 	/* The mpa_bufs array holds pending RX packets received on the
3246 	 * mpa ll2 that have no room on the tx ring and require later
3247 	 * processing. We can't fail on allocation of such a struct, therefore
3248 	 * we allocate enough to take care of all rx packets.
3249 	 */
3250 iwarp_info->mpa_bufs =
3251 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
3252 sizeof(*iwarp_info->mpa_bufs) *
3253 data.input.rx_num_desc);
3254
3255 if (!iwarp_info->mpa_bufs) {
3256 DP_NOTICE(p_hwfn, false,
3257 "Failed to allocate mpa_bufs array mem_size=%d\n",
3258 (u32)(sizeof(*iwarp_info->mpa_bufs) *
3259 data.input.rx_num_desc));
3260 goto err;
3261 }
3262
3263 iwarp_info->mpa_intermediate_buf =
3264 OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, mpa_buff_size);
3265 if (!iwarp_info->mpa_intermediate_buf) {
3266 DP_NOTICE(p_hwfn, false,
3267 "Failed to allocate mpa_intermediate_buf mem_size=%d\n",
3268 mpa_buff_size);
3269 goto err;
3270 }
3271
3272 OSAL_LIST_INIT(&iwarp_info->mpa_buf_pending_list);
3273 OSAL_LIST_INIT(&iwarp_info->mpa_buf_list);
3274 for (i = 0; i < data.input.rx_num_desc; i++) {
3275 OSAL_LIST_PUSH_TAIL(&iwarp_info->mpa_bufs[i].list_entry,
3276 &iwarp_info->mpa_buf_list);
3277 }
3278
3279 return rc;
3280
3281 err:
3282 ecore_iwarp_ll2_stop(p_hwfn);
3283
3284 return rc;
3285 }
3286
3287 static void
3288 ecore_iwarp_set_defaults(struct ecore_hwfn *p_hwfn,
3289 struct ecore_rdma_start_in_params *params)
3290 {
3291 u32 rcv_wnd_size;
3292 u32 n_ooo_bufs;
3293
3294 /* rcv_wnd_size = 0: use defaults */
3295 rcv_wnd_size = params->iwarp.rcv_wnd_size;
3296 if (!rcv_wnd_size) {
3297 if (ecore_device_num_ports(p_hwfn->p_dev) == 4) {
3298 rcv_wnd_size = ECORE_IS_AH(p_hwfn->p_dev) ?
3299 ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS :
3300 ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS;
3301 } else {
3302 rcv_wnd_size = ECORE_IS_AH(p_hwfn->p_dev) ?
3303 ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS :
3304 ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS;
3305 }
3306 params->iwarp.rcv_wnd_size = rcv_wnd_size;
3307 }
3308
3309 n_ooo_bufs = params->iwarp.ooo_num_rx_bufs;
3310 if (!n_ooo_bufs) {
3311 n_ooo_bufs = (u32)(((u64)ECORE_MAX_OOO *
3312 params->iwarp.rcv_wnd_size) /
3313 params->max_mtu);
3314 n_ooo_bufs = OSAL_MIN_T(u32, n_ooo_bufs, USHRT_MAX);
3315 params->iwarp.ooo_num_rx_bufs = (u16)n_ooo_bufs;
3316 }
3317 }
3318
3319 enum _ecore_status_t
3320 ecore_iwarp_setup(struct ecore_hwfn *p_hwfn,
3321 struct ecore_rdma_start_in_params *params)
3322 {
3323 enum _ecore_status_t rc = ECORE_SUCCESS;
3324 struct ecore_iwarp_info *iwarp_info;
3325 u32 rcv_wnd_size;
3326
3327 iwarp_info = &(p_hwfn->p_rdma_info->iwarp);
3328
3329 if (!params->iwarp.rcv_wnd_size || !params->iwarp.ooo_num_rx_bufs)
3330 ecore_iwarp_set_defaults(p_hwfn, params);
3331
3332 	/* Scale 0 will set a window of 0xFFFC (64K - 4).
3333 	 * Scale x will set a window of 0xFFFC << x.
3334 	 * Therefore we subtract log2(64K) so that a 64K request yields scale 0.
3335 	 */
3336 rcv_wnd_size = params->iwarp.rcv_wnd_size;
3337 if (rcv_wnd_size < ECORE_IWARP_RCV_WND_SIZE_MIN)
3338 rcv_wnd_size = ECORE_IWARP_RCV_WND_SIZE_MIN;
3339
3340 iwarp_info->rcv_wnd_scale = OSAL_MIN_T(u32, OSAL_LOG2(rcv_wnd_size) -
3341 OSAL_LOG2(ECORE_IWARP_RCV_WND_SIZE_MIN), ECORE_IWARP_MAX_WND_SCALE);
3342 iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
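	/* Illustrative example (assuming OSAL_LOG2() rounds down): a requested
	 * window of 256KB gives rcv_wnd_scale = min(18 - 15, 14) = 3 and a
	 * programmed rcv_wnd_size of 256KB >> 3 = 32KB, i.e. an effective
	 * window of 32KB << 3.
	 */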
3343
3344 iwarp_info->tcp_flags = params->iwarp.flags;
3345 iwarp_info->crc_needed = params->iwarp.crc_needed;
3346 switch (params->iwarp.mpa_rev) {
3347 case ECORE_MPA_REV1:
3348 iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
3349 break;
3350 case ECORE_MPA_REV2:
3351 iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
3352 break;
3353 }
3354
3355 iwarp_info->peer2peer = params->iwarp.mpa_peer2peer;
3356 iwarp_info->rtr_type = MPA_RTR_TYPE_NONE;
3357
3358 if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_SEND)
3359 iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
3360
3361 if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_WRITE)
3362 iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
3363
3364 if (params->iwarp.mpa_rtr & ECORE_MPA_RTR_TYPE_ZERO_READ)
3365 iwarp_info->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
3366
3367 //DAVIDS OSAL_SPIN_LOCK_INIT(&p_hwfn->p_rdma_info->iwarp.qp_lock);
3368 OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.ep_list);
3369 OSAL_LIST_INIT(&p_hwfn->p_rdma_info->iwarp.listen_list);
3370
3371 ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
3372 ecore_iwarp_async_event);
3373 ecore_ooo_setup(p_hwfn);
3374
3375 rc = ecore_iwarp_ll2_start(p_hwfn, params);
3376
3377 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3378 "MPA_REV = %d. peer2peer=%d rtr=%x\n",
3379 iwarp_info->mpa_rev,
3380 iwarp_info->peer2peer,
3381 iwarp_info->rtr_type);
3382
3383 return rc;
3384 }
3385
3386 enum _ecore_status_t
3387 ecore_iwarp_stop(struct ecore_hwfn *p_hwfn)
3388 {
3389 enum _ecore_status_t rc;
3390
3391 ecore_iwarp_free_prealloc_ep(p_hwfn);
3392 rc = ecore_iwarp_wait_for_all_cids(p_hwfn);
3393 if (rc != ECORE_SUCCESS)
3394 return rc;
3395
3396 ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
3397
3398 return ecore_iwarp_ll2_stop(p_hwfn);
3399 }
3400
3401 static void
3402 ecore_iwarp_qp_in_error(struct ecore_hwfn *p_hwfn,
3403 struct ecore_iwarp_ep *ep,
3404 u8 fw_return_code)
3405 {
3406 struct ecore_iwarp_cm_event_params params;
3407
3408 ecore_iwarp_modify_qp(p_hwfn, ep->qp, ECORE_IWARP_QP_STATE_ERROR, true);
3409
3410 params.event = ECORE_IWARP_EVENT_CLOSE;
3411 params.ep_context = ep;
3412 params.cm_info = &ep->cm_info;
3413 params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
3414 ECORE_SUCCESS : ECORE_CONN_RESET;
3415
3416 ep->state = ECORE_IWARP_EP_CLOSED;
3417 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3418 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
3419 &p_hwfn->p_rdma_info->iwarp.ep_list);
3420 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3421
3422 ep->event_cb(ep->cb_context, &params);
3423 }
3424
3425 static void
3426 ecore_iwarp_exception_received(struct ecore_hwfn *p_hwfn,
3427 struct ecore_iwarp_ep *ep,
3428 int fw_ret_code)
3429 {
3430 struct ecore_iwarp_cm_event_params params;
3431 bool event_cb = false;
3432
3433 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
3434 ep->cid, fw_ret_code);
3435
3436 switch (fw_ret_code) {
3437 case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
3438 params.status = ECORE_SUCCESS;
3439 params.event = ECORE_IWARP_EVENT_DISCONNECT;
3440 event_cb = true;
3441 break;
3442 case IWARP_EXCEPTION_DETECTED_LLP_RESET:
3443 params.status = ECORE_CONN_RESET;
3444 params.event = ECORE_IWARP_EVENT_DISCONNECT;
3445 event_cb = true;
3446 break;
3447 case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
3448 params.event = ECORE_IWARP_EVENT_RQ_EMPTY;
3449 event_cb = true;
3450 break;
3451 case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
3452 params.event = ECORE_IWARP_EVENT_IRQ_FULL;
3453 event_cb = true;
3454 break;
3455 case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
3456 params.event = ECORE_IWARP_EVENT_LLP_TIMEOUT;
3457 event_cb = true;
3458 break;
3459 case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
3460 params.event = ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
3461 event_cb = true;
3462 break;
3463 case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
3464 params.event = ECORE_IWARP_EVENT_CQ_OVERFLOW;
3465 event_cb = true;
3466 break;
3467 case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
3468 params.event = ECORE_IWARP_EVENT_QP_CATASTROPHIC;
3469 event_cb = true;
3470 break;
3471 case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
3472 params.event = ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR;
3473 event_cb = true;
3474 break;
3475 case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
3476 params.event = ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR;
3477 event_cb = true;
3478 break;
3479 case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
3480 params.event = ECORE_IWARP_EVENT_TERMINATE_RECEIVED;
3481 event_cb = true;
3482 break;
3483 default:
3484 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3485 "Unhandled exception received...\n");
3486 break;
3487 }
3488
3489 if (event_cb) {
3490 params.ep_context = ep;
3491 params.cm_info = &ep->cm_info;
3492 ep->event_cb(ep->cb_context, &params);
3493 }
3494 }
3495
3496 static void
3497 ecore_iwarp_tcp_connect_unsuccessful(struct ecore_hwfn *p_hwfn,
3498 struct ecore_iwarp_ep *ep,
3499 u8 fw_return_code)
3500 {
3501 struct ecore_iwarp_cm_event_params params;
3502
3503 OSAL_MEM_ZERO(&params, sizeof(params));
3504 params.event = ECORE_IWARP_EVENT_ACTIVE_COMPLETE;
3505 params.ep_context = ep;
3506 params.cm_info = &ep->cm_info;
3507 ep->state = ECORE_IWARP_EP_CLOSED;
3508
3509 switch (fw_return_code) {
3510 case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
3511 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3512 "%s(0x%x) TCP connect got invalid packet\n",
3513 ECORE_IWARP_CONNECT_MODE_STRING(ep),
3514 ep->tcp_cid);
3515 params.status = ECORE_CONN_RESET;
3516 break;
3517 case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
3518 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3519 "%s(0x%x) TCP Connection Reset\n",
3520 ECORE_IWARP_CONNECT_MODE_STRING(ep),
3521 ep->tcp_cid);
3522 params.status = ECORE_CONN_RESET;
3523 break;
3524 case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
3525 DP_NOTICE(p_hwfn, false, "%s(0x%x) TCP timeout\n",
3526 ECORE_IWARP_CONNECT_MODE_STRING(ep),
3527 ep->tcp_cid);
3528 params.status = ECORE_TIMEOUT;
3529 break;
3530 case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
3531 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA not supported VER\n",
3532 ECORE_IWARP_CONNECT_MODE_STRING(ep),
3533 ep->tcp_cid);
3534 params.status = ECORE_CONN_REFUSED;
3535 break;
3536 case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
3537 DP_NOTICE(p_hwfn, false, "%s(0x%x) MPA Invalid Packet\n",
3538 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
3539 params.status = ECORE_CONN_RESET;
3540 break;
3541 default:
3542 DP_ERR(p_hwfn, "%s(0x%x) Unexpected TCP connect return code: %d\n",
3543 ECORE_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid,
3544 fw_return_code);
3545 params.status = ECORE_CONN_RESET;
3546 break;
3547 }
3548
3549 if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3550 ep->tcp_cid = ECORE_IWARP_INVALID_TCP_CID;
3551 ecore_iwarp_return_ep(p_hwfn, ep);
3552 } else {
3553 ep->event_cb(ep->cb_context, &params);
3554 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3555 OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
3556 &p_hwfn->p_rdma_info->iwarp.ep_list);
3557 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3558 }
3559 }
3560
3561 static void
3562 ecore_iwarp_connect_complete(struct ecore_hwfn *p_hwfn,
3563 struct ecore_iwarp_ep *ep,
3564 u8 fw_return_code)
3565 {
3566 if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3567 /* Done with the SYN packet, post back to ll2 rx */
3568 ecore_iwarp_ll2_post_rx(
3569 p_hwfn, ep->syn,
3570 p_hwfn->p_rdma_info->iwarp.ll2_syn_handle);
3571
3572 ep->syn = OSAL_NULL;
3573
3574 if (ep->state == ECORE_IWARP_EP_ABORTING)
3575 return;
3576
3577 /* If connect failed - upper layer doesn't know about it */
3578 if (fw_return_code == RDMA_RETURN_OK)
3579 ecore_iwarp_mpa_received(p_hwfn, ep);
3580 else
3581 ecore_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3582 fw_return_code);
3583
3584 } else {
3585 if (fw_return_code == RDMA_RETURN_OK)
3586 ecore_iwarp_mpa_offload(p_hwfn, ep);
3587 else
3588 ecore_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3589 fw_return_code);
3590 }
3591 }
3592
3593 static OSAL_INLINE bool
3594 ecore_iwarp_check_ep_ok(struct ecore_hwfn *p_hwfn,
3595 struct ecore_iwarp_ep *ep)
3596 {
3597 if (ep == OSAL_NULL) {
3598 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3599 return false;
3600 }
3601
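/* A valid EP carries the 0xdeadbeef magic in ep->sig; any other value means
 * the async handle passed back by FW no longer refers to a live, properly
 * initialized EP.
 */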
3602 if (ep->sig != 0xdeadbeef) {
3603 DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3604 return false;
3605 }
3606
3607 return true;
3608 }
3609
3610 static enum _ecore_status_t
3611 ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
3612 u8 fw_event_code,
3613 u16 OSAL_UNUSED echo,
3614 union event_ring_data *data,
3615 u8 fw_return_code)
3616 {
3617 struct regpair *fw_handle = &data->rdma_data.async_handle;
3618 struct ecore_iwarp_ep *ep = OSAL_NULL;
3619 u16 cid;
3620
3621 ep = (struct ecore_iwarp_ep *)(osal_uintptr_t)HILO_64(fw_handle->hi,
3622 fw_handle->lo);
3623
3624 switch (fw_event_code) {
3625 /* Async completion after TCP 3-way handshake */
3626 case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3627 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3628 return ECORE_INVAL;
3629 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3630 "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3631 ep->tcp_cid, fw_return_code);
3632 ecore_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3633 break;
3634 case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3635 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3636 return ECORE_INVAL;
3637 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3638 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3639 ep->cid, fw_return_code);
3640 ecore_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3641 break;
3642 /* Async completion for Close Connection ramrod */
3643 case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3644 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3645 return ECORE_INVAL;
3646 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3647 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3648 ep->cid, fw_return_code);
3649 ecore_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3650 break;
3651 /* Async event for active side only */
3652 case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3653 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3654 return ECORE_INVAL;
3655 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3656 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3657 ep->cid, fw_return_code);
3658 ecore_iwarp_mpa_reply_arrived(p_hwfn, ep);
3659 break;
3660 /* MPA Negotiations completed */
3661 case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3662 if (!ecore_iwarp_check_ep_ok(p_hwfn, ep))
3663 return ECORE_INVAL;
3664 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3665 "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3666 ep->cid, fw_return_code);
3667 ecore_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3668 break;
3669 case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3670 cid = (u16)OSAL_LE32_TO_CPU(fw_handle->lo);
3671 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3672 "(0x%x) IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n",
3673 cid);
3674 ecore_iwarp_cid_cleaned(p_hwfn, cid);
3675
3676 break;
3677 case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3678 DP_NOTICE(p_hwfn, false,
3679 "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3680
3681 p_hwfn->p_rdma_info->events.affiliated_event(
3682 p_hwfn->p_rdma_info->events.context,
3683 ECORE_IWARP_EVENT_CQ_OVERFLOW,
3684 (void *)fw_handle);
3685 break;
3686 default:
3687 DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3688 fw_event_code);
3689 return ECORE_INVAL;
3690 }
3691 return ECORE_SUCCESS;
3692 }
3693
3694 enum _ecore_status_t
3695 ecore_iwarp_create_listen(void *rdma_cxt,
3696 struct ecore_iwarp_listen_in *iparams,
3697 struct ecore_iwarp_listen_out *oparams)
3698 {
3699 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3700 struct ecore_iwarp_listener *listener;
3701
3702 listener = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*listener));
3703
3704 if (!listener) {
3705 DP_NOTICE(p_hwfn,
3706 false,
3707 "ecore iwarp create listener failed: cannot allocate memory (listener). rc = %d\n",
3708 ECORE_NOMEM);
3709 return ECORE_NOMEM;
3710 }
3711 listener->ip_version = iparams->ip_version;
3712 OSAL_MEMCPY(listener->ip_addr,
3713 iparams->ip_addr,
3714 sizeof(listener->ip_addr));
3715 listener->port = iparams->port;
3716 listener->vlan = iparams->vlan;
3717
3718 listener->event_cb = iparams->event_cb;
3719 listener->cb_context = iparams->cb_context;
3720 listener->max_backlog = iparams->max_backlog;
3721 listener->state = ECORE_IWARP_LISTENER_STATE_ACTIVE;
3722 oparams->handle = listener;
3723
3724 OSAL_SPIN_LOCK_INIT(&listener->lock);
3725 OSAL_LIST_INIT(&listener->ep_list);
3726 OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3727 OSAL_LIST_PUSH_TAIL(&listener->list_entry,
3728 &p_hwfn->p_rdma_info->iwarp.listen_list);
3729 OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3730
3731 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3732 listener->event_cb,
3733 listener,
3734 listener->ip_addr[0],
3735 listener->ip_addr[1],
3736 listener->ip_addr[2],
3737 listener->ip_addr[3],
3738 listener->port,
3739 listener->vlan);
3740
3741 return ECORE_SUCCESS;
3742 }
3743
3744 static void
3745 ecore_iwarp_pause_complete(struct ecore_iwarp_listener *listener)
3746 {
3747 struct ecore_iwarp_cm_event_params params;
3748
3749 if (listener->state == ECORE_IWARP_LISTENER_STATE_UNPAUSE)
3750 listener->state = ECORE_IWARP_LISTENER_STATE_ACTIVE;
3751
3752 params.event = ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP;
3753 listener->event_cb(listener->cb_context, &params);
3754 }
3755
3756 static void
3757 ecore_iwarp_tcp_abort_comp(struct ecore_hwfn *p_hwfn, void *cookie,
3758 union event_ring_data OSAL_UNUSED *data,
3759 u8 OSAL_UNUSED fw_return_code)
3760 {
3761 struct ecore_iwarp_ep *ep = (struct ecore_iwarp_ep *)cookie;
3762 struct ecore_iwarp_listener *listener = ep->listener;
3763
3764 ecore_iwarp_return_ep(p_hwfn, ep);
3765
3766 if (OSAL_LIST_IS_EMPTY(&listener->ep_list))
3767 listener->done = true;
3768 }
3769
3770 static void
3771 ecore_iwarp_abort_inflight_connections(struct ecore_hwfn *p_hwfn,
3772 struct ecore_iwarp_listener *listener)
3773 {
3774 struct ecore_spq_entry *p_ent = OSAL_NULL;
3775 struct ecore_iwarp_ep *ep = OSAL_NULL;
3776 struct ecore_sp_init_data init_data;
3777 struct ecore_spq_comp_cb comp_data;
3778 enum _ecore_status_t rc;
3779
3780 /* remove listener from list before destroying listener */
3781 OSAL_LIST_REMOVE_ENTRY(&listener->list_entry,
3782 &p_hwfn->p_rdma_info->iwarp.listen_list);
3783 if (OSAL_LIST_IS_EMPTY(&listener->ep_list)) {
3784 listener->done = true;
3785 return;
3786 }
3787 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
3788 init_data.p_comp_data = &comp_data;
3789 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3790 init_data.comp_mode = ECORE_SPQ_MODE_CB;
3791 init_data.p_comp_data->function = ecore_iwarp_tcp_abort_comp;
3792
3793 OSAL_LIST_FOR_EACH_ENTRY(ep, &listener->ep_list,
3794 list_entry, struct ecore_iwarp_ep) {
3795 ep->state = ECORE_IWARP_EP_ABORTING;
3796 init_data.p_comp_data->cookie = ep;
3797 init_data.cid = ep->tcp_cid;
3798 rc = ecore_sp_init_request(p_hwfn, &p_ent,
3799 IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD,
3800 PROTOCOLID_IWARP,
3801 &init_data);
3802 if (rc == ECORE_SUCCESS)
3803 ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
3804 }
3805 }
3806
3807 static void
3808 ecore_iwarp_listener_state_transition(struct ecore_hwfn *p_hwfn, void *cookie,
3809 union event_ring_data OSAL_UNUSED *data,
3810 u8 OSAL_UNUSED fw_return_code)
3811 {
3812 struct ecore_iwarp_listener *listener = (struct ecore_iwarp_listener *)cookie;
3813
3814 switch (listener->state) {
3815 case ECORE_IWARP_LISTENER_STATE_PAUSE:
3816 case ECORE_IWARP_LISTENER_STATE_UNPAUSE:
3817 ecore_iwarp_pause_complete(listener);
3818 break;
3819 case ECORE_IWARP_LISTENER_STATE_DESTROYING:
3820 ecore_iwarp_abort_inflight_connections(p_hwfn, listener);
3821 break;
3822 default:
3823 break;
3824 }
3825 }
3826
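/* Post an empty ramrod whose only effect is its completion callback:
 * ecore_iwarp_listener_state_transition() runs once FW has consumed the
 * request, so the empty ramrod effectively acts as a barrier that orders the
 * listener state change behind work already posted to the SPQ.
 */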
3827 static enum _ecore_status_t
3828 ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn,
3829 struct ecore_iwarp_listener *listener)
3830 {
3831 struct ecore_spq_entry *p_ent = OSAL_NULL;
3832 struct ecore_spq_comp_cb comp_data;
3833 struct ecore_sp_init_data init_data;
3834 enum _ecore_status_t rc;
3835
3836 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
3837 init_data.p_comp_data = &comp_data;
3838 init_data.cid = ecore_spq_get_cid(p_hwfn);
3839 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3840 init_data.comp_mode = ECORE_SPQ_MODE_CB;
3841 init_data.p_comp_data->function = ecore_iwarp_listener_state_transition;
3842 init_data.p_comp_data->cookie = listener;
3843 rc = ecore_sp_init_request(p_hwfn, &p_ent,
3844 COMMON_RAMROD_EMPTY,
3845 PROTOCOLID_COMMON,
3846 &init_data);
3847 if (rc != ECORE_SUCCESS)
3848 return rc;
3849
3850 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
3851 if (rc != ECORE_SUCCESS)
3852 return rc;
3853
3854 return rc;
3855 }
3856
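/* Pause or unpause a listener. When 'comp' is requested, an empty ramrod is
 * posted and ECORE_PENDING is returned; the upper layer learns of the
 * completion later through the ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP event
 * delivered by ecore_iwarp_pause_complete().
 */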
3857 enum _ecore_status_t
3858 ecore_iwarp_pause_listen(void *rdma_cxt, void *handle,
3859 bool pause, bool comp)
3860 {
3861 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3862 struct ecore_iwarp_listener *listener =
3863 (struct ecore_iwarp_listener *)handle;
3864 enum _ecore_status_t rc;
3865
3866 listener->state = pause ?
3867 ECORE_IWARP_LISTENER_STATE_PAUSE :
3868 ECORE_IWARP_LISTENER_STATE_UNPAUSE;
3869 if (!comp)
3870 return ECORE_SUCCESS;
3871
3872 rc = ecore_iwarp_empty_ramrod(p_hwfn, listener);
3873 if (rc != ECORE_SUCCESS)
3874 return rc;
3875
3876 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "listener=%p, state=%d\n",
3877 listener, listener->state);
3878
3879 return ECORE_PENDING;
3880 }
3881
3882 enum _ecore_status_t
3883 ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3884 {
3885 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3886 struct ecore_iwarp_listener *listener =
3887 (struct ecore_iwarp_listener *)handle;
3888 enum _ecore_status_t rc;
3889 int wait_count = 0;
3890
3891 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "handle=%p\n", handle);
3892
3893 listener->state = ECORE_IWARP_LISTENER_STATE_DESTROYING;
3894 rc = ecore_iwarp_empty_ramrod(p_hwfn, listener);
3895 if (rc != ECORE_SUCCESS)
3896 return rc;
3897
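/* Poll until every EP tied to this listener has been aborted and returned
 * (listener->done is set once the EP list drains); give up after roughly
 * 200 * 100ms = 20 seconds.
 */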
3898 while (!listener->done) {
3899 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
3900 "Waiting for ep list to be empty...\n");
3901 OSAL_MSLEEP(100);
3902 if (wait_count++ > 200) {
3903 DP_NOTICE(p_hwfn, false, "ep list close timeout\n");
3904 break;
3905 }
3906 }
3907
3908 OSAL_FREE(p_hwfn->p_dev, listener);
3909
3910 return ECORE_SUCCESS;
3911 }
3912
3913 enum _ecore_status_t
3914 ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams)
3915 {
3916 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
3917 struct ecore_sp_init_data init_data;
3918 struct ecore_spq_entry *p_ent;
3919 struct ecore_rdma_qp *qp;
3920 struct ecore_iwarp_ep *ep;
3921 enum _ecore_status_t rc;
3922
3923 ep = (struct ecore_iwarp_ep *)iparams->ep_context;
3924 if (!ep) {
3925 DP_ERR(p_hwfn, "EP context received in send_rtr is NULL\n");
3926 return ECORE_INVAL;
3927 }
3928
3929 qp = ep->qp;
3930
3931 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3932 qp->icid, ep->tcp_cid);
3933
3934 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
3935 init_data.cid = qp->icid;
3936 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3937 init_data.comp_mode = ECORE_SPQ_MODE_CB;
3938
3939 rc = ecore_sp_init_request(p_hwfn, &p_ent,
3940 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3941 PROTOCOLID_IWARP, &init_data);
3942
3943 if (rc != ECORE_SUCCESS)
3944 return rc;
3945
3946 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
3947
3948 DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ecore_iwarp_send_rtr, rc = 0x%x\n",
3949 rc);
3950
3951 return rc;
3952 }
3953
3954 enum _ecore_status_t
3955 ecore_iwarp_query_qp(struct ecore_rdma_qp *qp,
3956 struct ecore_rdma_query_qp_out_params *out_params)
3957 {
3958 out_params->state = ecore_iwarp2roce_state(qp->iwarp_state);
3959 return ECORE_SUCCESS;
3960 }
3961
3962 #ifdef _NTDDK_
3963 #pragma warning(pop)
3964 #endif
3965