1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4
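/* QP count limits for the available HMC resource profiles, indexed by
 * rf->limits_sel (see irdma_hmc_setup())
 */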
5 static struct irdma_rsrc_limits rsrc_limits_table[] = {
6 [0] = {
7 .qplimit = SZ_128,
8 },
9 [1] = {
10 .qplimit = SZ_1K,
11 },
12 [2] = {
13 .qplimit = SZ_2K,
14 },
15 [3] = {
16 .qplimit = SZ_4K,
17 },
18 [4] = {
19 .qplimit = SZ_16K,
20 },
21 [5] = {
22 .qplimit = SZ_64K,
23 },
24 [6] = {
25 .qplimit = SZ_128K,
26 },
27 [7] = {
28 .qplimit = SZ_256K,
29 },
30 };
31
32 /* types of hmc objects */
33 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
34 IRDMA_HMC_IW_QP,
35 IRDMA_HMC_IW_CQ,
36 IRDMA_HMC_IW_SRQ,
37 IRDMA_HMC_IW_HTE,
38 IRDMA_HMC_IW_ARP,
39 IRDMA_HMC_IW_APBVT_ENTRY,
40 IRDMA_HMC_IW_MR,
41 IRDMA_HMC_IW_XF,
42 IRDMA_HMC_IW_XFFL,
43 IRDMA_HMC_IW_Q1,
44 IRDMA_HMC_IW_Q1FL,
45 IRDMA_HMC_IW_PBLE,
46 IRDMA_HMC_IW_TIMER,
47 IRDMA_HMC_IW_FSIMC,
48 IRDMA_HMC_IW_FSIAV,
49 IRDMA_HMC_IW_RRF,
50 IRDMA_HMC_IW_RRFFL,
51 IRDMA_HMC_IW_HDR,
52 IRDMA_HMC_IW_MD,
53 IRDMA_HMC_IW_OOISC,
54 IRDMA_HMC_IW_OOISCFFL,
55 };
56
57 /**
58 * irdma_iwarp_ce_handler - handle iwarp completions
59 * @iwcq: iwarp cq receiving event
60 */
61 static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
62 {
63 struct irdma_cq *cq = iwcq->back_cq;
64
65 if (!cq->user_mode)
66 atomic_set(&cq->armed, 0);
67 if (cq->ibcq.comp_handler)
68 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
69 }
70
71 /**
72 * irdma_puda_ce_handler - handle puda completion events
73 * @rf: RDMA PCI function
74 * @cq: puda completion q for event
75 */
76 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
77 struct irdma_sc_cq *cq)
78 {
79 struct irdma_sc_dev *dev = &rf->sc_dev;
80 u32 compl_error;
81 int status;
82
83 do {
84 status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
85 if (status == -ENOENT)
86 break;
87 if (status) {
88 ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
89 break;
90 }
91 if (compl_error) {
92 ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err = 0x%x\n",
93 compl_error);
94 break;
95 }
96 } while (1);
97
98 irdma_sc_ccq_arm(cq);
99 }
100
101 /**
102 * irdma_process_ceq - handle ceq for completions
103 * @rf: RDMA PCI function
104 * @ceq: ceq having cq for completion
105 */
106 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
107 {
108 struct irdma_sc_dev *dev = &rf->sc_dev;
109 struct irdma_sc_ceq *sc_ceq;
110 struct irdma_sc_cq *cq;
111 unsigned long flags;
112
113 sc_ceq = &ceq->sc_ceq;
114 do {
115 spin_lock_irqsave(&ceq->ce_lock, flags);
116 cq = irdma_sc_process_ceq(dev, sc_ceq);
117 if (!cq) {
118 spin_unlock_irqrestore(&ceq->ce_lock, flags);
119 break;
120 }
121
122 if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
123 irdma_iwarp_ce_handler(cq);
124
125 spin_unlock_irqrestore(&ceq->ce_lock, flags);
126
127 if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
128 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
129 else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
130 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
131 irdma_puda_ce_handler(rf, cq);
132 } while (1);
133 }
134
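/**
 * irdma_set_flush_fields - set QP flush fields from an asynchronous event
 * @qp: hardware control QP
 * @info: AEQ entry info for the asynchronous event
 *
 * Record which work queues need flushing and, on GEN_3 and later
 * hardware, the erroring WQE index, then map the AE id to the flush
 * and event codes reported to the user.
 */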
135 static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
136 struct irdma_aeqe_info *info)
137 {
138 struct qp_err_code qp_err;
139
140 qp->sq_flush_code = info->sq;
141 qp->rq_flush_code = info->rq;
142 if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
143 if (info->sq) {
144 qp->err_sq_idx_valid = true;
145 qp->err_sq_idx = info->wqe_idx;
146 }
147 if (info->rq) {
148 qp->err_rq_idx_valid = true;
149 qp->err_rq_idx = info->wqe_idx;
150 }
151 }
152
153 qp_err = irdma_ae_to_qp_err_code(info->ae_id);
154 qp->flush_code = qp_err.flush_code;
155 qp->event_type = qp_err.event_type;
156 }
157
158 /**
159 * irdma_complete_cqp_request - perform post-completion cleanup
160 * @cqp: device CQP
161 * @cqp_request: CQP request
162 *
163 * Mark CQP request as done, wake up waiting thread or invoke
164 * callback function and release/free CQP request.
165 */
166 static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
167 struct irdma_cqp_request *cqp_request)
168 {
169 if (cqp_request->waiting) {
170 WRITE_ONCE(cqp_request->request_done, true);
171 wake_up(&cqp_request->waitq);
172 } else if (cqp_request->callback_fcn) {
173 cqp_request->callback_fcn(cqp_request);
174 }
175 irdma_put_cqp_request(cqp, cqp_request);
176 }
177
178 /**
179 * irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
180 * @rf: RDMA PCI function
181 * @info: AEQ entry info
182 */
183 static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
184 struct irdma_aeqe_info *info)
185 {
186 u32 sw_def_info;
187 u64 scratch;
188
189 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
190
191 irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
192 &scratch, &sw_def_info);
193 while (scratch) {
194 struct irdma_cqp_request *cqp_request =
195 (struct irdma_cqp_request *)(uintptr_t)scratch;
196
197 irdma_complete_cqp_request(&rf->cqp, cqp_request);
198 irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
199 &scratch, &sw_def_info);
200 }
201 }
202
203 /**
204 * irdma_process_aeq - handle aeq events
205 * @rf: RDMA PCI function
206 */
207 static void irdma_process_aeq(struct irdma_pci_f *rf)
208 {
209 struct irdma_sc_dev *dev = &rf->sc_dev;
210 struct irdma_aeq *aeq = &rf->aeq;
211 struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
212 struct irdma_aeqe_info aeinfo;
213 struct irdma_aeqe_info *info = &aeinfo;
214 int ret;
215 struct irdma_qp *iwqp = NULL;
216 struct irdma_cq *iwcq = NULL;
217 struct irdma_sc_qp *qp = NULL;
218 struct irdma_qp_host_ctx_info *ctx_info = NULL;
219 struct irdma_device *iwdev = rf->iwdev;
220 struct irdma_sc_srq *srq;
221 unsigned long flags;
222
223 u32 aeqcnt = 0;
224
225 if (!sc_aeq->size)
226 return;
227
228 do {
229 memset(info, 0, sizeof(*info));
230 ret = irdma_sc_get_next_aeqe(sc_aeq, info);
231 if (ret)
232 break;
233
234 if (info->aeqe_overflow) {
235 ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
236 rf->reset = true;
237 rf->gen_ops.request_reset(rf);
238 return;
239 }
240
241 aeqcnt++;
242 ibdev_dbg(&iwdev->ibdev,
243 "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
244 info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
245 info->iwarp_state, info->ae_src);
246
247 if (info->qp) {
248 spin_lock_irqsave(&rf->qptable_lock, flags);
249 iwqp = rf->qp_table[info->qp_cq_id];
250 if (!iwqp) {
251 spin_unlock_irqrestore(&rf->qptable_lock,
252 flags);
253 if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
254 atomic_dec(&iwdev->vsi.qp_suspend_reqs);
255 wake_up(&iwdev->suspend_wq);
256 continue;
257 }
258 ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
259 info->qp_cq_id);
260 continue;
261 }
262 irdma_qp_add_ref(&iwqp->ibqp);
263 spin_unlock_irqrestore(&rf->qptable_lock, flags);
264 qp = &iwqp->sc_qp;
265 spin_lock_irqsave(&iwqp->lock, flags);
266 iwqp->hw_tcp_state = info->tcp_state;
267 iwqp->hw_iwarp_state = info->iwarp_state;
268 if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
269 iwqp->last_aeq = info->ae_id;
270 spin_unlock_irqrestore(&iwqp->lock, flags);
271 } else if (info->srq) {
272 if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
273 continue;
274 } else {
275 if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
276 info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
277 continue;
278 }
279
280 switch (info->ae_id) {
281 struct irdma_cm_node *cm_node;
282 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
283 cm_node = iwqp->cm_node;
284 if (cm_node->accept_pend) {
285 atomic_dec(&cm_node->listener->pend_accepts_cnt);
286 cm_node->accept_pend = 0;
287 }
288 iwqp->rts_ae_rcvd = 1;
289 wake_up_interruptible(&iwqp->waitq);
290 break;
291 case IRDMA_AE_LLP_FIN_RECEIVED:
292 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
293 if (qp->term_flags)
294 break;
295 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
296 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
297 if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
298 iwqp->ibqp_state == IB_QPS_RTS) {
299 irdma_next_iw_state(iwqp,
300 IRDMA_QP_STATE_CLOSING,
301 0, 0, 0);
302 irdma_cm_disconn(iwqp);
303 }
304 irdma_schedule_cm_timer(iwqp->cm_node,
305 (struct irdma_puda_buf *)iwqp,
306 IRDMA_TIMER_TYPE_CLOSE,
307 1, 0);
308 }
309 break;
310 case IRDMA_AE_LLP_CLOSE_COMPLETE:
311 if (qp->term_flags)
312 irdma_terminate_done(qp, 0);
313 else
314 irdma_cm_disconn(iwqp);
315 break;
316 case IRDMA_AE_BAD_CLOSE:
317 case IRDMA_AE_RESET_SENT:
318 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
319 0);
320 irdma_cm_disconn(iwqp);
321 break;
322 case IRDMA_AE_LLP_CONNECTION_RESET:
323 if (atomic_read(&iwqp->close_timer_started))
324 break;
325 irdma_cm_disconn(iwqp);
326 break;
327 case IRDMA_AE_QP_SUSPEND_COMPLETE:
328 if (iwqp->iwdev->vsi.tc_change_pending) {
329 if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
330 wake_up(&iwqp->iwdev->suspend_wq);
331 }
332 if (iwqp->suspend_pending) {
333 iwqp->suspend_pending = false;
334 wake_up(&iwqp->iwdev->suspend_wq);
335 }
336 break;
337 case IRDMA_AE_TERMINATE_SENT:
338 irdma_terminate_send_fin(qp);
339 break;
340 case IRDMA_AE_LLP_TERMINATE_RECEIVED:
341 irdma_terminate_received(qp, info);
342 break;
343 case IRDMA_AE_CQ_OPERATION_ERROR:
344 ibdev_err(&iwdev->ibdev,
345 "Processing an iWARP related AE for CQ misc = 0x%04X\n",
346 info->ae_id);
347
348 spin_lock_irqsave(&rf->cqtable_lock, flags);
349 iwcq = rf->cq_table[info->qp_cq_id];
350 if (!iwcq) {
351 spin_unlock_irqrestore(&rf->cqtable_lock,
352 flags);
353 ibdev_dbg(to_ibdev(dev),
354 "cq_id %d is already freed\n", info->qp_cq_id);
355 continue;
356 }
357 irdma_cq_add_ref(&iwcq->ibcq);
358 spin_unlock_irqrestore(&rf->cqtable_lock, flags);
359
360 if (iwcq->ibcq.event_handler) {
361 struct ib_event ibevent;
362
363 ibevent.device = iwcq->ibcq.device;
364 ibevent.event = IB_EVENT_CQ_ERR;
365 ibevent.element.cq = &iwcq->ibcq;
366 iwcq->ibcq.event_handler(&ibevent,
367 iwcq->ibcq.cq_context);
368 }
369 irdma_cq_rem_ref(&iwcq->ibcq);
370 break;
371 case IRDMA_AE_SRQ_LIMIT:
372 srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
373 irdma_srq_event(srq);
374 break;
375 case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
376 break;
377 case IRDMA_AE_CQP_DEFERRED_COMPLETE:
378 /* Remove completed CQP requests from pending list
379 * and notify about those CQP ops completion.
380 */
381 irdma_process_ae_def_cmpl(rf, info);
382 break;
383 case IRDMA_AE_RESET_NOT_SENT:
384 case IRDMA_AE_LLP_DOUBT_REACHABILITY:
385 case IRDMA_AE_RESOURCE_EXHAUSTION:
386 break;
387 case IRDMA_AE_PRIV_OPERATION_DENIED:
388 case IRDMA_AE_STAG_ZERO_INVALID:
389 case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
390 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
391 case IRDMA_AE_DDP_UBE_INVALID_MO:
392 case IRDMA_AE_DDP_UBE_INVALID_QN:
393 case IRDMA_AE_DDP_NO_L_BIT:
394 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
395 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
396 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
397 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
398 case IRDMA_AE_INVALID_ARP_ENTRY:
399 case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
400 case IRDMA_AE_STALE_ARP_ENTRY:
401 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
402 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
403 case IRDMA_AE_LLP_SYN_RECEIVED:
404 case IRDMA_AE_LLP_TOO_MANY_RETRIES:
405 case IRDMA_AE_LCE_QP_CATASTROPHIC:
406 case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
407 case IRDMA_AE_LLP_TOO_MANY_RNRS:
408 case IRDMA_AE_LCE_CQ_CATASTROPHIC:
409 case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
410 case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
411 case IRDMA_AE_RCE_QP_CATASTROPHIC:
412 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
413 default:
414 ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
415 info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
416 ctx_info = &iwqp->ctx_info;
417 if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
418 ctx_info->roce_info->err_rq_idx_valid =
419 ctx_info->srq_valid ? false : info->err_rq_idx_valid;
420 if (ctx_info->roce_info->err_rq_idx_valid) {
421 ctx_info->roce_info->err_rq_idx = info->wqe_idx;
422 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
423 ctx_info);
424 }
425 irdma_set_flush_fields(qp, info);
426 irdma_cm_disconn(iwqp);
427 break;
428 }
429 ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
430 if (info->rq) {
431 ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
432 ctx_info->tcp_info_valid = false;
433 ctx_info->iwarp_info_valid = true;
434 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
435 ctx_info);
436 }
437 if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
438 iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
439 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
440 irdma_cm_disconn(iwqp);
441 } else {
442 irdma_terminate_connection(qp, info);
443 }
444 break;
445 }
446 if (info->qp)
447 irdma_qp_rem_ref(&iwqp->ibqp);
448 } while (1);
449
450 if (aeqcnt)
451 irdma_sc_repost_aeq_entries(dev, aeqcnt);
452 }
453
454 /**
455 * irdma_ena_intr - set up device interrupts
456 * @dev: hardware control device structure
457 * @msix_id: id of the interrupt to be enabled
458 */
459 static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
460 {
461 dev->irq_ops->irdma_en_irq(dev, msix_id);
462 }
463
464 /**
465 * irdma_dpc - tasklet for aeq and ceq 0
466 * @t: tasklet_struct ptr
467 */
468 static void irdma_dpc(struct tasklet_struct *t)
469 {
470 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
471
472 if (rf->msix_shared)
473 irdma_process_ceq(rf, rf->ceqlist);
474 irdma_process_aeq(rf);
475 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
476 }
477
478 /**
479 * irdma_ceq_dpc - dpc handler for CEQ
480 * @t: tasklet_struct ptr
481 */
482 static void irdma_ceq_dpc(struct tasklet_struct *t)
483 {
484 struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
485 struct irdma_pci_f *rf = iwceq->rf;
486
487 irdma_process_ceq(rf, iwceq);
488 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
489 }
490
491 /**
492 * irdma_save_msix_info - copy msix vector information to iwarp device
493 * @rf: RDMA PCI function
494 *
495 * Allocate iwdev msix table and copy the msix info to the table
496 * Return 0 if successful, otherwise return error
497 */
498 static int irdma_save_msix_info(struct irdma_pci_f *rf)
499 {
500 struct irdma_qvlist_info *iw_qvlist;
501 struct irdma_qv_info *iw_qvinfo;
502 struct msix_entry *pmsix;
503 u32 ceq_idx;
504 u32 i;
505 size_t size;
506
507 if (!rf->msix_count)
508 return -EINVAL;
509
510 size = sizeof(struct irdma_msix_vector) * rf->msix_count;
511 size += struct_size(iw_qvlist, qv_info, rf->msix_count);
512 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
513 if (!rf->iw_msixtbl)
514 return -ENOMEM;
515
516 rf->iw_qvlist = (struct irdma_qvlist_info *)
517 (&rf->iw_msixtbl[rf->msix_count]);
518 iw_qvlist = rf->iw_qvlist;
519 iw_qvinfo = iw_qvlist->qv_info;
520 iw_qvlist->num_vectors = rf->msix_count;
521 if (rf->msix_count <= num_online_cpus())
522 rf->msix_shared = true;
523
524 pmsix = rf->msix_entries;
525 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
526 rf->iw_msixtbl[i].idx = pmsix->entry;
527 rf->iw_msixtbl[i].irq = pmsix->vector;
528 rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
529 if (!i) {
530 iw_qvinfo->aeq_idx = 0;
531 if (rf->msix_shared)
532 iw_qvinfo->ceq_idx = ceq_idx++;
533 else
534 iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
535 } else {
536 iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
537 iw_qvinfo->ceq_idx = ceq_idx++;
538 }
539 iw_qvinfo->itr_idx = 3;
540 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
541 pmsix++;
542 }
543
544 return 0;
545 }
546
547 /**
548 * irdma_irq_handler - interrupt handler for aeq and ceq0
549 * @irq: Interrupt request number
550 * @data: RDMA PCI function
551 */
552 static irqreturn_t irdma_irq_handler(int irq, void *data)
553 {
554 struct irdma_pci_f *rf = data;
555
556 tasklet_schedule(&rf->dpc_tasklet);
557
558 return IRQ_HANDLED;
559 }
560
561 /**
562 * irdma_ceq_handler - interrupt handler for ceq
563 * @irq: interrupt request number
564 * @data: ceq pointer
565 */
566 static irqreturn_t irdma_ceq_handler(int irq, void *data)
567 {
568 struct irdma_ceq *iwceq = data;
569
570 if (iwceq->irq != irq)
571 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
572 iwceq->irq, irq);
573 tasklet_schedule(&iwceq->dpc_tasklet);
574
575 return IRQ_HANDLED;
576 }
577
578 /**
579 * irdma_destroy_irq - destroy device interrupts
580 * @rf: RDMA PCI function
581 * @msix_vec: msix vector to disable irq
582 * @dev_id: parameter to pass to free_irq (used during irq setup)
583 *
584 * The function is called when destroying aeq/ceq
585 */
586 static void irdma_destroy_irq(struct irdma_pci_f *rf,
587 struct irdma_msix_vector *msix_vec, void *dev_id)
588 {
589 struct irdma_sc_dev *dev = &rf->sc_dev;
590
591 dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
592 irq_update_affinity_hint(msix_vec->irq, NULL);
593 free_irq(msix_vec->irq, dev_id);
594 if (rf == dev_id) {
595 tasklet_kill(&rf->dpc_tasklet);
596 } else {
597 struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
598
599 tasklet_kill(&iwceq->dpc_tasklet);
600 }
601 }
602
603 /**
604 * irdma_destroy_cqp - destroy control qp
605 * @rf: RDMA PCI function
606 *
607 * Issue destroy cqp request and
608 * free the resources associated with the cqp
609 */
610 static void irdma_destroy_cqp(struct irdma_pci_f *rf)
611 {
612 struct irdma_sc_dev *dev = &rf->sc_dev;
613 struct irdma_cqp *cqp = &rf->cqp;
614 int status = 0;
615
616 status = irdma_sc_cqp_destroy(dev->cqp);
617 if (status)
618 ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
619
620 irdma_cleanup_pending_cqp_op(rf);
621 dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
622 cqp->sq.pa);
623 cqp->sq.va = NULL;
624 kfree(cqp->oop_op_array);
625 cqp->oop_op_array = NULL;
626 kfree(cqp->scratch_array);
627 cqp->scratch_array = NULL;
628 kfree(cqp->cqp_requests);
629 cqp->cqp_requests = NULL;
630 }
631
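/**
 * irdma_destroy_virt_aeq - destroy a virtually mapped AEQ
 * @rf: RDMA PCI function
 *
 * Unmap the AEQ pages, free the PBLE backing the page list and
 * release the virtually allocated AEQ memory.
 */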
632 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
633 {
634 struct irdma_aeq *aeq = &rf->aeq;
635 u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
636 dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
637
638 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
639 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
640 vfree(aeq->mem.va);
641 }
642
643 /**
644 * irdma_destroy_aeq - destroy aeq
645 * @rf: RDMA PCI function
646 *
647 * Issue a destroy aeq request and
648 * free the resources associated with the aeq
649 * The function is called during driver unload
650 */
651 static void irdma_destroy_aeq(struct irdma_pci_f *rf)
652 {
653 struct irdma_sc_dev *dev = &rf->sc_dev;
654 struct irdma_aeq *aeq = &rf->aeq;
655 int status = -EBUSY;
656
657 if (!rf->msix_shared) {
658 if (rf->sc_dev.privileged)
659 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
660 rf->iw_msixtbl->idx, false);
661 irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
662 }
663 if (rf->reset)
664 goto exit;
665
666 aeq->sc_aeq.size = 0;
667 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
668 if (status)
669 ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
670
671 exit:
672 if (aeq->virtual_map) {
673 irdma_destroy_virt_aeq(rf);
674 } else {
675 dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
676 aeq->mem.pa);
677 aeq->mem.va = NULL;
678 }
679 }
680
681 /**
682 * irdma_destroy_ceq - destroy ceq
683 * @rf: RDMA PCI function
684 * @iwceq: ceq to be destroyed
685 *
686 * Issue a destroy ceq request and
687 * free the resources associated with the ceq
688 */
689 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
690 {
691 struct irdma_sc_dev *dev = &rf->sc_dev;
692 int status;
693
694 if (rf->reset)
695 goto exit;
696
697 status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
698 if (status) {
699 ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
700 goto exit;
701 }
702
703 status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
704 if (status)
705 ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
706 status);
707 exit:
708 dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
709 iwceq->mem.pa);
710 iwceq->mem.va = NULL;
711 }
712
713 /**
714 * irdma_del_ceq_0 - destroy ceq 0
715 * @rf: RDMA PCI function
716 *
717 * Disable the ceq 0 interrupt and destroy the ceq 0
718 */
719 static void irdma_del_ceq_0(struct irdma_pci_f *rf)
720 {
721 struct irdma_ceq *iwceq = rf->ceqlist;
722 struct irdma_msix_vector *msix_vec;
723
724 if (rf->msix_shared) {
725 msix_vec = &rf->iw_msixtbl[0];
726 if (rf->sc_dev.privileged)
727 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
728 msix_vec->ceq_id,
729 msix_vec->idx, false);
730 irdma_destroy_irq(rf, msix_vec, rf);
731 } else {
732 msix_vec = &rf->iw_msixtbl[1];
733 irdma_destroy_irq(rf, msix_vec, iwceq);
734 }
735
736 irdma_destroy_ceq(rf, iwceq);
737 rf->sc_dev.ceq_valid = false;
738 rf->ceqs_count = 0;
739 }
740
741 /**
742 * irdma_del_ceqs - destroy all ceq's except CEQ 0
743 * @rf: RDMA PCI function
744 *
745 * Go through all of the device ceq's, except 0, and for each
746 * ceq disable the ceq interrupt and destroy the ceq
747 */
748 static void irdma_del_ceqs(struct irdma_pci_f *rf)
749 {
750 struct irdma_ceq *iwceq = &rf->ceqlist[1];
751 struct irdma_msix_vector *msix_vec;
752 u32 i = 0;
753
754 if (rf->msix_shared)
755 msix_vec = &rf->iw_msixtbl[1];
756 else
757 msix_vec = &rf->iw_msixtbl[2];
758
759 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
760 if (rf->sc_dev.privileged)
761 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
762 msix_vec->ceq_id,
763 msix_vec->idx, false);
764 irdma_destroy_irq(rf, msix_vec, iwceq);
765 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
766 IRDMA_OP_CEQ_DESTROY);
767 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
768 iwceq->mem.va, iwceq->mem.pa);
769 iwceq->mem.va = NULL;
770 }
771 rf->ceqs_count = 1;
772 }
773
774 /**
775 * irdma_destroy_ccq - destroy control cq
776 * @rf: RDMA PCI function
777 *
778 * Issue destroy ccq request and
779 * free the resources associated with the ccq
780 */
781 static void irdma_destroy_ccq(struct irdma_pci_f *rf)
782 {
783 struct irdma_sc_dev *dev = &rf->sc_dev;
784 struct irdma_ccq *ccq = &rf->ccq;
785 int status = 0;
786
787 if (rf->cqp_cmpl_wq)
788 destroy_workqueue(rf->cqp_cmpl_wq);
789
790 if (!rf->reset)
791 status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
792 if (status)
793 ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
794 dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
795 ccq->mem_cq.pa);
796 ccq->mem_cq.va = NULL;
797 }
798
799 /**
800 * irdma_close_hmc_objects_type - delete hmc objects of a given type
801 * @dev: iwarp device
802 * @obj_type: the hmc object type to be deleted
803 * @hmc_info: host memory info struct
804 * @privileged: permission to close HMC objects
805 * @reset: true if called before reset
806 */
807 static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
808 enum irdma_hmc_rsrc_type obj_type,
809 struct irdma_hmc_info *hmc_info,
810 bool privileged, bool reset)
811 {
812 struct irdma_hmc_del_obj_info info = {};
813
814 info.hmc_info = hmc_info;
815 info.rsrc_type = obj_type;
816 info.count = hmc_info->hmc_obj[obj_type].cnt;
817 info.privileged = privileged;
818 if (irdma_sc_del_hmc_obj(dev, &info, reset))
819 ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
820 obj_type);
821 }
822
823 /**
824 * irdma_del_hmc_objects - remove all device hmc objects
825 * @dev: iwarp device
826 * @hmc_info: hmc_info to free
827 * @privileged: permission to delete HMC objects
828 * @reset: true if called before reset
829 * @vers: hardware version
830 */
831 static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
832 struct irdma_hmc_info *hmc_info, bool privileged,
833 bool reset, enum irdma_vers vers)
834 {
835 unsigned int i;
836
837 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
838 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
839 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
840 hmc_info, privileged, reset);
841 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
842 break;
843 }
844 }
845
846 /**
847 * irdma_create_hmc_obj_type - create hmc object of a given type
848 * @dev: hardware control device structure
849 * @info: information for the hmc object to create
850 */
851 static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
852 struct irdma_hmc_create_obj_info *info)
853 {
854 return irdma_sc_create_hmc_obj(dev, info);
855 }
856
857 /**
858 * irdma_create_hmc_objs - create all hmc objects for the device
859 * @rf: RDMA PCI function
860 * @privileged: permission to create HMC objects
861 * @vers: HW version
862 *
863 * Create the device hmc objects and allocate hmc pages
864 * Return 0 if successful, otherwise clean up and return error
865 */
866 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
867 enum irdma_vers vers)
868 {
869 struct irdma_sc_dev *dev = &rf->sc_dev;
870 struct irdma_hmc_create_obj_info info = {};
871 int i, status = 0;
872
873 info.hmc_info = dev->hmc_info;
874 info.privileged = privileged;
875 info.entry_type = rf->sd_type;
876
877 for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
878 if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
879 continue;
880 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
881 info.rsrc_type = iw_hmc_obj_types[i];
882 info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
883 info.add_sd_cnt = 0;
884 status = irdma_create_hmc_obj_type(dev, &info);
885 if (status) {
886 ibdev_dbg(to_ibdev(dev),
887 "ERR: create obj type %d status = %d\n",
888 iw_hmc_obj_types[i], status);
889 break;
890 }
891 }
892 if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
893 break;
894 }
895
896 if (!status)
897 return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
898 true, true);
899
900 while (i) {
901 i--;
902 /* destroy the hmc objects of a given type */
903 if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
904 irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
905 dev->hmc_info, privileged,
906 false);
907 }
908
909 return status;
910 }
911
912 /**
913 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
914 * @rf: RDMA PCI function
915 * @memptr: points to the memory addresses
916 * @size: size of memory needed
917 * @mask: mask for the aligned memory
918 *
919 * Get aligned memory of the requested size and
920 * update the memptr to point to the new aligned memory
921 * Return 0 if successful, otherwise return no memory error
922 */
923 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
924 struct irdma_dma_mem *memptr, u32 size,
925 u32 mask)
926 {
927 unsigned long va, newva;
928 unsigned long extra;
929
930 va = (unsigned long)rf->obj_next.va;
931 newva = va;
932 if (mask)
933 newva = ALIGN(va, (unsigned long)mask + 1ULL);
934 extra = newva - va;
935 memptr->va = (u8 *)va + extra;
936 memptr->pa = rf->obj_next.pa + extra;
937 memptr->size = size;
938 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
939 return -ENOMEM;
940
941 rf->obj_next.va = (u8 *)memptr->va + size;
942 rf->obj_next.pa = memptr->pa + size;
943
944 return 0;
945 }
946
947 /**
948 * irdma_create_cqp - create control qp
949 * @rf: RDMA PCI function
950 *
951 * Return 0, if the cqp and all the resources associated with it
952 * are successfully created, otherwise return error
953 */
954 static int irdma_create_cqp(struct irdma_pci_f *rf)
955 {
956 u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
957 struct irdma_dma_mem mem;
958 struct irdma_sc_dev *dev = &rf->sc_dev;
959 struct irdma_cqp_init_info cqp_init_info = {};
960 struct irdma_cqp *cqp = &rf->cqp;
961 u16 maj_err, min_err;
962 int i, status;
963
964 cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
965 if (!cqp->cqp_requests)
966 return -ENOMEM;
967
968 cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
969 if (!cqp->scratch_array) {
970 status = -ENOMEM;
971 goto err_scratch;
972 }
973
974 cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array),
975 GFP_KERNEL);
976 if (!cqp->oop_op_array) {
977 status = -ENOMEM;
978 goto err_oop;
979 }
980 cqp_init_info.ooo_op_array = cqp->oop_op_array;
981 dev->cqp = &cqp->sc_cqp;
982 dev->cqp->dev = dev;
983 cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
984 IRDMA_CQP_ALIGNMENT);
985 cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
986 &cqp->sq.pa, GFP_KERNEL);
987 if (!cqp->sq.va) {
988 status = -ENOMEM;
989 goto err_sq;
990 }
991
992 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
993 IRDMA_HOST_CTX_ALIGNMENT_M);
994 if (status)
995 goto err_ctx;
996
997 dev->cqp->host_ctx_pa = mem.pa;
998 dev->cqp->host_ctx = mem.va;
999 /* populate the cqp init info */
1000 cqp_init_info.dev = dev;
1001 cqp_init_info.sq_size = sqsize;
1002 cqp_init_info.sq = cqp->sq.va;
1003 cqp_init_info.sq_pa = cqp->sq.pa;
1004 cqp_init_info.host_ctx_pa = mem.pa;
1005 cqp_init_info.host_ctx = mem.va;
1006 cqp_init_info.hmc_profile = rf->rsrc_profile;
1007 cqp_init_info.scratch_array = cqp->scratch_array;
1008 cqp_init_info.protocol_used = rf->protocol_used;
1009
1010 switch (rf->rdma_ver) {
1011 case IRDMA_GEN_1:
1012 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
1013 break;
1014 case IRDMA_GEN_2:
1015 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
1016 break;
1017 case IRDMA_GEN_3:
1018 cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
1019 cqp_init_info.ts_override = 1;
1020 break;
1021 }
1022 status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
1023 if (status) {
1024 ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
1025 goto err_ctx;
1026 }
1027
1028 spin_lock_init(&cqp->req_lock);
1029 spin_lock_init(&cqp->compl_lock);
1030
1031 status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
1032 if (status) {
1033 ibdev_dbg(to_ibdev(dev),
1034 "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
1035 status, maj_err, min_err);
1036 goto err_ctx;
1037 }
1038
1039 INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
1040 INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
1041
1042 /* init the waitqueue of the cqp_requests and add them to the list */
1043 for (i = 0; i < sqsize; i++) {
1044 init_waitqueue_head(&cqp->cqp_requests[i].waitq);
1045 list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
1046 }
1047 init_waitqueue_head(&cqp->remove_wq);
1048 return 0;
1049
1050 err_ctx:
1051 dma_free_coherent(dev->hw->device, cqp->sq.size,
1052 cqp->sq.va, cqp->sq.pa);
1053 cqp->sq.va = NULL;
1054 err_sq:
1055 kfree(cqp->oop_op_array);
1056 cqp->oop_op_array = NULL;
1057 err_oop:
1058 kfree(cqp->scratch_array);
1059 cqp->scratch_array = NULL;
1060 err_scratch:
1061 kfree(cqp->cqp_requests);
1062 cqp->cqp_requests = NULL;
1063
1064 return status;
1065 }
1066
1067 /**
1068 * irdma_create_ccq - create control cq
1069 * @rf: RDMA PCI function
1070 *
1071 * Return 0, if the ccq and the resources associated with it
1072 * are successfully created, otherwise return error
1073 */
1074 static int irdma_create_ccq(struct irdma_pci_f *rf)
1075 {
1076 struct irdma_sc_dev *dev = &rf->sc_dev;
1077 struct irdma_ccq_init_info info = {};
1078 struct irdma_ccq *ccq = &rf->ccq;
1079 int ccq_size;
1080 int status;
1081
1082 dev->ccq = &ccq->sc_cq;
1083 dev->ccq->dev = dev;
1084 info.dev = dev;
1085 ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
1086 ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
1087 ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
1088 IRDMA_CQ0_ALIGNMENT);
1089 ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
1090 &ccq->mem_cq.pa, GFP_KERNEL);
1091 if (!ccq->mem_cq.va)
1092 return -ENOMEM;
1093
1094 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1095 ccq->shadow_area.size,
1096 IRDMA_SHADOWAREA_M);
1097 if (status)
1098 goto exit;
1099
1100 ccq->sc_cq.back_cq = ccq;
1101 /* populate the ccq init info */
1102 info.cq_base = ccq->mem_cq.va;
1103 info.cq_pa = ccq->mem_cq.pa;
1104 info.num_elem = ccq_size;
1105 info.shadow_area = ccq->shadow_area.va;
1106 info.shadow_area_pa = ccq->shadow_area.pa;
1107 info.ceqe_mask = false;
1108 info.ceq_id_valid = true;
1109 info.shadow_read_threshold = 16;
1110 info.vsi = &rf->default_vsi;
1111 status = irdma_sc_ccq_init(dev->ccq, &info);
1112 if (!status)
1113 status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
1114 exit:
1115 if (status) {
1116 dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
1117 ccq->mem_cq.va, ccq->mem_cq.pa);
1118 ccq->mem_cq.va = NULL;
1119 }
1120
1121 return status;
1122 }
1123
1124 /**
1125 * irdma_alloc_set_mac - set up a mac address table entry
1126 * @iwdev: irdma device
1127 *
1128 * Allocate a mac ip entry and add it to the hw table. Return 0
1129 * if successful, otherwise return error
1130 */
1131 static int irdma_alloc_set_mac(struct irdma_device *iwdev)
1132 {
1133 int status;
1134
1135 status = irdma_alloc_local_mac_entry(iwdev->rf,
1136 &iwdev->mac_ip_table_idx);
1137 if (!status) {
1138 status = irdma_add_local_mac_entry(iwdev->rf,
1139 (const u8 *)iwdev->netdev->dev_addr,
1140 (u8)iwdev->mac_ip_table_idx);
1141 if (status)
1142 irdma_del_local_mac_entry(iwdev->rf,
1143 (u8)iwdev->mac_ip_table_idx);
1144 }
1145 return status;
1146 }
1147
1148 /**
1149 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
1150 * ceq
1151 * @rf: RDMA PCI function
1152 * @iwceq: ceq associated with the vector
1153 * @ceq_id: the id number of the iwceq
1154 * @msix_vec: interrupt vector information
1155 *
1156 * Allocate interrupt resources and enable irq handling
1157 * Return 0 if successful, otherwise return error
1158 */
1159 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1160 u32 ceq_id, struct irdma_msix_vector *msix_vec)
1161 {
1162 int status;
1163
1164 if (rf->msix_shared && !ceq_id) {
1165 snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1166 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
1167 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1168 status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1169 msix_vec->name, rf);
1170 } else {
1171 snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1172 "irdma-%s-CEQ-%d",
1173 dev_name(&rf->pcidev->dev), ceq_id);
1174 tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
1175
1176 status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
1177 msix_vec->name, iwceq);
1178 }
1179 cpumask_clear(&msix_vec->mask);
1180 cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
1181 irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
1182 if (status) {
1183 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1184 return status;
1185 }
1186
1187 msix_vec->ceq_id = ceq_id;
1188 if (rf->sc_dev.privileged)
1189 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
1190 msix_vec->idx, true);
1191 else
1192 status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
1193 msix_vec->idx);
1194 return status;
1195 }
1196
1197 /**
1198 * irdma_cfg_aeq_vector - set up the msix vector for aeq
1199 * @rf: RDMA PCI function
1200 *
1201 * Allocate interrupt resources and enable irq handling
1202 * Return 0 if successful, otherwise return error
1203 */
1204 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1205 {
1206 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1207 int ret = 0;
1208
1209 if (!rf->msix_shared) {
1210 snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1211 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
1212 tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1213 ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1214 msix_vec->name, rf);
1215 }
1216 if (ret) {
1217 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1218 return ret;
1219 }
1220
1221 if (rf->sc_dev.privileged)
1222 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
1223 true);
1224 else
1225 ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);
1226
1227 return ret;
1228 }
1229
1230 /**
1231 * irdma_create_ceq - create completion event queue
1232 * @rf: RDMA PCI function
1233 * @iwceq: pointer to the ceq resources to be created
1234 * @ceq_id: the id number of the iwceq
1235 * @vsi_idx: vsi idx
1236 *
1237 * Return 0, if the ceq and the resources associated with it
1238 * are successfully created, otherwise return error
1239 */
1240 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1241 u32 ceq_id, u16 vsi_idx)
1242 {
1243 int status;
1244 struct irdma_ceq_init_info info = {};
1245 struct irdma_sc_dev *dev = &rf->sc_dev;
1246 u32 ceq_size;
1247
1248 info.ceq_id = ceq_id;
1249 iwceq->rf = rf;
1250 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1251 dev->hw_attrs.max_hw_ceq_size);
1252 iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
1253 IRDMA_CEQ_ALIGNMENT);
1254 iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
1255 &iwceq->mem.pa, GFP_KERNEL);
1256 if (!iwceq->mem.va)
1257 return -ENOMEM;
1258
1259 info.ceq_id = ceq_id;
1260 info.ceqe_base = iwceq->mem.va;
1261 info.ceqe_pa = iwceq->mem.pa;
1262 info.elem_cnt = ceq_size;
1263 iwceq->sc_ceq.ceq_id = ceq_id;
1264 info.dev = dev;
1265 info.vsi_idx = vsi_idx;
1266 status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
1267 if (!status) {
1268 if (dev->ceq_valid)
1269 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1270 IRDMA_OP_CEQ_CREATE);
1271 else
1272 status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
1273 }
1274
1275 if (status) {
1276 dma_free_coherent(dev->hw->device, iwceq->mem.size,
1277 iwceq->mem.va, iwceq->mem.pa);
1278 iwceq->mem.va = NULL;
1279 }
1280
1281 return status;
1282 }
1283
1284 /**
1285 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
1286 * @rf: RDMA PCI function
1287 *
1288 * Allocate a list for all device completion event queues
1289 * Create the ceq 0 and configure its msix interrupt vector
1290 * Return 0, if successfully set up, otherwise return error
1291 */
1292 static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
1293 {
1294 struct irdma_ceq *iwceq;
1295 struct irdma_msix_vector *msix_vec;
1296 u32 i;
1297 int status = 0;
1298 u32 num_ceqs;
1299
1300 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1301 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1302 if (!rf->ceqlist) {
1303 status = -ENOMEM;
1304 goto exit;
1305 }
1306
1307 iwceq = &rf->ceqlist[0];
1308 status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
1309 if (status) {
1310 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1311 status);
1312 goto exit;
1313 }
1314
1315 spin_lock_init(&iwceq->ce_lock);
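/* CEQ 0 shares MSI-X vector 0 with the AEQ when msix_shared,
 * otherwise it uses its own dedicated vector 1
 */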
1316 i = rf->msix_shared ? 0 : 1;
1317 msix_vec = &rf->iw_msixtbl[i];
1318 iwceq->irq = msix_vec->irq;
1319 iwceq->msix_idx = msix_vec->idx;
1320 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1321 if (status) {
1322 irdma_destroy_ceq(rf, iwceq);
1323 goto exit;
1324 }
1325
1326 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1327 rf->ceqs_count++;
1328
1329 exit:
1330 if (status && !rf->ceqs_count) {
1331 kfree(rf->ceqlist);
1332 rf->ceqlist = NULL;
1333 return status;
1334 }
1335 rf->sc_dev.ceq_valid = true;
1336
1337 return 0;
1338 }
1339
1340 /**
1341 * irdma_setup_ceqs - manage the device ceq's and their interrupt resources
1342 * @rf: RDMA PCI function
1343 * @vsi_idx: vsi_idx for this CEQ
1344 *
1345 * Allocate a list for all device completion event queues
1346 * Create the ceq's and configure their msix interrupt vectors
1347 * Return 0, if ceqs are successfully set up, otherwise return error
1348 */
1349 static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
1350 {
1351 u32 i;
1352 u32 ceq_id;
1353 struct irdma_ceq *iwceq;
1354 struct irdma_msix_vector *msix_vec;
1355 int status;
1356 u32 num_ceqs;
1357
1358 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
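/* MSI-X vectors for CEQ 1 and above start after the vector(s)
 * already claimed by the AEQ and CEQ 0
 */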
1359 i = (rf->msix_shared) ? 1 : 2;
1360 for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
1361 iwceq = &rf->ceqlist[ceq_id];
1362 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
1363 if (status) {
1364 ibdev_dbg(&rf->iwdev->ibdev,
1365 "ERR: create ceq status = %d\n", status);
1366 goto del_ceqs;
1367 }
1368 spin_lock_init(&iwceq->ce_lock);
1369 msix_vec = &rf->iw_msixtbl[i];
1370 iwceq->irq = msix_vec->irq;
1371 iwceq->msix_idx = msix_vec->idx;
1372 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1373 if (status) {
1374 irdma_destroy_ceq(rf, iwceq);
1375 goto del_ceqs;
1376 }
1377 irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1378 rf->ceqs_count++;
1379 }
1380
1381 return 0;
1382
1383 del_ceqs:
1384 irdma_del_ceqs(rf);
1385
1386 return status;
1387 }
1388
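/**
 * irdma_create_virt_aeq - create a virtually mapped AEQ
 * @rf: RDMA PCI function
 * @size: number of AEQ elements
 *
 * Allocate the AEQ from virtual memory, get a PBLE for the page list
 * and map the AEQ pages for hardware access. Virtual AEQ is not
 * supported before GEN_2.
 * Return 0 if successful, otherwise return error.
 */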
1389 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
1390 {
1391 struct irdma_aeq *aeq = &rf->aeq;
1392 dma_addr_t *pg_arr;
1393 u32 pg_cnt;
1394 int status;
1395
1396 if (rf->rdma_ver < IRDMA_GEN_2)
1397 return -EOPNOTSUPP;
1398
1399 aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
1400 aeq->mem.va = vzalloc(aeq->mem.size);
1401
1402 if (!aeq->mem.va)
1403 return -ENOMEM;
1404
1405 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
1406 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1407 if (status) {
1408 vfree(aeq->mem.va);
1409 return status;
1410 }
1411
1412 pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
1413 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1414 if (status) {
1415 irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1416 vfree(aeq->mem.va);
1417 return status;
1418 }
1419
1420 return 0;
1421 }
1422
1423 /**
1424 * irdma_create_aeq - create async event queue
1425 * @rf: RDMA PCI function
1426 *
1427 * Return 0, if the aeq and the resources associated with it
1428 * are successfully created, otherwise return error
1429 */
1430 static int irdma_create_aeq(struct irdma_pci_f *rf)
1431 {
1432 struct irdma_aeq_init_info info = {};
1433 struct irdma_sc_dev *dev = &rf->sc_dev;
1434 struct irdma_aeq *aeq = &rf->aeq;
1435 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1436 u32 aeq_size;
1437 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1438 int status;
1439
1440 aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
1441 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1442 aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
1443 /* GEN_3 does not support virtual AEQ. Cap at max Kernel alloc size */
1444 if (rf->rdma_ver == IRDMA_GEN_3)
1445 aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
1446 sizeof(struct irdma_sc_aeqe)));
1447 aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
1448 IRDMA_AEQ_ALIGNMENT);
1449 aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
1450 &aeq->mem.pa,
1451 GFP_KERNEL | __GFP_NOWARN);
1452 if (aeq->mem.va)
1453 goto skip_virt_aeq;
1454 else if (rf->rdma_ver == IRDMA_GEN_3)
1455 return -ENOMEM;
1456
1457 /* physically mapped aeq failed. setup virtual aeq */
1458 status = irdma_create_virt_aeq(rf, aeq_size);
1459 if (status)
1460 return status;
1461
1462 info.virtual_map = true;
1463 aeq->virtual_map = info.virtual_map;
1464 info.pbl_chunk_size = 1;
1465 info.first_pm_pbl_idx = aeq->palloc.level1.idx;
1466
1467 skip_virt_aeq:
1468 info.aeqe_base = aeq->mem.va;
1469 info.aeq_elem_pa = aeq->mem.pa;
1470 info.elem_cnt = aeq_size;
1471 info.dev = dev;
1472 info.msix_idx = rf->iw_msixtbl->idx;
1473 status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
1474 if (status)
1475 goto err;
1476
1477 status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
1478 if (status)
1479 goto err;
1480
1481 return 0;
1482
1483 err:
1484 if (aeq->virtual_map) {
1485 irdma_destroy_virt_aeq(rf);
1486 } else {
1487 dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
1488 aeq->mem.pa);
1489 aeq->mem.va = NULL;
1490 }
1491
1492 return status;
1493 }
1494
1495 /**
1496 * irdma_setup_aeq - set up the device aeq
1497 * @rf: RDMA PCI function
1498 *
1499 * Create the aeq and configure its msix interrupt vector
1500 * Return 0 if successful, otherwise return error
1501 */
1502 static int irdma_setup_aeq(struct irdma_pci_f *rf)
1503 {
1504 struct irdma_sc_dev *dev = &rf->sc_dev;
1505 int status;
1506
1507 status = irdma_create_aeq(rf);
1508 if (status)
1509 return status;
1510
1511 status = irdma_cfg_aeq_vector(rf);
1512 if (status) {
1513 irdma_destroy_aeq(rf);
1514 return status;
1515 }
1516
1517 if (!rf->msix_shared)
1518 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1519
1520 return 0;
1521 }
1522
1523 /**
1524 * irdma_initialize_ilq - create iwarp local queue for cm
1525 * @iwdev: irdma device
1526 *
1527 * Return 0 if successful, otherwise return error
1528 */
1529 static int irdma_initialize_ilq(struct irdma_device *iwdev)
1530 {
1531 struct irdma_puda_rsrc_info info = {};
1532 int status;
1533
1534 info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
1535 info.cq_id = 1;
1536 info.qp_id = 1;
1537 info.count = 1;
1538 info.pd_id = 1;
1539 info.abi_ver = IRDMA_ABI_VER;
1540 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1541 info.rq_size = info.sq_size;
1542 info.buf_size = 1024;
1543 info.tx_buf_cnt = 2 * info.sq_size;
1544 info.receive = irdma_receive_ilq;
1545 info.xmit_complete = irdma_free_sqbuf;
1546 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1547 if (status)
1548 ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
1549
1550 return status;
1551 }
1552
1553 /**
1554 * irdma_initialize_ieq - create iwarp exception queue
1555 * @iwdev: irdma device
1556 *
1557 * Return 0 if successful, otherwise return error
1558 */
1559 static int irdma_initialize_ieq(struct irdma_device *iwdev)
1560 {
1561 struct irdma_puda_rsrc_info info = {};
1562 int status;
1563
1564 info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
1565 info.cq_id = 2;
1566 info.qp_id = iwdev->vsi.exception_lan_q;
1567 info.count = 1;
1568 info.pd_id = 2;
1569 info.abi_ver = IRDMA_ABI_VER;
1570 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1571 info.rq_size = info.sq_size;
1572 info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1573 info.tx_buf_cnt = 4096;
1574 status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1575 if (status)
1576 ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
1577
1578 return status;
1579 }
1580
1581 /**
1582 * irdma_reinitialize_ieq - destroy and re-create ieq
1583 * @vsi: VSI structure
1584 */
1585 void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
1586 {
1587 struct irdma_device *iwdev = vsi->back_vsi;
1588 struct irdma_pci_f *rf = iwdev->rf;
1589
1590 irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
1591 if (irdma_initialize_ieq(iwdev)) {
1592 iwdev->rf->reset = true;
1593 rf->gen_ops.request_reset(rf);
1594 }
1595 }
1596
1597 /**
1598 * irdma_hmc_setup - create hmc objects for the device
1599 * @rf: RDMA PCI function
1600 *
1601 * Set up the device private memory space for the number and size of
1602 * the hmc objects and create the objects
1603 * Return 0 if successful, otherwise return error
1604 */
1605 static int irdma_hmc_setup(struct irdma_pci_f *rf)
1606 {
1607 int status;
1608 u32 qpcnt;
1609
1610 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1611
1612 rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1613 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
1614 if (status)
1615 return status;
1616
1617 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1618
1619 return status;
1620 }
1621
1622 /**
1623 * irdma_del_init_mem - deallocate memory resources
1624 * @rf: RDMA PCI function
1625 */
1626 static void irdma_del_init_mem(struct irdma_pci_f *rf)
1627 {
1628 struct irdma_sc_dev *dev = &rf->sc_dev;
1629
1630 if (!rf->sc_dev.privileged)
1631 irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
1632 kfree(dev->hmc_info->sd_table.sd_entry);
1633 dev->hmc_info->sd_table.sd_entry = NULL;
1634 vfree(rf->mem_rsrc);
1635 rf->mem_rsrc = NULL;
1636 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1637 rf->obj_mem.pa);
1638 rf->obj_mem.va = NULL;
1639 if (rf->rdma_ver != IRDMA_GEN_1) {
1640 bitmap_free(rf->allocated_ws_nodes);
1641 rf->allocated_ws_nodes = NULL;
1642 }
1643 kfree(rf->ceqlist);
1644 rf->ceqlist = NULL;
1645 kfree(rf->iw_msixtbl);
1646 rf->iw_msixtbl = NULL;
1647 kfree(rf->hmc_info_mem);
1648 rf->hmc_info_mem = NULL;
1649 }
1650
1651 /**
1652 * irdma_initialize_dev - initialize device
1653 * @rf: RDMA PCI function
1654 *
1655 * Allocate memory for the hmc objects and initialize iwdev
1656 * Return 0 if successful, otherwise clean up the resources
1657 * and return error
1658 */
1659 static int irdma_initialize_dev(struct irdma_pci_f *rf)
1660 {
1661 int status;
1662 struct irdma_sc_dev *dev = &rf->sc_dev;
1663 struct irdma_device_init_info info = {};
1664 struct irdma_dma_mem mem;
1665 u32 size;
1666
1667 size = sizeof(struct irdma_hmc_pble_rsrc) +
1668 sizeof(struct irdma_hmc_info) +
1669 (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
1670
1671 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1672 if (!rf->hmc_info_mem)
1673 return -ENOMEM;
1674
1675 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1676 dev->hmc_info = &rf->hw.hmc;
1677 dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
1678 (rf->pble_rsrc + 1);
1679
1680 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1681 IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
1682 if (status)
1683 goto error;
1684
1685 info.fpm_query_buf_pa = mem.pa;
1686 info.fpm_query_buf = mem.va;
1687
1688 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1689 IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
1690 if (status)
1691 goto error;
1692
1693 info.fpm_commit_buf_pa = mem.pa;
1694 info.fpm_commit_buf = mem.va;
1695
1696 info.bar0 = rf->hw.hw_addr;
1697 info.hmc_fn_id = rf->pf_id;
1698 info.protocol_used = rf->protocol_used;
1699 info.hw = &rf->hw;
1700 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
1701 if (status)
1702 goto error;
1703
1704 return status;
1705 error:
1706 kfree(rf->hmc_info_mem);
1707 rf->hmc_info_mem = NULL;
1708
1709 return status;
1710 }
1711
1712 /**
1713 * irdma_rt_deinit_hw - clean up the irdma device resources
1714 * @iwdev: irdma device
1715 *
1716 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
1717 * device queues and free the pble and the hmc objects
1718 */
1719 void irdma_rt_deinit_hw(struct irdma_device *iwdev)
1720 {
1721 ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1722
1723 switch (iwdev->init_state) {
1724 case IP_ADDR_REGISTERED:
1725 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1726 irdma_del_local_mac_entry(iwdev->rf,
1727 (u8)iwdev->mac_ip_table_idx);
1728 fallthrough;
1729 case IEQ_CREATED:
1730 if (!iwdev->roce_mode)
1731 irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1732 iwdev->rf->reset);
1733 fallthrough;
1734 case ILQ_CREATED:
1735 if (!iwdev->roce_mode)
1736 irdma_puda_dele_rsrc(&iwdev->vsi,
1737 IRDMA_PUDA_RSRC_TYPE_ILQ,
1738 iwdev->rf->reset);
1739 break;
1740 default:
1741 ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1742 break;
1743 }
1744
1745 irdma_cleanup_cm_core(&iwdev->cm_core);
1746 if (iwdev->vsi.pestat) {
1747 irdma_vsi_stats_free(&iwdev->vsi);
1748 kfree(iwdev->vsi.pestat);
1749 }
1750 if (iwdev->cleanup_wq)
1751 destroy_workqueue(iwdev->cleanup_wq);
1752 }
1753
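/**
 * irdma_setup_init_state - set up the initial device resources
 * @rf: RDMA PCI function
 *
 * Save the msix vector information, allocate the device object memory
 * and initialize the hardware control device structure.
 * Return 0 if successful, otherwise clean up and return error.
 */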
1754 static int irdma_setup_init_state(struct irdma_pci_f *rf)
1755 {
1756 int status;
1757
1758 status = irdma_save_msix_info(rf);
1759 if (status)
1760 return status;
1761
1762 rf->hw.device = &rf->pcidev->dev;
1763 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
1764 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
1765 &rf->obj_mem.pa, GFP_KERNEL);
1766 if (!rf->obj_mem.va) {
1767 status = -ENOMEM;
1768 goto clean_msixtbl;
1769 }
1770
1771 rf->obj_next = rf->obj_mem;
1772 status = irdma_initialize_dev(rf);
1773 if (status)
1774 goto clean_obj_mem;
1775
1776 return 0;
1777
1778 clean_obj_mem:
1779 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1780 rf->obj_mem.pa);
1781 rf->obj_mem.va = NULL;
1782 clean_msixtbl:
1783 kfree(rf->iw_msixtbl);
1784 rf->iw_msixtbl = NULL;
1785 return status;
1786 }
1787
1788 /**
1789 * irdma_get_used_rsrc - determine resources used internally
1790 * @iwdev: irdma device
1791 *
1792 * Called at the end of open to get all internal allocations
1793 */
1794 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
1795 {
1796 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1797 iwdev->rf->max_pd);
1798 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1799 iwdev->rf->max_qp);
1800 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1801 iwdev->rf->max_cq);
1802 iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
1803 iwdev->rf->max_srq);
1804 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1805 iwdev->rf->max_mr);
1806 }
1807
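/**
 * irdma_ctrl_deinit_hw - destroy the control portion of HW
 * @rf: RDMA PCI function
 *
 * Tear down the control resources in the reverse order of
 * irdma_ctrl_init_hw(), based on how far initialization progressed.
 */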
1808 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1809 {
1810 enum init_completion_state state = rf->init_state;
1811
1812 rf->init_state = INVALID_STATE;
1813
1814 switch (state) {
1815 case AEQ_CREATED:
1816 irdma_destroy_aeq(rf);
1817 fallthrough;
1818 case PBLE_CHUNK_MEM:
1819 irdma_destroy_pble_prm(rf->pble_rsrc);
1820 fallthrough;
1821 case CEQS_CREATED:
1822 irdma_del_ceqs(rf);
1823 fallthrough;
1824 case CEQ0_CREATED:
1825 irdma_del_ceq_0(rf);
1826 fallthrough;
1827 case CCQ_CREATED:
1828 irdma_destroy_ccq(rf);
1829 fallthrough;
1830 case HW_RSRC_INITIALIZED:
1831 case HMC_OBJS_CREATED:
1832 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1833 rf->reset, rf->rdma_ver);
1834 fallthrough;
1835 case CQP_CREATED:
1836 irdma_destroy_cqp(rf);
1837 fallthrough;
1838 case INITIAL_STATE:
1839 irdma_del_init_mem(rf);
1840 break;
1841 case INVALID_STATE:
1842 default:
1843 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
1844 break;
1845 }
1846 }
1847
1848 /**
1849 * irdma_rt_init_hw - Initializes runtime portion of HW
1850 * @iwdev: irdma device
1851 * @l2params: qos, tc, mtu info from netdev driver
1852 *
1853 * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
1854 * device resource objects.
1855 */
1856 int irdma_rt_init_hw(struct irdma_device *iwdev,
1857 struct irdma_l2params *l2params)
1858 {
1859 struct irdma_pci_f *rf = iwdev->rf;
1860 struct irdma_sc_dev *dev = &rf->sc_dev;
1861 struct irdma_vsi_init_info vsi_info = {};
1862 struct irdma_vsi_stats_info stats_info = {};
1863 int status;
1864
1865 vsi_info.dev = dev;
1866 vsi_info.back_vsi = iwdev;
1867 vsi_info.params = l2params;
1868 vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1869 vsi_info.register_qset = rf->gen_ops.register_qset;
1870 vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1871 vsi_info.exception_lan_q = 2;
1872 irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1873
1874 status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1875 if (status)
1876 return status;
1877
1878 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1879 if (!stats_info.pestat) {
1880 irdma_cleanup_cm_core(&iwdev->cm_core);
1881 return -ENOMEM;
1882 }
1883 stats_info.fcn_id = dev->hmc_fn_id;
1884 status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1885 if (status) {
1886 irdma_cleanup_cm_core(&iwdev->cm_core);
1887 kfree(stats_info.pestat);
1888 return status;
1889 }
1890
1891 do {
1892 if (!iwdev->roce_mode) {
1893 status = irdma_initialize_ilq(iwdev);
1894 if (status)
1895 break;
1896 iwdev->init_state = ILQ_CREATED;
1897 status = irdma_initialize_ieq(iwdev);
1898 if (status)
1899 break;
1900 iwdev->init_state = IEQ_CREATED;
1901 }
1902 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1903 irdma_alloc_set_mac(iwdev);
1904 irdma_add_ip(iwdev);
1905 iwdev->init_state = IP_ADDR_REGISTERED;
1906
1907 /* handles async cleanup tasks - disconnect CM, free qp,
1908 * free cq bufs
1909 */
1910 iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1911 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1912 if (!iwdev->cleanup_wq)
1913 return -ENOMEM;
1914 irdma_get_used_rsrc(iwdev);
1915 init_waitqueue_head(&iwdev->suspend_wq);
1916
1917 return 0;
1918 } while (0);
1919
1920 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
1921 status, iwdev->init_state);
1922 irdma_rt_deinit_hw(iwdev);
1923
1924 return status;
1925 }
1926
1927 /**
1928 * irdma_ctrl_init_hw - Initializes control portion of HW
1929 * @rf: RDMA PCI function
1930 *
1931 * Create admin queues, HMC objects and RF resource objects
1932 */
1933 int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
1934 {
1935 struct irdma_sc_dev *dev = &rf->sc_dev;
1936 int status;
1937 do {
1938 status = irdma_setup_init_state(rf);
1939 if (status)
1940 break;
1941 rf->init_state = INITIAL_STATE;
1942
1943 status = irdma_create_cqp(rf);
1944 if (status)
1945 break;
1946 rf->init_state = CQP_CREATED;
1947
1948 dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
1949 if (rf->rdma_ver != IRDMA_GEN_1) {
1950 status = irdma_get_rdma_features(dev);
1951 if (status)
1952 break;
1953 }
1954
1955 status = irdma_hmc_setup(rf);
1956 if (status)
1957 break;
1958 rf->init_state = HMC_OBJS_CREATED;
1959
1960 status = irdma_initialize_hw_rsrc(rf);
1961 if (status)
1962 break;
1963 rf->init_state = HW_RSRC_INITIALIZED;
1964
1965 status = irdma_create_ccq(rf);
1966 if (status)
1967 break;
1968 rf->init_state = CCQ_CREATED;
1969
1970 status = irdma_setup_ceq_0(rf);
1971 if (status)
1972 break;
1973 rf->init_state = CEQ0_CREATED;
1974 /* Handles processing of CQP completions */
1975 rf->cqp_cmpl_wq =
1976 alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
1977 if (!rf->cqp_cmpl_wq) {
1978 status = -ENOMEM;
1979 break;
1980 }
1981 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
1982 irdma_sc_ccq_arm(dev->ccq);
1983
1984 status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
1985 if (status)
1986 break;
1987
1988 rf->init_state = CEQS_CREATED;
1989
1990 status = irdma_hmc_init_pble(&rf->sc_dev,
1991 rf->pble_rsrc);
1992 if (status)
1993 break;
1994
1995 rf->init_state = PBLE_CHUNK_MEM;
1996
1997 status = irdma_setup_aeq(rf);
1998 if (status)
1999 break;
2000 rf->init_state = AEQ_CREATED;
2001
2002 return 0;
2003 } while (0);
2004
2005 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
2006 rf->init_state, status);
2007 irdma_ctrl_deinit_hw(rf);
2008 return status;
2009 }
2010
2011 /**
2012 * irdma_set_hw_rsrc - set hw memory resources.
2013 * @rf: RDMA PCI function
2014 */
2015 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
2016 {
2017 rf->allocated_qps = (void *)(rf->mem_rsrc +
2018 (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
2019 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
2020 rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
2021 rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
2022 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
2023 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
2024 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
2025 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
2026 rf->qp_table = (struct irdma_qp **)
2027 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
2028 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
2029
2030 spin_lock_init(&rf->rsrc_lock);
2031 spin_lock_init(&rf->arp_lock);
2032 spin_lock_init(&rf->qptable_lock);
2033 spin_lock_init(&rf->cqtable_lock);
2034 spin_lock_init(&rf->qh_list_lock);
2035 }
2036
2037 /**
2038 * irdma_calc_mem_rsrc_size - calculate memory resources size.
2039 * @rf: RDMA PCI function
2040 */
2041 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
2042 {
2043 u32 rsrc_size;
2044
2045 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
2046 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
2047 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
2048 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
2049 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
2050 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
2051 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
2052 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
2053 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
2054 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
2055 rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
2056 rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
2057
2058 return rsrc_size;
2059 }
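/*
 * Editor's note: the size computed here must cover every region that
 * irdma_set_hw_rsrc() above carves out of the single vzalloc'd rf->mem_rsrc
 * block. The layout, in carve order, is:
 *
 *	arp_table      : rf->arp_table_size * sizeof(struct irdma_arp_entry)
 *	allocated_qps  : BITS_TO_LONGS(rf->max_qp) longs
 *	allocated_cqs  : BITS_TO_LONGS(rf->max_cq) longs
 *	allocated_srqs : BITS_TO_LONGS(rf->max_srq) longs
 *	allocated_mrs  : BITS_TO_LONGS(rf->max_mr) longs
 *	allocated_pds  : BITS_TO_LONGS(rf->max_pd) longs
 *	allocated_ahs  : BITS_TO_LONGS(rf->max_ah) longs
 *	allocated_mcgs : BITS_TO_LONGS(rf->max_mcg) longs
 *	allocated_arps : BITS_TO_LONGS(rf->arp_table_size) longs
 *	qp_table       : rf->max_qp pointers
 *	cq_table       : rf->max_cq pointers
 *
 * The max_srq pointer-table term in the size calculation is not carved out
 * by irdma_set_hw_rsrc() in the code shown here.
 */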
2060
2061 /**
2062 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
2063 * @rf: RDMA PCI function
2064 */
2065 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
2066 {
2067 u32 rsrc_size;
2068 u32 mrdrvbits;
2069 u32 ret;
2070
2071 if (rf->rdma_ver != IRDMA_GEN_1) {
2072 rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
2073 GFP_KERNEL);
2074 if (!rf->allocated_ws_nodes)
2075 return -ENOMEM;
2076
2077 set_bit(0, rf->allocated_ws_nodes);
2078 rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
2079 }
2080 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
2081 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
2082 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
2083 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
2084 rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
2085 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
2086 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
2087 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
2088 rf->max_mcg = rf->max_qp;
2089
2090 rsrc_size = irdma_calc_mem_rsrc_size(rf);
2091 rf->mem_rsrc = vzalloc(rsrc_size);
2092 if (!rf->mem_rsrc) {
2093 ret = -ENOMEM;
2094 goto mem_rsrc_vzalloc_fail;
2095 }
2096
2097 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2098
2099 irdma_set_hw_rsrc(rf);
2100
2101 set_bit(0, rf->allocated_mrs);
2102 set_bit(0, rf->allocated_qps);
2103 set_bit(0, rf->allocated_cqs);
2104 set_bit(0, rf->allocated_srqs);
2105 set_bit(0, rf->allocated_pds);
2106 set_bit(0, rf->allocated_arps);
2107 set_bit(0, rf->allocated_ahs);
2108 set_bit(0, rf->allocated_mcgs);
2109 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
2110 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
2111 set_bit(1, rf->allocated_cqs);
2112 set_bit(1, rf->allocated_pds);
2113 set_bit(2, rf->allocated_cqs);
2114 set_bit(2, rf->allocated_pds);
2115
2116 INIT_LIST_HEAD(&rf->mc_qht_list.list);
2117 /* stag index mask has a minimum of 14 bits */
2118 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2119 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
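/*
 * Editor's note, worked example (max_mr value assumed): with
 * rf->max_mr == SZ_64K, get_count_order() returns 16, so
 * mrdrvbits = 24 - 16 = 8 and
 * mr_stagmask = ~(0xff << 24) = 0x00ffffff, leaving the low 24 bits of the
 * STag for the index.
 */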
2120
2121 return 0;
2122
2123 mem_rsrc_vzalloc_fail:
2124 bitmap_free(rf->allocated_ws_nodes);
2125 rf->allocated_ws_nodes = NULL;
2126
2127 return ret;
2128 }
2129
2130 /**
2131 * irdma_cqp_ce_handler - handle cqp completions
2132 * @rf: RDMA PCI function
2133 * @cq: cq for cqp completions
2134 */
2135 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2136 {
2137 struct irdma_cqp_request *cqp_request;
2138 struct irdma_sc_dev *dev = &rf->sc_dev;
2139 u32 cqe_count = 0;
2140 struct irdma_ccq_cqe_info info;
2141 unsigned long flags;
2142 int ret;
2143
2144 do {
2145 memset(&info, 0, sizeof(info));
2146 spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2147 ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2148 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2149 if (ret)
2150 break;
2151
2152 cqp_request = (struct irdma_cqp_request *)
2153 (unsigned long)info.scratch;
2154 if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2155 info.maj_err_code,
2156 info.min_err_code))
2157 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2158 info.op_code, info.maj_err_code, info.min_err_code);
2159 if (cqp_request) {
2160 cqp_request->compl_info.maj_err_code = info.maj_err_code;
2161 cqp_request->compl_info.min_err_code = info.min_err_code;
2162 cqp_request->compl_info.op_ret_val = info.op_ret_val;
2163 cqp_request->compl_info.error = info.error;
2164
2165 /*
2166 * If this is deferred or pending completion, then mark
2167 * CQP request as pending to not block the CQ, but don't
2168 * release CQP request, as it is still on the OOO list.
2169 */
2170 if (info.pending)
2171 cqp_request->pending = true;
2172 else
2173 irdma_complete_cqp_request(&rf->cqp,
2174 cqp_request);
2175 }
2176
2177 cqe_count++;
2178 } while (1);
2179
2180 if (cqe_count) {
2181 irdma_process_bh(dev);
2182 irdma_sc_ccq_arm(cq);
2183 }
2184 }
2185
2186 /**
2187 * cqp_compl_worker - Handle cqp completions
2188 * @work: Pointer to work structure
2189 */
2190 void cqp_compl_worker(struct work_struct *work)
2191 {
2192 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2193 cqp_cmpl_work);
2194 struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2195
2196 irdma_cqp_ce_handler(rf, cq);
2197 }
2198
2199 /**
2200 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
2201 * @cm_core: cm's core
2202 * @port: port to identify apbvt entry
2203 */
2204 static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2205 u16 port)
2206 {
2207 struct irdma_apbvt_entry *entry;
2208
2209 hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2210 if (entry->port == port) {
2211 entry->use_cnt++;
2212 return entry;
2213 }
2214 }
2215
2216 return NULL;
2217 }
2218
2219 /**
2220 * irdma_next_iw_state - modify qp state
2221 * @iwqp: iwarp qp to modify
2222 * @state: next state for qp
2223 * @del_hash: del hash
2224 * @term: term message
2225 * @termlen: length of term message
2226 */
2227 void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2228 u8 termlen)
2229 {
2230 struct irdma_modify_qp_info info = {};
2231
2232 info.next_iwarp_state = state;
2233 info.remove_hash_idx = del_hash;
2234 info.cq_num_valid = true;
2235 info.arp_cache_idx_valid = true;
2236 info.dont_send_term = true;
2237 info.dont_send_fin = true;
2238 info.termlen = termlen;
2239
2240 if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2241 info.dont_send_term = false;
2242 if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2243 info.dont_send_fin = false;
2244 if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2245 info.reset_tcp_conn = true;
2246 iwqp->hw_iwarp_state = state;
2247 irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2248 iwqp->iwarp_state = info.next_iwarp_state;
2249 }
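/*
 * Editor's note (illustrative usage sketch): a caller that only wants to
 * move the QP to error, without sending a TERM or FIN, is assumed to pass
 * zero for the term flags:
 *
 *	irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 0, 0, 0);
 */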
2250
2251 /**
2252 * irdma_del_local_mac_entry - remove a mac entry from the hw
2253 * table
2254 * @rf: RDMA PCI function
2255 * @idx: the index of the mac ip address to delete
2256 */
2257 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2258 {
2259 struct irdma_cqp *iwcqp = &rf->cqp;
2260 struct irdma_cqp_request *cqp_request;
2261 struct cqp_cmds_info *cqp_info;
2262
2263 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2264 if (!cqp_request)
2265 return;
2266
2267 cqp_info = &cqp_request->info;
2268 cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2269 cqp_info->post_sq = 1;
2270 cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2271 cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2272 cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2273 cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2274
2275 irdma_handle_cqp_op(rf, cqp_request);
2276 irdma_put_cqp_request(iwcqp, cqp_request);
2277 }
2278
2279 /**
2280 * irdma_add_local_mac_entry - add a mac ip address entry to the
2281 * hw table
2282 * @rf: RDMA PCI function
2283 * @mac_addr: pointer to mac address
2284 * @idx: the index of the mac ip address to add
2285 */
2286 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2287 {
2288 struct irdma_local_mac_entry_info *info;
2289 struct irdma_cqp *iwcqp = &rf->cqp;
2290 struct irdma_cqp_request *cqp_request;
2291 struct cqp_cmds_info *cqp_info;
2292 int status;
2293
2294 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2295 if (!cqp_request)
2296 return -ENOMEM;
2297
2298 cqp_info = &cqp_request->info;
2299 cqp_info->post_sq = 1;
2300 info = &cqp_info->in.u.add_local_mac_entry.info;
2301 ether_addr_copy(info->mac_addr, mac_addr);
2302 info->entry_idx = idx;
2303 cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2304 cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2305 cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2306 cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2307
2308 status = irdma_handle_cqp_op(rf, cqp_request);
2309 irdma_put_cqp_request(iwcqp, cqp_request);
2310
2311 return status;
2312 }
2313
2314 /**
2315 * irdma_alloc_local_mac_entry - allocate a mac entry
2316 * @rf: RDMA PCI function
2317 * @mac_tbl_idx: the index of the new mac address
2318 *
2319 * Allocate a mac address entry and update the mac_tbl_idx
2320 * to hold the index of the newly created mac address.
2321 * Return 0 if successful, otherwise return error.
2322 */
2323 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2324 {
2325 struct irdma_cqp *iwcqp = &rf->cqp;
2326 struct irdma_cqp_request *cqp_request;
2327 struct cqp_cmds_info *cqp_info;
2328 int status = 0;
2329
2330 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2331 if (!cqp_request)
2332 return -ENOMEM;
2333
2334 cqp_info = &cqp_request->info;
2335 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2336 cqp_info->post_sq = 1;
2337 cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2338 cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2339 status = irdma_handle_cqp_op(rf, cqp_request);
2340 if (!status)
2341 *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2342
2343 irdma_put_cqp_request(iwcqp, cqp_request);
2344
2345 return status;
2346 }
2347
2348 /**
2349 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2350 * @iwdev: irdma device
2351 * @accel_local_port: port for apbvt
2352 * @add_port: add or delete port
2353 */
2354 static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2355 u16 accel_local_port, bool add_port)
2356 {
2357 struct irdma_apbvt_info *info;
2358 struct irdma_cqp_request *cqp_request;
2359 struct cqp_cmds_info *cqp_info;
2360 int status;
2361
2362 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2363 if (!cqp_request)
2364 return -ENOMEM;
2365
2366 cqp_info = &cqp_request->info;
2367 info = &cqp_info->in.u.manage_apbvt_entry.info;
2368 info->add = add_port;
2369 info->port = accel_local_port;
2370 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2371 cqp_info->post_sq = 1;
2372 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2373 cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2374 ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
2375 (!add_port) ? "DELETE" : "ADD", accel_local_port);
2376
2377 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2378 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2379
2380 return status;
2381 }
2382
2383 /**
2384 * irdma_add_apbvt - add tcp port to HW apbvt table
2385 * @iwdev: irdma device
2386 * @port: port for apbvt
2387 */
2388 struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2389 {
2390 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2391 struct irdma_apbvt_entry *entry;
2392 unsigned long flags;
2393
2394 spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2395 entry = irdma_lookup_apbvt_entry(cm_core, port);
2396 if (entry) {
2397 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2398 return entry;
2399 }
2400
2401 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2402 if (!entry) {
2403 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2404 return NULL;
2405 }
2406
2407 entry->port = port;
2408 entry->use_cnt = 1;
2409 hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
2410 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2411
2412 if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
2413 kfree(entry);
2414 return NULL;
2415 }
2416
2417 return entry;
2418 }
2419
2420 /**
2421 * irdma_del_apbvt - delete tcp port from HW apbvt table
2422 * @iwdev: irdma device
2423 * @entry: apbvt entry object
2424 */
2425 void irdma_del_apbvt(struct irdma_device *iwdev,
2426 struct irdma_apbvt_entry *entry)
2427 {
2428 struct irdma_cm_core *cm_core = &iwdev->cm_core;
2429 unsigned long flags;
2430
2431 spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2432 if (--entry->use_cnt) {
2433 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2434 return;
2435 }
2436
2437 hash_del(&entry->hlist);
2438 /* apbvt_lock is held across the CQP delete APBVT OP (non-waiting) to
2439 * protect against the race where an add APBVT CQP command gets ahead of
2440 * the delete APBVT command for the same port.
2441 */
2442 irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2443 kfree(entry);
2444 spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2445 }
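/*
 * Editor's note (illustrative usage sketch, caller context hypothetical):
 * APBVT entries are reference counted per TCP port, so a listener simply
 * pairs the calls:
 *
 *	entry = irdma_add_apbvt(iwdev, listen_port);
 *	if (!entry)
 *		return -ENOMEM;
 *	...
 *	irdma_del_apbvt(iwdev, entry);
 */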
2446
2447 /**
2448 * irdma_manage_arp_cache - manage hw arp cache
2449 * @rf: RDMA PCI function
2450 * @mac_addr: mac address ptr
2451 * @ip_addr: ip addr for arp cache
2452 * @ipv4: flag indicating IPv4
2453 * @action: add, delete or modify
2454 */
2455 void irdma_manage_arp_cache(struct irdma_pci_f *rf,
2456 const unsigned char *mac_addr,
2457 u32 *ip_addr, bool ipv4, u32 action)
2458 {
2459 struct irdma_add_arp_cache_entry_info *info;
2460 struct irdma_cqp_request *cqp_request;
2461 struct cqp_cmds_info *cqp_info;
2462 int arp_index;
2463
2464 arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
2465 if (arp_index == -1)
2466 return;
2467
2468 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2469 if (!cqp_request)
2470 return;
2471
2472 cqp_info = &cqp_request->info;
2473 if (action == IRDMA_ARP_ADD) {
2474 cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
2475 info = &cqp_info->in.u.add_arp_cache_entry.info;
2476 info->arp_index = (u16)arp_index;
2477 info->permanent = true;
2478 ether_addr_copy(info->mac_addr, mac_addr);
2479 cqp_info->in.u.add_arp_cache_entry.scratch =
2480 (uintptr_t)cqp_request;
2481 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2482 } else {
2483 cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
2484 cqp_info->in.u.del_arp_cache_entry.scratch =
2485 (uintptr_t)cqp_request;
2486 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2487 cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
2488 }
2489
2490 cqp_info->post_sq = 1;
2491 irdma_handle_cqp_op(rf, cqp_request);
2492 irdma_put_cqp_request(&rf->cqp, cqp_request);
2493 }
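/*
 * Editor's note (illustrative usage sketch; the IRDMA_ARP_DELETE name and
 * the neighbour variables are assumptions): a caller installs or removes a
 * cache entry by passing the MAC, the IP address words and an action, e.g.:
 *
 *	irdma_manage_arp_cache(rf, neigh->ha, ip_addr, true, IRDMA_ARP_ADD);
 *	...
 *	irdma_manage_arp_cache(rf, neigh->ha, ip_addr, true, IRDMA_ARP_DELETE);
 */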
2494
2495 /**
2496 * irdma_send_syn_cqp_callback - do syn/ack after qhash
2497 * @cqp_request: qhash cqp completion
2498 */
2499 static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
2500 {
2501 struct irdma_cm_node *cm_node = cqp_request->param;
2502
2503 irdma_send_syn(cm_node, 1);
2504 irdma_rem_ref_cm_node(cm_node);
2505 }
2506
2507 /**
2508 * irdma_manage_qhash - add or modify qhash
2509 * @iwdev: irdma device
2510 * @cminfo: cm info for qhash
2511 * @etype: type (syn or quad)
2512 * @mtype: type of qhash
2513 * @cmnode: cmnode associated with connection
2514 * @wait: wait for completion
2515 */
2516 int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2517 enum irdma_quad_entry_type etype,
2518 enum irdma_quad_hash_manage_type mtype, void *cmnode,
2519 bool wait)
2520 {
2521 struct irdma_qhash_table_info *info;
2522 struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2523 struct irdma_cqp_request *cqp_request;
2524 struct cqp_cmds_info *cqp_info;
2525 struct irdma_cm_node *cm_node = cmnode;
2526 int status;
2527
2528 cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2529 if (!cqp_request)
2530 return -ENOMEM;
2531
2532 cqp_info = &cqp_request->info;
2533 info = &cqp_info->in.u.manage_qhash_table_entry.info;
2534 info->vsi = &iwdev->vsi;
2535 info->manage = mtype;
2536 info->entry_type = etype;
2537 if (cminfo->vlan_id < VLAN_N_VID) {
2538 info->vlan_valid = true;
2539 info->vlan_id = cminfo->vlan_id;
2540 } else {
2541 info->vlan_valid = false;
2542 }
2543 info->ipv4_valid = cminfo->ipv4;
2544 info->user_pri = cminfo->user_pri;
2545 ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
2546 info->qp_num = cminfo->qh_qpid;
2547 info->dest_port = cminfo->loc_port;
2548 info->dest_ip[0] = cminfo->loc_addr[0];
2549 info->dest_ip[1] = cminfo->loc_addr[1];
2550 info->dest_ip[2] = cminfo->loc_addr[2];
2551 info->dest_ip[3] = cminfo->loc_addr[3];
2552 if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
2553 etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
2554 etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
2555 etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
2556 etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
2557 info->src_port = cminfo->rem_port;
2558 info->src_ip[0] = cminfo->rem_addr[0];
2559 info->src_ip[1] = cminfo->rem_addr[1];
2560 info->src_ip[2] = cminfo->rem_addr[2];
2561 info->src_ip[3] = cminfo->rem_addr[3];
2562 }
2563 if (cmnode) {
2564 cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
2565 cqp_request->param = cmnode;
2566 if (!wait)
2567 refcount_inc(&cm_node->refcnt);
2568 }
2569 if (info->ipv4_valid)
2570 ibdev_dbg(&iwdev->ibdev,
2571 "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
2572 (!mtype) ? "DELETE" : "ADD",
2573 __builtin_return_address(0), info->dest_port,
2574 info->src_port, info->dest_ip, info->src_ip,
2575 info->mac_addr, cminfo->vlan_id,
2576 cmnode ? cmnode : NULL);
2577 else
2578 ibdev_dbg(&iwdev->ibdev,
2579 "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
2580 (!mtype) ? "DELETE" : "ADD",
2581 __builtin_return_address(0), info->dest_port,
2582 info->src_port, info->dest_ip, info->src_ip,
2583 info->mac_addr, cminfo->vlan_id,
2584 cmnode ? cmnode : NULL);
2585
2586 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2587 cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2588 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
2589 cqp_info->post_sq = 1;
2590 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2591 if (status && cm_node && !wait)
2592 irdma_rem_ref_cm_node(cm_node);
2593
2594 irdma_put_cqp_request(iwcqp, cqp_request);
2595
2596 return status;
2597 }
2598
2599 /**
2600 * irdma_hw_flush_wqes_callback - Check return code after flush
2601 * @cqp_request: flush wqes cqp completion
2602 */
2603 static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
2604 {
2605 struct irdma_qp_flush_info *hw_info;
2606 struct irdma_sc_qp *qp;
2607 struct irdma_qp *iwqp;
2608 struct cqp_cmds_info *cqp_info;
2609
2610 cqp_info = &cqp_request->info;
2611 hw_info = &cqp_info->in.u.qp_flush_wqes.info;
2612 qp = cqp_info->in.u.qp_flush_wqes.qp;
2613 iwqp = qp->qp_uk.back_qp;
2614
2615 if (cqp_request->compl_info.maj_err_code)
2616 return;
2617
2618 if (hw_info->rq &&
2619 (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2620 cqp_request->compl_info.min_err_code == 0)) {
2621 /* RQ WQE flush was requested but did not happen */
2622 qp->qp_uk.rq_flush_complete = true;
2623 }
2624 if (hw_info->sq &&
2625 (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2626 cqp_request->compl_info.min_err_code == 0)) {
2627 if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2628 ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
2629 qp->qp_uk.qp_id);
2630 irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2631 }
2632 qp->qp_uk.sq_flush_complete = true;
2633 }
2634 }
2635
2636 /**
2637 * irdma_hw_flush_wqes - flush qp's wqe
2638 * @rf: RDMA PCI function
2639 * @qp: hardware control qp
2640 * @info: info for flush
2641 * @wait: flag wait for completion
2642 */
2643 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2644 struct irdma_qp_flush_info *info, bool wait)
2645 {
2646 int status;
2647 struct irdma_qp_flush_info *hw_info;
2648 struct irdma_cqp_request *cqp_request;
2649 struct cqp_cmds_info *cqp_info;
2650 struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2651
2652 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2653 if (!cqp_request)
2654 return -ENOMEM;
2655
2656 cqp_info = &cqp_request->info;
2657 if (!wait)
2658 cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
2659 hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
2660 memcpy(hw_info, info, sizeof(*hw_info));
2661 cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2662 cqp_info->post_sq = 1;
2663 cqp_info->in.u.qp_flush_wqes.qp = qp;
2664 cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
2665 status = irdma_handle_cqp_op(rf, cqp_request);
2666 if (status) {
2667 qp->qp_uk.sq_flush_complete = true;
2668 qp->qp_uk.rq_flush_complete = true;
2669 irdma_put_cqp_request(&rf->cqp, cqp_request);
2670 return status;
2671 }
2672
2673 if (!wait || cqp_request->compl_info.maj_err_code)
2674 goto put_cqp;
2675
2676 if (info->rq) {
2677 if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2678 cqp_request->compl_info.min_err_code == 0) {
2679 /* RQ WQE flush was requested but did not happen */
2680 qp->qp_uk.rq_flush_complete = true;
2681 }
2682 }
2683 if (info->sq) {
2684 if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2685 cqp_request->compl_info.min_err_code == 0) {
2686 /*
2687 * Handling case where WQE is posted to empty SQ when
2688 * flush has not completed
2689 */
2690 if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2691 struct irdma_cqp_request *new_req;
2692
2693 if (!qp->qp_uk.sq_flush_complete)
2694 goto put_cqp;
2695 qp->qp_uk.sq_flush_complete = false;
2696 qp->flush_sq = false;
2697
2698 info->rq = false;
2699 info->sq = true;
2700 new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2701 if (!new_req) {
2702 status = -ENOMEM;
2703 goto put_cqp;
2704 }
2705 cqp_info = &new_req->info;
2706 hw_info = &new_req->info.in.u.qp_flush_wqes.info;
2707 memcpy(hw_info, info, sizeof(*hw_info));
2708 cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2709 cqp_info->post_sq = 1;
2710 cqp_info->in.u.qp_flush_wqes.qp = qp;
2711 cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
2712
2713 status = irdma_handle_cqp_op(rf, new_req);
2714 if (new_req->compl_info.maj_err_code ||
2715 new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2716 status) {
2717 ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
2718 iwqp->ibqp.qp_num);
2719 qp->qp_uk.sq_flush_complete = false;
2720 irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2721 }
2722 irdma_put_cqp_request(&rf->cqp, new_req);
2723 } else {
2724 /* SQ WQE flush was requested but did not happen */
2725 qp->qp_uk.sq_flush_complete = true;
2726 }
2727 } else {
2728 if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
2729 qp->qp_uk.sq_flush_complete = true;
2730 }
2731 }
2732
2733 ibdev_dbg(&rf->iwdev->ibdev,
2734 "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
2735 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2736 iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
2737 cqp_request->compl_info.maj_err_code,
2738 cqp_request->compl_info.min_err_code);
2739 put_cqp:
2740 irdma_put_cqp_request(&rf->cqp, cqp_request);
2741
2742 return status;
2743 }
2744
2745 /**
2746 * irdma_gen_ae - generate AE
2747 * @rf: RDMA PCI function
2748 * @qp: qp associated with AE
2749 * @info: info for ae
2750 * @wait: wait for completion
2751 */
2752 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2753 struct irdma_gen_ae_info *info, bool wait)
2754 {
2755 struct irdma_gen_ae_info *ae_info;
2756 struct irdma_cqp_request *cqp_request;
2757 struct cqp_cmds_info *cqp_info;
2758
2759 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2760 if (!cqp_request)
2761 return;
2762
2763 cqp_info = &cqp_request->info;
2764 ae_info = &cqp_request->info.in.u.gen_ae.info;
2765 memcpy(ae_info, info, sizeof(*ae_info));
2766 cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
2767 cqp_info->post_sq = 1;
2768 cqp_info->in.u.gen_ae.qp = qp;
2769 cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
2770
2771 irdma_handle_cqp_op(rf, cqp_request);
2772 irdma_put_cqp_request(&rf->cqp, cqp_request);
2773 }
2774
2775 void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2776 {
2777 struct irdma_qp_flush_info info = {};
2778 struct irdma_pci_f *rf = iwqp->iwdev->rf;
2779 u8 flush_code = iwqp->sc_qp.flush_code;
2780
2781 if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
2782 !(flush_mask & IRDMA_FLUSH_RQ)) ||
2783 ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
2784 return;
2785
2786 /* Set flush info fields */
2787 info.sq = flush_mask & IRDMA_FLUSH_SQ;
2788 info.rq = flush_mask & IRDMA_FLUSH_RQ;
2789
2790 /* Generate userflush errors in CQE */
2791 info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2792 info.sq_minor_code = FLUSH_GENERAL_ERR;
2793 info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2794 info.rq_minor_code = FLUSH_GENERAL_ERR;
2795 info.userflushcode = true;
2796 info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
2797 info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
2798 info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
2799 info.err_rq_idx = iwqp->sc_qp.err_rq_idx;
2800
2801 if (flush_mask & IRDMA_REFLUSH) {
2802 if (info.sq)
2803 iwqp->sc_qp.flush_sq = false;
2804 if (info.rq)
2805 iwqp->sc_qp.flush_rq = false;
2806 } else {
2807 if (flush_code) {
2808 if (info.sq && iwqp->sc_qp.sq_flush_code)
2809 info.sq_minor_code = flush_code;
2810 if (info.rq && iwqp->sc_qp.rq_flush_code)
2811 info.rq_minor_code = flush_code;
2812 }
2813 if (!iwqp->user_mode)
2814 queue_delayed_work(iwqp->iwdev->cleanup_wq,
2815 &iwqp->dwork_flush,
2816 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2817 }
2818
2819 /* Issue flush */
2820 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2821 flush_mask & IRDMA_FLUSH_WAIT);
2822 iwqp->flush_issued = true;
2823 }
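/*
 * Editor's note (illustrative usage sketch): flush_mask is a bitwise OR of
 * IRDMA_FLUSH_SQ / IRDMA_FLUSH_RQ plus the optional IRDMA_FLUSH_WAIT and
 * IRDMA_REFLUSH modifiers, so a caller draining both queues synchronously
 * is assumed to look like:
 *
 *	irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
 *			 IRDMA_FLUSH_WAIT);
 */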
2824