xref: /linux/drivers/infiniband/hw/irdma/hw.c (revision f3cf74933c9ca62a46e51c69412c93c9df816b4b)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #include "main.h"
4 
5 static struct irdma_rsrc_limits rsrc_limits_table[] = {
6 	[0] = {
7 		.qplimit = SZ_128,
8 	},
9 	[1] = {
10 		.qplimit = SZ_1K,
11 	},
12 	[2] = {
13 		.qplimit = SZ_2K,
14 	},
15 	[3] = {
16 		.qplimit = SZ_4K,
17 	},
18 	[4] = {
19 		.qplimit = SZ_16K,
20 	},
21 	[5] = {
22 		.qplimit = SZ_64K,
23 	},
24 	[6] = {
25 		.qplimit = SZ_128K,
26 	},
27 	[7] = {
28 		.qplimit = SZ_256K,
29 	},
30 };
31 
32 /* types of hmc objects */
33 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
34 	IRDMA_HMC_IW_QP,
35 	IRDMA_HMC_IW_CQ,
36 	IRDMA_HMC_IW_SRQ,
37 	IRDMA_HMC_IW_HTE,
38 	IRDMA_HMC_IW_ARP,
39 	IRDMA_HMC_IW_APBVT_ENTRY,
40 	IRDMA_HMC_IW_MR,
41 	IRDMA_HMC_IW_XF,
42 	IRDMA_HMC_IW_XFFL,
43 	IRDMA_HMC_IW_Q1,
44 	IRDMA_HMC_IW_Q1FL,
45 	IRDMA_HMC_IW_PBLE,
46 	IRDMA_HMC_IW_TIMER,
47 	IRDMA_HMC_IW_FSIMC,
48 	IRDMA_HMC_IW_FSIAV,
49 	IRDMA_HMC_IW_RRF,
50 	IRDMA_HMC_IW_RRFFL,
51 	IRDMA_HMC_IW_HDR,
52 	IRDMA_HMC_IW_MD,
53 	IRDMA_HMC_IW_OOISC,
54 	IRDMA_HMC_IW_OOISCFFL,
55 };
56 
57 /**
58  * irdma_iwarp_ce_handler - handle iwarp completions
59  * @iwcq: iwarp cq receiving event
60  */
61 static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
62 {
63 	struct irdma_cq *cq = iwcq->back_cq;
64 
65 	if (!cq->user_mode)
66 		atomic_set(&cq->armed, 0);
67 	if (cq->ibcq.comp_handler)
68 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
69 }
70 
71 /**
72  * irdma_puda_ce_handler - handle puda completion events
73  * @rf: RDMA PCI function
74  * @cq: puda completion q for event
75  */
76 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
77 				  struct irdma_sc_cq *cq)
78 {
79 	struct irdma_sc_dev *dev = &rf->sc_dev;
80 	u32 compl_error;
81 	int status;
82 
83 	do {
84 		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
85 		if (status == -ENOENT)
86 			break;
87 		if (status) {
88 			ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
89 			break;
90 		}
91 		if (compl_error) {
92 			ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err = 0x%x\n",
93 				  compl_error);
94 			break;
95 		}
96 	} while (1);
97 
98 	irdma_sc_ccq_arm(cq);
99 }
100 
101 /**
102  * irdma_process_normal_ceqe - Handle a CEQE for a normal CQ.
103  * @rf: RDMA PCI function.
104  * @dev: iWARP device.
105  * @cq_idx: CQ ID. Must be in table bounds.
106  *
107  * Context: Atomic (CEQ lock must be held)
108  */
109 static void irdma_process_normal_ceqe(struct irdma_pci_f *rf,
110 				      struct irdma_sc_dev *dev, u32 cq_idx)
111 {
112 	/* cq_idx bounds validated in irdma_sc_process_ceq. */
113 	struct irdma_cq *icq = READ_ONCE(rf->cq_table[cq_idx]);
114 	struct irdma_sc_cq *cq;
115 
116 	if (unlikely(!icq)) {
117 		/* Should not happen since CEQ is scrubbed upon CQ delete. */
118 		ibdev_warn_ratelimited(to_ibdev(dev), "Stale CEQE for CQ %u",
119 				       cq_idx);
120 		return;
121 	}
122 
123 	cq = &icq->sc_cq;
124 
125 	if (unlikely(cq->cq_type != IRDMA_CQ_TYPE_IWARP)) {
126 		ibdev_warn_ratelimited(to_ibdev(dev), "Unexpected CQ type %u",
127 				       cq->cq_type);
128 		return;
129 	}
130 
131 	writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
132 	irdma_iwarp_ce_handler(cq);
133 }
134 
135 /**
136  * irdma_process_reserved_ceqe - Handle a CEQE for a reserved CQ.
137  * @rf: RDMA PCI function.
138  * @dev: iWARP device.
139  * @cq_idx: CQ ID.
140  *
141  * Context: Atomic
142  */
143 static void irdma_process_reserved_ceqe(struct irdma_pci_f *rf,
144 					struct irdma_sc_dev *dev, u32 cq_idx)
145 {
146 	struct irdma_sc_cq *cq;
147 
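	/* CQP completions are deferred to the cqp_cmpl_wq workqueue; ILQ and
	 * IEQ completions are polled inline under the puda CQ lock.
	 */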
148 	if (cq_idx == IRDMA_RSVD_CQ_ID_CQP) {
149 		cq = &rf->ccq.sc_cq;
150 		/* CQP CQ lifetime > CEQ. */
151 		writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
152 		queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
153 	} else if (cq_idx == IRDMA_RSVD_CQ_ID_ILQ ||
154 		   cq_idx == IRDMA_RSVD_CQ_ID_IEQ) {
155 		scoped_guard(spinlock_irqsave, &dev->puda_cq_lock) {
156 			cq = (cq_idx == IRDMA_RSVD_CQ_ID_ILQ) ?
157 				dev->ilq_cq : dev->ieq_cq;
158 			if (!cq) {
159 				ibdev_warn_ratelimited(to_ibdev(dev),
160 						       "Stale ILQ/IEQ CEQE");
161 				return;
162 			}
163 			writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
164 			irdma_puda_ce_handler(rf, cq);
165 		}
166 	}
167 }
168 
169 /**
170  * irdma_process_ceq - handle ceq for completions
171  * @rf: RDMA PCI function
172  * @ceq: ceq having cq for completion
173  */
174 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
175 {
176 	struct irdma_sc_dev *dev = &rf->sc_dev;
177 	struct irdma_sc_ceq *sc_ceq;
178 	unsigned long flags;
179 	u32 cq_idx;
180 
181 	sc_ceq = &ceq->sc_ceq;
182 	do {
183 		spin_lock_irqsave(&ceq->ce_lock, flags);
184 
185 		if (!irdma_sc_process_ceq(dev, sc_ceq, &cq_idx)) {
186 			spin_unlock_irqrestore(&ceq->ce_lock, flags);
187 			break;
188 		}
189 
190 		/* Normal CQs must be handled while holding CEQ lock. */
191 		if (likely(cq_idx > IRDMA_RSVD_CQ_ID_IEQ)) {
192 			irdma_process_normal_ceqe(rf, dev, cq_idx);
193 			spin_unlock_irqrestore(&ceq->ce_lock, flags);
194 			continue;
195 		}
196 
197 		spin_unlock_irqrestore(&ceq->ce_lock, flags);
198 
199 		irdma_process_reserved_ceqe(rf, dev, cq_idx);
200 	} while (1);
201 }
202 
203 static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
204 				   struct irdma_aeqe_info *info)
205 {
206 	struct qp_err_code qp_err;
207 
208 	qp->sq_flush_code = info->sq;
209 	qp->rq_flush_code = info->rq;
210 	if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
211 		if (info->sq) {
212 			qp->err_sq_idx_valid = true;
213 			qp->err_sq_idx = info->wqe_idx;
214 		}
215 		if (info->rq) {
216 			qp->err_rq_idx_valid = true;
217 			qp->err_rq_idx = info->wqe_idx;
218 		}
219 	}
220 
221 	qp_err = irdma_ae_to_qp_err_code(info->ae_id);
222 	qp->flush_code = qp_err.flush_code;
223 	qp->event_type = qp_err.event_type;
224 }
225 
226 /**
227  * irdma_complete_cqp_request - perform post-completion cleanup
228  * @cqp: device CQP
229  * @cqp_request: CQP request
230  *
231  * Mark CQP request as done, wake up waiting thread or invoke
232  * callback function and release/free CQP request.
233  */
234 static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
235 				       struct irdma_cqp_request *cqp_request)
236 {
237 	if (cqp_request->waiting) {
238 		WRITE_ONCE(cqp_request->request_done, true);
239 		wake_up(&cqp_request->waitq);
240 	} else if (cqp_request->callback_fcn) {
241 		cqp_request->callback_fcn(cqp_request);
242 	}
243 	irdma_put_cqp_request(cqp, cqp_request);
244 }
245 
246 /**
247  * irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
248  * @rf: RDMA PCI function
249  * @info: AEQ entry info
250  */
251 static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
252 				      struct irdma_aeqe_info *info)
253 {
254 	u32 sw_def_info;
255 	u64 scratch;
256 
257 	irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
258 
259 	irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
260 					 &scratch, &sw_def_info);
261 	while (scratch) {
262 		struct irdma_cqp_request *cqp_request =
263 			(struct irdma_cqp_request *)(uintptr_t)scratch;
264 
265 		irdma_complete_cqp_request(&rf->cqp, cqp_request);
266 		irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
267 						 &scratch, &sw_def_info);
268 	}
269 }
270 
271 /**
272  * irdma_process_aeq - handle aeq events
273  * @rf: RDMA PCI function
274  */
275 static void irdma_process_aeq(struct irdma_pci_f *rf)
276 {
277 	struct irdma_sc_dev *dev = &rf->sc_dev;
278 	struct irdma_aeq *aeq = &rf->aeq;
279 	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
280 	struct irdma_aeqe_info aeinfo;
281 	struct irdma_aeqe_info *info = &aeinfo;
282 	int ret;
283 	struct irdma_qp *iwqp = NULL;
284 	struct irdma_cq *iwcq = NULL;
285 	struct irdma_sc_qp *qp = NULL;
286 	struct irdma_qp_host_ctx_info *ctx_info = NULL;
287 	struct irdma_device *iwdev = rf->iwdev;
288 	struct irdma_sc_srq *srq;
289 	unsigned long flags;
290 
291 	u32 aeqcnt = 0;
292 
293 	if (!sc_aeq->size)
294 		return;
295 
296 	do {
297 		memset(info, 0, sizeof(*info));
298 		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
299 		if (ret)
300 			break;
301 
302 		if (info->aeqe_overflow) {
303 			ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
304 			rf->reset = true;
305 			rf->gen_ops.request_reset(rf);
306 			return;
307 		}
308 
309 		aeqcnt++;
310 		ibdev_dbg(&iwdev->ibdev,
311 			  "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
312 			  info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
313 			  info->iwarp_state, info->ae_src);
314 
315 		if (info->qp) {
316 			spin_lock_irqsave(&rf->qptable_lock, flags);
317 			iwqp = rf->qp_table[info->qp_cq_id];
318 			if (!iwqp) {
319 				spin_unlock_irqrestore(&rf->qptable_lock,
320 						       flags);
321 				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
322 					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
323 					wake_up(&iwdev->suspend_wq);
324 					continue;
325 				}
326 				ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
327 					  info->qp_cq_id);
328 				continue;
329 			}
330 			irdma_qp_add_ref(&iwqp->ibqp);
331 			spin_unlock_irqrestore(&rf->qptable_lock, flags);
332 			qp = &iwqp->sc_qp;
333 			spin_lock_irqsave(&iwqp->lock, flags);
334 			iwqp->hw_tcp_state = info->tcp_state;
335 			iwqp->hw_iwarp_state = info->iwarp_state;
336 			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
337 				iwqp->last_aeq = info->ae_id;
338 			spin_unlock_irqrestore(&iwqp->lock, flags);
339 		} else if (info->srq) {
340 			if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
341 				continue;
342 		} else {
343 			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
344 			    info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
345 				continue;
346 		}
347 
348 		switch (info->ae_id) {
349 			struct irdma_cm_node *cm_node;
350 		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
351 			cm_node = iwqp->cm_node;
352 			if (cm_node->accept_pend) {
353 				atomic_dec(&cm_node->listener->pend_accepts_cnt);
354 				cm_node->accept_pend = 0;
355 			}
356 			iwqp->rts_ae_rcvd = 1;
357 			wake_up_interruptible(&iwqp->waitq);
358 			break;
359 		case IRDMA_AE_LLP_FIN_RECEIVED:
360 		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
361 			if (qp->term_flags)
362 				break;
363 			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
364 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
365 				if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
366 				    iwqp->ibqp_state == IB_QPS_RTS) {
367 					irdma_next_iw_state(iwqp,
368 							    IRDMA_QP_STATE_CLOSING,
369 							    0, 0, 0);
370 					irdma_cm_disconn(iwqp);
371 				}
372 				irdma_schedule_cm_timer(iwqp->cm_node,
373 							(struct irdma_puda_buf *)iwqp,
374 							IRDMA_TIMER_TYPE_CLOSE,
375 							1, 0);
376 			}
377 			break;
378 		case IRDMA_AE_LLP_CLOSE_COMPLETE:
379 			if (qp->term_flags)
380 				irdma_terminate_done(qp, 0);
381 			else
382 				irdma_cm_disconn(iwqp);
383 			break;
384 		case IRDMA_AE_BAD_CLOSE:
385 		case IRDMA_AE_RESET_SENT:
386 			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
387 					    0);
388 			irdma_cm_disconn(iwqp);
389 			break;
390 		case IRDMA_AE_LLP_CONNECTION_RESET:
391 			if (atomic_read(&iwqp->close_timer_started))
392 				break;
393 			irdma_cm_disconn(iwqp);
394 			break;
395 		case IRDMA_AE_QP_SUSPEND_COMPLETE:
396 			if (iwqp->iwdev->vsi.tc_change_pending) {
397 				if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
398 					wake_up(&iwqp->iwdev->suspend_wq);
399 			}
400 			if (iwqp->suspend_pending) {
401 				iwqp->suspend_pending = false;
402 				wake_up(&iwqp->iwdev->suspend_wq);
403 			}
404 			break;
405 		case IRDMA_AE_TERMINATE_SENT:
406 			irdma_terminate_send_fin(qp);
407 			break;
408 		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
409 			irdma_terminate_received(qp, info);
410 			break;
411 		case IRDMA_AE_CQ_OPERATION_ERROR:
412 			ibdev_err(&iwdev->ibdev,
413 				  "Processing an iWARP related AE for CQ misc = 0x%04X\n",
414 				  info->ae_id);
415 
416 			spin_lock_irqsave(&rf->cqtable_lock, flags);
417 			iwcq = rf->cq_table[info->qp_cq_id];
418 			if (!iwcq) {
419 				spin_unlock_irqrestore(&rf->cqtable_lock,
420 						       flags);
421 				ibdev_dbg(to_ibdev(dev),
422 					  "cq_id %d is already freed\n", info->qp_cq_id);
423 				continue;
424 			}
425 			irdma_cq_add_ref(&iwcq->ibcq);
426 			spin_unlock_irqrestore(&rf->cqtable_lock, flags);
427 
428 			if (iwcq->ibcq.event_handler) {
429 				struct ib_event ibevent;
430 
431 				ibevent.device = iwcq->ibcq.device;
432 				ibevent.event = IB_EVENT_CQ_ERR;
433 				ibevent.element.cq = &iwcq->ibcq;
434 				iwcq->ibcq.event_handler(&ibevent,
435 							 iwcq->ibcq.cq_context);
436 			}
437 			irdma_cq_rem_ref(&iwcq->ibcq);
438 			break;
439 		case IRDMA_AE_SRQ_LIMIT:
440 			srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
441 			irdma_srq_event(srq);
442 			break;
443 		case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
444 			break;
445 		case IRDMA_AE_CQP_DEFERRED_COMPLETE:
446 			/* Remove completed CQP requests from pending list
447 			 * and notify about those CQP ops completion.
448 			 */
449 			irdma_process_ae_def_cmpl(rf, info);
450 			break;
451 		case IRDMA_AE_RESET_NOT_SENT:
452 		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
453 		case IRDMA_AE_RESOURCE_EXHAUSTION:
454 			break;
455 		case IRDMA_AE_PRIV_OPERATION_DENIED:
456 		case IRDMA_AE_STAG_ZERO_INVALID:
457 		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
458 		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
459 		case IRDMA_AE_DDP_UBE_INVALID_MO:
460 		case IRDMA_AE_DDP_UBE_INVALID_QN:
461 		case IRDMA_AE_DDP_NO_L_BIT:
462 		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
463 		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
464 		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
465 		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
466 		case IRDMA_AE_INVALID_ARP_ENTRY:
467 		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
468 		case IRDMA_AE_STALE_ARP_ENTRY:
469 		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
470 		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
471 		case IRDMA_AE_LLP_SYN_RECEIVED:
472 		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
473 		case IRDMA_AE_LCE_QP_CATASTROPHIC:
474 		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
475 		case IRDMA_AE_LLP_TOO_MANY_RNRS:
476 		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
477 		case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
478 		case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
479 		case IRDMA_AE_RCE_QP_CATASTROPHIC:
480 		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
481 		default:
482 			ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
483 				  info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
484 			ctx_info = &iwqp->ctx_info;
485 			if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
486 				ctx_info->roce_info->err_rq_idx_valid =
487 					ctx_info->srq_valid ? false : info->err_rq_idx_valid;
488 				if (ctx_info->roce_info->err_rq_idx_valid) {
489 					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
490 					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
491 								ctx_info);
492 				}
493 				irdma_set_flush_fields(qp, info);
494 				irdma_cm_disconn(iwqp);
495 				break;
496 			}
497 			ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
498 			if (info->rq) {
499 				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
500 				ctx_info->tcp_info_valid = false;
501 				ctx_info->iwarp_info_valid = true;
502 				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
503 						   ctx_info);
504 			}
505 			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
506 			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
507 				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
508 				irdma_cm_disconn(iwqp);
509 			} else {
510 				irdma_terminate_connection(qp, info);
511 			}
512 			break;
513 		}
514 		if (info->qp)
515 			irdma_qp_rem_ref(&iwqp->ibqp);
516 	} while (1);
517 
518 	if (aeqcnt)
519 		irdma_sc_repost_aeq_entries(dev, aeqcnt);
520 }
521 
522 /**
523  * irdma_ena_intr - set up device interrupts
524  * @dev: hardware control device structure
525  * @msix_id: id of the interrupt to be enabled
526  */
527 static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
528 {
529 	dev->irq_ops->irdma_en_irq(dev, msix_id);
530 }
531 
532 /**
533  * irdma_dpc - tasklet for aeq and ceq 0
534  * @t: tasklet_struct ptr
535  */
536 static void irdma_dpc(struct tasklet_struct *t)
537 {
538 	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
539 
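	/* With a shared MSI-X vector, CEQ 0 is serviced from this AEQ
	 * tasklet before the AEQ itself is processed.
	 */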
540 	if (rf->msix_shared)
541 		irdma_process_ceq(rf, rf->ceqlist);
542 	irdma_process_aeq(rf);
543 	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
544 }
545 
546 /**
547  * irdma_ceq_dpc - dpc handler for CEQ
548  * @t: tasklet_struct ptr
549  */
550 static void irdma_ceq_dpc(struct tasklet_struct *t)
551 {
552 	struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
553 	struct irdma_pci_f *rf = iwceq->rf;
554 
555 	irdma_process_ceq(rf, iwceq);
556 	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
557 }
558 
559 /**
560  * irdma_save_msix_info - copy msix vector information to iwarp device
561  * @rf: RDMA PCI function
562  *
563  * Allocate iwdev msix table and copy the msix info to the table
564  * Return 0 if successful, otherwise return error
565  */
566 static int irdma_save_msix_info(struct irdma_pci_f *rf)
567 {
568 	struct irdma_qvlist_info *iw_qvlist;
569 	struct irdma_qv_info *iw_qvinfo;
570 	struct msix_entry *pmsix;
571 	u32 ceq_idx;
572 	u32 i;
573 	size_t size;
574 
575 	if (!rf->msix_count)
576 		return -EINVAL;
577 
578 	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
579 	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
580 	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
581 	if (!rf->iw_msixtbl)
582 		return -ENOMEM;
583 
584 	rf->iw_qvlist = (struct irdma_qvlist_info *)
585 			(&rf->iw_msixtbl[rf->msix_count]);
586 	iw_qvlist = rf->iw_qvlist;
587 	iw_qvinfo = iw_qvlist->qv_info;
588 	iw_qvlist->num_vectors = rf->msix_count;
589 	if (rf->msix_count <= num_online_cpus())
590 		rf->msix_shared = true;
591 
592 	pmsix = rf->msix_entries;
593 	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
594 		rf->iw_msixtbl[i].idx = pmsix->entry;
595 		rf->iw_msixtbl[i].irq = pmsix->vector;
596 		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
597 		if (!i) {
598 			iw_qvinfo->aeq_idx = 0;
599 			if (rf->msix_shared)
600 				iw_qvinfo->ceq_idx = ceq_idx++;
601 			else
602 				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
603 		} else {
604 			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
605 			iw_qvinfo->ceq_idx = ceq_idx++;
606 		}
607 		iw_qvinfo->itr_idx = 3;
608 		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
609 		pmsix++;
610 	}
611 
612 	return 0;
613 }
614 
615 /**
616  * irdma_irq_handler - interrupt handler for aeq and ceq0
617  * @irq: Interrupt request number
618  * @data: RDMA PCI function
619  */
620 static irqreturn_t irdma_irq_handler(int irq, void *data)
621 {
622 	struct irdma_pci_f *rf = data;
623 
624 	tasklet_schedule(&rf->dpc_tasklet);
625 
626 	return IRQ_HANDLED;
627 }
628 
629 /**
630  * irdma_ceq_handler - interrupt handler for ceq
631  * @irq: interrupt request number
632  * @data: ceq pointer
633  */
634 static irqreturn_t irdma_ceq_handler(int irq, void *data)
635 {
636 	struct irdma_ceq *iwceq = data;
637 
638 	if (iwceq->irq != irq)
639 		ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
640 			  iwceq->irq, irq);
641 	tasklet_schedule(&iwceq->dpc_tasklet);
642 
643 	return IRQ_HANDLED;
644 }
645 
646 /**
647  * irdma_destroy_irq - destroy device interrupts
648  * @rf: RDMA PCI function
649  * @msix_vec: msix vector to disable irq
650  * @dev_id: parameter to pass to free_irq (used during irq setup)
651  *
652  * The function is called when destroying aeq/ceq
653  */
654 static void irdma_destroy_irq(struct irdma_pci_f *rf,
655 			      struct irdma_msix_vector *msix_vec, void *dev_id)
656 {
657 	struct irdma_sc_dev *dev = &rf->sc_dev;
658 
659 	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
660 	irq_update_affinity_hint(msix_vec->irq, NULL);
661 	free_irq(msix_vec->irq, dev_id);
662 	if (rf == dev_id) {
663 		tasklet_kill(&rf->dpc_tasklet);
664 	} else {
665 		struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
666 
667 		tasklet_kill(&iwceq->dpc_tasklet);
668 	}
669 }
670 
671 /**
672  * irdma_destroy_cqp - destroy control qp
673  * @rf: RDMA PCI function
674  *
675  * Issue destroy cqp request and
676  * free the resources associated with the cqp
677  */
678 static void irdma_destroy_cqp(struct irdma_pci_f *rf)
679 {
680 	struct irdma_sc_dev *dev = &rf->sc_dev;
681 	struct irdma_cqp *cqp = &rf->cqp;
682 	int status = 0;
683 
684 	status = irdma_sc_cqp_destroy(dev->cqp);
685 	if (status)
686 		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
687 
688 	irdma_cleanup_pending_cqp_op(rf);
689 	dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
690 			  cqp->sq.pa);
691 	cqp->sq.va = NULL;
692 	kfree(cqp->oop_op_array);
693 	cqp->oop_op_array = NULL;
694 	kfree(cqp->scratch_array);
695 	cqp->scratch_array = NULL;
696 	kfree(cqp->cqp_requests);
697 	cqp->cqp_requests = NULL;
698 }
699 
700 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
701 {
702 	struct irdma_aeq *aeq = &rf->aeq;
703 	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
704 	dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
705 
706 	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
707 	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
708 	vfree(aeq->mem.va);
709 }
710 
711 /**
712  * irdma_destroy_aeq - destroy aeq
713  * @rf: RDMA PCI function
714  *
715  * Issue a destroy aeq request and
716  * free the resources associated with the aeq
717  * The function is called during driver unload
718  */
719 static void irdma_destroy_aeq(struct irdma_pci_f *rf)
720 {
721 	struct irdma_sc_dev *dev = &rf->sc_dev;
722 	struct irdma_aeq *aeq = &rf->aeq;
723 	int status = -EBUSY;
724 
725 	if (!rf->msix_shared) {
726 		if (rf->sc_dev.privileged)
727 			rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
728 							  rf->iw_msixtbl->idx, false);
729 		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
730 	}
731 	if (rf->reset)
732 		goto exit;
733 
734 	aeq->sc_aeq.size = 0;
735 	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
736 	if (status)
737 		ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
738 
739 exit:
740 	if (aeq->virtual_map) {
741 		irdma_destroy_virt_aeq(rf);
742 	} else {
743 		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
744 				  aeq->mem.pa);
745 		aeq->mem.va = NULL;
746 	}
747 }
748 
749 /**
750  * irdma_destroy_ceq - destroy ceq
751  * @rf: RDMA PCI function
752  * @iwceq: ceq to be destroyed
753  *
754  * Issue a destroy ceq request and
755  * free the resources associated with the ceq
756  */
757 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
758 {
759 	struct irdma_sc_dev *dev = &rf->sc_dev;
760 	int status;
761 
762 	if (rf->reset)
763 		goto exit;
764 
765 	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
766 	if (status) {
767 		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
768 		goto exit;
769 	}
770 
771 	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
772 	if (status)
773 		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
774 			  status);
775 exit:
776 	dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
777 			  iwceq->mem.pa);
778 	iwceq->mem.va = NULL;
779 }
780 
781 /**
782  * irdma_del_ceq_0 - destroy ceq 0
783  * @rf: RDMA PCI function
784  *
785  * Disable the ceq 0 interrupt and destroy the ceq 0
786  */
787 static void irdma_del_ceq_0(struct irdma_pci_f *rf)
788 {
789 	struct irdma_ceq *iwceq = rf->ceqlist;
790 	struct irdma_msix_vector *msix_vec;
791 
792 	if (rf->msix_shared) {
793 		msix_vec = &rf->iw_msixtbl[0];
794 		if (rf->sc_dev.privileged)
795 			rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
796 							  msix_vec->ceq_id,
797 							  msix_vec->idx, false);
798 		irdma_destroy_irq(rf, msix_vec, rf);
799 	} else {
800 		msix_vec = &rf->iw_msixtbl[1];
801 		irdma_destroy_irq(rf, msix_vec, iwceq);
802 	}
803 
804 	irdma_destroy_ceq(rf, iwceq);
805 	rf->sc_dev.ceq_valid = false;
806 	rf->ceqs_count = 0;
807 }
808 
809 /**
810  * irdma_del_ceqs - destroy all ceqs except CEQ 0
811  * @rf: RDMA PCI function
812  *
813  * Go through all of the device ceqs, except 0, and for each
814  * ceq disable the ceq interrupt and destroy the ceq
815  */
816 static void irdma_del_ceqs(struct irdma_pci_f *rf)
817 {
818 	struct irdma_ceq *iwceq = &rf->ceqlist[1];
819 	struct irdma_msix_vector *msix_vec;
820 	u32 i = 0;
821 
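	/* CEQs other than CEQ 0 start at vector 1 when AEQ and CEQ 0 share
	 * vector 0, otherwise at vector 2 (vector 0 = AEQ, vector 1 = CEQ 0).
	 */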
822 	if (rf->msix_shared)
823 		msix_vec = &rf->iw_msixtbl[1];
824 	else
825 		msix_vec = &rf->iw_msixtbl[2];
826 
827 	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
828 		if (rf->sc_dev.privileged)
829 			rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
830 							  msix_vec->ceq_id,
831 							  msix_vec->idx, false);
832 		irdma_destroy_irq(rf, msix_vec, iwceq);
833 		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
834 				  IRDMA_OP_CEQ_DESTROY);
835 		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
836 				  iwceq->mem.va, iwceq->mem.pa);
837 		iwceq->mem.va = NULL;
838 	}
839 	rf->ceqs_count = 1;
840 }
841 
842 /**
843  * irdma_destroy_ccq - destroy control cq
844  * @rf: RDMA PCI function
845  *
846  * Issue destroy ccq request and
847  * free the resources associated with the ccq
848  */
849 static void irdma_destroy_ccq(struct irdma_pci_f *rf)
850 {
851 	struct irdma_sc_dev *dev = &rf->sc_dev;
852 	struct irdma_ccq *ccq = &rf->ccq;
853 	int status = 0;
854 
855 	if (rf->cqp_cmpl_wq)
856 		destroy_workqueue(rf->cqp_cmpl_wq);
857 
858 	if (!rf->reset)
859 		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
860 	if (status)
861 		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
862 	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
863 			  ccq->mem_cq.pa);
864 	ccq->mem_cq.va = NULL;
865 }
866 
867 /**
868  * irdma_close_hmc_objects_type - delete hmc objects of a given type
869  * @dev: iwarp device
870  * @obj_type: the hmc object type to be deleted
871  * @hmc_info: host memory info struct
872  * @privileged: permission to close HMC objects
873  * @reset: true if called before reset
874  */
875 static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
876 					 enum irdma_hmc_rsrc_type obj_type,
877 					 struct irdma_hmc_info *hmc_info,
878 					 bool privileged, bool reset)
879 {
880 	struct irdma_hmc_del_obj_info info = {};
881 
882 	info.hmc_info = hmc_info;
883 	info.rsrc_type = obj_type;
884 	info.count = hmc_info->hmc_obj[obj_type].cnt;
885 	info.privileged = privileged;
886 	if (irdma_sc_del_hmc_obj(dev, &info, reset))
887 		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
888 			  obj_type);
889 }
890 
891 /**
892  * irdma_del_hmc_objects - remove all device hmc objects
893  * @dev: iwarp device
894  * @hmc_info: hmc_info to free
895  * @privileged: permission to delete HMC objects
896  * @reset: true if called before reset
897  * @vers: hardware version
898  */
899 static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
900 				  struct irdma_hmc_info *hmc_info, bool privileged,
901 				  bool reset, enum irdma_vers vers)
902 {
903 	unsigned int i;
904 
905 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
906 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
907 			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
908 						     hmc_info, privileged, reset);
909 		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
910 			break;
911 	}
912 }
913 
914 /**
915  * irdma_create_hmc_obj_type - create hmc object of a given type
916  * @dev: hardware control device structure
917  * @info: information for the hmc object to create
918  */
919 static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
920 				     struct irdma_hmc_create_obj_info *info)
921 {
922 	return irdma_sc_create_hmc_obj(dev, info);
923 }
924 
925 /**
926  * irdma_create_hmc_objs - create all hmc objects for the device
927  * @rf: RDMA PCI function
928  * @privileged: permission to create HMC objects
929  * @vers: HW version
930  *
931  * Create the device hmc objects and allocate hmc pages
932  * Return 0 if successful, otherwise clean up and return error
933  */
934 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
935 				 enum irdma_vers vers)
936 {
937 	struct irdma_sc_dev *dev = &rf->sc_dev;
938 	struct irdma_hmc_create_obj_info info = {};
939 	int i, status = 0;
940 
941 	info.hmc_info = dev->hmc_info;
942 	info.privileged = privileged;
943 	info.entry_type = rf->sd_type;
944 
945 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
946 		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
947 			continue;
948 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
949 			info.rsrc_type = iw_hmc_obj_types[i];
950 			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
951 			info.add_sd_cnt = 0;
952 			status = irdma_create_hmc_obj_type(dev, &info);
953 			if (status) {
954 				ibdev_dbg(to_ibdev(dev),
955 					  "ERR: create obj type %d status = %d\n",
956 					  iw_hmc_obj_types[i], status);
957 				break;
958 			}
959 		}
960 		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
961 			break;
962 	}
963 
964 	if (!status)
965 		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
966 							   true, true);
967 
968 	while (i) {
969 		i--;
970 		/* destroy the hmc objects of a given type */
971 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
972 			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
973 						     dev->hmc_info, privileged,
974 						     false);
975 	}
976 
977 	return status;
978 }
979 
980 /**
981  * irdma_obj_aligned_mem - get aligned memory from device allocated memory
982  * @rf: RDMA PCI function
983  * @memptr: points to the memory addresses
984  * @size: size of memory needed
985  * @mask: mask for the aligned memory
986  *
987  * Get aligned memory of the requested size and
988  * update the memptr to point to the new aligned memory
989  * Return 0 if successful, otherwise return no memory error
990  */
991 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
992 				 struct irdma_dma_mem *memptr, u32 size,
993 				 u32 mask)
994 {
995 	unsigned long va, newva;
996 	unsigned long extra;
997 
998 	va = (unsigned long)rf->obj_next.va;
999 	newva = va;
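	/* mask is the required alignment minus one, so mask + 1 gives the
	 * alignment to round the next object address up to.
	 */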
1000 	if (mask)
1001 		newva = ALIGN(va, (unsigned long)mask + 1ULL);
1002 	extra = newva - va;
1003 	memptr->va = (u8 *)va + extra;
1004 	memptr->pa = rf->obj_next.pa + extra;
1005 	memptr->size = size;
1006 	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
1007 		return -ENOMEM;
1008 
1009 	rf->obj_next.va = (u8 *)memptr->va + size;
1010 	rf->obj_next.pa = memptr->pa + size;
1011 
1012 	return 0;
1013 }
1014 
1015 /**
1016  * irdma_create_cqp - create control qp
1017  * @rf: RDMA PCI function
1018  *
1019  * Return 0, if the cqp and all the resources associated with it
1020  * are successfully created, otherwise return error
1021  */
1022 static int irdma_create_cqp(struct irdma_pci_f *rf)
1023 {
1024 	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
1025 	struct irdma_dma_mem mem;
1026 	struct irdma_sc_dev *dev = &rf->sc_dev;
1027 	struct irdma_cqp_init_info cqp_init_info = {};
1028 	struct irdma_cqp *cqp = &rf->cqp;
1029 	u16 maj_err, min_err;
1030 	int i, status;
1031 
1032 	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
1033 	if (!cqp->cqp_requests)
1034 		return -ENOMEM;
1035 
1036 	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
1037 	if (!cqp->scratch_array) {
1038 		status = -ENOMEM;
1039 		goto err_scratch;
1040 	}
1041 
1042 	cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array), GFP_KERNEL);
1043 	if (!cqp->oop_op_array) {
1044 		status = -ENOMEM;
1045 		goto err_oop;
1046 	}
1047 	cqp_init_info.ooo_op_array = cqp->oop_op_array;
1048 	dev->cqp = &cqp->sc_cqp;
1049 	dev->cqp->dev = dev;
1050 	cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
1051 			     IRDMA_CQP_ALIGNMENT);
1052 	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
1053 					&cqp->sq.pa, GFP_KERNEL);
1054 	if (!cqp->sq.va) {
1055 		status = -ENOMEM;
1056 		goto err_sq;
1057 	}
1058 
1059 	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
1060 				       IRDMA_HOST_CTX_ALIGNMENT_M);
1061 	if (status)
1062 		goto err_ctx;
1063 
1064 	dev->cqp->host_ctx_pa = mem.pa;
1065 	dev->cqp->host_ctx = mem.va;
1066 	/* populate the cqp init info */
1067 	cqp_init_info.dev = dev;
1068 	cqp_init_info.sq_size = sqsize;
1069 	cqp_init_info.sq = cqp->sq.va;
1070 	cqp_init_info.sq_pa = cqp->sq.pa;
1071 	cqp_init_info.host_ctx_pa = mem.pa;
1072 	cqp_init_info.host_ctx = mem.va;
1073 	cqp_init_info.hmc_profile = rf->rsrc_profile;
1074 	cqp_init_info.scratch_array = cqp->scratch_array;
1075 	cqp_init_info.protocol_used = rf->protocol_used;
1076 
1077 	switch (rf->rdma_ver) {
1078 	case IRDMA_GEN_1:
1079 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
1080 		break;
1081 	case IRDMA_GEN_2:
1082 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
1083 		break;
1084 	case IRDMA_GEN_3:
1085 	case IRDMA_GEN_4:
1086 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
1087 		cqp_init_info.ts_override = 1;
1088 		break;
1089 	}
1090 	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
1091 	if (status) {
1092 		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
1093 		goto err_ctx;
1094 	}
1095 
1096 	spin_lock_init(&cqp->req_lock);
1097 	spin_lock_init(&cqp->compl_lock);
1098 
1099 	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
1100 	if (status) {
1101 		ibdev_dbg(to_ibdev(dev),
1102 			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
1103 			  status, maj_err, min_err);
1104 		goto err_ctx;
1105 	}
1106 
1107 	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
1108 	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
1109 
1110 	/* init the waitqueue of the cqp_requests and add them to the list */
1111 	for (i = 0; i < sqsize; i++) {
1112 		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
1113 		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
1114 	}
1115 	init_waitqueue_head(&cqp->remove_wq);
1116 	return 0;
1117 
1118 err_ctx:
1119 	dma_free_coherent(dev->hw->device, cqp->sq.size,
1120 			  cqp->sq.va, cqp->sq.pa);
1121 	cqp->sq.va = NULL;
1122 err_sq:
1123 	kfree(cqp->oop_op_array);
1124 	cqp->oop_op_array = NULL;
1125 err_oop:
1126 	kfree(cqp->scratch_array);
1127 	cqp->scratch_array = NULL;
1128 err_scratch:
1129 	kfree(cqp->cqp_requests);
1130 	cqp->cqp_requests = NULL;
1131 
1132 	return status;
1133 }
1134 
1135 /**
1136  * irdma_create_ccq - create control cq
1137  * @rf: RDMA PCI function
1138  *
1139  * Return 0, if the ccq and the resources associated with it
1140  * are successfully created, otherwise return error
1141  */
1142 static int irdma_create_ccq(struct irdma_pci_f *rf)
1143 {
1144 	struct irdma_sc_dev *dev = &rf->sc_dev;
1145 	struct irdma_ccq_init_info info = {};
1146 	struct irdma_ccq *ccq = &rf->ccq;
1147 	int ccq_size;
1148 	int status;
1149 
1150 	dev->ccq = &ccq->sc_cq;
1151 	dev->ccq->dev = dev;
1152 	info.dev = dev;
1153 	ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
1154 	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
1155 	ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
1156 				 IRDMA_CQ0_ALIGNMENT);
1157 	ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
1158 					    &ccq->mem_cq.pa, GFP_KERNEL);
1159 	if (!ccq->mem_cq.va)
1160 		return -ENOMEM;
1161 
1162 	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1163 				       ccq->shadow_area.size,
1164 				       IRDMA_SHADOWAREA_M);
1165 	if (status)
1166 		goto exit;
1167 
1168 	ccq->sc_cq.back_cq = ccq;
1169 	/* populate the ccq init info */
1170 	info.cq_base = ccq->mem_cq.va;
1171 	info.cq_pa = ccq->mem_cq.pa;
1172 	info.num_elem = ccq_size;
1173 	info.shadow_area = ccq->shadow_area.va;
1174 	info.shadow_area_pa = ccq->shadow_area.pa;
1175 	info.ceqe_mask = false;
1176 	info.ceq_id_valid = true;
1177 	info.shadow_read_threshold = 16;
1178 	info.vsi = &rf->default_vsi;
1179 	status = irdma_sc_ccq_init(dev->ccq, &info);
1180 	if (!status)
1181 		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
1182 exit:
1183 	if (status) {
1184 		dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
1185 				  ccq->mem_cq.va, ccq->mem_cq.pa);
1186 		ccq->mem_cq.va = NULL;
1187 	}
1188 
1189 	return status;
1190 }
1191 
1192 /**
1193  * irdma_alloc_set_mac - set up a mac address table entry
1194  * @iwdev: irdma device
1195  *
1196  * Allocate a mac ip entry and add it to the hw table. Return 0
1197  * if successful, otherwise return error
1198  */
1199 static int irdma_alloc_set_mac(struct irdma_device *iwdev)
1200 {
1201 	int status;
1202 
1203 	status = irdma_alloc_local_mac_entry(iwdev->rf,
1204 					     &iwdev->mac_ip_table_idx);
1205 	if (!status) {
1206 		status = irdma_add_local_mac_entry(iwdev->rf,
1207 						   (const u8 *)iwdev->netdev->dev_addr,
1208 						   (u8)iwdev->mac_ip_table_idx);
1209 		if (status)
1210 			irdma_del_local_mac_entry(iwdev->rf,
1211 						  (u8)iwdev->mac_ip_table_idx);
1212 	}
1213 	return status;
1214 }
1215 
1216 /**
1217  * irdma_cfg_ceq_vector - set up the msix interrupt vector for
1218  * ceq
1219  * @rf: RDMA PCI function
1220  * @iwceq: ceq associated with the vector
1221  * @ceq_id: the id number of the iwceq
1222  * @msix_vec: interrupt vector information
1223  *
1224  * Allocate interrupt resources and enable irq handling
1225  * Return 0 if successful, otherwise return error
1226  */
1227 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1228 				u32 ceq_id, struct irdma_msix_vector *msix_vec)
1229 {
1230 	int status;
1231 
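	/* CEQ 0 shares the AEQ MSI-X vector and DPC tasklet when msix_shared
	 * is set; all other CEQs get a dedicated vector and tasklet.
	 */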
1232 	if (rf->msix_shared && !ceq_id) {
1233 		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1234 			 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
1235 		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1236 		status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1237 				     msix_vec->name, rf);
1238 	} else {
1239 		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1240 			 "irdma-%s-CEQ-%d",
1241 			 dev_name(&rf->pcidev->dev), ceq_id);
1242 		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
1243 
1244 		status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
1245 				     msix_vec->name, iwceq);
1246 	}
1247 	cpumask_clear(&msix_vec->mask);
1248 	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
1249 	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
1250 	if (status) {
1251 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
1252 		return status;
1253 	}
1254 
1255 	msix_vec->ceq_id = ceq_id;
1256 	if (rf->sc_dev.privileged)
1257 		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
1258 						  msix_vec->idx, true);
1259 	else
1260 		status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
1261 						     msix_vec->idx);
1262 	return status;
1263 }
1264 
1265 /**
1266  * irdma_cfg_aeq_vector - set up the msix vector for aeq
1267  * @rf: RDMA PCI function
1268  *
1269  * Allocate interrupt resources and enable irq handling
1270  * Return 0 if successful, otherwise return error
1271  */
1272 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1273 {
1274 	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1275 	int ret = 0;
1276 
1277 	if (!rf->msix_shared) {
1278 		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1279 			 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
1280 		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1281 		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
1282 				  msix_vec->name, rf);
1283 	}
1284 	if (ret) {
1285 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
1286 		return ret;
1287 	}
1288 
1289 	if (rf->sc_dev.privileged)
1290 		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
1291 						  true);
1292 	else
1293 		ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);
1294 
1295 	return ret;
1296 }
1297 
1298 /**
1299  * irdma_create_ceq - create completion event queue
1300  * @rf: RDMA PCI function
1301  * @iwceq: pointer to the ceq resources to be created
1302  * @ceq_id: the id number of the iwceq
1303  * @vsi_idx: vsi idx
1304  *
1305  * Return 0, if the ceq and the resources associated with it
1306  * are successfully created, otherwise return error
1307  */
1308 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1309 			    u32 ceq_id, u16 vsi_idx)
1310 {
1311 	int status;
1312 	struct irdma_ceq_init_info info = {};
1313 	struct irdma_sc_dev *dev = &rf->sc_dev;
1314 	u32 ceq_size;
1315 
1316 	info.ceq_id = ceq_id;
1317 	iwceq->rf = rf;
1318 	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1319 		       dev->hw_attrs.max_hw_ceq_size);
1320 	iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
1321 				IRDMA_CEQ_ALIGNMENT);
1322 	iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
1323 					   &iwceq->mem.pa, GFP_KERNEL);
1324 	if (!iwceq->mem.va)
1325 		return -ENOMEM;
1326 
1327 	info.ceq_id = ceq_id;
1328 	info.ceqe_base = iwceq->mem.va;
1329 	info.ceqe_pa = iwceq->mem.pa;
1330 	info.elem_cnt = ceq_size;
1331 	iwceq->sc_ceq.ceq_id = ceq_id;
1332 	info.dev = dev;
1333 	info.vsi_idx = vsi_idx;
1334 	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
1335 	if (!status) {
1336 		if (dev->ceq_valid)
1337 			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1338 						   IRDMA_OP_CEQ_CREATE);
1339 		else
1340 			status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
1341 	}
1342 
1343 	if (status) {
1344 		dma_free_coherent(dev->hw->device, iwceq->mem.size,
1345 				  iwceq->mem.va, iwceq->mem.pa);
1346 		iwceq->mem.va = NULL;
1347 	}
1348 
1349 	return status;
1350 }
1351 
1352 /**
1353  * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
1354  * @rf: RDMA PCI function
1355  *
1356  * Allocate a list for all device completion event queues
1357  * Create the ceq 0 and configure its msix interrupt vector
1358  * Return 0, if successfully set up, otherwise return error
1359  */
1360 static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
1361 {
1362 	struct irdma_ceq *iwceq;
1363 	struct irdma_msix_vector *msix_vec;
1364 	u32 i;
1365 	int status = 0;
1366 	u32 num_ceqs;
1367 
1368 	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1369 	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1370 	if (!rf->ceqlist) {
1371 		status = -ENOMEM;
1372 		goto exit;
1373 	}
1374 
1375 	iwceq = &rf->ceqlist[0];
1376 	status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
1377 	if (status) {
1378 		ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
1379 			  status);
1380 		goto exit;
1381 	}
1382 
1383 	spin_lock_init(&iwceq->ce_lock);
1384 	i = rf->msix_shared ? 0 : 1;
1385 	msix_vec = &rf->iw_msixtbl[i];
1386 	iwceq->irq = msix_vec->irq;
1387 	iwceq->msix_idx = msix_vec->idx;
1388 	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1389 	if (status) {
1390 		irdma_destroy_ceq(rf, iwceq);
1391 		goto exit;
1392 	}
1393 
1394 	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1395 	rf->ceqs_count++;
1396 
1397 exit:
1398 	if (status && !rf->ceqs_count) {
1399 		kfree(rf->ceqlist);
1400 		rf->ceqlist = NULL;
1401 		return status;
1402 	}
1403 	rf->sc_dev.ceq_valid = true;
1404 
1405 	return 0;
1406 }
1407 
1408 /**
1409  * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
1410  * @rf: RDMA PCI function
1411  * @vsi_idx: vsi_idx for this CEQ
1412  *
1413  * Allocate a list for all device completion event queues
1414  * Create the ceqs and configure their msix interrupt vectors
1415  * Return 0, if ceqs are successfully set up, otherwise return error
1416  */
1417 static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
1418 {
1419 	u32 i;
1420 	u32 ceq_id;
1421 	struct irdma_ceq *iwceq;
1422 	struct irdma_msix_vector *msix_vec;
1423 	int status;
1424 	u32 num_ceqs;
1425 
1426 	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
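	/* CEQ 0 is set up by irdma_setup_ceq_0; start the remaining CEQs at
	 * vector 1 when AEQ/CEQ 0 share vector 0, otherwise at vector 2.
	 */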
1427 	i = (rf->msix_shared) ? 1 : 2;
1428 	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
1429 		iwceq = &rf->ceqlist[ceq_id];
1430 		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
1431 		if (status) {
1432 			ibdev_dbg(&rf->iwdev->ibdev,
1433 				  "ERR: create ceq status = %d\n", status);
1434 			goto del_ceqs;
1435 		}
1436 		spin_lock_init(&iwceq->ce_lock);
1437 		msix_vec = &rf->iw_msixtbl[i];
1438 		iwceq->irq = msix_vec->irq;
1439 		iwceq->msix_idx = msix_vec->idx;
1440 		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1441 		if (status) {
1442 			irdma_destroy_ceq(rf, iwceq);
1443 			goto del_ceqs;
1444 		}
1445 		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1446 		rf->ceqs_count++;
1447 	}
1448 
1449 	return 0;
1450 
1451 del_ceqs:
1452 	irdma_del_ceqs(rf);
1453 
1454 	return status;
1455 }
1456 
1457 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
1458 {
1459 	struct irdma_aeq *aeq = &rf->aeq;
1460 	dma_addr_t *pg_arr;
1461 	u32 pg_cnt;
1462 	int status;
1463 
1464 	if (rf->rdma_ver < IRDMA_GEN_2)
1465 		return -EOPNOTSUPP;
1466 
1467 	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
1468 	aeq->mem.va = vzalloc(aeq->mem.size);
1469 
1470 	if (!aeq->mem.va)
1471 		return -ENOMEM;
1472 
1473 	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
1474 	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1475 	if (status) {
1476 		vfree(aeq->mem.va);
1477 		return status;
1478 	}
1479 
1480 	pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
1481 	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1482 	if (status) {
1483 		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1484 		vfree(aeq->mem.va);
1485 		return status;
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 /**
1492  * irdma_create_aeq - create async event queue
1493  * @rf: RDMA PCI function
1494  *
1495  * Return 0, if the aeq and the resources associated with it
1496  * are successfully created, otherwise return error
1497  */
1498 static int irdma_create_aeq(struct irdma_pci_f *rf)
1499 {
1500 	struct irdma_aeq_init_info info = {};
1501 	struct irdma_sc_dev *dev = &rf->sc_dev;
1502 	struct irdma_aeq *aeq = &rf->aeq;
1503 	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1504 	u32 aeq_size;
1505 	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1506 	int status;
1507 
1508 	aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
1509 		   hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1510 	aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
1511 	/* GEN_3 does not support virtual AEQ. Cap at max Kernel alloc size */
1512 	if (rf->rdma_ver >= IRDMA_GEN_3)
1513 		aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
1514 			       sizeof(struct irdma_sc_aeqe)));
1515 	aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
1516 			      IRDMA_AEQ_ALIGNMENT);
1517 	aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
1518 					 &aeq->mem.pa,
1519 					 GFP_KERNEL | __GFP_NOWARN);
1520 	if (aeq->mem.va)
1521 		goto skip_virt_aeq;
1522 	else if (rf->rdma_ver >= IRDMA_GEN_3)
1523 		return -ENOMEM;
1524 
1525 	/* physically mapped aeq failed. setup virtual aeq */
1526 	status = irdma_create_virt_aeq(rf, aeq_size);
1527 	if (status)
1528 		return status;
1529 
1530 	info.virtual_map = true;
1531 	aeq->virtual_map = info.virtual_map;
1532 	info.pbl_chunk_size = 1;
1533 	info.first_pm_pbl_idx = aeq->palloc.level1.idx;
1534 
1535 skip_virt_aeq:
1536 	info.aeqe_base = aeq->mem.va;
1537 	info.aeq_elem_pa = aeq->mem.pa;
1538 	info.elem_cnt = aeq_size;
1539 	info.dev = dev;
1540 	info.msix_idx = rf->iw_msixtbl->idx;
1541 	status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
1542 	if (status)
1543 		goto err;
1544 
1545 	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
1546 	if (status)
1547 		goto err;
1548 
1549 	return 0;
1550 
1551 err:
1552 	if (aeq->virtual_map) {
1553 		irdma_destroy_virt_aeq(rf);
1554 	} else {
1555 		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
1556 				  aeq->mem.pa);
1557 		aeq->mem.va = NULL;
1558 	}
1559 
1560 	return status;
1561 }
1562 
1563 /**
1564  * irdma_setup_aeq - set up the device aeq
1565  * @rf: RDMA PCI function
1566  *
1567  * Create the aeq and configure its msix interrupt vector
1568  * Return 0 if successful, otherwise return error
1569  */
1570 static int irdma_setup_aeq(struct irdma_pci_f *rf)
1571 {
1572 	struct irdma_sc_dev *dev = &rf->sc_dev;
1573 	int status;
1574 
1575 	status = irdma_create_aeq(rf);
1576 	if (status)
1577 		return status;
1578 
1579 	status = irdma_cfg_aeq_vector(rf);
1580 	if (status) {
1581 		irdma_destroy_aeq(rf);
1582 		return status;
1583 	}
1584 
1585 	if (!rf->msix_shared)
1586 		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1587 
1588 	return 0;
1589 }
1590 
1591 /**
1592  * irdma_initialize_ilq - create iwarp local queue for cm
1593  * @iwdev: irdma device
1594  *
1595  * Return 0 if successful, otherwise return error
1596  */
1597 static int irdma_initialize_ilq(struct irdma_device *iwdev)
1598 {
1599 	struct irdma_puda_rsrc_info info = {};
1600 	int status;
1601 
1602 	info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
1603 	info.cq_id = IRDMA_RSVD_CQ_ID_ILQ;
1604 	info.qp_id = IRDMA_RSVD_QP_ID_GSI_ILQ;
1605 	info.count = 1;
1606 	info.pd_id = 1;
1607 	info.abi_ver = IRDMA_ABI_VER;
1608 	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1609 	info.rq_size = info.sq_size;
1610 	info.buf_size = 1024;
1611 	info.tx_buf_cnt = 2 * info.sq_size;
1612 	info.receive = irdma_receive_ilq;
1613 	info.xmit_complete = irdma_free_sqbuf;
1614 	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1615 	if (status)
1616 		ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
1617 
1618 	return status;
1619 }
1620 
1621 /**
1622  * irdma_initialize_ieq - create iwarp exception queue
1623  * @iwdev: irdma device
1624  *
1625  * Return 0 if successful, otherwise return error
1626  */
1627 static int irdma_initialize_ieq(struct irdma_device *iwdev)
1628 {
1629 	struct irdma_puda_rsrc_info info = {};
1630 	int status;
1631 
1632 	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
1633 	info.cq_id = IRDMA_RSVD_CQ_ID_IEQ;
1634 	info.qp_id = iwdev->vsi.exception_lan_q;
1635 	info.count = 1;
1636 	info.pd_id = 2;
1637 	info.abi_ver = IRDMA_ABI_VER;
1638 	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1639 	info.rq_size = info.sq_size;
1640 	info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1641 	info.tx_buf_cnt = 4096;
1642 	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1643 	if (status)
1644 		ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
1645 
1646 	return status;
1647 }
1648 
1649 /**
1650  * irdma_reinitialize_ieq - destroy and re-create ieq
1651  * @vsi: VSI structure
1652  */
1653 void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
1654 {
1655 	struct irdma_device *iwdev = vsi->back_vsi;
1656 	struct irdma_pci_f *rf = iwdev->rf;
1657 
1658 	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
1659 	if (irdma_initialize_ieq(iwdev)) {
1660 		iwdev->rf->reset = true;
1661 		rf->gen_ops.request_reset(rf);
1662 	}
1663 }
1664 
1665 /**
1666  * irdma_hmc_setup - create hmc objects for the device
1667  * @rf: RDMA PCI function
1668  *
1669  * Set up the device private memory space for the number and size of
1670  * the hmc objects and create the objects
1671  * Return 0 if successful, otherwise return error
1672  */
1673 static int irdma_hmc_setup(struct irdma_pci_f *rf)
1674 {
1675 	int status;
1676 	u32 qpcnt;
1677 
1678 	qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1679 
1680 	rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1681 	status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
1682 	if (status)
1683 		return status;
1684 
1685 	status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1686 
1687 	return status;
1688 }
1689 
1690 /**
1691  * irdma_del_init_mem - deallocate memory resources
1692  * @rf: RDMA PCI function
1693  */
1694 static void irdma_del_init_mem(struct irdma_pci_f *rf)
1695 {
1696 	struct irdma_sc_dev *dev = &rf->sc_dev;
1697 	struct irdma_dma_mem *fw_scratch_buf0;
1698 	struct irdma_dma_mem *fw_scratch_buf1;
1699 
1700 	if (!rf->sc_dev.privileged)
1701 		irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
1702 	kfree(dev->hmc_info->sd_table.sd_entry);
1703 	dev->hmc_info->sd_table.sd_entry = NULL;
1704 	vfree(rf->mem_rsrc);
1705 	rf->mem_rsrc = NULL;
1706 	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1707 			  rf->obj_mem.pa);
1708 	rf->obj_mem.va = NULL;
1709 	if (rf->rdma_ver != IRDMA_GEN_1) {
1710 		bitmap_free(rf->allocated_ws_nodes);
1711 		rf->allocated_ws_nodes = NULL;
1712 	}
1713 	kfree(rf->ceqlist);
1714 	rf->ceqlist = NULL;
1715 	kfree(rf->iw_msixtbl);
1716 	rf->iw_msixtbl = NULL;
1717 	kfree(rf->hmc_info_mem);
1718 	rf->hmc_info_mem = NULL;
1719 
1720 	fw_scratch_buf0 = &dev->hmc_fpm_misc.fw_scratch_buf0;
1721 	fw_scratch_buf1 = &dev->hmc_fpm_misc.fw_scratch_buf1;
1722 	if (fw_scratch_buf0->va)
1723 		dma_free_coherent(dev->hw->device, fw_scratch_buf0->size,
1724 				  fw_scratch_buf0->va, fw_scratch_buf0->pa);
1725 	if (fw_scratch_buf1->va)
1726 		dma_free_coherent(dev->hw->device, fw_scratch_buf1->size,
1727 				  fw_scratch_buf1->va, fw_scratch_buf1->pa);
1728 }
1729 
1730 /**
1731  * irdma_initialize_dev - initialize device
1732  * @rf: RDMA PCI function
1733  *
1734  * Allocate memory for the hmc objects and initialize iwdev
1735  * Return 0 if successful, otherwise clean up the resources
1736  * and return error
1737  */
1738 static int irdma_initialize_dev(struct irdma_pci_f *rf)
1739 {
1740 	int status;
1741 	struct irdma_sc_dev *dev = &rf->sc_dev;
1742 	struct irdma_device_init_info info = {};
1743 	struct irdma_dma_mem mem;
1744 	u32 size;
1745 
1746 	size = sizeof(struct irdma_hmc_pble_rsrc) +
1747 	       sizeof(struct irdma_hmc_info) +
1748 	       (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
1749 
1750 	rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1751 	if (!rf->hmc_info_mem)
1752 		return -ENOMEM;
1753 
1754 	rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1755 	dev->hmc_info = &rf->hw.hmc;
1756 	dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
1757 				 (rf->pble_rsrc + 1);
1758 
1759 	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1760 				       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
1761 	if (status)
1762 		goto error;
1763 
1764 	info.fpm_query_buf_pa = mem.pa;
1765 	info.fpm_query_buf = mem.va;
1766 
1767 	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1768 				       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
1769 	if (status)
1770 		goto error;
1771 
1772 	info.fpm_commit_buf_pa = mem.pa;
1773 	info.fpm_commit_buf = mem.va;
1774 
1775 	info.bar0 = rf->hw.hw_addr;
1776 	info.hmc_fn_id = rf->pf_id;
1777 	info.protocol_used = rf->protocol_used;
1778 	info.hw = &rf->hw;
1779 	status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
1780 	if (status)
1781 		goto error;
1782 
1783 	return status;
1784 error:
1785 	kfree(rf->hmc_info_mem);
1786 	rf->hmc_info_mem = NULL;
1787 
1788 	return status;
1789 }
1790 
1791 /**
1792  * irdma_rt_deinit_hw - clean up the irdma device resources
1793  * @iwdev: irdma device
1794  *
1795  * Remove the mac ip entry and ipv4/ipv6 addresses, destroy the
1796  * device queues and free the pble and the hmc objects
1797  */
1798 void irdma_rt_deinit_hw(struct irdma_device *iwdev)
1799 {
1800 	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
1801 
1802 	switch (iwdev->init_state) {
1803 	case IP_ADDR_REGISTERED:
1804 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1805 			irdma_del_local_mac_entry(iwdev->rf,
1806 						  (u8)iwdev->mac_ip_table_idx);
1807 		fallthrough;
1808 	case IEQ_CREATED:
1809 		if (!iwdev->roce_mode)
1810 			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1811 					     iwdev->rf->reset);
1812 		fallthrough;
1813 	case ILQ_CREATED:
1814 		if (!iwdev->roce_mode)
1815 			irdma_puda_dele_rsrc(&iwdev->vsi,
1816 					     IRDMA_PUDA_RSRC_TYPE_ILQ,
1817 					     iwdev->rf->reset);
1818 		break;
1819 	default:
1820 		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1821 		break;
1822 	}
1823 
1824 	irdma_cleanup_cm_core(&iwdev->cm_core);
1825 	if (iwdev->vsi.pestat) {
1826 		irdma_vsi_stats_free(&iwdev->vsi);
1827 		kfree(iwdev->vsi.pestat);
1828 	}
1829 	if (iwdev->cleanup_wq)
1830 		destroy_workqueue(iwdev->cleanup_wq);
1831 }
1832 
1833 static int irdma_setup_init_state(struct irdma_pci_f *rf)
1834 {
1835 	int status;
1836 
1837 	status = irdma_save_msix_info(rf);
1838 	if (status)
1839 		return status;
1840 
1841 	rf->hw.device = &rf->pcidev->dev;
1842 	rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
1843 	rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
1844 					    &rf->obj_mem.pa, GFP_KERNEL);
1845 	if (!rf->obj_mem.va) {
1846 		status = -ENOMEM;
1847 		goto clean_msixtbl;
1848 	}
1849 
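	/*
	 * obj_next tracks the unconsumed tail of obj_mem; the aligned FPM
	 * query/commit buffers are carved from it in irdma_initialize_dev()
	 * via irdma_obj_aligned_mem().
	 */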
1850 	rf->obj_next = rf->obj_mem;
1851 	status = irdma_initialize_dev(rf);
1852 	if (status)
1853 		goto clean_obj_mem;
1854 
1855 	return 0;
1856 
1857 clean_obj_mem:
1858 	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
1859 			  rf->obj_mem.pa);
1860 	rf->obj_mem.va = NULL;
1861 clean_msixtbl:
1862 	kfree(rf->iw_msixtbl);
1863 	rf->iw_msixtbl = NULL;
1864 	return status;
1865 }
1866 
1867 /**
1868  * irdma_get_used_rsrc - determine resources used internally
1869  * @iwdev: irdma device
1870  *
1871  * Called at the end of open to get all internal allocations
1872  */
1873 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
1874 {
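	/*
	 * Internal IDs are allocated contiguously from bit 0 at this point,
	 * so the first clear bit in each bitmap doubles as the in-use count.
	 */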
1875 	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1876 						 iwdev->rf->max_pd);
1877 	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1878 						 iwdev->rf->max_qp);
1879 	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1880 						  iwdev->rf->max_cq);
1881 	iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
1882 						   iwdev->rf->max_srq);
1883 	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1884 						 iwdev->rf->max_mr);
1885 }
1886 
1887 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1888 {
1889 	enum init_completion_state state = rf->init_state;
1890 
1891 	rf->init_state = INVALID_STATE;
1892 
1893 	switch (state) {
1894 	case AEQ_CREATED:
1895 		irdma_destroy_aeq(rf);
1896 		fallthrough;
1897 	case PBLE_CHUNK_MEM:
1898 		irdma_destroy_pble_prm(rf->pble_rsrc);
1899 		fallthrough;
1900 	case CEQS_CREATED:
1901 		irdma_del_ceqs(rf);
1902 		fallthrough;
1903 	case CEQ0_CREATED:
1904 		irdma_del_ceq_0(rf);
1905 		fallthrough;
1906 	case CCQ_CREATED:
1907 		irdma_destroy_ccq(rf);
1908 		fallthrough;
1909 	case HW_RSRC_INITIALIZED:
1910 	case HMC_OBJS_CREATED:
1911 		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1912 				      rf->reset, rf->rdma_ver);
1913 		fallthrough;
1914 	case CQP_CREATED:
1915 		irdma_destroy_cqp(rf);
1916 		fallthrough;
1917 	case INITIAL_STATE:
1918 		irdma_del_init_mem(rf);
1919 		break;
1920 	case INVALID_STATE:
1921 	default:
		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", state);
1923 		break;
1924 	}
1925 }
1926 
1927 /**
1928  * irdma_rt_init_hw - Initializes runtime portion of HW
1929  * @iwdev: irdma device
1930  * @l2params: qos, tc, mtu info from netdev driver
1931  *
1932  * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
1933  * device resource objects.
1934  */
1935 int irdma_rt_init_hw(struct irdma_device *iwdev,
1936 		     struct irdma_l2params *l2params)
1937 {
1938 	struct irdma_pci_f *rf = iwdev->rf;
1939 	struct irdma_sc_dev *dev = &rf->sc_dev;
1940 	struct irdma_vsi_init_info vsi_info = {};
1941 	struct irdma_vsi_stats_info stats_info = {};
1942 	int status;
1943 
1944 	vsi_info.dev = dev;
1945 	vsi_info.back_vsi = iwdev;
1946 	vsi_info.params = l2params;
1947 	vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1948 	vsi_info.register_qset = rf->gen_ops.register_qset;
1949 	vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1950 	vsi_info.exception_lan_q = IRDMA_RSVD_QP_ID_IEQ;
1951 	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1952 
1953 	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1954 	if (status)
1955 		return status;
1956 
	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1958 	if (!stats_info.pestat) {
1959 		irdma_cleanup_cm_core(&iwdev->cm_core);
1960 		return -ENOMEM;
1961 	}
1962 	stats_info.fcn_id = dev->hmc_fn_id;
1963 	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1964 	if (status) {
1965 		irdma_cleanup_cm_core(&iwdev->cm_core);
1966 		kfree(stats_info.pestat);
1967 		return status;
1968 	}
1969 
1970 	do {
1971 		if (!iwdev->roce_mode) {
1972 			status = irdma_initialize_ilq(iwdev);
1973 			if (status)
1974 				break;
1975 			iwdev->init_state = ILQ_CREATED;
1976 			status = irdma_initialize_ieq(iwdev);
1977 			if (status)
1978 				break;
1979 			iwdev->init_state = IEQ_CREATED;
1980 		}
1981 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1982 			irdma_alloc_set_mac(iwdev);
1983 		irdma_add_ip(iwdev);
1984 		iwdev->init_state = IP_ADDR_REGISTERED;
1985 
		/* handles async cleanup tasks - disconnect CM, free qp,
		 * free cq bufs
		 */
1989 		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1990 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1991 		if (!iwdev->cleanup_wq)
1992 			return -ENOMEM;
1993 		irdma_get_used_rsrc(iwdev);
1994 		init_waitqueue_head(&iwdev->suspend_wq);
1995 
1996 		return 0;
1997 	} while (0);
1998 
1999 	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
2000 		status, iwdev->init_state);
2001 	irdma_rt_deinit_hw(iwdev);
2002 
2003 	return status;
2004 }
2005 
2006 /**
2007  * irdma_ctrl_init_hw - Initializes control portion of HW
2008  * @rf: RDMA PCI function
2009  *
 * Create admin queues, HMC objects and RF resource objects
2011  */
2012 int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
2013 {
2014 	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	do {
2017 		status = irdma_setup_init_state(rf);
2018 		if (status)
2019 			break;
2020 		rf->init_state = INITIAL_STATE;
2021 
2022 		status = irdma_create_cqp(rf);
2023 		if (status)
2024 			break;
2025 		rf->init_state = CQP_CREATED;
2026 
2027 		dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
2028 		if (rf->rdma_ver != IRDMA_GEN_1) {
2029 			status = irdma_get_rdma_features(dev);
2030 			if (status)
2031 				break;
2032 		}
2033 
2034 		status = irdma_hmc_setup(rf);
2035 		if (status)
2036 			break;
2037 		rf->init_state = HMC_OBJS_CREATED;
2038 
2039 		status = irdma_initialize_hw_rsrc(rf);
2040 		if (status)
2041 			break;
2042 		rf->init_state = HW_RSRC_INITIALIZED;
2043 
2044 		status = irdma_create_ccq(rf);
2045 		if (status)
2046 			break;
2047 		rf->init_state = CCQ_CREATED;
2048 
2049 		status = irdma_setup_ceq_0(rf);
2050 		if (status)
2051 			break;
2052 		rf->init_state = CEQ0_CREATED;
2053 		/* Handles processing of CQP completions */
2054 		rf->cqp_cmpl_wq =
2055 			alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
2056 		if (!rf->cqp_cmpl_wq) {
2057 			status = -ENOMEM;
2058 			break;
2059 		}
2060 		INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
2061 		irdma_sc_ccq_arm(dev->ccq);
2062 
2063 		status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
2064 		if (status)
2065 			break;
2066 
2067 		rf->init_state = CEQS_CREATED;
2068 
2069 		status = irdma_hmc_init_pble(&rf->sc_dev,
2070 					     rf->pble_rsrc);
2071 		if (status)
2072 			break;
2073 
2074 		rf->init_state = PBLE_CHUNK_MEM;
2075 
2076 		status = irdma_setup_aeq(rf);
2077 		if (status)
2078 			break;
2079 		rf->init_state = AEQ_CREATED;
2080 
2081 		return 0;
2082 	} while (0);
2083 
2084 	dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
2085 		rf->init_state, status);
2086 	irdma_ctrl_deinit_hw(rf);
2087 	return status;
2088 }
2089 
2090 /**
2091  * irdma_set_hw_rsrc - set hw memory resources.
2092  * @rf: RDMA PCI function
2093  */
2094 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
2095 {
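	/*
	 * Carve the allocation bitmaps and lookup tables out of the single
	 * mem_rsrc block: the ARP table first, then the per-resource bitmaps,
	 * then the resource pointer tables.
	 */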
2096 	rf->allocated_qps = (void *)(rf->mem_rsrc +
2097 		   (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
2098 	rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
2099 	rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
2100 	rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
2101 	rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
2102 	rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
2103 	rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
2104 	rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
2105 	rf->qp_table = (struct irdma_qp **)
2106 		(&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
	rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
	rf->srq_table = (struct irdma_srq **)(&rf->cq_table[rf->max_cq]);
2108 
2109 	spin_lock_init(&rf->rsrc_lock);
2110 	spin_lock_init(&rf->arp_lock);
2111 	spin_lock_init(&rf->qptable_lock);
2112 	spin_lock_init(&rf->cqtable_lock);
2113 	spin_lock_init(&rf->qh_list_lock);
2114 }
2115 
2116 /**
2117  * irdma_calc_mem_rsrc_size - calculate memory resources size.
2118  * @rf: RDMA PCI function
2119  */
2120 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
2121 {
2122 	u32 rsrc_size;
2123 
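	/* This must mirror the carve-out order in irdma_set_hw_rsrc(). */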
2124 	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
2125 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
2126 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
2127 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
2128 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
2129 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
2130 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
2131 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
2132 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
2133 	rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
2134 	rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
2135 	rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
2136 
2137 	return rsrc_size;
2138 }
2139 
2140 /**
2141  * irdma_initialize_hw_rsrc - initialize hw resource tracking array
2142  * @rf: RDMA PCI function
2143  */
2144 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
2145 {
2146 	u32 rsrc_size;
2147 	u32 mrdrvbits;
2148 	u32 ret;
2149 
2150 	if (rf->rdma_ver != IRDMA_GEN_1) {
2151 		rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
2152 						       GFP_KERNEL);
2153 		if (!rf->allocated_ws_nodes)
2154 			return -ENOMEM;
2155 
2156 		set_bit(0, rf->allocated_ws_nodes);
2157 		rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
2158 	}
2159 	rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
2160 	rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
2161 	rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
2162 	rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
2163 	rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
2164 	rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
2165 	rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
2166 	rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
2167 	rf->max_mcg = rf->max_qp;
2168 
2169 	rsrc_size = irdma_calc_mem_rsrc_size(rf);
2170 	rf->mem_rsrc = vzalloc(rsrc_size);
2171 	if (!rf->mem_rsrc) {
2172 		ret = -ENOMEM;
2173 		goto mem_rsrc_vzalloc_fail;
2174 	}
2175 
2176 	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2177 
2178 	irdma_set_hw_rsrc(rf);
2179 
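	/*
	 * Reserve index 0 of each resource, the QP and CQ IDs used internally
	 * (CQP, ILQ/GSI, IEQ) and PDs 0-2 so they are never handed out.
	 */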
2180 	set_bit(0, rf->allocated_mrs);
2181 	set_bit(IRDMA_RSVD_QP_ID_0, rf->allocated_qps);
2182 	set_bit(IRDMA_RSVD_CQ_ID_CQP, rf->allocated_cqs);
2183 	set_bit(0, rf->allocated_srqs);
2184 	set_bit(0, rf->allocated_pds);
2185 	set_bit(0, rf->allocated_arps);
2186 	set_bit(0, rf->allocated_ahs);
2187 	set_bit(0, rf->allocated_mcgs);
2188 	set_bit(IRDMA_RSVD_QP_ID_IEQ, rf->allocated_qps);
2189 	set_bit(IRDMA_RSVD_QP_ID_GSI_ILQ, rf->allocated_qps);
2190 	set_bit(IRDMA_RSVD_CQ_ID_ILQ, rf->allocated_cqs);
2191 	set_bit(1, rf->allocated_pds);
2192 	set_bit(IRDMA_RSVD_CQ_ID_IEQ, rf->allocated_cqs);
2193 	set_bit(2, rf->allocated_pds);
2194 
2195 	INIT_LIST_HEAD(&rf->mc_qht_list.list);
2196 
2197 	if (rf->rdma_ver >= IRDMA_GEN_4)
2198 		mrdrvbits = 24 - max(get_count_order(rf->max_mr), 16);
2199 	else
2200 		/* stag index mask has a minimum of 14 bits */
2201 		mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2202 
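	/*
	 * Clear the top mrdrvbits of the 32-bit STag: those bits hold the
	 * driver-generated part of the key, the remainder indexes the MR.
	 */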
2203 	rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
2204 
2205 	return 0;
2206 
2207 mem_rsrc_vzalloc_fail:
2208 	bitmap_free(rf->allocated_ws_nodes);
2209 	rf->allocated_ws_nodes = NULL;
2210 
2211 	return ret;
2212 }
2213 
2214 /**
2215  * irdma_cqp_ce_handler - handle cqp completions
2216  * @rf: RDMA PCI function
2217  * @cq: cq for cqp completions
2218  */
2219 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2220 {
2221 	struct irdma_cqp_request *cqp_request;
2222 	struct irdma_sc_dev *dev = &rf->sc_dev;
2223 	u32 cqe_count = 0;
2224 	struct irdma_ccq_cqe_info info;
2225 	unsigned long flags;
2226 	int ret;
2227 
2228 	do {
2229 		memset(&info, 0, sizeof(info));
2230 		spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2231 		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2232 		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2233 		if (ret)
2234 			break;
2235 
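		/* scratch carries the issuing cqp_request; it may be NULL for
		 * completions that have no request attached.
		 */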
2236 		cqp_request = (struct irdma_cqp_request *)
2237 			      (unsigned long)info.scratch;
2238 		if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2239 						     info.maj_err_code,
2240 						     info.min_err_code))
2241 			ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2242 				  info.op_code, info.maj_err_code, info.min_err_code);
2243 		if (cqp_request) {
2244 			cqp_request->compl_info.maj_err_code = info.maj_err_code;
2245 			cqp_request->compl_info.min_err_code = info.min_err_code;
2246 			cqp_request->compl_info.op_ret_val = info.op_ret_val;
2247 			cqp_request->compl_info.error = info.error;
2248 
2249 			/*
2250 			 * If this is deferred or pending completion, then mark
2251 			 * CQP request as pending to not block the CQ, but don't
2252 			 * release CQP request, as it is still on the OOO list.
2253 			 */
2254 			if (info.pending)
2255 				cqp_request->pending = true;
2256 			else
2257 				irdma_complete_cqp_request(&rf->cqp,
2258 							   cqp_request);
2259 		}
2260 
2261 		cqe_count++;
2262 	} while (1);
2263 
2264 	if (cqe_count) {
2265 		irdma_process_bh(dev);
2266 		irdma_sc_ccq_arm(cq);
2267 	}
2268 }
2269 
2270 /**
2271  * cqp_compl_worker - Handle cqp completions
2272  * @work: Pointer to work structure
2273  */
2274 void cqp_compl_worker(struct work_struct *work)
2275 {
2276 	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2277 					      cqp_cmpl_work);
2278 	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2279 
2280 	irdma_cqp_ce_handler(rf, cq);
2281 }
2282 
2283 /**
2284  * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
2285  * @cm_core: cm's core
2286  * @port: port to identify apbvt entry
2287  */
2288 static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2289 							  u16 port)
2290 {
2291 	struct irdma_apbvt_entry *entry;
2292 
2293 	hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2294 		if (entry->port == port) {
2295 			entry->use_cnt++;
2296 			return entry;
2297 		}
2298 	}
2299 
2300 	return NULL;
2301 }
2302 
2303 /**
2304  * irdma_next_iw_state - modify qp state
2305  * @iwqp: iwarp qp to modify
2306  * @state: next state for qp
2307  * @del_hash: del hash
2308  * @term: term message
2309  * @termlen: length of term message
2310  */
2311 void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2312 			 u8 termlen)
2313 {
2314 	struct irdma_modify_qp_info info = {};
2315 
2316 	info.next_iwarp_state = state;
2317 	info.remove_hash_idx = del_hash;
2318 	info.cq_num_valid = true;
2319 	info.arp_cache_idx_valid = true;
2320 	info.dont_send_term = true;
2321 	info.dont_send_fin = true;
2322 	info.termlen = termlen;
2323 
2324 	if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2325 		info.dont_send_term = false;
2326 	if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2327 		info.dont_send_fin = false;
2328 	if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2329 		info.reset_tcp_conn = true;
2330 	iwqp->hw_iwarp_state = state;
2331 	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2332 	iwqp->iwarp_state = info.next_iwarp_state;
2333 }
2334 
2335 /**
2336  * irdma_del_local_mac_entry - remove a mac entry from the hw
2337  * table
2338  * @rf: RDMA PCI function
2339  * @idx: the index of the mac ip address to delete
2340  */
2341 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2342 {
2343 	struct irdma_cqp *iwcqp = &rf->cqp;
2344 	struct irdma_cqp_request *cqp_request;
2345 	struct cqp_cmds_info *cqp_info;
2346 
2347 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2348 	if (!cqp_request)
2349 		return;
2350 
2351 	cqp_info = &cqp_request->info;
2352 	cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2353 	cqp_info->post_sq = 1;
2354 	cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2355 	cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2356 	cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2357 	cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2358 
2359 	irdma_handle_cqp_op(rf, cqp_request);
2360 	irdma_put_cqp_request(iwcqp, cqp_request);
2361 }
2362 
2363 /**
2364  * irdma_add_local_mac_entry - add a mac ip address entry to the
2365  * hw table
2366  * @rf: RDMA PCI function
2367  * @mac_addr: pointer to mac address
2368  * @idx: the index of the mac ip address to add
2369  */
2370 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2371 {
2372 	struct irdma_local_mac_entry_info *info;
2373 	struct irdma_cqp *iwcqp = &rf->cqp;
2374 	struct irdma_cqp_request *cqp_request;
2375 	struct cqp_cmds_info *cqp_info;
2376 	int status;
2377 
2378 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2379 	if (!cqp_request)
2380 		return -ENOMEM;
2381 
2382 	cqp_info = &cqp_request->info;
2383 	cqp_info->post_sq = 1;
2384 	info = &cqp_info->in.u.add_local_mac_entry.info;
2385 	ether_addr_copy(info->mac_addr, mac_addr);
2386 	info->entry_idx = idx;
2387 	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2388 	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2389 	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2391 
2392 	status = irdma_handle_cqp_op(rf, cqp_request);
2393 	irdma_put_cqp_request(iwcqp, cqp_request);
2394 
2395 	return status;
2396 }
2397 
2398 /**
2399  * irdma_alloc_local_mac_entry - allocate a mac entry
2400  * @rf: RDMA PCI function
2401  * @mac_tbl_idx: the index of the new mac address
2402  *
2403  * Allocate a mac address entry and update the mac_tbl_idx
2404  * to hold the index of the newly created mac address
2405  * Return 0 if successful, otherwise return error
2406  */
2407 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2408 {
2409 	struct irdma_cqp *iwcqp = &rf->cqp;
2410 	struct irdma_cqp_request *cqp_request;
2411 	struct cqp_cmds_info *cqp_info;
2412 	int status = 0;
2413 
2414 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2415 	if (!cqp_request)
2416 		return -ENOMEM;
2417 
2418 	cqp_info = &cqp_request->info;
2419 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2420 	cqp_info->post_sq = 1;
2421 	cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2422 	cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2423 	status = irdma_handle_cqp_op(rf, cqp_request);
2424 	if (!status)
2425 		*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2426 
2427 	irdma_put_cqp_request(iwcqp, cqp_request);
2428 
2429 	return status;
2430 }
2431 
2432 /**
2433  * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2434  * @iwdev: irdma device
2435  * @accel_local_port: port for apbvt
 * @add_port: add or delete port
2437  */
2438 static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2439 				      u16 accel_local_port, bool add_port)
2440 {
2441 	struct irdma_apbvt_info *info;
2442 	struct irdma_cqp_request *cqp_request;
2443 	struct cqp_cmds_info *cqp_info;
2444 	int status;
2445 
2446 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2447 	if (!cqp_request)
2448 		return -ENOMEM;
2449 
2450 	cqp_info = &cqp_request->info;
2451 	info = &cqp_info->in.u.manage_apbvt_entry.info;
2452 	info->add = add_port;
2453 	info->port = accel_local_port;
2454 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2455 	cqp_info->post_sq = 1;
2456 	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2457 	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2458 	ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
2459 		  (!add_port) ? "DELETE" : "ADD", accel_local_port);
2460 
2461 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2462 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2463 
2464 	return status;
2465 }
2466 
2467 /**
2468  * irdma_add_apbvt - add tcp port to HW apbvt table
2469  * @iwdev: irdma device
2470  * @port: port for apbvt
2471  */
2472 struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2473 {
2474 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
2475 	struct irdma_apbvt_entry *entry;
2476 	unsigned long flags;
2477 
2478 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2479 	entry = irdma_lookup_apbvt_entry(cm_core, port);
2480 	if (entry) {
2481 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2482 		return entry;
2483 	}
2484 
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2486 	if (!entry) {
2487 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2488 		return NULL;
2489 	}
2490 
2491 	entry->port = port;
2492 	entry->use_cnt = 1;
2493 	hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
2494 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2495 
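	/*
	 * The new entry is already visible to lookups; apbvt_lock is dropped
	 * first since the add CQP command waits for completion and may sleep.
	 */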
2496 	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
2497 		kfree(entry);
2498 		return NULL;
2499 	}
2500 
2501 	return entry;
2502 }
2503 
2504 /**
2505  * irdma_del_apbvt - delete tcp port from HW apbvt table
2506  * @iwdev: irdma device
2507  * @entry: apbvt entry object
2508  */
2509 void irdma_del_apbvt(struct irdma_device *iwdev,
2510 		     struct irdma_apbvt_entry *entry)
2511 {
2512 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
2513 	unsigned long flags;
2514 
2515 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2516 	if (--entry->use_cnt) {
2517 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2518 		return;
2519 	}
2520 
2521 	hash_del(&entry->hlist);
2522 	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
2523 	 * protect against race where add APBVT CQP can race ahead of the delete
2524 	 * APBVT for same port.
2525 	 */
2526 	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2527 	kfree(entry);
2528 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2529 }
2530 
2531 /**
2532  * irdma_manage_arp_cache - manage hw arp cache
2533  * @rf: RDMA PCI function
2534  * @mac_addr: mac address ptr
2535  * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating IPv4
2537  * @action: add, delete or modify
2538  */
2539 void irdma_manage_arp_cache(struct irdma_pci_f *rf,
2540 			    const unsigned char *mac_addr,
2541 			    u32 *ip_addr, bool ipv4, u32 action)
2542 {
2543 	struct irdma_add_arp_cache_entry_info *info;
2544 	struct irdma_cqp_request *cqp_request;
2545 	struct cqp_cmds_info *cqp_info;
2546 	int arp_index;
2547 
2548 	arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
2549 	if (arp_index == -1)
2550 		return;
2551 
2552 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2553 	if (!cqp_request)
2554 		return;
2555 
2556 	cqp_info = &cqp_request->info;
2557 	if (action == IRDMA_ARP_ADD) {
2558 		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
2559 		info = &cqp_info->in.u.add_arp_cache_entry.info;
2560 		info->arp_index = (u16)arp_index;
2561 		info->permanent = true;
2562 		ether_addr_copy(info->mac_addr, mac_addr);
2563 		cqp_info->in.u.add_arp_cache_entry.scratch =
2564 			(uintptr_t)cqp_request;
2565 		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2566 	} else {
2567 		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
2568 		cqp_info->in.u.del_arp_cache_entry.scratch =
2569 			(uintptr_t)cqp_request;
2570 		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2571 		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
2572 	}
2573 
2574 	cqp_info->post_sq = 1;
2575 	irdma_handle_cqp_op(rf, cqp_request);
2576 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2577 }
2578 
2579 /**
2580  * irdma_send_syn_cqp_callback - do syn/ack after qhash
2581  * @cqp_request: qhash cqp completion
2582  */
2583 static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
2584 {
2585 	struct irdma_cm_node *cm_node = cqp_request->param;
2586 
2587 	irdma_send_syn(cm_node, 1);
2588 	irdma_rem_ref_cm_node(cm_node);
2589 }
2590 
2591 /**
2592  * irdma_manage_qhash - add or modify qhash
2593  * @iwdev: irdma device
2594  * @cminfo: cm info for qhash
2595  * @etype: type (syn or quad)
2596  * @mtype: type of qhash
2597  * @cmnode: cmnode associated with connection
2598  * @wait: wait for completion
2599  */
2600 int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2601 		       enum irdma_quad_entry_type etype,
2602 		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
2603 		       bool wait)
2604 {
2605 	struct irdma_qhash_table_info *info;
2606 	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2607 	struct irdma_cqp_request *cqp_request;
2608 	struct cqp_cmds_info *cqp_info;
2609 	struct irdma_cm_node *cm_node = cmnode;
2610 	int status;
2611 
2612 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2613 	if (!cqp_request)
2614 		return -ENOMEM;
2615 
2616 	cqp_info = &cqp_request->info;
2617 	info = &cqp_info->in.u.manage_qhash_table_entry.info;
2618 	info->vsi = &iwdev->vsi;
2619 	info->manage = mtype;
2620 	info->entry_type = etype;
2621 	if (cminfo->vlan_id < VLAN_N_VID) {
2622 		info->vlan_valid = true;
2623 		info->vlan_id = cminfo->vlan_id;
2624 	} else {
2625 		info->vlan_valid = false;
2626 	}
2627 	info->ipv4_valid = cminfo->ipv4;
2628 	info->user_pri = cminfo->user_pri;
2629 	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
2630 	info->qp_num = cminfo->qh_qpid;
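	/* The local address/port are programmed as the qhash destination. */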
2631 	info->dest_port = cminfo->loc_port;
2632 	info->dest_ip[0] = cminfo->loc_addr[0];
2633 	info->dest_ip[1] = cminfo->loc_addr[1];
2634 	info->dest_ip[2] = cminfo->loc_addr[2];
2635 	info->dest_ip[3] = cminfo->loc_addr[3];
2636 	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
2637 	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
2638 	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
2639 	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
2640 	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
2641 		info->src_port = cminfo->rem_port;
2642 		info->src_ip[0] = cminfo->rem_addr[0];
2643 		info->src_ip[1] = cminfo->rem_addr[1];
2644 		info->src_ip[2] = cminfo->rem_addr[2];
2645 		info->src_ip[3] = cminfo->rem_addr[3];
2646 	}
2647 	if (cmnode) {
2648 		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
2649 		cqp_request->param = cmnode;
2650 		if (!wait)
2651 			refcount_inc(&cm_node->refcnt);
2652 	}
2653 	if (info->ipv4_valid)
2654 		ibdev_dbg(&iwdev->ibdev,
2655 			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
2656 			  (!mtype) ? "DELETE" : "ADD",
2657 			  __builtin_return_address(0), info->dest_port,
2658 			  info->src_port, info->dest_ip, info->src_ip,
2659 			  info->mac_addr, cminfo->vlan_id,
2660 			  cmnode ? cmnode : NULL);
2661 	else
2662 		ibdev_dbg(&iwdev->ibdev,
2663 			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
2664 			  (!mtype) ? "DELETE" : "ADD",
2665 			  __builtin_return_address(0), info->dest_port,
2666 			  info->src_port, info->dest_ip, info->src_ip,
2667 			  info->mac_addr, cminfo->vlan_id,
2668 			  cmnode ? cmnode : NULL);
2669 
2670 	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2671 	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2672 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
2673 	cqp_info->post_sq = 1;
2674 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2675 	if (status && cm_node && !wait)
2676 		irdma_rem_ref_cm_node(cm_node);
2677 
2678 	irdma_put_cqp_request(iwcqp, cqp_request);
2679 
2680 	return status;
2681 }
2682 
2683 /**
2684  * irdma_hw_flush_wqes_callback - Check return code after flush
2685  * @cqp_request: qhash cqp completion
2686  */
2687 static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
2688 {
2689 	struct irdma_qp_flush_info *hw_info;
2690 	struct irdma_sc_qp *qp;
2691 	struct irdma_qp *iwqp;
2692 	struct cqp_cmds_info *cqp_info;
2693 
2694 	cqp_info = &cqp_request->info;
2695 	hw_info = &cqp_info->in.u.qp_flush_wqes.info;
2696 	qp = cqp_info->in.u.qp_flush_wqes.qp;
2697 	iwqp = qp->qp_uk.back_qp;
2698 
2699 	if (cqp_request->compl_info.maj_err_code)
2700 		return;
2701 
2702 	if (hw_info->rq &&
2703 	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2704 	     cqp_request->compl_info.min_err_code == 0)) {
2705 		/* RQ WQE flush was requested but did not happen */
2706 		qp->qp_uk.rq_flush_complete = true;
2707 	}
2708 	if (hw_info->sq &&
2709 	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2710 	     cqp_request->compl_info.min_err_code == 0)) {
2711 		if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2712 			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
2713 				  qp->qp_uk.qp_id);
2714 			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2715 		}
2716 		qp->qp_uk.sq_flush_complete = true;
2717 	}
2718 }
2719 
2720 /**
2721  * irdma_hw_flush_wqes - flush qp's wqe
2722  * @rf: RDMA PCI function
2723  * @qp: hardware control qp
2724  * @info: info for flush
2725  * @wait: flag wait for completion
2726  */
2727 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2728 			struct irdma_qp_flush_info *info, bool wait)
2729 {
2730 	int status;
2731 	struct irdma_qp_flush_info *hw_info;
2732 	struct irdma_cqp_request *cqp_request;
2733 	struct cqp_cmds_info *cqp_info;
2734 	struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2735 
2736 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2737 	if (!cqp_request)
2738 		return -ENOMEM;
2739 
2740 	cqp_info = &cqp_request->info;
2741 	if (!wait)
2742 		cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
2743 	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
2744 	memcpy(hw_info, info, sizeof(*hw_info));
2745 	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2746 	cqp_info->post_sq = 1;
2747 	cqp_info->in.u.qp_flush_wqes.qp = qp;
2748 	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
2749 	status = irdma_handle_cqp_op(rf, cqp_request);
2750 	if (status) {
2751 		qp->qp_uk.sq_flush_complete = true;
2752 		qp->qp_uk.rq_flush_complete = true;
2753 		irdma_put_cqp_request(&rf->cqp, cqp_request);
2754 		return status;
2755 	}
2756 
2757 	if (!wait || cqp_request->compl_info.maj_err_code)
2758 		goto put_cqp;
2759 
2760 	if (info->rq) {
2761 		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2762 		    cqp_request->compl_info.min_err_code == 0) {
2763 			/* RQ WQE flush was requested but did not happen */
2764 			qp->qp_uk.rq_flush_complete = true;
2765 		}
2766 	}
2767 	if (info->sq) {
2768 		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2769 		    cqp_request->compl_info.min_err_code == 0) {
2770 			/*
2771 			 * Handling case where WQE is posted to empty SQ when
2772 			 * flush has not completed
2773 			 */
2774 			if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
2775 				struct irdma_cqp_request *new_req;
2776 
2777 				if (!qp->qp_uk.sq_flush_complete)
2778 					goto put_cqp;
2779 				qp->qp_uk.sq_flush_complete = false;
2780 				qp->flush_sq = false;
2781 
2782 				info->rq = false;
2783 				info->sq = true;
2784 				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2785 				if (!new_req) {
2786 					status = -ENOMEM;
2787 					goto put_cqp;
2788 				}
2789 				cqp_info = &new_req->info;
2790 				hw_info = &new_req->info.in.u.qp_flush_wqes.info;
2791 				memcpy(hw_info, info, sizeof(*hw_info));
2792 				cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2793 				cqp_info->post_sq = 1;
2794 				cqp_info->in.u.qp_flush_wqes.qp = qp;
2795 				cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
2796 
2797 				status = irdma_handle_cqp_op(rf, new_req);
2798 				if (new_req->compl_info.maj_err_code ||
2799 				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2800 				    status) {
2801 					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
2802 						  iwqp->ibqp.qp_num);
2803 					qp->qp_uk.sq_flush_complete = false;
2804 					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2805 				}
2806 				irdma_put_cqp_request(&rf->cqp, new_req);
2807 			} else {
2808 				/* SQ WQE flush was requested but did not happen */
2809 				qp->qp_uk.sq_flush_complete = true;
2810 			}
2811 		} else {
2812 			if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
2813 				qp->qp_uk.sq_flush_complete = true;
2814 		}
2815 	}
2816 
2817 	ibdev_dbg(&rf->iwdev->ibdev,
2818 		  "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
2819 		  iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2820 		  iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
2821 		  cqp_request->compl_info.maj_err_code,
2822 		  cqp_request->compl_info.min_err_code);
2823 put_cqp:
2824 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2825 
2826 	return status;
2827 }
2828 
2829 /**
2830  * irdma_gen_ae - generate AE
2831  * @rf: RDMA PCI function
2832  * @qp: qp associated with AE
2833  * @info: info for ae
2834  * @wait: wait for completion
2835  */
2836 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2837 		  struct irdma_gen_ae_info *info, bool wait)
2838 {
2839 	struct irdma_gen_ae_info *ae_info;
2840 	struct irdma_cqp_request *cqp_request;
2841 	struct cqp_cmds_info *cqp_info;
2842 
2843 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2844 	if (!cqp_request)
2845 		return;
2846 
2847 	cqp_info = &cqp_request->info;
2848 	ae_info = &cqp_request->info.in.u.gen_ae.info;
2849 	memcpy(ae_info, info, sizeof(*ae_info));
2850 	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
2851 	cqp_info->post_sq = 1;
2852 	cqp_info->in.u.gen_ae.qp = qp;
2853 	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
2854 
2855 	irdma_handle_cqp_op(rf, cqp_request);
2856 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2857 }
2858 
2859 void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2860 {
2861 	struct irdma_qp_flush_info info = {};
2862 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
2863 	u8 flush_code = iwqp->sc_qp.flush_code;
2864 
2865 	if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
2866 	     !(flush_mask & IRDMA_FLUSH_RQ)) ||
2867 	    ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
2868 		return;
2869 
	/* Set flush info fields */
2871 	info.sq = flush_mask & IRDMA_FLUSH_SQ;
2872 	info.rq = flush_mask & IRDMA_FLUSH_RQ;
2873 
2874 	/* Generate userflush errors in CQE */
2875 	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2876 	info.sq_minor_code = FLUSH_GENERAL_ERR;
2877 	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2878 	info.rq_minor_code = FLUSH_GENERAL_ERR;
2879 	info.userflushcode = true;
2880 	info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
2881 	info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
2882 	info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
2883 	info.err_rq_idx = iwqp->sc_qp.err_rq_idx;
2884 
2885 	if (flush_mask & IRDMA_REFLUSH) {
2886 		if (info.sq)
2887 			iwqp->sc_qp.flush_sq = false;
2888 		if (info.rq)
2889 			iwqp->sc_qp.flush_rq = false;
2890 	} else {
2891 		if (flush_code) {
2892 			if (info.sq && iwqp->sc_qp.sq_flush_code)
2893 				info.sq_minor_code = flush_code;
2894 			if (info.rq && iwqp->sc_qp.rq_flush_code)
2895 				info.rq_minor_code = flush_code;
2896 		}
2897 		if (!iwqp->user_mode)
2898 			queue_delayed_work(iwqp->iwdev->cleanup_wq,
2899 					   &iwqp->dwork_flush,
2900 					   msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
2901 	}
2902 
2903 	/* Issue flush */
2904 	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2905 				  flush_mask & IRDMA_FLUSH_WAIT);
2906 	iwqp->flush_issued = true;
2907 }
2908