xref: /freebsd/sys/dev/irdma/irdma_hw.c (revision f5463265955b829775bbb32e1fd0bc11dafc36ce)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2023 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include "irdma_main.h"
36 
37 static struct irdma_rsrc_limits rsrc_limits_table[] = {
38 	[0] = {
39 		.qplimit = SZ_128,
40 	},
41 	[1] = {
42 		.qplimit = SZ_1K,
43 	},
44 	[2] = {
45 		.qplimit = SZ_2K,
46 	},
47 	[3] = {
48 		.qplimit = SZ_4K,
49 	},
50 	[4] = {
51 		.qplimit = SZ_16K,
52 	},
53 	[5] = {
54 		.qplimit = SZ_64K,
55 	},
56 	[6] = {
57 		.qplimit = SZ_128K,
58 	},
59 	[7] = {
60 		.qplimit = SZ_256K,
61 	},
62 };
63 
64 /* types of hmc objects */
65 static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
66 	IRDMA_HMC_IW_QP,
67 	IRDMA_HMC_IW_CQ,
68 	IRDMA_HMC_IW_HTE,
69 	IRDMA_HMC_IW_ARP,
70 	IRDMA_HMC_IW_APBVT_ENTRY,
71 	IRDMA_HMC_IW_MR,
72 	IRDMA_HMC_IW_XF,
73 	IRDMA_HMC_IW_XFFL,
74 	IRDMA_HMC_IW_Q1,
75 	IRDMA_HMC_IW_Q1FL,
76 	IRDMA_HMC_IW_PBLE,
77 	IRDMA_HMC_IW_TIMER,
78 	IRDMA_HMC_IW_FSIMC,
79 	IRDMA_HMC_IW_FSIAV,
80 	IRDMA_HMC_IW_RRF,
81 	IRDMA_HMC_IW_RRFFL,
82 	IRDMA_HMC_IW_HDR,
83 	IRDMA_HMC_IW_MD,
84 	IRDMA_HMC_IW_OOISC,
85 	IRDMA_HMC_IW_OOISCFFL,
86 };
87 
88 /**
89  * irdma_iwarp_ce_handler - handle iwarp completions
90  * @iwcq: iwarp cq receiving event
91  */
92 static void
93 irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
94 {
95 	struct irdma_cq *cq = iwcq->back_cq;
96 
97 	if (!cq->user_mode)
98 		atomic_set(&cq->armed, 0);
99 	if (cq->ibcq.comp_handler)
100 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
101 }
102 
103 /**
104  * irdma_puda_ce_handler - handle puda completion events
105  * @rf: RDMA PCI function
106  * @cq: puda completion q for event
107  */
108 static void
109 irdma_puda_ce_handler(struct irdma_pci_f *rf,
110 		      struct irdma_sc_cq *cq)
111 {
112 	struct irdma_sc_dev *dev = &rf->sc_dev;
113 	u32 compl_error;
114 	int status;
115 
116 	do {
117 		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
118 		if (status == -ENOENT)
119 			break;
120 		if (status) {
121 			irdma_debug(dev, IRDMA_DEBUG_ERR, "puda status = %d\n", status);
122 			break;
123 		}
124 		if (compl_error) {
125 			irdma_debug(dev, IRDMA_DEBUG_ERR,
126 				    "puda compl_err = 0x%x\n", compl_error);
127 			break;
128 		}
129 	} while (1);
130 
131 	irdma_sc_ccq_arm(cq);
132 }
133 
134 /**
135  * irdma_process_ceq - handle ceq for completions
136  * @rf: RDMA PCI function
137  * @ceq: ceq having cq for completion
138  */
139 static void
140 irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
141 {
142 	struct irdma_sc_dev *dev = &rf->sc_dev;
143 	struct irdma_sc_ceq *sc_ceq;
144 	struct irdma_sc_cq *cq;
145 	unsigned long flags;
146 
147 	sc_ceq = &ceq->sc_ceq;
148 	do {
149 		spin_lock_irqsave(&ceq->ce_lock, flags);
150 		cq = irdma_sc_process_ceq(dev, sc_ceq);
151 		if (!cq) {
152 			spin_unlock_irqrestore(&ceq->ce_lock, flags);
153 			break;
154 		}
155 
156 		if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
157 			irdma_iwarp_ce_handler(cq);
158 
159 		spin_unlock_irqrestore(&ceq->ce_lock, flags);
160 
161 		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
162 			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
163 		else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
164 			 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
165 			irdma_puda_ce_handler(rf, cq);
166 	} while (1);
167 }
168 
169 static void
170 irdma_set_flush_fields(struct irdma_sc_qp *qp,
171 		       struct irdma_aeqe_info *info)
172 {
173 	struct qp_err_code qp_err;
174 
175 	qp->sq_flush_code = info->sq;
176 	qp->rq_flush_code = info->rq;
177 	qp_err = irdma_ae_to_qp_err_code(info->ae_id);
178 
179 	qp->flush_code = qp_err.flush_code;
180 	qp->event_type = qp_err.event_type;
181 }
182 
183 /**
184  * irdma_complete_cqp_request - perform post-completion cleanup
185  * @cqp: device CQP
186  * @cqp_request: CQP request
187  *
188  * Mark CQP request as done, wake up waiting thread or invoke
189  * callback function and release/free CQP request.
190  */
191 static void
192 irdma_complete_cqp_request(struct irdma_cqp *cqp,
193 			   struct irdma_cqp_request *cqp_request)
194 {
195 	WRITE_ONCE(cqp_request->request_done, true);
196 	if (cqp_request->waiting)
197 		wake_up(&cqp_request->waitq);
198 	else if (cqp_request->callback_fcn)
199 		cqp_request->callback_fcn(cqp_request);
200 	irdma_put_cqp_request(cqp, cqp_request);
201 }
202 
203 /**
204  * irdma_process_aeq - handle aeq events
205  * @rf: RDMA PCI function
206  */
207 static void
208 irdma_process_aeq(struct irdma_pci_f *rf)
209 {
210 	struct irdma_sc_dev *dev = &rf->sc_dev;
211 	struct irdma_aeq *aeq = &rf->aeq;
212 	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
213 	struct irdma_aeqe_info aeinfo;
214 	struct irdma_aeqe_info *info = &aeinfo;
215 	int ret;
216 	struct irdma_qp *iwqp = NULL;
217 	struct irdma_cq *iwcq = NULL;
218 	struct irdma_sc_qp *qp = NULL;
219 	struct irdma_device *iwdev = rf->iwdev;
220 	struct irdma_qp_host_ctx_info *ctx_info = NULL;
221 	unsigned long flags;
222 
223 	u32 aeqcnt = 0;
224 
225 	if (!sc_aeq->size)
226 		return;
227 
228 	do {
229 		memset(info, 0, sizeof(*info));
230 		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
231 		if (ret)
232 			break;
233 
234 		aeqcnt++;
235 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
236 			    "ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, tcp_state = %d, iwarp_state = %d, ae_src = %d\n",
237 			    info->ae_id, irdma_get_ae_desc(info->ae_id),
238 			    info->qp, info->qp_cq_id, info->tcp_state,
239 			    info->iwarp_state, info->ae_src);
240 
241 		if (info->qp) {
242 			spin_lock_irqsave(&rf->qptable_lock, flags);
243 			iwqp = rf->qp_table[info->qp_cq_id];
244 			if (!iwqp) {
245 				spin_unlock_irqrestore(&rf->qptable_lock,
246 						       flags);
247 				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
250 					if (!iwdev->vsi.tc_change_pending)
251 						continue;
252 
253 					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
254 					wake_up(&iwdev->suspend_wq);
255 					continue;
256 				}
257 				irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
258 					    "qp_id %d is already freed\n",
259 					    info->qp_cq_id);
260 				continue;
261 			}
262 			irdma_qp_add_ref(&iwqp->ibqp);
263 			spin_unlock_irqrestore(&rf->qptable_lock, flags);
264 			qp = &iwqp->sc_qp;
265 			spin_lock_irqsave(&iwqp->lock, flags);
266 			iwqp->hw_tcp_state = info->tcp_state;
267 			iwqp->hw_iwarp_state = info->iwarp_state;
268 			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
269 				iwqp->last_aeq = info->ae_id;
270 			spin_unlock_irqrestore(&iwqp->lock, flags);
271 			ctx_info = &iwqp->ctx_info;
272 		} else {
273 			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
274 				continue;
275 		}
276 
277 		switch (info->ae_id) {
278 			struct irdma_cm_node *cm_node;
279 		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
280 			cm_node = iwqp->cm_node;
281 			if (cm_node->accept_pend) {
282 				atomic_dec(&cm_node->listener->pend_accepts_cnt);
283 				cm_node->accept_pend = 0;
284 			}
285 			iwqp->rts_ae_rcvd = 1;
286 			wake_up_interruptible(&iwqp->waitq);
287 			break;
288 		case IRDMA_AE_LLP_FIN_RECEIVED:
289 			if (qp->term_flags)
290 				break;
291 			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
292 				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
293 				if (iwqp->ibqp_state == IB_QPS_RTS) {
294 					irdma_next_iw_state(iwqp,
295 							    IRDMA_QP_STATE_CLOSING,
296 							    0, 0, 0);
297 					irdma_cm_disconn(iwqp);
298 				}
299 				irdma_schedule_cm_timer(iwqp->cm_node,
300 							(struct irdma_puda_buf *)iwqp,
301 							IRDMA_TIMER_TYPE_CLOSE,
302 							1, 0);
303 			}
304 			break;
305 		case IRDMA_AE_LLP_CLOSE_COMPLETE:
306 			if (qp->term_flags)
307 				irdma_terminate_done(qp, 0);
308 			else
309 				irdma_cm_disconn(iwqp);
310 			break;
311 		case IRDMA_AE_BAD_CLOSE:
312 		case IRDMA_AE_RESET_SENT:
313 			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
314 					    0);
315 			irdma_cm_disconn(iwqp);
316 			break;
317 		case IRDMA_AE_LLP_CONNECTION_RESET:
318 			if (atomic_read(&iwqp->close_timer_started))
319 				break;
320 			irdma_cm_disconn(iwqp);
321 			break;
322 		case IRDMA_AE_QP_SUSPEND_COMPLETE:
323 			if (iwqp->iwdev->vsi.tc_change_pending) {
324 				if (!atomic_dec_return(&iwqp->sc_qp.vsi->qp_suspend_reqs))
325 					wake_up(&iwqp->iwdev->suspend_wq);
326 			}
327 			if (iwqp->suspend_pending) {
328 				iwqp->suspend_pending = false;
329 				wake_up(&iwqp->iwdev->suspend_wq);
330 			}
331 			break;
332 		case IRDMA_AE_TERMINATE_SENT:
333 			irdma_terminate_send_fin(qp);
334 			break;
335 		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
336 			irdma_terminate_received(qp, info);
337 			break;
338 		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
339 		case IRDMA_AE_CQ_OPERATION_ERROR:
340 			irdma_dev_err(&iwdev->ibdev,
341 				      "Processing CQ[0x%x] op error, AE 0x%04X\n",
342 				      info->qp_cq_id, info->ae_id);
343 			spin_lock_irqsave(&rf->cqtable_lock, flags);
344 			iwcq = rf->cq_table[info->qp_cq_id];
345 			if (!iwcq) {
346 				spin_unlock_irqrestore(&rf->cqtable_lock,
347 						       flags);
348 				irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ,
349 					    "cq_id %d is already freed\n",
350 					    info->qp_cq_id);
351 				continue;
352 			}
353 			irdma_cq_add_ref(&iwcq->ibcq);
354 			spin_unlock_irqrestore(&rf->cqtable_lock, flags);
355 			if (iwcq->ibcq.event_handler) {
356 				struct ib_event ibevent;
357 
358 				ibevent.device = iwcq->ibcq.device;
359 				ibevent.event = IB_EVENT_CQ_ERR;
360 				ibevent.element.cq = &iwcq->ibcq;
361 				iwcq->ibcq.event_handler(&ibevent,
362 							 iwcq->ibcq.cq_context);
363 			}
364 			irdma_cq_rem_ref(&iwcq->ibcq);
365 			break;
366 		case IRDMA_AE_RESET_NOT_SENT:
367 		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
368 			break;
369 		case IRDMA_AE_RESOURCE_EXHAUSTION:
370 			irdma_dev_err(&iwdev->ibdev,
371 				      "Resource exhaustion reason: q1 = %d xmit or rreq = %d\n",
372 				      info->ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_Q1,
373 				      info->ae_src == IRDMA_AE_SOURCE_RSRC_EXHT_XT_RR);
374 			break;
375 		case IRDMA_AE_PRIV_OPERATION_DENIED:
376 		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
377 		case IRDMA_AE_STAG_ZERO_INVALID:
378 		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
379 		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
380 		case IRDMA_AE_DDP_UBE_INVALID_MO:
381 		case IRDMA_AE_DDP_UBE_INVALID_QN:
382 		case IRDMA_AE_DDP_NO_L_BIT:
383 		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
384 		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
385 		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
386 		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
387 		case IRDMA_AE_INVALID_ARP_ENTRY:
388 		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
389 		case IRDMA_AE_STALE_ARP_ENTRY:
390 		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
391 		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
392 		case IRDMA_AE_LLP_SYN_RECEIVED:
393 		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
394 		case IRDMA_AE_LCE_QP_CATASTROPHIC:
395 		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
396 		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
397 		default:
398 			irdma_dev_err(&iwdev->ibdev,
399 				      "AEQ: abnormal ae_id = 0x%x (%s), is_qp = %d, qp_id = %d, ae_source = %d\n",
400 				      info->ae_id, irdma_get_ae_desc(info->ae_id),
401 				      info->qp, info->qp_cq_id, info->ae_src);
402 			if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
403 				ctx_info->roce_info->err_rq_idx_valid = info->err_rq_idx_valid;
404 				if (info->rq) {
405 					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
406 					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
407 								ctx_info);
408 				}
409 				irdma_set_flush_fields(qp, info);
410 				irdma_cm_disconn(iwqp);
411 				break;
412 			}
413 			ctx_info->iwarp_info->err_rq_idx_valid = info->err_rq_idx_valid;
414 			if (info->rq) {
415 				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
416 				ctx_info->tcp_info_valid = false;
417 				ctx_info->iwarp_info_valid = true;
418 				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
419 						   ctx_info);
420 			}
421 			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
422 			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
423 				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
424 				irdma_cm_disconn(iwqp);
425 			} else {
426 				irdma_terminate_connection(qp, info);
427 			}
428 			break;
429 		}
430 		if (info->qp)
431 			irdma_qp_rem_ref(&iwqp->ibqp);
432 	} while (1);
433 
434 	if (aeqcnt)
435 		irdma_sc_repost_aeq_entries(dev, aeqcnt);
436 }
437 
438 /**
439  * irdma_ena_intr - set up device interrupts
440  * @dev: hardware control device structure
441  * @msix_id: id of the interrupt to be enabled
442  */
443 static void
444 irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
445 {
446 	dev->irq_ops->irdma_en_irq(dev, msix_id);
447 }
448 
449 /**
450  * irdma_dpc - tasklet for aeq and ceq 0
451  * @t: tasklet_struct ptr
452  */
453 static void
454 irdma_dpc(unsigned long t)
455 {
456 	struct irdma_pci_f *rf = from_tasklet(rf, (struct tasklet_struct *)t,
457 					      dpc_tasklet);
458 
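	/* MSI-X vector 0 services the AEQ and, when shared, CEQ 0 as well. */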
459 	if (rf->msix_shared)
460 		irdma_process_ceq(rf, rf->ceqlist);
461 	irdma_process_aeq(rf);
462 	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
463 }
464 
465 /**
466  * irdma_ceq_dpc - dpc handler for CEQ
467  * @t: tasklet_struct ptr
468  */
469 static void
470 irdma_ceq_dpc(unsigned long t)
471 {
472 	struct irdma_ceq *iwceq = from_tasklet(iwceq, (struct tasklet_struct *)t,
473 					       dpc_tasklet);
474 	struct irdma_pci_f *rf = iwceq->rf;
475 
476 	irdma_process_ceq(rf, iwceq);
477 	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
478 }
479 
480 /**
481  * irdma_save_msix_info - copy msix vector information to iwarp device
482  * @rf: RDMA PCI function
483  *
484  * Allocate iwdev msix table and copy the msix info to the table
485  * Return 0 if successful, otherwise return error
486  */
487 static int
488 irdma_save_msix_info(struct irdma_pci_f *rf)
489 {
490 	struct irdma_qvlist_info *iw_qvlist;
491 	struct irdma_qv_info *iw_qvinfo;
492 	u32 ceq_idx;
493 	u32 i;
494 	u32 size;
495 
496 	if (!rf->msix_count) {
497 		irdma_dev_err(to_ibdev(&rf->sc_dev), "No MSI-X vectors reserved for RDMA.\n");
498 		return -EINVAL;
499 	}
500 
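	/*
	 * Single allocation: the MSI-X vector table (msix_count entries)
	 * followed by the qvlist_info header and its per-vector qv_info
	 * entries; iw_qvlist is pointed just past the vector table below.
	 */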
501 	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
502 	size += sizeof(struct irdma_qvlist_info);
503 	size += sizeof(struct irdma_qv_info) * rf->msix_count - 1;
504 	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
505 	if (!rf->iw_msixtbl)
506 		return -ENOMEM;
507 
508 	rf->iw_qvlist = (struct irdma_qvlist_info *)
509 	    (&rf->iw_msixtbl[rf->msix_count]);
510 	iw_qvlist = rf->iw_qvlist;
511 	iw_qvinfo = iw_qvlist->qv_info;
512 	iw_qvlist->num_vectors = rf->msix_count;
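	/*
	 * Share vector 0 between the AEQ and CEQ 0 when the vector count
	 * does not exceed the number of online CPUs; otherwise dedicate
	 * vector 0 to the AEQ and cap the total at one vector per online
	 * CPU plus one for the AEQ.
	 */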
513 	if (rf->msix_count <= num_online_cpus())
514 		rf->msix_shared = true;
515 	else if (rf->msix_count > num_online_cpus() + 1)
516 		rf->msix_count = num_online_cpus() + 1;
517 
518 	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
519 		rf->iw_msixtbl[i].idx = rf->msix_info.entry + i;
520 		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
521 		if (!i) {
522 			iw_qvinfo->aeq_idx = 0;
523 			if (rf->msix_shared)
524 				iw_qvinfo->ceq_idx = ceq_idx++;
525 			else
526 				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
527 		} else {
528 			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
529 			iw_qvinfo->ceq_idx = ceq_idx++;
530 		}
531 		iw_qvinfo->itr_idx = IRDMA_IDX_NOITR;
532 		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
533 	}
534 
535 	return 0;
536 }
537 
538 /**
539  * irdma_irq_handler - interrupt handler for aeq and ceq0
540  * @data: RDMA PCI function
541  */
542 static void
543 irdma_irq_handler(void *data)
544 {
545 	struct irdma_pci_f *rf = data;
546 
547 	tasklet_schedule(&rf->dpc_tasklet);
548 }
549 
550 /**
551  * irdma_ceq_handler - interrupt handler for ceq
552  * @data: ceq pointer
553  */
554 static void
555 irdma_ceq_handler(void *data)
556 {
557 	struct irdma_ceq *iwceq = data;
558 
559 	tasklet_schedule(&iwceq->dpc_tasklet);
560 }
561 
562 /**
563  * irdma_free_irq - free device interrupts in FreeBSD manner
564  * @rf: RDMA PCI function
565  * @msix_vec: msix vector to disable irq
566  *
567  * The function is called when destroying irq. It tearsdown
568  * the interrupt and release resources.
569  */
570 static void
571 irdma_free_irq(struct irdma_pci_f *rf, struct irdma_msix_vector *msix_vec)
572 {
573 	if (msix_vec->tag) {
574 		bus_teardown_intr(rf->dev_ctx.dev, msix_vec->res,
575 				  msix_vec->tag);
576 		msix_vec->tag = NULL;
577 	}
578 	if (msix_vec->res) {
579 		bus_release_resource(rf->dev_ctx.dev, SYS_RES_IRQ,
580 				     msix_vec->idx + 1,
581 				     msix_vec->res);
582 		msix_vec->res = NULL;
583 	}
584 }
585 
586 /**
587  * irdma_destroy_irq - destroy device interrupts
588  * @rf: RDMA PCI function
589  * @msix_vec: msix vector to disable irq
590  * @dev_id: parameter to pass to free_irq (used during irq setup)
591  *
592  * The function is called when destroying aeq/ceq
593  */
594 static void
595 irdma_destroy_irq(struct irdma_pci_f *rf,
596 		  struct irdma_msix_vector *msix_vec, void *dev_id)
597 {
598 	struct irdma_sc_dev *dev = &rf->sc_dev;
599 
600 	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
601 	irdma_free_irq(rf, msix_vec);
602 }
603 
604 /**
605  * irdma_destroy_cqp  - destroy control qp
606  * @rf: RDMA PCI function
607  * @free_hwcqp: true if hw cqp should be freed
608  *
609  * Issue destroy cqp request and
610  * free the resources associated with the cqp
611  */
612 static void
613 irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
614 {
615 	struct irdma_sc_dev *dev = &rf->sc_dev;
616 	struct irdma_cqp *cqp = &rf->cqp;
617 	int status = 0;
618 
619 	if (rf->cqp_cmpl_wq)
620 		destroy_workqueue(rf->cqp_cmpl_wq);
621 	status = irdma_sc_cqp_destroy(dev->cqp, free_hwcqp);
622 	if (status)
623 		irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy CQP failed %d\n", status);
624 
625 	irdma_cleanup_pending_cqp_op(rf);
626 	irdma_free_dma_mem(dev->hw, &cqp->sq);
627 	kfree(cqp->scratch_array);
628 	cqp->scratch_array = NULL;
629 	kfree(cqp->cqp_requests);
630 	cqp->cqp_requests = NULL;
631 }
632 
633 static void
634 irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
635 {
636 	struct irdma_aeq *aeq = &rf->aeq;
637 	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
638 	dma_addr_t *pg_arr = (dma_addr_t *) aeq->palloc.level1.addr;
639 
640 	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
641 	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
642 	vfree(aeq->mem.va);
643 }
644 
645 /**
646  * irdma_destroy_aeq - destroy aeq
647  * @rf: RDMA PCI function
648  *
649  * Issue a destroy aeq request and
650  * free the resources associated with the aeq
651  * The function is called during driver unload
652  */
653 static void
654 irdma_destroy_aeq(struct irdma_pci_f *rf)
655 {
656 	struct irdma_sc_dev *dev = &rf->sc_dev;
657 	struct irdma_aeq *aeq = &rf->aeq;
658 	int status = -EBUSY;
659 
660 	if (!rf->msix_shared) {
661 		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
662 		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
663 	}
664 	if (rf->reset)
665 		goto exit;
666 
667 	aeq->sc_aeq.size = 0;
668 	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
669 	if (status)
670 		irdma_debug(dev, IRDMA_DEBUG_ERR, "Destroy AEQ failed %d\n", status);
671 
672 exit:
673 	if (aeq->virtual_map)
674 		irdma_destroy_virt_aeq(rf);
675 	else
676 		irdma_free_dma_mem(dev->hw, &aeq->mem);
677 }
678 
679 /**
680  * irdma_destroy_ceq - destroy ceq
681  * @rf: RDMA PCI function
682  * @iwceq: ceq to be destroyed
683  *
684  * Issue a destroy ceq request and
685  * free the resources associated with the ceq
686  */
687 static void
688 irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
689 {
690 	struct irdma_sc_dev *dev = &rf->sc_dev;
691 	int status;
692 
693 	if (rf->reset)
694 		goto exit;
695 
696 	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
697 	if (status) {
698 		irdma_debug(dev, IRDMA_DEBUG_ERR, "CEQ destroy command failed %d\n", status);
699 		goto exit;
700 	}
701 
702 	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
703 	if (status)
704 		irdma_debug(dev, IRDMA_DEBUG_ERR,
705 			    "CEQ destroy completion failed %d\n", status);
706 exit:
707 	spin_lock_destroy(&iwceq->ce_lock);
708 	spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock);
709 	kfree(iwceq->sc_ceq.reg_cq);
710 	irdma_free_dma_mem(dev->hw, &iwceq->mem);
711 }
712 
713 /**
714  * irdma_del_ceq_0 - destroy ceq 0
715  * @rf: RDMA PCI function
716  *
717  * Disable the ceq 0 interrupt and destroy the ceq 0
718  */
719 static void
720 irdma_del_ceq_0(struct irdma_pci_f *rf)
721 {
722 	struct irdma_ceq *iwceq = rf->ceqlist;
723 	struct irdma_msix_vector *msix_vec;
724 
725 	if (rf->msix_shared) {
726 		msix_vec = &rf->iw_msixtbl[0];
727 		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
728 						  msix_vec->ceq_id,
729 						  msix_vec->idx, false);
730 		irdma_destroy_irq(rf, msix_vec, rf);
731 	} else {
732 		msix_vec = &rf->iw_msixtbl[1];
733 		irdma_destroy_irq(rf, msix_vec, iwceq);
734 	}
735 
736 	irdma_destroy_ceq(rf, iwceq);
737 	rf->sc_dev.ceq_valid = false;
738 	rf->ceqs_count = 0;
739 }
740 
741 /**
742  * irdma_del_ceqs - destroy all ceqs except CEQ 0
743  * @rf: RDMA PCI function
744  *
745  * Go through all of the device ceqs, except 0, and for each
746  * ceq disable the ceq interrupt and destroy the ceq
747  */
748 static void
749 irdma_del_ceqs(struct irdma_pci_f *rf)
750 {
751 	struct irdma_ceq *iwceq = &rf->ceqlist[1];
752 	struct irdma_msix_vector *msix_vec;
753 	u32 i = 0;
754 
755 	if (rf->msix_shared)
756 		msix_vec = &rf->iw_msixtbl[1];
757 	else
758 		msix_vec = &rf->iw_msixtbl[2];
759 
760 	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
761 		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
762 						  msix_vec->idx, false);
763 		irdma_destroy_irq(rf, msix_vec, iwceq);
764 		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
765 				  IRDMA_OP_CEQ_DESTROY);
766 		spin_lock_destroy(&iwceq->ce_lock);
767 		spin_lock_destroy(&iwceq->sc_ceq.req_cq_lock);
768 		kfree(iwceq->sc_ceq.reg_cq);
769 		irdma_free_dma_mem(rf->sc_dev.hw, &iwceq->mem);
770 	}
771 	rf->ceqs_count = 1;
772 }
773 
774 /**
775  * irdma_destroy_ccq - destroy control cq
776  * @rf: RDMA PCI function
777  *
778  * Issue destroy ccq request and
779  * free the resources associated with the ccq
780  */
781 static void
782 irdma_destroy_ccq(struct irdma_pci_f *rf)
783 {
784 	struct irdma_sc_dev *dev = &rf->sc_dev;
785 	struct irdma_ccq *ccq = &rf->ccq;
786 	int status = 0;
787 
788 	if (!rf->reset)
789 		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
790 	if (status)
791 		irdma_debug(dev, IRDMA_DEBUG_ERR, "CCQ destroy failed %d\n", status);
792 	irdma_free_dma_mem(dev->hw, &ccq->mem_cq);
793 }
794 
795 /**
796  * irdma_close_hmc_objects_type - delete hmc objects of a given type
797  * @dev: iwarp device
798  * @obj_type: the hmc object type to be deleted
799  * @hmc_info: host memory info struct
800  * @privileged: permission to close HMC objects
801  * @reset: true if called before reset
802  */
803 static void
804 irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
805 			     enum irdma_hmc_rsrc_type obj_type,
806 			     struct irdma_hmc_info *hmc_info,
807 			     bool privileged, bool reset)
808 {
809 	struct irdma_hmc_del_obj_info info = {0};
810 
811 	info.hmc_info = hmc_info;
812 	info.rsrc_type = obj_type;
813 	info.count = hmc_info->hmc_obj[obj_type].cnt;
814 	info.privileged = privileged;
815 	if (irdma_sc_del_hmc_obj(dev, &info, reset))
816 		irdma_debug(dev, IRDMA_DEBUG_ERR,
817 			    "del HMC obj of type %d failed\n", obj_type);
818 }
819 
820 /**
821  * irdma_del_hmc_objects - remove all device hmc objects
822  * @dev: iwarp device
823  * @hmc_info: hmc_info to free
824  * @privileged: permission to delete HMC objects
825  * @reset: true if called before reset
826  * @vers: hardware version
827  */
828 void
829 irdma_del_hmc_objects(struct irdma_sc_dev *dev,
830 		      struct irdma_hmc_info *hmc_info, bool privileged,
831 		      bool reset, enum irdma_vers vers)
832 {
833 	unsigned int i;
834 
835 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
836 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
837 			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
838 						     hmc_info, privileged, reset);
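		/*
		 * GEN_1 devices do not implement the HMC object types past
		 * the timer object, so stop here.
		 */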
839 		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
840 			break;
841 	}
842 }
843 
844 /**
845  * irdma_create_hmc_obj_type - create hmc object of a given type
846  * @dev: hardware control device structure
847  * @info: information for the hmc object to create
848  */
849 static int
850 irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
851 			  struct irdma_hmc_create_obj_info *info)
852 {
853 	return irdma_sc_create_hmc_obj(dev, info);
854 }
855 
856 /**
857  * irdma_create_hmc_objs - create all hmc objects for the device
858  * @rf: RDMA PCI function
859  * @privileged: permission to create HMC objects
860  * @vers: HW version
861  *
862  * Create the device hmc objects and allocate hmc pages
863  * Return 0 if successful, otherwise clean up and return error
864  */
865 static int
866 irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
867 		      enum irdma_vers vers)
868 {
869 	struct irdma_sc_dev *dev = &rf->sc_dev;
870 	struct irdma_hmc_create_obj_info info = {0};
871 	int i, status = 0;
872 
873 	info.hmc_info = dev->hmc_info;
874 	info.privileged = privileged;
875 	info.entry_type = rf->sd_type;
876 
877 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
878 		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
879 			continue;
880 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
881 			info.rsrc_type = iw_hmc_obj_types[i];
882 			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
883 			info.add_sd_cnt = 0;
884 			status = irdma_create_hmc_obj_type(dev, &info);
885 			if (status) {
886 				irdma_debug(dev, IRDMA_DEBUG_ERR,
887 					    "create obj type %d status = %d\n",
888 					    iw_hmc_obj_types[i], status);
889 				break;
890 			}
891 		}
892 		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
893 			break;
894 	}
895 
896 	if (!status)
897 		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
898 							   true, true);
899 
900 	while (i) {
901 		i--;
902 		/* destroy the hmc objects of a given type */
903 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
904 			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
905 						     dev->hmc_info, privileged,
906 						     false);
907 	}
908 
909 	return status;
910 }
911 
912 /**
913  * irdma_obj_aligned_mem - get aligned memory from device allocated memory
914  * @rf: RDMA PCI function
915  * @memptr: points to the memory addresses
916  * @size: size of memory needed
917  * @mask: mask for the aligned memory
918  *
919  * Get aligned memory of the requested size and
920  * update the memptr to point to the new aligned memory
921  * Return 0 if successful, otherwise return no memory error
922  */
923 static int
924 irdma_obj_aligned_mem(struct irdma_pci_f *rf,
925 		      struct irdma_dma_mem *memptr, u32 size,
926 		      u32 mask)
927 {
928 	unsigned long va, newva;
929 	unsigned long extra;
930 
931 	va = (unsigned long)rf->obj_next.va;
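	/*
	 * 'mask' is the required alignment minus one: round the bump
	 * pointer up to the next (mask + 1)-byte boundary and apply the
	 * same offset to the physical address so va and pa stay in sync.
	 */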
932 	newva = va;
933 	if (mask)
934 		newva = ALIGN(va, (unsigned long)mask + 1ULL);
935 	extra = newva - va;
936 	memptr->va = (u8 *)va + extra;
937 	memptr->pa = rf->obj_next.pa + extra;
938 	memptr->size = size;
939 	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
940 		return -ENOMEM;
941 
942 	rf->obj_next.va = (u8 *)memptr->va + size;
943 	rf->obj_next.pa = memptr->pa + size;
944 
945 	return 0;
946 }
947 
948 /**
949  * irdma_create_cqp - create control qp
950  * @rf: RDMA PCI function
951  *
952  * Return 0, if the cqp and all the resources associated with it
953  * are successfully created, otherwise return error
954  */
955 static int
956 irdma_create_cqp(struct irdma_pci_f *rf)
957 {
958 	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
959 	struct irdma_dma_mem mem;
960 	struct irdma_sc_dev *dev = &rf->sc_dev;
961 	struct irdma_cqp_init_info cqp_init_info = {0};
962 	struct irdma_cqp *cqp = &rf->cqp;
963 	u16 maj_err, min_err;
964 	int i, status;
965 
966 	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
968 	if (!cqp->cqp_requests)
969 		return -ENOMEM;
970 
971 	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
973 	if (!cqp->scratch_array) {
974 		status = -ENOMEM;
975 		goto err_scratch;
976 	}
977 
978 	dev->cqp = &cqp->sc_cqp;
979 	dev->cqp->dev = dev;
980 	cqp->sq.size = sizeof(struct irdma_cqp_sq_wqe) * sqsize;
981 	cqp->sq.va = irdma_allocate_dma_mem(dev->hw, &cqp->sq, cqp->sq.size,
982 					    IRDMA_CQP_ALIGNMENT);
983 	if (!cqp->sq.va) {
984 		status = -ENOMEM;
985 		goto err_sq;
986 	}
987 
988 	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
989 				       IRDMA_HOST_CTX_ALIGNMENT_M);
990 	if (status)
991 		goto err_ctx;
992 
993 	dev->cqp->host_ctx_pa = mem.pa;
994 	dev->cqp->host_ctx = mem.va;
995 	/* populate the cqp init info */
996 	cqp_init_info.dev = dev;
997 	cqp_init_info.sq_size = sqsize;
998 	cqp_init_info.sq = cqp->sq.va;
999 	cqp_init_info.sq_pa = cqp->sq.pa;
1000 	cqp_init_info.host_ctx_pa = mem.pa;
1001 	cqp_init_info.host_ctx = mem.va;
1002 	cqp_init_info.hmc_profile = rf->rsrc_profile;
1003 	cqp_init_info.scratch_array = cqp->scratch_array;
1004 	cqp_init_info.protocol_used = rf->protocol_used;
1005 	cqp_init_info.en_rem_endpoint_trk = rf->en_rem_endpoint_trk;
1006 	memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params,
1007 	       sizeof(cqp_init_info.dcqcn_params));
1008 
1009 	switch (rf->rdma_ver) {
1010 	case IRDMA_GEN_1:
1011 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
1012 		break;
1013 	case IRDMA_GEN_2:
1014 		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
1015 		break;
1016 	}
1017 	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
1018 	if (status) {
1019 		irdma_debug(dev, IRDMA_DEBUG_ERR, "cqp init status %d\n", status);
1020 		goto err_ctx;
1021 	}
1022 
1023 	spin_lock_init(&cqp->req_lock);
1024 	spin_lock_init(&cqp->compl_lock);
1025 
1026 	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
1027 	if (status) {
1028 		irdma_debug(dev, IRDMA_DEBUG_ERR,
1029 			    "cqp create failed - status %d maj_err %d min_err %d\n",
1030 			    status, maj_err, min_err);
1031 		goto err_ctx;
1032 	}
1033 
1034 	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
1035 	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
1036 
1037 	/* init the waitqueue of the cqp_requests and add them to the list */
1038 	for (i = 0; i < sqsize; i++) {
1039 		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
1040 		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
1041 	}
1042 	init_waitqueue_head(&cqp->remove_wq);
1043 	return 0;
1044 
1045 err_ctx:
1046 	irdma_free_dma_mem(dev->hw, &cqp->sq);
1047 err_sq:
1048 	kfree(cqp->scratch_array);
1049 	cqp->scratch_array = NULL;
1050 err_scratch:
1051 	kfree(cqp->cqp_requests);
1052 	cqp->cqp_requests = NULL;
1053 
1054 	return status;
1055 }
1056 
1057 /**
1058  * irdma_create_ccq - create control cq
1059  * @rf: RDMA PCI function
1060  *
1061  * Return 0, if the ccq and the resources associated with it
1062  * are successfully created, otherwise return error
1063  */
1064 static int
1065 irdma_create_ccq(struct irdma_pci_f *rf)
1066 {
1067 	struct irdma_sc_dev *dev = &rf->sc_dev;
1068 	struct irdma_ccq_init_info info = {0};
1069 	struct irdma_ccq *ccq = &rf->ccq;
1070 	int status;
1071 
1072 	dev->ccq = &ccq->sc_cq;
1073 	dev->ccq->dev = dev;
1074 	info.dev = dev;
1075 	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
1076 	ccq->mem_cq.size = sizeof(struct irdma_cqe) * IW_CCQ_SIZE;
1077 	ccq->mem_cq.va = irdma_allocate_dma_mem(dev->hw, &ccq->mem_cq,
1078 						ccq->mem_cq.size,
1079 						IRDMA_CQ0_ALIGNMENT);
1080 	if (!ccq->mem_cq.va)
1081 		return -ENOMEM;
1082 
1083 	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
1084 				       ccq->shadow_area.size,
1085 				       IRDMA_SHADOWAREA_M);
1086 	if (status)
1087 		goto exit;
1088 
1089 	ccq->sc_cq.back_cq = ccq;
1090 	/* populate the ccq init info */
1091 	info.cq_base = ccq->mem_cq.va;
1092 	info.cq_pa = ccq->mem_cq.pa;
1093 	info.num_elem = IW_CCQ_SIZE;
1094 	info.shadow_area = ccq->shadow_area.va;
1095 	info.shadow_area_pa = ccq->shadow_area.pa;
1096 	info.ceqe_mask = false;
1097 	info.ceq_id_valid = true;
1098 	info.shadow_read_threshold = 16;
1099 	info.vsi = &rf->default_vsi;
1100 	status = irdma_sc_ccq_init(dev->ccq, &info);
1101 	if (!status)
1102 		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
1103 exit:
1104 	if (status)
1105 		irdma_free_dma_mem(dev->hw, &ccq->mem_cq);
1106 
1107 	return status;
1108 }
1109 
1110 /**
1111  * irdma_alloc_set_mac - set up a mac address table entry
1112  * @iwdev: irdma device
1113  *
1114  * Allocate a mac ip entry and add it to the hw table. Return 0
1115  * if successful, otherwise return error
1116  */
1117 static int
1118 irdma_alloc_set_mac(struct irdma_device *iwdev)
1119 {
1120 	int status;
1121 
1122 	status = irdma_alloc_local_mac_entry(iwdev->rf,
1123 					     &iwdev->mac_ip_table_idx);
1124 	if (!status) {
1125 		status = irdma_add_local_mac_entry(iwdev->rf,
1126 						   (const u8 *)if_getlladdr(iwdev->netdev),
1127 						   (u8)iwdev->mac_ip_table_idx);
1128 		if (status)
1129 			irdma_del_local_mac_entry(iwdev->rf,
1130 						  (u8)iwdev->mac_ip_table_idx);
1131 	}
1132 	return status;
1133 }
1134 
1135 /**
1136  * irdma_irq_request - set up the msix interrupt vector
1137  * @rf: RDMA PCI function
1138  * @msix_vec: interrupt vector information
1139  * @handler: function pointer to associate with interrupt
1140  * @argument: argument passed to the handler
1141  *
1142  * Allocate interrupt resources and set up the interrupt handler
1143  * Return 0 if successful, otherwise return error
1144  * Note that bus_describe_intr() shall be called after this
1145  * function returns successfully.
1146  */
1147 static int
1148 irdma_irq_request(struct irdma_pci_f *rf,
1149 		  struct irdma_msix_vector *msix_vec,
1150 		  driver_intr_t handler, void *argument)
1151 {
1152 	device_t dev = rf->dev_ctx.dev;
1153 	int rid = msix_vec->idx + 1;
1154 	int err, status;
1155 
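	/*
	 * FreeBSD assigns MSI-X vectors SYS_RES_IRQ rids starting at 1,
	 * hence rid = idx + 1.
	 */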
1156 	msix_vec->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
1157 	if (!msix_vec->res) {
1158 		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
1159 			    "Unable to allocate bus resource int[%d]\n", rid);
1160 		return -EINVAL;
1161 	}
1162 	err = bus_setup_intr(dev, msix_vec->res, INTR_TYPE_NET | INTR_MPSAFE,
1163 			     NULL, handler, argument, &msix_vec->tag);
1164 	if (err) {
1165 		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
1166 			    "Unable to register handler with %x status\n", err);
1167 		status = -EINVAL;
1168 		goto fail_intr;
1169 	}
1170 	return 0;
1171 
1172 fail_intr:
1173 	bus_release_resource(dev, SYS_RES_IRQ, rid, msix_vec->res);
1174 	msix_vec->res = NULL;
1175 
1176 	return status;
1177 }
1178 
1179 /**
1180  * irdma_cfg_ceq_vector - set up the msix interrupt vector for
1181  * ceq
1182  * @rf: RDMA PCI function
1183  * @iwceq: ceq associated with the vector
1184  * @ceq_id: the id number of the iwceq
1185  * @msix_vec: interrupt vector information
1186  *
1187  * Allocate interrupt resources and enable irq handling
1188  * Return 0 if successful, otherwise return error
1189  */
1190 static int
1191 irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1192 		     u32 ceq_id, struct irdma_msix_vector *msix_vec)
1193 {
1194 	int status;
1195 
1196 	if (rf->msix_shared && !ceq_id) {
1197 		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1198 			 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
1199 		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1200 		status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
1201 		if (status)
1202 			return status;
1203 		bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
1204 	} else {
1205 		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1206 			 "irdma-%s-CEQ-%d",
1207 			 dev_name(&rf->pcidev->dev), ceq_id);
1208 		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
1209 
1210 		status = irdma_irq_request(rf, msix_vec, irdma_ceq_handler, iwceq);
1211 		if (status)
1212 			return status;
1213 		bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
1214 	}
1215 	msix_vec->ceq_id = ceq_id;
1216 	rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
1217 
1218 	return 0;
1219 }
1220 
1221 /**
1222  * irdma_cfg_aeq_vector - set up the msix vector for aeq
1223  * @rf: RDMA PCI function
1224  *
1225  * Allocate interrupt resources and enable irq handling
1226  * Return 0 if successful, otherwise return error
1227  */
1228 static int
1229 irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
1230 {
1231 	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
1232 	int status = 0;
1233 
1234 	if (!rf->msix_shared) {
1235 		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
1236 			 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
1237 		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
1238 		status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf);
1239 		if (status)
1240 			return status;
1241 		bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name);
1242 	}
1243 
1244 	if (status) {
1245 		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, "aeq irq config fail\n");
1246 		return status;
1247 	}
1248 
1249 	rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
1250 
1251 	return 0;
1252 }
1253 
1254 /**
1255  * irdma_create_ceq - create completion event queue
1256  * @rf: RDMA PCI function
1257  * @iwceq: pointer to the ceq resources to be created
1258  * @ceq_id: the id number of the iwceq
1259  * @vsi: SC vsi struct
1260  *
1261  * Return 0, if the ceq and the resources associated with it
1262  * are successfully created, otherwise return error
1263  */
1264 static int
1265 irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
1266 		 u32 ceq_id, struct irdma_sc_vsi *vsi)
1267 {
1268 	int status;
1269 	struct irdma_ceq_init_info info = {0};
1270 	struct irdma_sc_dev *dev = &rf->sc_dev;
1271 	u64 scratch;
1272 	u32 ceq_size;
1273 
1274 	info.ceq_id = ceq_id;
1275 	iwceq->rf = rf;
1276 	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
1277 		       dev->hw_attrs.max_hw_ceq_size);
1278 	iwceq->mem.size = sizeof(struct irdma_ceqe) * ceq_size;
1279 	iwceq->mem.va = irdma_allocate_dma_mem(dev->hw, &iwceq->mem,
1280 					       iwceq->mem.size,
1281 					       IRDMA_CEQ_ALIGNMENT);
1282 	if (!iwceq->mem.va)
1283 		return -ENOMEM;
1284 
1285 	info.ceq_id = ceq_id;
1286 	info.ceqe_base = iwceq->mem.va;
1287 	info.ceqe_pa = iwceq->mem.pa;
1288 	info.elem_cnt = ceq_size;
1289 	info.reg_cq = kzalloc(sizeof(struct irdma_sc_cq *) * info.elem_cnt, GFP_KERNEL);
	if (!info.reg_cq) {
		/* bail out early if the registered-CQ table cannot be allocated */
		irdma_free_dma_mem(dev->hw, &iwceq->mem);
		return -ENOMEM;
	}

1291 	iwceq->sc_ceq.ceq_id = ceq_id;
1292 	info.dev = dev;
1293 	info.vsi = vsi;
1294 	scratch = (uintptr_t)&rf->cqp.sc_cqp;
1295 	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
1296 	if (!status) {
1297 		if (dev->ceq_valid)
1298 			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
1299 						   IRDMA_OP_CEQ_CREATE);
1300 		else
1301 			status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
1302 	}
1303 
1304 	if (status) {
1305 		kfree(info.reg_cq);
1306 		irdma_free_dma_mem(dev->hw, &iwceq->mem);
1307 	}
1308 
1309 	return status;
1310 }
1311 
1312 /**
1313  * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
1314  * @rf: RDMA PCI function
1315  *
1316  * Allocate a list for all device completion event queues
1317  * Create the ceq 0 and configure its msix interrupt vector
1318  * Return 0, if successfully set up, otherwise return error
1319  */
1320 static int
1321 irdma_setup_ceq_0(struct irdma_pci_f *rf)
1322 {
1323 	struct irdma_ceq *iwceq;
1324 	struct irdma_msix_vector *msix_vec;
1325 	u32 i;
1326 	int status = 0;
1327 	u32 num_ceqs;
1328 
1329 	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
1330 	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
1332 	if (!rf->ceqlist) {
1333 		status = -ENOMEM;
1334 		goto exit;
1335 	}
1336 
1337 	iwceq = &rf->ceqlist[0];
1338 	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
1339 	if (status) {
1340 		irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
1341 			    "create ceq status = %d\n", status);
1342 		goto exit;
1343 	}
1344 
1345 	spin_lock_init(&iwceq->ce_lock);
1346 	i = rf->msix_shared ? 0 : 1;
1347 	msix_vec = &rf->iw_msixtbl[i];
1348 	iwceq->irq = msix_vec->irq;
1349 	iwceq->msix_idx = msix_vec->idx;
1350 	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
1351 	if (status) {
1352 		irdma_destroy_ceq(rf, iwceq);
1353 		goto exit;
1354 	}
1355 
1356 	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1357 	rf->ceqs_count++;
1358 
1359 exit:
1360 	if (status && !rf->ceqs_count) {
1361 		kfree(rf->ceqlist);
1362 		rf->ceqlist = NULL;
1363 		return status;
1364 	}
1365 	rf->sc_dev.ceq_valid = true;
1366 
1367 	return 0;
1368 }
1369 
1370 /**
1371  * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
1372  * @rf: RDMA PCI function
1373  * @vsi: VSI structure for this CEQ
1374  *
1375  * Allocate a list for all device completion event queues
1376  * Create the ceqs and configure their msix interrupt vectors
1377  * Return 0, if ceqs are successfully set up, otherwise return error
1378  */
1379 static int
1380 irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
1381 {
1382 	u32 i;
1383 	u32 ceq_id;
1384 	struct irdma_ceq *iwceq;
1385 	struct irdma_msix_vector *msix_vec;
1386 	int status;
1387 	u32 num_ceqs;
1388 
1389 	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
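	/*
	 * CEQ 0 was set up in irdma_setup_ceq_0() on vector 0 (shared with
	 * the AEQ) or on vector 1; the remaining CEQs start at the next
	 * MSI-X table entry.
	 */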
1390 	i = (rf->msix_shared) ? 1 : 2;
1391 	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
1392 		iwceq = &rf->ceqlist[ceq_id];
1393 		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
1394 		if (status) {
1395 			irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR,
1396 				    "create ceq status = %d\n", status);
1397 			goto del_ceqs;
1398 		}
1399 		spin_lock_init(&iwceq->ce_lock);
1400 		msix_vec = &rf->iw_msixtbl[i];
1401 		iwceq->irq = msix_vec->irq;
1402 		iwceq->msix_idx = msix_vec->idx;
1403 		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
1404 		if (status) {
1405 			irdma_destroy_ceq(rf, iwceq);
1406 			goto del_ceqs;
1407 		}
1408 		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
1409 		rf->ceqs_count++;
1410 	}
1411 
1412 	return 0;
1413 
1414 del_ceqs:
1415 	irdma_del_ceqs(rf);
1416 
1417 	return status;
1418 }
1419 
1420 static int
1421 irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
1422 {
1423 	struct irdma_aeq *aeq = &rf->aeq;
1424 	dma_addr_t *pg_arr;
1425 	u32 pg_cnt;
1426 	int status;
1427 
1428 	if (rf->rdma_ver < IRDMA_GEN_2)
1429 		return -EOPNOTSUPP;
1430 
1431 	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
1432 	aeq->mem.va = vzalloc(aeq->mem.size);
1433 
1434 	if (!aeq->mem.va)
1435 		return -ENOMEM;
1436 
1437 	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
1438 	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
1439 	if (status) {
1440 		vfree(aeq->mem.va);
1441 		return status;
1442 	}
1443 
1444 	pg_arr = (dma_addr_t *) aeq->palloc.level1.addr;
1445 	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
1446 	if (status) {
1447 		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
1448 		vfree(aeq->mem.va);
1449 		return status;
1450 	}
1451 
1452 	return 0;
1453 }
1454 
1455 /**
1456  * irdma_create_aeq - create async event queue
1457  * @rf: RDMA PCI function
1458  *
1459  * Return 0, if the aeq and the resources associated with it
1460  * are successfully created, otherwise return error
1461  */
1462 static int
1463 irdma_create_aeq(struct irdma_pci_f *rf)
1464 {
1465 	struct irdma_aeq_init_info info = {0};
1466 	struct irdma_sc_dev *dev = &rf->sc_dev;
1467 	struct irdma_aeq *aeq = &rf->aeq;
1468 	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
1469 	u32 aeq_size;
1470 	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
1471 	int status;
1472 
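	/*
	 * Size the AEQ for the worst case: two entries per QP in iWARP-only
	 * mode, one per QP otherwise, plus one per CQ, capped at the
	 * hardware maximum.
	 */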
1473 	aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
1474 	    hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
1475 	aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
1476 
1477 	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * aeq_size;
1478 	aeq->mem.va = irdma_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
1479 					     IRDMA_AEQ_ALIGNMENT);
1480 	if (aeq->mem.va)
1481 		goto skip_virt_aeq;
1482 
1483 	/* physically mapped aeq failed. setup virtual aeq */
1484 	status = irdma_create_virt_aeq(rf, aeq_size);
1485 	if (status)
1486 		return status;
1487 
1488 	info.virtual_map = true;
1489 	aeq->virtual_map = info.virtual_map;
1490 	info.pbl_chunk_size = 1;
1491 	info.first_pm_pbl_idx = aeq->palloc.level1.idx;
1492 
1493 skip_virt_aeq:
1494 	info.aeqe_base = aeq->mem.va;
1495 	info.aeq_elem_pa = aeq->mem.pa;
1496 	info.elem_cnt = aeq_size;
1497 	info.dev = dev;
1498 	info.msix_idx = rf->iw_msixtbl->idx;
1499 	status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
1500 	if (status)
1501 		goto err;
1502 
1503 	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
1504 	if (status)
1505 		goto err;
1506 
1507 	return 0;
1508 
1509 err:
1510 	if (aeq->virtual_map)
1511 		irdma_destroy_virt_aeq(rf);
1512 	else
1513 		irdma_free_dma_mem(dev->hw, &aeq->mem);
1514 
1515 	return status;
1516 }
1517 
1518 /**
1519  * irdma_setup_aeq - set up the device aeq
1520  * @rf: RDMA PCI function
1521  *
1522  * Create the aeq and configure its msix interrupt vector
1523  * Return 0 if successful, otherwise return error
1524  */
1525 static int
1526 irdma_setup_aeq(struct irdma_pci_f *rf)
1527 {
1528 	struct irdma_sc_dev *dev = &rf->sc_dev;
1529 	int status;
1530 
1531 	status = irdma_create_aeq(rf);
1532 	if (status)
1533 		return status;
1534 
1535 	status = irdma_cfg_aeq_vector(rf);
1536 	if (status) {
1537 		irdma_destroy_aeq(rf);
1538 		return status;
1539 	}
1540 
1541 	if (!rf->msix_shared)
1542 		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
1543 
1544 	return 0;
1545 }
1546 
1547 /**
1548  * irdma_initialize_ilq - create iwarp local queue for cm
1549  * @iwdev: irdma device
1550  *
1551  * Return 0 if successful, otherwise return error
1552  */
1553 static int
1554 irdma_initialize_ilq(struct irdma_device *iwdev)
1555 {
1556 	struct irdma_puda_rsrc_info info = {0};
1557 	int status;
1558 
1559 	info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
1560 	info.cq_id = 1;
1561 	info.qp_id = 1;
1562 	info.count = 1;
1563 	info.pd_id = 1;
1564 	info.abi_ver = IRDMA_ABI_VER;
1565 	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1566 	info.rq_size = info.sq_size;
1567 	info.buf_size = 1024;
1568 	info.tx_buf_cnt = 2 * info.sq_size;
1569 	info.receive = irdma_receive_ilq;
1570 	info.xmit_complete = irdma_free_sqbuf;
1571 	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1572 	if (status)
1573 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ilq create fail\n");
1574 
1575 	return status;
1576 }
1577 
1578 /**
1579  * irdma_initialize_ieq - create iwarp exception queue
1580  * @iwdev: irdma device
1581  *
1582  * Return 0 if successful, otherwise return error
1583  */
1584 static int
1585 irdma_initialize_ieq(struct irdma_device *iwdev)
1586 {
1587 	struct irdma_puda_rsrc_info info = {0};
1588 	int status;
1589 
1590 	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
1591 	info.cq_id = 2;
1592 	info.qp_id = iwdev->vsi.exception_lan_q;
1593 	info.count = 1;
1594 	info.pd_id = 2;
1595 	info.abi_ver = IRDMA_ABI_VER;
1596 	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
1597 	info.rq_size = info.sq_size;
1598 	info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
1599 	info.tx_buf_cnt = 4096;
1600 	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
1601 	if (status)
1602 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ieq create fail\n");
1603 
1604 	return status;
1605 }
1606 
1607 /**
1608  * irdma_reinitialize_ieq - destroy and re-create ieq
1609  * @vsi: VSI structure
1610  */
1611 void
1612 irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
1613 {
1614 	struct irdma_device *iwdev = vsi->back_vsi;
1615 	struct irdma_pci_f *rf = iwdev->rf;
1616 
1617 	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
1618 	if (irdma_initialize_ieq(iwdev)) {
1619 		iwdev->rf->reset = true;
1620 		rf->gen_ops.request_reset(rf);
1621 	}
1622 }
1623 
1624 /**
1625  * irdma_hmc_setup - create hmc objects for the device
1626  * @rf: RDMA PCI function
1627  *
1628  * Set up the device private memory space for the number and size of
1629  * the hmc objects and create the objects
1630  * Return 0 if successful, otherwise return error
1631  */
1632 static int
1633 irdma_hmc_setup(struct irdma_pci_f *rf)
1634 {
1635 	struct irdma_sc_dev *dev = &rf->sc_dev;
1636 	int status;
1637 	u32 qpcnt;
1638 
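	/*
	 * limits_sel picks a QP-count tier from rsrc_limits_table;
	 * irdma_cfg_fpm_val() derives the remaining HMC object counts
	 * from that QP count.
	 */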
1639 	qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
1640 
1641 	rf->sd_type = IRDMA_SD_TYPE_DIRECT;
1642 	status = irdma_cfg_fpm_val(dev, qpcnt);
1643 	if (status)
1644 		return status;
1645 
1646 	status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
1647 
1648 	return status;
1649 }
1650 
1651 /**
1652  * irdma_del_init_mem - deallocate memory resources
1653  * @rf: RDMA PCI function
1654  */
1655 static void
1656 irdma_del_init_mem(struct irdma_pci_f *rf)
1657 {
1658 	struct irdma_sc_dev *dev = &rf->sc_dev;
1659 
1660 	kfree(dev->hmc_info->sd_table.sd_entry);
1661 	dev->hmc_info->sd_table.sd_entry = NULL;
1662 	vfree(rf->mem_rsrc);
1663 	rf->mem_rsrc = NULL;
1664 	irdma_free_dma_mem(&rf->hw, &rf->obj_mem);
1665 	if (rf->rdma_ver != IRDMA_GEN_1) {
1666 		kfree(rf->allocated_ws_nodes);
1667 		rf->allocated_ws_nodes = NULL;
1668 	}
1669 	mutex_destroy(&dev->ws_mutex);
1670 	kfree(rf->ceqlist);
1671 	rf->ceqlist = NULL;
1672 	kfree(rf->iw_msixtbl);
1673 	rf->iw_msixtbl = NULL;
1674 	kfree(rf->hmc_info_mem);
1675 	rf->hmc_info_mem = NULL;
1676 }

1677 /**
1678  * irdma_initialize_dev - initialize device
1679  * @rf: RDMA PCI function
1680  *
1681  * Allocate memory for the hmc objects and initialize iwdev
1682  * Return 0 if successful, otherwise clean up the resources
1683  * and return error
1684  */
1685 static int
1686 irdma_initialize_dev(struct irdma_pci_f *rf)
1687 {
1688 	int status;
1689 	struct irdma_sc_dev *dev = &rf->sc_dev;
1690 	struct irdma_device_init_info info = {0};
1691 	struct irdma_dma_mem mem;
1692 	u32 size;
1693 
1694 	size = sizeof(struct irdma_hmc_pble_rsrc) +
1695 	    sizeof(struct irdma_hmc_info) +
1696 	    (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
1697 
1698 	rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
1699 	if (!rf->hmc_info_mem)
1700 		return -ENOMEM;
1701 
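	/*
	 * hmc_info_mem lays out the pble_rsrc followed by the array of
	 * hmc_obj_info entries; dev->hmc_info itself lives in rf->hw.hmc.
	 */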
1702 	rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
1703 	dev->hmc_info = &rf->hw.hmc;
1704 	dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
1705 	    (rf->pble_rsrc + 1);
1706 
1707 	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
1708 				       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
1709 	if (status)
1710 		goto error;
1711 
1712 	info.fpm_query_buf_pa = mem.pa;
1713 	info.fpm_query_buf = mem.va;
1714 
1715 	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
1716 				       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
1717 	if (status)
1718 		goto error;
1719 
1720 	info.fpm_commit_buf_pa = mem.pa;
1721 	info.fpm_commit_buf = mem.va;
1722 
1723 	info.bar0 = rf->hw.hw_addr;
1724 	info.hmc_fn_id = rf->peer_info->pf_id;
1725 	/*
1726 	 * the debug_mask is already assigned at this point through sysctl and so the value shouldn't be overwritten
1727 	 */
1728 	info.debug_mask = rf->sc_dev.debug_mask;
1729 	info.hw = &rf->hw;
1730 	status = irdma_sc_dev_init(&rf->sc_dev, &info);
1731 	if (status)
1732 		goto error;
1733 
1734 	return status;
1735 error:
1736 	kfree(rf->hmc_info_mem);
1737 	rf->hmc_info_mem = NULL;
1738 
1739 	return status;
1740 }
1741 
1742 /**
1743  * irdma_rt_deinit_hw - clean up the irdma device resources
1744  * @iwdev: irdma device
1745  *
1746  * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
1747  * device queues and free the pble and the hmc objects
1748  */
1749 void
1750 irdma_rt_deinit_hw(struct irdma_device *iwdev)
1751 {
1752 	struct irdma_sc_qp qp = {{0}};
1753 	irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", iwdev->init_state);
1754 
1755 	switch (iwdev->init_state) {
1756 	case IP_ADDR_REGISTERED:
1757 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1758 			irdma_del_local_mac_entry(iwdev->rf,
1759 						  (u8)iwdev->mac_ip_table_idx);
1760 		/* fallthrough */
1761 	case AEQ_CREATED:
1762 	case PBLE_CHUNK_MEM:
1763 	case CEQS_CREATED:
1764 	case REM_ENDPOINT_TRK_CREATED:
1765 		if (iwdev->rf->en_rem_endpoint_trk) {
1766 			qp.dev = &iwdev->rf->sc_dev;
1767 			qp.qp_uk.qp_id = IRDMA_REM_ENDPOINT_TRK_QPID;
1768 			qp.qp_uk.qp_type = IRDMA_QP_TYPE_IWARP;
1769 			irdma_cqp_qp_destroy_cmd(qp.dev, &qp);
1770 		}
1771 		/* fallthrough */
1772 	case IEQ_CREATED:
1773 		if (!iwdev->roce_mode)
1774 			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
1775 					     iwdev->rf->reset);
1776 		/* fallthrough */
1777 	case ILQ_CREATED:
1778 		if (!iwdev->roce_mode)
1779 			irdma_puda_dele_rsrc(&iwdev->vsi,
1780 					     IRDMA_PUDA_RSRC_TYPE_ILQ,
1781 					     iwdev->rf->reset);
1782 		break;
1783 	default:
1784 		irdma_dev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
1785 		break;
1786 	}
1787 
1788 	irdma_cleanup_cm_core(&iwdev->cm_core);
1789 	if (iwdev->vsi.pestat) {
1790 		irdma_vsi_stats_free(&iwdev->vsi);
1791 		kfree(iwdev->vsi.pestat);
1792 	}
1793 	if (iwdev->cleanup_wq)
1794 		destroy_workqueue(iwdev->cleanup_wq);
1795 }
1796 
1797 static int
1798 irdma_setup_init_state(struct irdma_pci_f *rf)
1799 {
1800 	int status;
1801 
1802 	status = irdma_save_msix_info(rf);
1803 	if (status)
1804 		return status;
1805 
1806 	rf->obj_mem.size = 8192;
1807 	rf->obj_mem.va = irdma_allocate_dma_mem(&rf->hw, &rf->obj_mem,
1808 						rf->obj_mem.size,
1809 						IRDMA_HW_PAGE_SIZE);
1810 	if (!rf->obj_mem.va) {
1811 		status = -ENOMEM;
1812 		goto clean_msixtbl;
1813 	}
1814 
1815 	rf->obj_next = rf->obj_mem;
1816 	status = irdma_initialize_dev(rf);
1817 	if (status)
1818 		goto clean_obj_mem;
1819 
1820 	return 0;
1821 
1822 clean_obj_mem:
1823 	irdma_free_dma_mem(&rf->hw, &rf->obj_mem);
1824 clean_msixtbl:
1825 	kfree(rf->iw_msixtbl);
1826 	rf->iw_msixtbl = NULL;
1827 	return status;
1828 }
1829 
1830 /**
1831  * irdma_get_used_rsrc - determine resources used internally
1832  * @iwdev: irdma device
1833  *
1834  * Called at the end of open to get all internal allocations
1835  */
1836 static void
1837 irdma_get_used_rsrc(struct irdma_device *iwdev)
1838 {
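	/*
	 * The internal allocations made during init are contiguous from
	 * bit 0, so the first clear bit in each allocation bitmap gives
	 * the count of entries reserved internally by the driver.
	 */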
1839 	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
1840 						  iwdev->rf->max_pd);
1841 	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
1842 						  iwdev->rf->max_qp);
1843 	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
1844 						  iwdev->rf->max_cq);
1845 	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
1846 						  iwdev->rf->max_mr);
1847 }
1848 
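/**
 * irdma_ctrl_deinit_hw - destroy control hw structures
 * @rf: RDMA PCI function
 *
 * Tear down the control resources in the reverse order of
 * irdma_ctrl_init_hw(), unwinding from the last init state that was reached.
 */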
1849 void
1850 irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
1851 {
1852 	enum init_completion_state state = rf->init_state;
1853 
1854 	rf->init_state = INVALID_STATE;
1855 	if (rf->rsrc_created) {
1856 		irdma_destroy_aeq(rf);
1857 		irdma_destroy_pble_prm(rf->pble_rsrc);
1858 		irdma_del_ceqs(rf);
1859 		rf->rsrc_created = false;
1860 	}
1861 
1862 	switch (state) {
1863 	case CEQ0_CREATED:
1864 		irdma_del_ceq_0(rf);
1865 		/* fallthrough */
1866 	case CCQ_CREATED:
1867 		irdma_destroy_ccq(rf);
1868 		/* fallthrough */
1869 	case HW_RSRC_INITIALIZED:
1870 	case HMC_OBJS_CREATED:
1871 		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
1872 				      rf->reset, rf->rdma_ver);
1873 		/* fallthrough */
1874 	case CQP_CREATED:
1875 		irdma_destroy_cqp(rf, !rf->reset);
1876 		/* fallthrough */
1877 	case INITIAL_STATE:
1878 		irdma_del_init_mem(rf);
1879 		break;
1880 	case INVALID_STATE:
1881 	default:
1882 		irdma_dev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", state);
1883 		break;
1884 	}
1885 }
1886 
1887 /**
1888  * irdma_rt_init_hw - Initializes runtime portion of HW
1889  * @iwdev: irdma device
1890  * @l2params: qos, tc, mtu info from netdev driver
1891  *
1892  * Create the ILQ, IEQ, CEQ device queues and PBLEs. Set up the irdma
1893  * device resource objects.
1894  */
1895 int
1896 irdma_rt_init_hw(struct irdma_device *iwdev,
1897 		 struct irdma_l2params *l2params)
1898 {
1899 	struct irdma_pci_f *rf = iwdev->rf;
1900 	struct irdma_sc_dev *dev = &rf->sc_dev;
1901 	struct irdma_sc_qp qp = {{0}};
1902 	struct irdma_vsi_init_info vsi_info = {0};
1903 	struct irdma_vsi_stats_info stats_info = {0};
1904 	int status;
1905 
1906 	vsi_info.dev = dev;
1907 	vsi_info.back_vsi = iwdev;
1908 	vsi_info.params = l2params;
1909 	vsi_info.pf_data_vsi_num = iwdev->vsi_num;
1910 	vsi_info.register_qset = rf->gen_ops.register_qset;
1911 	vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
1912 	vsi_info.exception_lan_q = 2;
1913 	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
1914 
1915 	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
1916 	if (status)
1917 		return status;
1918 
1919 	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
1920 	if (!stats_info.pestat) {
1921 		irdma_cleanup_cm_core(&iwdev->cm_core);
1922 		return -ENOMEM;
1923 	}
1924 	stats_info.fcn_id = dev->hmc_fn_id;
1925 	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
1926 	if (status) {
1927 		irdma_cleanup_cm_core(&iwdev->cm_core);
1928 		kfree(stats_info.pestat);
1929 		return status;
1930 	}
1931 
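	/*
	 * The do/while(0) block gives a single break-out point: any failure
	 * below drops through to the common irdma_rt_deinit_hw() cleanup.
	 */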
1932 	do {
1933 		if (!iwdev->roce_mode) {
1934 			status = irdma_initialize_ilq(iwdev);
1935 			if (status)
1936 				break;
1937 			iwdev->init_state = ILQ_CREATED;
1938 			status = irdma_initialize_ieq(iwdev);
1939 			if (status)
1940 				break;
1941 			iwdev->init_state = IEQ_CREATED;
1942 		}
1943 		if (iwdev->rf->en_rem_endpoint_trk) {
1944 			qp.dev = dev;
1945 			qp.qp_uk.qp_id = IRDMA_REM_ENDPOINT_TRK_QPID;
1946 			qp.qp_uk.qp_type = IRDMA_QP_TYPE_IWARP;
1947 			status = irdma_cqp_qp_create_cmd(dev, &qp);
1948 			if (status)
1949 				break;
1950 			iwdev->init_state = REM_ENDPOINT_TRK_CREATED;
1951 		}
1952 		if (!rf->rsrc_created) {
1953 			status = irdma_setup_ceqs(rf, &iwdev->vsi);
1954 			if (status)
1955 				break;
1956 
1957 			iwdev->init_state = CEQS_CREATED;
1958 
1959 			status = irdma_hmc_init_pble(&rf->sc_dev,
1960 						     rf->pble_rsrc);
1961 			if (status) {
1962 				irdma_del_ceqs(rf);
1963 				break;
1964 			}
1965 
1966 			iwdev->init_state = PBLE_CHUNK_MEM;
1967 
1968 			status = irdma_setup_aeq(rf);
1969 			if (status) {
1970 				irdma_destroy_pble_prm(rf->pble_rsrc);
1971 				irdma_del_ceqs(rf);
1972 				break;
1973 			}
1974 			iwdev->init_state = AEQ_CREATED;
1975 			rf->rsrc_created = true;
1976 		}
1977 
1978 		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1979 			irdma_alloc_set_mac(iwdev);
1980 		irdma_add_ip(iwdev);
1981 		iwdev->init_state = IP_ADDR_REGISTERED;
1982 
1983 		/*
1984 		 * Handles asynchronous cleanup tasks - disconnect CM, free QP, free CQ bufs
1985 		 */
1986 		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
1987 						    WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1988 		if (!iwdev->cleanup_wq)
1989 			return -ENOMEM;
1990 		irdma_get_used_rsrc(iwdev);
1991 		init_waitqueue_head(&iwdev->suspend_wq);
1992 
1993 		return 0;
1994 	} while (0);
1995 
1996 	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
1997 		status, iwdev->init_state);
1998 	irdma_rt_deinit_hw(iwdev);
1999 
2000 	return status;
2001 }
2002 
2003 /**
2004  * irdma_ctrl_init_hw - Initializes control portion of HW
2005  * @rf: RDMA PCI function
2006  *
2007  * Create admin queues, HMC obejcts and RF resource objects
2008  */
2009 int
2010 irdma_ctrl_init_hw(struct irdma_pci_f *rf)
2011 {
2012 	struct irdma_sc_dev *dev = &rf->sc_dev;
2013 	int status;

2014 	do {
2015 		status = irdma_setup_init_state(rf);
2016 		if (status)
2017 			break;
2018 		rf->init_state = INITIAL_STATE;
2019 
2020 		status = irdma_create_cqp(rf);
2021 		if (status)
2022 			break;
2023 		rf->init_state = CQP_CREATED;
2024 
2025 		dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
2026 		if (rf->rdma_ver != IRDMA_GEN_1) {
2027 			status = irdma_get_rdma_features(dev);
2028 			if (status)
2029 				break;
2030 		}
2031 
2032 		status = irdma_hmc_setup(rf);
2033 		if (status)
2034 			break;
2035 		rf->init_state = HMC_OBJS_CREATED;
2036 
2037 		status = irdma_initialize_hw_rsrc(rf);
2038 		if (status)
2039 			break;
2040 		rf->init_state = HW_RSRC_INITIALIZED;
2041 
2042 		status = irdma_create_ccq(rf);
2043 		if (status)
2044 			break;
2045 		rf->init_state = CCQ_CREATED;
2046 
2047 		status = irdma_setup_ceq_0(rf);
2048 		if (status)
2049 			break;
2050 		rf->init_state = CEQ0_CREATED;
2051 		/* Handles processing of CQP completions */
2052 		rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
2053 							  WQ_HIGHPRI | WQ_UNBOUND);
2054 		if (!rf->cqp_cmpl_wq) {
2055 			status = -ENOMEM;
2056 			break;
2057 		}
2058 		INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
2059 		irdma_sc_ccq_arm(dev->ccq);
2060 		return 0;
2061 	} while (0);
2062 
2063 	pr_err("IRDMA hardware initialization FAILED init_state=%d status=%d\n",
2064 	       rf->init_state, status);
2065 	irdma_ctrl_deinit_hw(rf);
2066 	return status;
2067 }
2068 
2069 /**
2070  * irdma_set_hw_rsrc - set hw memory resources.
2071  * @rf: RDMA PCI function
2072  */
2073 static void
2074 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
2075 {
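	/*
	 * Carve up the single mem_rsrc allocation: the ARP table comes first,
	 * followed by the QP/CQ/MR/PD/AH/MCG/ARP allocation bitmaps and then
	 * the QP and CQ pointer tables (see irdma_calc_mem_rsrc_size()).
	 */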
2076 	rf->allocated_qps = (void *)(rf->mem_rsrc +
2077 				     (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
2078 	rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
2079 	rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
2080 	rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
2081 	rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
2082 	rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
2083 	rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
2084 
2085 	rf->qp_table = (struct irdma_qp **)
2086 	    (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
2087 	rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
2088 
2089 	spin_lock_init(&rf->rsrc_lock);
2090 	spin_lock_init(&rf->arp_lock);
2091 	spin_lock_init(&rf->qptable_lock);
2092 	spin_lock_init(&rf->cqtable_lock);
2093 	spin_lock_init(&rf->qh_list_lock);
2094 }
2095 
2096 /**
2097  * irdma_calc_mem_rsrc_size - calculate memory resources size.
2098  * @rf: RDMA PCI function
2099  */
2100 static u32
irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
{
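	/* This size calculation must match the carving done in irdma_set_hw_rsrc(). */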
2101 	u32 rsrc_size;
2102 
2103 	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
2104 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
2105 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
2106 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
2107 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
2108 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
2109 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
2110 	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
2111 	rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
2112 	rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
2113 
2114 	return rsrc_size;
2115 }
2116 
2117 /**
2118  * irdma_initialize_hw_rsrc - initialize hw resource tracking array
2119  * @rf: RDMA PCI function
2120  */
2121 u32
2122 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
2123 {
2124 	u32 rsrc_size;
2125 	u32 mrdrvbits;
2126 	u32 ret;
2127 
2128 	if (rf->rdma_ver != IRDMA_GEN_1) {
2129 		rf->allocated_ws_nodes =
2130 		    kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
2131 			    sizeof(unsigned long), GFP_KERNEL);
2132 		if (!rf->allocated_ws_nodes)
2133 			return -ENOMEM;
2134 
2135 		set_bit(0, rf->allocated_ws_nodes);
2136 		rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
2137 	}
2138 	rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
2139 	rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
2140 	rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
2141 	rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
2142 	rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
2143 	rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
2144 	rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
2145 	rf->max_mcg = rf->max_qp;
2146 
2147 	rsrc_size = irdma_calc_mem_rsrc_size(rf);
2148 	rf->mem_rsrc = vzalloc(rsrc_size);
2149 	if (!rf->mem_rsrc) {
2150 		ret = -ENOMEM;
2151 		goto mem_rsrc_vmalloc_fail;
2152 	}
2153 
2154 	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
2155 
2156 	irdma_set_hw_rsrc(rf);
2157 
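	/*
	 * Reserve ID 0 in every resource bitmap so it is never handed out,
	 * along with the low QP/CQ/PD IDs used by the internal control
	 * resources (ILQ, IEQ and the remote endpoint tracker QP).
	 */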
2158 	set_bit(0, rf->allocated_mrs);
2159 	set_bit(0, rf->allocated_qps);
2160 	set_bit(0, rf->allocated_cqs);
2161 	set_bit(0, rf->allocated_pds);
2162 	set_bit(0, rf->allocated_arps);
2163 	set_bit(0, rf->allocated_ahs);
2164 	set_bit(0, rf->allocated_mcgs);
2165 	set_bit(2, rf->allocated_qps);	/* qp 2 IEQ */
2166 	set_bit(1, rf->allocated_qps);	/* qp 1 ILQ */
2167 	set_bit(IRDMA_REM_ENDPOINT_TRK_QPID, rf->allocated_qps);	/* qp 3 Remote Endpt trk */
2168 	set_bit(1, rf->allocated_cqs);
2169 	set_bit(1, rf->allocated_pds);
2170 	set_bit(2, rf->allocated_cqs);
2171 	set_bit(2, rf->allocated_pds);
2172 
2173 	INIT_LIST_HEAD(&rf->mc_qht_list.list);
2174 	/* stag index mask has a minimum of 14 bits */
2175 	mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
2176 	rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
2177 
2178 	return 0;
2179 
2180 mem_rsrc_vmalloc_fail:
2181 	kfree(rf->allocated_ws_nodes);
2182 	rf->allocated_ws_nodes = NULL;
2183 
2184 	return ret;
2185 }
2186 
2187 /**
2188  * irdma_cqp_ce_handler - handle cqp completions
2189  * @rf: RDMA PCI function
2190  * @cq: cq for cqp completions
2191  */
2192 void
2193 irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
2194 {
2195 	struct irdma_cqp_request *cqp_request;
2196 	struct irdma_sc_dev *dev = &rf->sc_dev;
2197 	u32 cqe_count = 0;
2198 	struct irdma_ccq_cqe_info info;
2199 	unsigned long flags;
2200 	int ret;
2201 
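	/* Drain all pending CQP completions from the CCQ. */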
2202 	do {
2203 		memset(&info, 0, sizeof(info));
2204 		spin_lock_irqsave(&rf->cqp.compl_lock, flags);
2205 		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
2206 		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
2207 		if (ret)
2208 			break;
2209 
2210 		cqp_request = (struct irdma_cqp_request *)
2211 		    (uintptr_t)info.scratch;
2212 		if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
2213 						     info.maj_err_code,
2214 						     info.min_err_code))
2215 			irdma_dev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
2216 				      info.op_code, info.maj_err_code, info.min_err_code);
2217 		if (cqp_request) {
2218 			cqp_request->compl_info.maj_err_code = info.maj_err_code;
2219 			cqp_request->compl_info.min_err_code = info.min_err_code;
2220 			cqp_request->compl_info.op_ret_val = info.op_ret_val;
2221 			cqp_request->compl_info.error = info.error;
2222 			irdma_complete_cqp_request(&rf->cqp, cqp_request);
2223 		}
2224 
2225 		cqe_count++;
2226 	} while (1);
2227 
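	/* Process any deferred CQP work and re-arm the CCQ for further events. */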
2228 	if (cqe_count) {
2229 		irdma_process_bh(dev);
2230 		irdma_sc_ccq_arm(dev->ccq);
2231 	}
2232 }
2233 
2234 /**
2235  * cqp_compl_worker - Handle cqp completions
2236  * @work: Pointer to work structure
2237  */
2238 void
2239 cqp_compl_worker(struct work_struct *work)
2240 {
2241 	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
2242 					      cqp_cmpl_work);
2243 	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
2244 
2245 	irdma_cqp_ce_handler(rf, cq);
2246 }
2247 
2248 /**
2249  * irdma_lookup_apbvt_entry - look up an existing apbvt entry for a port
2250  * @cm_core: cm's core
2251  * @port: port to identify apbvt entry
2252  */
2253 static struct irdma_apbvt_entry *
2254 irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
2255 			 u16 port)
2256 {
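	/* Caller must hold cm_core->apbvt_lock. */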
2257 	struct irdma_apbvt_entry *entry;
2258 
2259 	HASH_FOR_EACH_POSSIBLE(cm_core->apbvt_hash_tbl, entry, hlist, port) {
2260 		if (entry->port == port) {
2261 			entry->use_cnt++;
2262 			return entry;
2263 		}
2264 	}
2265 
2266 	return NULL;
2267 }
2268 
2269 /**
2270  * irdma_next_iw_state - modify qp state
2271  * @iwqp: iwarp qp to modify
2272  * @state: next state for qp
2273  * @del_hash: flag to remove the hash entry
2274  * @term: flags selecting whether a terminate and/or FIN message is sent
2275  * @termlen: length of the terminate message
2276  */
2277 void
2278 irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2279 		    u8 termlen)
2280 {
2281 	struct irdma_modify_qp_info info = {0};
2282 
2283 	info.next_iwarp_state = state;
2284 	info.remove_hash_idx = del_hash;
2285 	info.cq_num_valid = true;
2286 	info.arp_cache_idx_valid = true;
2287 	info.dont_send_term = true;
2288 	info.dont_send_fin = true;
2289 	info.termlen = termlen;
2290 
2291 	if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
2292 		info.dont_send_term = false;
2293 	if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
2294 		info.dont_send_fin = false;
2295 	if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2296 		info.reset_tcp_conn = true;
2297 	iwqp->hw_iwarp_state = state;
2298 	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2299 	iwqp->iwarp_state = info.next_iwarp_state;
2300 }
2301 
2302 /**
2303  * irdma_del_local_mac_entry - remove a mac entry from the hw
2304  * table
2305  * @rf: RDMA PCI function
2306  * @idx: the index of the mac ip address to delete
2307  */
2308 void
2309 irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
2310 {
2311 	struct irdma_cqp *iwcqp = &rf->cqp;
2312 	struct irdma_cqp_request *cqp_request;
2313 	struct cqp_cmds_info *cqp_info;
2314 
2315 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2316 	if (!cqp_request)
2317 		return;
2318 
2319 	cqp_info = &cqp_request->info;
2320 	cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
2321 	cqp_info->post_sq = 1;
2322 	cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
2323 	cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
2324 	cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
2325 	cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
2326 
2327 	irdma_handle_cqp_op(rf, cqp_request);
2328 	irdma_put_cqp_request(iwcqp, cqp_request);
2329 }
2330 
2331 /**
2332  * irdma_add_local_mac_entry - add a mac ip address entry to the
2333  * hw table
2334  * @rf: RDMA PCI function
2335  * @mac_addr: pointer to mac address
2336  * @idx: the index of the mac ip address to add
2337  */
2338 int
2339 irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
2340 {
2341 	struct irdma_local_mac_entry_info *info;
2342 	struct irdma_cqp *iwcqp = &rf->cqp;
2343 	struct irdma_cqp_request *cqp_request;
2344 	struct cqp_cmds_info *cqp_info;
2345 	int status;
2346 
2347 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2348 	if (!cqp_request)
2349 		return -ENOMEM;
2350 
2351 	cqp_info = &cqp_request->info;
2352 	cqp_info->post_sq = 1;
2353 	info = &cqp_info->in.u.add_local_mac_entry.info;
2354 	ether_addr_copy(info->mac_addr, mac_addr);
2355 	info->entry_idx = idx;
2356 	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
2357 	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
2358 	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
2360 
2361 	status = irdma_handle_cqp_op(rf, cqp_request);
2362 	irdma_put_cqp_request(iwcqp, cqp_request);
2363 
2364 	return status;
2365 }
2366 
2367 /**
2368  * irdma_alloc_local_mac_entry - allocate a mac entry
2369  * @rf: RDMA PCI function
2370  * @mac_tbl_idx: the index of the new mac address
2371  *
2372  * Allocate a mac address entry and update the mac_tbl_idx
2373  * to hold the index of the newly created mac address
2374  * Return 0 if successful, otherwise return error
2375  */
2376 int
2377 irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
2378 {
2379 	struct irdma_cqp *iwcqp = &rf->cqp;
2380 	struct irdma_cqp_request *cqp_request;
2381 	struct cqp_cmds_info *cqp_info;
2382 	int status = 0;
2383 
2384 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
2385 	if (!cqp_request)
2386 		return -ENOMEM;
2387 
2388 	cqp_info = &cqp_request->info;
2389 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
2390 	cqp_info->post_sq = 1;
2391 	cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
2392 	cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
2393 	status = irdma_handle_cqp_op(rf, cqp_request);
2394 	if (!status)
2395 		*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
2396 
2397 	irdma_put_cqp_request(iwcqp, cqp_request);
2398 
2399 	return status;
2400 }
2401 
2402 /**
2403  * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
2404  * @iwdev: irdma device
2405  * @accel_local_port: port for apbvt
2406  * @add_port: add ordelete port
2407  */
2408 static int
2409 irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
2410 			   u16 accel_local_port, bool add_port)
2411 {
2412 	struct irdma_apbvt_info *info;
2413 	struct irdma_cqp_request *cqp_request;
2414 	struct cqp_cmds_info *cqp_info;
2415 	int status;
2416 
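	/* Adds wait for completion; deletes are posted without waiting (see irdma_del_apbvt()). */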
2417 	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
2418 	if (!cqp_request)
2419 		return -ENOMEM;
2420 
2421 	cqp_info = &cqp_request->info;
2422 	info = &cqp_info->in.u.manage_apbvt_entry.info;
2423 	memset(info, 0, sizeof(*info));
2424 	info->add = add_port;
2425 	info->port = accel_local_port;
2426 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
2427 	cqp_info->post_sq = 1;
2428 	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2429 	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
2430 	irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV,
2431 		    "%s: port=0x%04x\n", (!add_port) ? "DELETE" : "ADD",
2432 		    accel_local_port);
2433 
2434 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2435 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2436 
2437 	return status;
2438 }
2439 
2440 /**
2441  * irdma_add_apbvt - add tcp port to HW apbvt table
2442  * @iwdev: irdma device
2443  * @port: port for apbvt
2444  */
2445 struct irdma_apbvt_entry *
2446 irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
2447 {
2448 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
2449 	struct irdma_apbvt_entry *entry;
2450 	unsigned long flags;
2451 
2452 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2453 	entry = irdma_lookup_apbvt_entry(cm_core, port);
2454 	if (entry) {
2455 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2456 		return entry;
2457 	}
2458 
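	/* apbvt_lock is a spinlock held here, so the allocation must not sleep. */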
2459 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
2460 	if (!entry) {
2461 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2462 		return NULL;
2463 	}
2464 
2465 	entry->port = port;
2466 	entry->use_cnt = 1;
2467 	HASH_ADD(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
2468 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2469 
2470 	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
2471 		kfree(entry);
2472 		return NULL;
2473 	}
2474 
2475 	return entry;
2476 }
2477 
2478 /**
2479  * irdma_del_apbvt - delete tcp port from HW apbvt table
2480  * @iwdev: irdma device
2481  * @entry: apbvt entry object
2482  */
2483 void
2484 irdma_del_apbvt(struct irdma_device *iwdev,
2485 		struct irdma_apbvt_entry *entry)
2486 {
2487 	struct irdma_cm_core *cm_core = &iwdev->cm_core;
2488 	unsigned long flags;
2489 
2490 	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
2491 	if (--entry->use_cnt) {
2492 		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2493 		return;
2494 	}
2495 
2496 	HASH_DEL(cm_core->apbvt_hash_tbl, &entry->hlist);
2497 	/*
2498 	 * apbvt_lock is held across the non-waiting CQP delete APBVT OP to prevent
2499 	 * an add APBVT CQP for the same port from racing ahead of the delete.
2500 	 */
2501 	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
2502 	kfree(entry);
2503 	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
2504 }
2505 
2506 /**
2507  * irdma_manage_arp_cache - manage hw arp cache
2508  * @rf: RDMA PCI function
2509  * @mac_addr: mac address ptr
2510  * @ip_addr: ip addr for arp cache
2511  * @action: add, delete or modify
2512  */
2513 void
2514 irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr,
2515 		       u32 *ip_addr, u32 action)
2516 {
2517 	struct irdma_add_arp_cache_entry_info *info;
2518 	struct irdma_cqp_request *cqp_request;
2519 	struct cqp_cmds_info *cqp_info;
2520 	int arp_index;
2521 
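	/* Add to or look up the driver ARP table first; -1 means no HW update is needed. */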
2522 	arp_index = irdma_arp_table(rf, ip_addr, mac_addr, action);
2523 	if (arp_index == -1)
2524 		return;
2525 
2526 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
2527 	if (!cqp_request)
2528 		return;
2529 
2530 	cqp_info = &cqp_request->info;
2531 	if (action == IRDMA_ARP_ADD) {
2532 		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
2533 		info = &cqp_info->in.u.add_arp_cache_entry.info;
2534 		memset(info, 0, sizeof(*info));
2535 		info->arp_index = (u16)arp_index;
2536 		info->permanent = true;
2537 		ether_addr_copy(info->mac_addr, mac_addr);
2538 		cqp_info->in.u.add_arp_cache_entry.scratch =
2539 		    (uintptr_t)cqp_request;
2540 		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2541 	} else {
2542 		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
2543 		cqp_info->in.u.del_arp_cache_entry.scratch =
2544 		    (uintptr_t)cqp_request;
2545 		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
2546 		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
2547 	}
2548 
2549 	cqp_info->post_sq = 1;
2550 	irdma_handle_cqp_op(rf, cqp_request);
2551 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2552 }
2553 
2554 /**
2555  * irdma_send_syn_cqp_callback - do syn/ack after qhash
2556  * @cqp_request: qhash cqp completion
2557  */
2558 static void
2559 irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
2560 {
2561 	struct irdma_cm_node *cm_node = cqp_request->param;
2562 
2563 	irdma_send_syn(cm_node, 1);
2564 	irdma_rem_ref_cm_node(cm_node);
2565 }
2566 
2567 /**
2568  * irdma_manage_qhash - add or modify qhash
2569  * @iwdev: irdma device
2570  * @cminfo: cm info for qhash
2571  * @etype: type (syn or quad)
2572  * @mtype: type of qhash
2573  * @cmnode: cmnode associated with connection
2574  * @wait: wait for completion
2575  */
2576 int
2577 irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
2578 		   enum irdma_quad_entry_type etype,
2579 		   enum irdma_quad_hash_manage_type mtype, void *cmnode,
2580 		   bool wait)
2581 {
2582 	struct irdma_qhash_table_info *info;
2583 	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
2584 	struct irdma_cqp_request *cqp_request;
2585 	struct cqp_cmds_info *cqp_info;
2586 	struct irdma_cm_node *cm_node = cmnode;
2587 	int status;
2588 
2589 	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
2590 	if (!cqp_request)
2591 		return -ENOMEM;
2592 
2593 	cminfo->cqp_request = cqp_request;
2594 	if (!wait)
2595 		atomic_inc(&cqp_request->refcnt);
2596 	cqp_info = &cqp_request->info;
2597 	info = &cqp_info->in.u.manage_qhash_table_entry.info;
2598 	memset(info, 0, sizeof(*info));
2599 	info->vsi = &iwdev->vsi;
2600 	info->manage = mtype;
2601 	info->entry_type = etype;
2602 	if (cminfo->vlan_id < VLAN_N_VID) {
2603 		info->vlan_valid = true;
2604 		info->vlan_id = cminfo->vlan_id;
2605 	} else {
2606 		info->vlan_valid = false;
2607 	}
2608 	info->ipv4_valid = cminfo->ipv4;
2609 	info->user_pri = cminfo->user_pri;
2610 	ether_addr_copy(info->mac_addr, if_getlladdr(iwdev->netdev));
2611 	info->qp_num = cminfo->qh_qpid;
2612 	info->dest_port = cminfo->loc_port;
2613 	info->dest_ip[0] = cminfo->loc_addr[0];
2614 	info->dest_ip[1] = cminfo->loc_addr[1];
2615 	info->dest_ip[2] = cminfo->loc_addr[2];
2616 	info->dest_ip[3] = cminfo->loc_addr[3];
2617 	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
2618 	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
2619 	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
2620 	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
2621 	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
2622 		info->src_port = cminfo->rem_port;
2623 		info->src_ip[0] = cminfo->rem_addr[0];
2624 		info->src_ip[1] = cminfo->rem_addr[1];
2625 		info->src_ip[2] = cminfo->rem_addr[2];
2626 		info->src_ip[3] = cminfo->rem_addr[3];
2627 	}
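	/*
	 * When a cm_node is supplied, the SYN/ACK is sent from the CQP
	 * completion callback once the qhash entry has been programmed.
	 */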
2628 	if (cmnode) {
2629 		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
2630 		cqp_request->param = cmnode;
2631 		if (!wait)
2632 			atomic_inc(&cm_node->refcnt);
2633 	}
2634 	if (info->ipv4_valid)
2635 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
2636 			    "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x rem_addr=%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d cm_node=%p\n",
2637 			    (!mtype) ? "DELETE" : "ADD",
2638 			    __builtin_return_address(0), info->src_port,
2639 			    info->dest_port, info->src_ip[0], info->dest_ip[0],
2640 			    info->mac_addr[0], info->mac_addr[1],
2641 			    info->mac_addr[2], info->mac_addr[3],
2642 			    info->mac_addr[4], info->mac_addr[5],
2643 			    cminfo->vlan_id, cmnode);
2644 	else
2645 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
2646 			    "%s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%x:%x:%x:%x rem_addr=%x:%x:%x:%x mac=%x:%x:%x:%x:%x:%x, vlan_id=%d cm_node=%p\n",
2647 			    (!mtype) ? "DELETE" : "ADD",
2648 			    __builtin_return_address(0), info->src_port,
2649 			    info->dest_port, IRDMA_PRINT_IP6(info->src_ip),
2650 			    IRDMA_PRINT_IP6(info->dest_ip), info->mac_addr[0],
2651 			    info->mac_addr[1], info->mac_addr[2],
2652 			    info->mac_addr[3], info->mac_addr[4],
2653 			    info->mac_addr[5], cminfo->vlan_id,
2654 			    cmnode);
2655 
2656 	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
2657 	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
2658 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
2659 	cqp_info->post_sq = 1;
2660 	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2661 	if (status && cm_node && !wait)
2662 		irdma_rem_ref_cm_node(cm_node);
2663 
2664 	irdma_put_cqp_request(iwcqp, cqp_request);
2665 
2666 	return status;
2667 }
2668 
2669 /**
2670  * irdma_hw_flush_wqes - flush qp's wqe
2671  * @rf: RDMA PCI function
2672  * @qp: hardware control qp
2673  * @info: info for flush
2674  * @wait: flag wait for completion
2675  */
2676 int
2677 irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2678 		    struct irdma_qp_flush_info *info, bool wait)
2679 {
2680 	int status;
2681 	struct irdma_qp_flush_info *hw_info;
2682 	struct irdma_cqp_request *cqp_request;
2683 	struct cqp_cmds_info *cqp_info;
2684 	struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2685 
2686 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2687 	if (!cqp_request)
2688 		return -ENOMEM;
2689 
2690 	cqp_info = &cqp_request->info;
2691 	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
2692 	memcpy(hw_info, info, sizeof(*hw_info));
2693 	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
2694 	cqp_info->post_sq = 1;
2695 	cqp_info->in.u.qp_flush_wqes.qp = qp;
2696 	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
2697 	status = irdma_handle_cqp_op(rf, cqp_request);
2698 	if (status) {
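		/*
		 * The flush command was not issued; mark both queues as flushed
		 * so nothing waits on flush completions that will never arrive.
		 */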
2699 		qp->qp_uk.sq_flush_complete = true;
2700 		qp->qp_uk.rq_flush_complete = true;
2701 		irdma_put_cqp_request(&rf->cqp, cqp_request);
2702 		return status;
2703 	}
2704 
2705 	if (!wait || cqp_request->compl_info.maj_err_code)
2706 		goto put_cqp;
2707 
2708 	if (info->rq) {
2709 		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
2710 		    cqp_request->compl_info.min_err_code == 0) {
2711 			/* RQ WQE flush was requested but did not happen */
2712 			qp->qp_uk.rq_flush_complete = true;
2713 		}
2714 	}
2715 	if (info->sq) {
2716 		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
2717 		    cqp_request->compl_info.min_err_code == 0) {
2718 			/* SQ WQE flush was requested but did not happen */
2719 			qp->qp_uk.sq_flush_complete = true;
2720 		}
2721 	}
2722 
2723 	irdma_debug(&rf->sc_dev, IRDMA_DEBUG_VERBS,
2724 		    "qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
2725 		    iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2726 		    iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
2727 		    cqp_request->compl_info.maj_err_code,
2728 		    cqp_request->compl_info.min_err_code);
2729 put_cqp:
2730 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2731 
2732 	return status;
2733 }
2734 
2735 /**
2736  * irdma_gen_ae - generate AE
2737  * @rf: RDMA PCI function
2738  * @qp: qp associated with AE
2739  * @info: info for ae
2740  * @wait: wait for completion
2741  */
2742 void
2743 irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
2744 	     struct irdma_gen_ae_info *info, bool wait)
2745 {
2746 	struct irdma_gen_ae_info *ae_info;
2747 	struct irdma_cqp_request *cqp_request;
2748 	struct cqp_cmds_info *cqp_info;
2749 
2750 	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2751 	if (!cqp_request)
2752 		return;
2753 
2754 	cqp_info = &cqp_request->info;
2755 	ae_info = &cqp_request->info.in.u.gen_ae.info;
2756 	memcpy(ae_info, info, sizeof(*ae_info));
2757 	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
2758 	cqp_info->post_sq = 1;
2759 	cqp_info->in.u.gen_ae.qp = qp;
2760 	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
2761 
2762 	irdma_handle_cqp_op(rf, cqp_request);
2763 	irdma_put_cqp_request(&rf->cqp, cqp_request);
2764 }
2765 
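/**
 * irdma_flush_wqes - flush qp's wqes
 * @iwqp: qp to flush wqes
 * @flush_mask: IRDMA_FLUSH_SQ and/or IRDMA_FLUSH_RQ, optionally combined with
 *              IRDMA_REFLUSH and IRDMA_FLUSH_WAIT
 */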
2766 void
2767 irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2768 {
2769 	struct irdma_qp_flush_info info = {0};
2770 	struct irdma_pci_f *rf = iwqp->iwdev->rf;
2771 	u8 flush_code = iwqp->sc_qp.flush_code;
2772 
2773 	if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
2774 		return;
2775 
2776 	/* Set flush info fields */
2777 	info.sq = flush_mask & IRDMA_FLUSH_SQ;
2778 	info.rq = flush_mask & IRDMA_FLUSH_RQ;
2779 
2780 	/* Generate userflush errors in CQE */
2781 	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2782 	info.sq_minor_code = FLUSH_GENERAL_ERR;
2783 	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
2784 	info.rq_minor_code = FLUSH_GENERAL_ERR;
2785 	info.userflushcode = true;
2786 
2787 	if (flush_mask & IRDMA_REFLUSH) {
2788 		if (info.sq)
2789 			iwqp->sc_qp.flush_sq = false;
2790 		if (info.rq)
2791 			iwqp->sc_qp.flush_rq = false;
2792 	} else {
2793 		if (flush_code) {
2794 			if (info.sq && iwqp->sc_qp.sq_flush_code)
2795 				info.sq_minor_code = flush_code;
2796 			if (info.rq && iwqp->sc_qp.rq_flush_code)
2797 				info.rq_minor_code = flush_code;
2798 		}
2799 		if (irdma_upload_context && irdma_upload_qp_context(iwqp, 0, 1))
2800 			irdma_dev_warn(&iwqp->iwdev->ibdev, "failed to upload QP context\n");
2801 		if (!iwqp->user_mode)
2802 			irdma_sched_qp_flush_work(iwqp);
2803 	}
2804 
2805 	/* Issue flush */
2806 	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2807 				  flush_mask & IRDMA_FLUSH_WAIT);
2808 	iwqp->flush_issued = true;
2809 }
2810