xref: /freebsd/sys/dev/irdma/irdma_ctrl.c (revision a50d73d5782a351ad83e8d1f84d11720a12e70d3)
1 /*-
2  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
3  *
4  * Copyright (c) 2015 - 2022 Intel Corporation
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenFabrics.org BSD license below:
11  *
12  *   Redistribution and use in source and binary forms, with or
13  *   without modification, are permitted provided that the following
14  *   conditions are met:
15  *
16  *    - Redistributions of source code must retain the above
17  *	copyright notice, this list of conditions and the following
18  *	disclaimer.
19  *
20  *    - Redistributions in binary form must reproduce the above
21  *	copyright notice, this list of conditions and the following
22  *	disclaimer in the documentation and/or other materials
23  *	provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 /*$FreeBSD$*/
35 
36 #include "osdep.h"
37 #include "irdma_hmc.h"
38 #include "irdma_defs.h"
39 #include "irdma_type.h"
40 #include "irdma_ws.h"
41 #include "irdma_protos.h"
42 
43 /**
44  * irdma_qp_from_entry - given a list entry, get the containing qp structure
45  * @entry: points to the list entry embedded in the qp structure
46  */
47 static struct irdma_sc_qp *
48 irdma_qp_from_entry(struct list_head *entry)
49 {
50 	if (!entry)
51 		return NULL;
52 
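	/* open-coded container_of(): step back from the embedded list entry to the enclosing qp */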
53 	return (struct irdma_sc_qp *)((char *)entry -
54 				      offsetof(struct irdma_sc_qp, list));
55 }
56 
57 /**
58  * irdma_get_qp_from_list - get next qp from a list
59  * @head: list head of qps
60  * @qp: current qp
61  */
62 struct irdma_sc_qp *
63 irdma_get_qp_from_list(struct list_head *head,
64 		       struct irdma_sc_qp *qp)
65 {
66 	struct list_head *lastentry;
67 	struct list_head *entry = NULL;
68 
69 	if (list_empty(head))
70 		return NULL;
71 
72 	if (!qp) {
73 		entry = (head)->next;
74 	} else {
75 		lastentry = &qp->list;
76 		entry = (lastentry)->next;
77 		if (entry == head)
78 			return NULL;
79 	}
80 
81 	return irdma_qp_from_entry(entry);
82 }
83 
84 /**
85  * irdma_sc_suspend_resume_qps - suspend/resume all qps on the VSI
86  * @vsi: the VSI struct pointer
87  * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
88  */
89 void
90 irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
91 {
92 	struct irdma_sc_qp *qp = NULL;
93 	u8 i;
94 
95 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
96 		mutex_lock(&vsi->qos[i].qos_mutex);
97 		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
98 		while (qp) {
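			/*
			 * On resume, re-add the QP's user priority node to the
			 * work scheduler and refresh the qs_handle; if the WS
			 * add fails, the QP is still resumed but then modified
			 * to the error state.
			 */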
99 			if (op == IRDMA_OP_RESUME) {
100 				if (!qp->dev->ws_add(vsi, i)) {
101 					qp->qs_handle =
102 					    vsi->qos[qp->user_pri].qs_handle;
103 					irdma_cqp_qp_suspend_resume(qp, op);
104 				} else {
105 					irdma_cqp_qp_suspend_resume(qp, op);
106 					irdma_modify_qp_to_err(qp);
107 				}
108 			} else if (op == IRDMA_OP_SUSPEND) {
109 				/* issue cqp suspend command */
110 				if (!irdma_cqp_qp_suspend_resume(qp, op))
111 					atomic_inc(&vsi->qp_suspend_reqs);
112 			}
113 			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
114 		}
115 		mutex_unlock(&vsi->qos[i].qos_mutex);
116 	}
117 }
118 
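/**
 * irdma_set_qos_info - set qos info on vsi from l2params info
 * @vsi: the VSI struct pointer
 * @l2p: l2 parameters received from the l2 driver
 */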
119 static void
120 irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p)
121 {
122 	u8 i;
123 
124 	vsi->qos_rel_bw = l2p->vsi_rel_bw;
125 	vsi->qos_prio_type = l2p->vsi_prio_type;
126 	vsi->dscp_mode = l2p->dscp_mode;
127 	if (l2p->dscp_mode) {
128 		irdma_memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
129 		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
130 			l2p->up2tc[i] = i;
131 	}
132 	for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++)
133 		vsi->tc_print_warning[i] = true;
134 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
135 		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
136 			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
137 		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
138 			irdma_init_config_check(&vsi->cfg_check[i],
139 						l2p->up2tc[i],
140 						l2p->qs_handle_list[i]);
141 		vsi->qos[i].traffic_class = l2p->up2tc[i];
142 		vsi->qos[i].rel_bw =
143 		    l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
144 		vsi->qos[i].prio_type =
145 		    l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
146 		vsi->qos[i].valid = false;
147 	}
148 }
149 
150 /**
151  * irdma_change_l2params - given the new l2 parameters, change all qps
152  * @vsi: RDMA VSI pointer
153  * @l2params: New parameters from l2
154  */
155 void
156 irdma_change_l2params(struct irdma_sc_vsi *vsi,
157 		      struct irdma_l2params *l2params)
158 {
159 	if (l2params->tc_changed) {
160 		vsi->tc_change_pending = false;
161 		irdma_set_qos_info(vsi, l2params);
162 		irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
163 	}
164 	if (l2params->mtu_changed) {
165 		vsi->mtu = l2params->mtu;
166 		if (vsi->ieq)
167 			irdma_reinitialize_ieq(vsi);
168 	}
169 }
170 
171 /**
172  * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
173  * @qp: qp to be removed from qos
174  */
175 void
176 irdma_qp_rem_qos(struct irdma_sc_qp *qp)
177 {
178 	struct irdma_sc_vsi *vsi = qp->vsi;
179 
180 	irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
181 		    "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
182 		    qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
183 	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
184 	if (qp->on_qoslist) {
185 		qp->on_qoslist = false;
186 		list_del(&qp->list);
187 	}
188 	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
189 }
190 
191 /**
192  * irdma_qp_add_qos - called during setctx for qp to be added to qos
193  * @qp: qp to be added to qos
194  */
195 void
196 irdma_qp_add_qos(struct irdma_sc_qp *qp)
197 {
198 	struct irdma_sc_vsi *vsi = qp->vsi;
199 
200 	irdma_debug(qp->dev, IRDMA_DEBUG_DCB,
201 		    "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
202 		    qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist);
203 	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
204 	if (!qp->on_qoslist) {
205 		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
206 		qp->on_qoslist = true;
207 		qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
208 	}
209 	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
210 }
211 
212 /**
213  * irdma_sc_pd_init - initialize sc pd struct
214  * @dev: sc device struct
215  * @pd: sc pd ptr
216  * @pd_id: pd_id for allocated pd
217  * @abi_ver: User/Kernel ABI version
218  */
219 void
220 irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
221 		 int abi_ver)
222 {
223 	pd->pd_id = pd_id;
224 	pd->abi_ver = abi_ver;
225 	pd->dev = dev;
226 }
227 
228 /**
229  * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
230  * @cqp: struct for cqp hw
231  * @info: arp entry information
232  * @scratch: u64 saved to be used during cqp completion
233  * @post_sq: flag for cqp db to ring
234  */
235 static int
236 irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
237 			     struct irdma_add_arp_cache_entry_info *info,
238 			     u64 scratch, bool post_sq)
239 {
240 	__le64 *wqe;
241 	u64 temp, hdr;
242 
243 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
244 	if (!wqe)
245 		return -ENOSPC;
246 	set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);
247 
248 	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
249 	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
250 	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
251 	set_64bit_val(wqe, IRDMA_BYTE_16, temp);
252 
253 	hdr = info->arp_index |
254 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
255 	    FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, info->permanent) |
256 	    FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, true) |
257 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
258 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
259 
260 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
261 
262 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
263 			IRDMA_CQP_WQE_SIZE * 8);
264 	if (post_sq)
265 		irdma_sc_cqp_post_sq(cqp);
266 
267 	return 0;
268 }
269 
270 /**
271  * irdma_sc_del_arp_cache_entry - delete arp cache entry
272  * @cqp: struct for cqp hw
273  * @scratch: u64 saved to be used during cqp completion
274  * @arp_index: arp index to delete arp entry
275  * @post_sq: flag for cqp db to ring
276  */
277 static int
278 irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
279 			     u16 arp_index, bool post_sq)
280 {
281 	__le64 *wqe;
282 	u64 hdr;
283 
284 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
285 	if (!wqe)
286 		return -ENOSPC;
287 
288 	hdr = arp_index |
289 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
290 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
291 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
292 
293 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
294 
295 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
296 			wqe, IRDMA_CQP_WQE_SIZE * 8);
297 	if (post_sq)
298 		irdma_sc_cqp_post_sq(cqp);
299 
300 	return 0;
301 }
302 
303 /**
304  * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
305  * @cqp: struct for cqp hw
306  * @info: info for apbvt entry to add or delete
307  * @scratch: u64 saved to be used during cqp completion
308  * @post_sq: flag for cqp db to ring
309  */
310 static int
311 irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
312 			    struct irdma_apbvt_info *info,
313 			    u64 scratch, bool post_sq)
314 {
315 	__le64 *wqe;
316 	u64 hdr;
317 
318 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
319 	if (!wqe)
320 		return -ENOSPC;
321 
322 	set_64bit_val(wqe, IRDMA_BYTE_16, info->port);
323 
324 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
325 	    FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
326 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
327 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
328 
329 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
330 
331 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_APBVT WQE", wqe,
332 			IRDMA_CQP_WQE_SIZE * 8);
333 	if (post_sq)
334 		irdma_sc_cqp_post_sq(cqp);
335 
336 	return 0;
337 }
338 
339 /**
340  * irdma_sc_manage_qhash_table_entry - manage quad hash entries
341  * @cqp: struct for cqp hw
342  * @info: info for quad hash to manage
343  * @scratch: u64 saved to be used during cqp completion
344  * @post_sq: flag for cqp db to ring
345  *
346  * This is called before connection establishment is started.
347  * For passive connections, when a listener is created, this is
348  * called with an entry type of IRDMA_QHASH_TYPE_TCP_SYN and the
349  * local ip address and tcp port. When a SYN is received (passive
350  * connections) or sent (active connections), this routine is
351  * called with an entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
352  * and the connection quad is passed in info.
353  *
354  * When the iwarp connection is done and its state moves to RTS, the
355  * quad hash entry in the hardware will point to the iwarp qp
356  * number and requires no further calls from the driver.
357  */
358 static int
359 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
360 				  struct irdma_qhash_table_info *info,
361 				  u64 scratch, bool post_sq)
362 {
363 	__le64 *wqe;
364 	u64 qw1 = 0;
365 	u64 qw2 = 0;
366 	u64 temp;
367 	struct irdma_sc_vsi *vsi = info->vsi;
368 
369 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
370 	if (!wqe)
371 		return -ENOSPC;
372 	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
373 	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
374 	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
375 	set_64bit_val(wqe, IRDMA_BYTE_0, temp);
376 
377 	qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
378 	    FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
379 	if (info->ipv4_valid) {
380 		set_64bit_val(wqe, IRDMA_BYTE_48,
381 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
382 	} else {
383 		set_64bit_val(wqe, IRDMA_BYTE_56,
384 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
385 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
386 
387 		set_64bit_val(wqe, IRDMA_BYTE_48,
388 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
389 			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
390 	}
391 	qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
392 			 vsi->qos[info->user_pri].qs_handle);
393 	if (info->vlan_valid)
394 		qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
395 	set_64bit_val(wqe, IRDMA_BYTE_16, qw2);
396 	if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
397 		qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
398 		if (!info->ipv4_valid) {
399 			set_64bit_val(wqe, IRDMA_BYTE_40,
400 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
401 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
402 			set_64bit_val(wqe, IRDMA_BYTE_32,
403 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
404 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
405 		} else {
406 			set_64bit_val(wqe, IRDMA_BYTE_32,
407 				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
408 		}
409 	}
410 
411 	set_64bit_val(wqe, IRDMA_BYTE_8, qw1);
412 	temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
413 	    FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
414 		       IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
415 	    FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
416 	    FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
417 	    FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
418 	    FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
419 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
420 
421 	set_64bit_val(wqe, IRDMA_BYTE_24, temp);
422 
423 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_QHASH WQE", wqe,
424 			IRDMA_CQP_WQE_SIZE * 8);
425 	if (post_sq)
426 		irdma_sc_cqp_post_sq(cqp);
427 
428 	return 0;
429 }
430 
431 /**
432  * irdma_sc_qp_init - initialize qp
433  * @qp: sc qp
434  * @info: initialization qp info
435  */
436 int
437 irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
438 {
439 	int ret_code;
440 	u32 pble_obj_cnt;
441 	u16 wqe_size;
442 
443 	if (info->qp_uk_init_info.max_sq_frag_cnt >
444 	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
445 	    info->qp_uk_init_info.max_rq_frag_cnt >
446 	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
447 		return -EINVAL;
448 
449 	qp->dev = info->pd->dev;
450 	qp->vsi = info->vsi;
451 	qp->ieq_qp = info->vsi->exception_lan_q;
452 	qp->sq_pa = info->sq_pa;
453 	qp->rq_pa = info->rq_pa;
454 	qp->hw_host_ctx_pa = info->host_ctx_pa;
455 	qp->q2_pa = info->q2_pa;
456 	qp->shadow_area_pa = info->shadow_area_pa;
457 	qp->q2_buf = info->q2;
458 	qp->pd = info->pd;
459 	qp->hw_host_ctx = info->host_ctx;
460 	info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
461 	ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
462 	if (ret_code)
463 		return ret_code;
464 
465 	qp->virtual_map = info->virtual_map;
466 	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
467 
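	/*
	 * when the queues are virtually mapped, sq_pa/rq_pa carry the first
	 * PBLE indices rather than physical addresses, so bound-check them
	 * against the PBLE object count
	 */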
468 	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
469 	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
470 		return -EINVAL;
471 
472 	qp->llp_stream_handle = (void *)(-1);
473 	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
474 						    IRDMA_QUEUE_TYPE_SQ_RQ);
475 	irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
476 		    "hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size,
477 		    qp->qp_uk.sq_ring.size);
478 	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
479 		wqe_size = IRDMA_WQE_SIZE_128;
480 	else
481 		ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
482 						       &wqe_size);
483 	if (ret_code)
484 		return ret_code;
485 
486 	qp->hw_rq_size =
487 	    irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
488 				       (wqe_size / IRDMA_QP_WQE_MIN_SIZE),
489 				       IRDMA_QUEUE_TYPE_SQ_RQ);
490 	irdma_debug(qp->dev, IRDMA_DEBUG_WQE,
491 		    "hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
492 		    qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
493 
494 	qp->sq_tph_val = info->sq_tph_val;
495 	qp->rq_tph_val = info->rq_tph_val;
496 	qp->sq_tph_en = info->sq_tph_en;
497 	qp->rq_tph_en = info->rq_tph_en;
498 	qp->rcv_tph_en = info->rcv_tph_en;
499 	qp->xmit_tph_en = info->xmit_tph_en;
500 	qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
501 	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
502 
503 	return 0;
504 }
505 
506 /**
507  * irdma_sc_qp_create - create qp
508  * @qp: sc qp
509  * @info: qp create info
510  * @scratch: u64 saved to be used during cqp completion
511  * @post_sq: flag for cqp db to ring
512  */
513 int
514 irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
515 		   u64 scratch, bool post_sq)
516 {
517 	struct irdma_sc_cqp *cqp;
518 	__le64 *wqe;
519 	u64 hdr;
520 
521 	cqp = qp->dev->cqp;
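	/* the qp_id must fall within the HMC QP object range for this function */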
522 	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
523 	    qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1))
524 		return -EINVAL;
525 
526 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
527 	if (!wqe)
528 		return -ENOSPC;
529 
530 	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
531 	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
532 
533 	hdr = qp->qp_uk.qp_id |
534 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
535 	    FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
536 	    FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
537 	    FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
538 	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
539 	    FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
540 	    FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
541 	    FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
542 	    FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
543 		       info->arp_cache_idx_valid) |
544 	    FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
545 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
546 
547 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
548 
549 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
550 
551 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_CREATE WQE", wqe,
552 			IRDMA_CQP_WQE_SIZE * 8);
553 	if (post_sq)
554 		irdma_sc_cqp_post_sq(cqp);
555 
556 	return 0;
557 }
558 
559 /**
560  * irdma_sc_qp_modify - modify qp cqp wqe
561  * @qp: sc qp
562  * @info: modify qp info
563  * @scratch: u64 saved to be used during cqp completion
564  * @post_sq: flag for cqp db to ring
565  */
566 int
567 irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
568 		   u64 scratch, bool post_sq)
569 {
570 	__le64 *wqe;
571 	struct irdma_sc_cqp *cqp;
572 	u64 hdr;
573 	u8 term_actions = 0;
574 	u8 term_len = 0;
575 
576 	cqp = qp->dev->cqp;
577 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
578 	if (!wqe)
579 		return -ENOSPC;
580 
581 	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
582 		if (info->dont_send_fin)
583 			term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
584 		if (info->dont_send_term)
585 			term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
586 		if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
587 		    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
588 			term_len = info->termlen;
589 	}
590 
591 	set_64bit_val(wqe, IRDMA_BYTE_8,
592 		      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
593 		      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
594 	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
595 	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
596 
597 	hdr = qp->qp_uk.qp_id |
598 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
599 	    FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
600 	    FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
601 	    FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
602 		       info->cached_var_valid) |
603 	    FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
604 	    FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
605 	    FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
606 	    FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
607 	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
608 	    FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
609 	    FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
610 		       info->remove_hash_idx) |
611 	    FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
612 	    FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
613 	    FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
614 		       info->arp_cache_idx_valid) |
615 	    FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
616 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
617 
618 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
619 
620 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
621 
622 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_MODIFY WQE", wqe,
623 			IRDMA_CQP_WQE_SIZE * 8);
624 	if (post_sq)
625 		irdma_sc_cqp_post_sq(cqp);
626 
627 	return 0;
628 }
629 
630 /**
631  * irdma_sc_qp_destroy - cqp destroy qp
632  * @qp: sc qp
633  * @scratch: u64 saved to be used during cqp completion
634  * @remove_hash_idx: flag to remove hash idx
635  * @ignore_mw_bnd: memory window bind flag
636  * @post_sq: flag for cqp db to ring
637  */
638 int
639 irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
640 		    bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
641 {
642 	__le64 *wqe;
643 	struct irdma_sc_cqp *cqp;
644 	u64 hdr;
645 
646 	cqp = qp->dev->cqp;
647 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
648 	if (!wqe)
649 		return -ENOSPC;
650 
651 	set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa);
652 	set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa);
653 
654 	hdr = qp->qp_uk.qp_id |
655 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
656 	    FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
657 	    FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
658 	    FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
659 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
660 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
661 
662 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
663 
664 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_DESTROY WQE", wqe,
665 			IRDMA_CQP_WQE_SIZE * 8);
666 	if (post_sq)
667 		irdma_sc_cqp_post_sq(cqp);
668 
669 	return 0;
670 }
671 
672 /**
673  * irdma_sc_get_encoded_ird_size -
674  * @ird_size: IRD size
675  * The ird from the connection is rounded to a supported HW setting and then encoded
676  * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
677  * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
678  */
679 static u8 irdma_sc_get_encoded_ird_size(u16 ird_size) {
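	/*
	 * double the requested IRD and round up to a power of two before
	 * mapping to the HW encoding; an IRD of 0 falls back to the default of 4
	 */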
680 	switch (ird_size ?
681 		roundup_pow_of_two(2 * ird_size) : 4) {
682 	case 256:
683 		return IRDMA_IRD_HW_SIZE_256;
684 	case 128:
685 		return IRDMA_IRD_HW_SIZE_128;
686 	case 64:
687 	case 32:
688 		return IRDMA_IRD_HW_SIZE_64;
689 	case 16:
690 	case 8:
691 		return IRDMA_IRD_HW_SIZE_16;
692 	case 4:
693 	default:
694 		break;
695 	}
696 
697 	return IRDMA_IRD_HW_SIZE_4;
698 }
699 
700 /**
701  * irdma_sc_qp_setctx_roce - set qp's context
702  * @qp: sc qp
703  * @qp_ctx: context ptr
704  * @info: ctx info
705  */
706 void
707 irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
708 			struct irdma_qp_host_ctx_info *info)
709 {
710 	struct irdma_roce_offload_info *roce_info;
711 	struct irdma_udp_offload_info *udp;
712 	u8 push_mode_en;
713 	u32 push_idx;
714 	u64 mac;
715 
716 	roce_info = info->roce_info;
717 	udp = info->udp_info;
718 
719 	mac = LS_64_1(roce_info->mac_addr[5], 16) |
720 	    LS_64_1(roce_info->mac_addr[4], 24) |
721 	    LS_64_1(roce_info->mac_addr[3], 32) |
722 	    LS_64_1(roce_info->mac_addr[2], 40) |
723 	    LS_64_1(roce_info->mac_addr[1], 48) |
724 	    LS_64_1(roce_info->mac_addr[0], 56);
725 
726 	qp->user_pri = info->user_pri;
727 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
728 		push_mode_en = 0;
729 		push_idx = 0;
730 	} else {
731 		push_mode_en = 1;
732 		push_idx = qp->push_idx;
733 	}
734 	set_64bit_val(qp_ctx, IRDMA_BYTE_0,
735 		      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
736 		      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
737 		      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
738 		      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
739 		      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
740 		      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
741 		      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
742 		      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
743 		      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
744 		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
745 		      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
746 		      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
747 		      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
748 		      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
749 	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
750 	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
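	/* mark the flow ECN capable when an ECN-based congestion control (DCQCN/DCTCP) is enabled */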
751 	if (roce_info->dcqcn_en || roce_info->dctcp_en) {
752 		udp->tos &= ~ECN_CODE_PT_MASK;
753 		udp->tos |= ECN_CODE_PT_VAL;
754 	}
755 
756 	set_64bit_val(qp_ctx, IRDMA_BYTE_24,
757 		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
758 		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
759 		      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
760 		      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
761 		      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
762 	set_64bit_val(qp_ctx, IRDMA_BYTE_32,
763 		      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
764 		      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
765 	set_64bit_val(qp_ctx, IRDMA_BYTE_40,
766 		      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
767 		      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
768 	set_64bit_val(qp_ctx, IRDMA_BYTE_48,
769 		      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
770 		      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
771 		      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
772 	set_64bit_val(qp_ctx, IRDMA_BYTE_56,
773 		      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
774 		      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
775 		      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
776 		      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
777 	set_64bit_val(qp_ctx, IRDMA_BYTE_64,
778 		      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
779 		      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
780 	set_64bit_val(qp_ctx, IRDMA_BYTE_80,
781 		      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
782 		      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
783 	set_64bit_val(qp_ctx, IRDMA_BYTE_88,
784 		      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
785 	set_64bit_val(qp_ctx, IRDMA_BYTE_96,
786 		      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
787 		      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
788 	set_64bit_val(qp_ctx, IRDMA_BYTE_112,
789 		      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
790 	set_64bit_val(qp_ctx, IRDMA_BYTE_128,
791 		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
792 		      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
793 		      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
794 		      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
795 	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
796 		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
797 		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
798 	set_64bit_val(qp_ctx, IRDMA_BYTE_144,
799 		      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
800 	set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac);
801 	set_64bit_val(qp_ctx, IRDMA_BYTE_160,
802 		      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
803 		      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
804 		      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
805 		      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
806 		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
807 		      FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
808 		      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
809 		      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
810 		      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
811 		      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
812 		      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
813 		      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
814 		      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
815 	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
816 		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
817 	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
818 		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
819 		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
820 		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
821 	set_64bit_val(qp_ctx, IRDMA_BYTE_184,
822 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
823 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
824 	set_64bit_val(qp_ctx, IRDMA_BYTE_192,
825 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
826 		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
827 	set_64bit_val(qp_ctx, IRDMA_BYTE_200,
828 		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
829 		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
830 	set_64bit_val(qp_ctx, IRDMA_BYTE_208,
831 		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
832 
833 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
834 			IRDMA_QP_CTX_SIZE);
835 }
836 
837 /**
838  * irdma_sc_alloc_local_mac_entry - allocate a mac entry
839  * @cqp: struct for cqp hw  @scratch: u64 saved to be used during cqp completion  @post_sq: flag for cqp db to ring
840  */
841 static int
842 irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
843 			       bool post_sq)
844 {
845 	__le64 *wqe;
846 	u64 hdr;
847 
848 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
849 	if (!wqe)
850 		return -ENOSPC;
851 
852 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
853 			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
854 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
855 
856 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
857 
858 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
859 
860 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ALLOCATE_LOCAL_MAC WQE",
861 			wqe, IRDMA_CQP_WQE_SIZE * 8);
862 
863 	if (post_sq)
864 		irdma_sc_cqp_post_sq(cqp);
865 	return 0;
866 }
867 
868 /**
869  * irdma_sc_add_local_mac_entry - add mac entry
870  * @cqp: struct for cqp hw
871  * @info: mac addr info
872  * @scratch: u64 saved to be used during cqp completion
873  * @post_sq: flag for cqp db to ring
874  */
875 static int
876 irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
877 			     struct irdma_local_mac_entry_info *info,
878 			     u64 scratch, bool post_sq)
879 {
880 	__le64 *wqe;
881 	u64 temp, header;
882 
883 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
884 	if (!wqe)
885 		return -ENOSPC;
886 	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
887 	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
888 	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
889 
890 	set_64bit_val(wqe, IRDMA_BYTE_32, temp);
891 
892 	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
893 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
894 		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
895 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
896 
897 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
898 
899 	set_64bit_val(wqe, IRDMA_BYTE_24, header);
900 
901 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ADD_LOCAL_MAC WQE", wqe,
902 			IRDMA_CQP_WQE_SIZE * 8);
903 
904 	if (post_sq)
905 		irdma_sc_cqp_post_sq(cqp);
906 	return 0;
907 }
908 
909 /**
910  * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
911  * @cqp: struct for cqp hw
912  * @scratch: u64 saved to be used during cqp completion
913  * @entry_idx: index of mac entry
914  * @ignore_ref_count: to force mac address delete
915  * @post_sq: flag for cqp db to ring
916  */
917 static int
918 irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
919 			     u16 entry_idx, u8 ignore_ref_count,
920 			     bool post_sq)
921 {
922 	__le64 *wqe;
923 	u64 header;
924 
925 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
926 	if (!wqe)
927 		return -ENOSPC;
928 	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
929 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
930 		       IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
931 	    FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
932 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
933 	    FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
934 
935 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
936 
937 	set_64bit_val(wqe, IRDMA_BYTE_24, header);
938 
939 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
940 			wqe, IRDMA_CQP_WQE_SIZE * 8);
941 
942 	if (post_sq)
943 		irdma_sc_cqp_post_sq(cqp);
944 	return 0;
945 }
946 
947 /**
948  * irdma_sc_qp_setctx - set qp's context
949  * @qp: sc qp
950  * @qp_ctx: context ptr
951  * @info: ctx info
952  */
953 void
954 irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
955 		   struct irdma_qp_host_ctx_info *info)
956 {
957 	struct irdma_iwarp_offload_info *iw;
958 	struct irdma_tcp_offload_info *tcp;
959 	struct irdma_sc_dev *dev;
960 	u8 push_mode_en;
961 	u32 push_idx;
962 	u64 qw0, qw3, qw7 = 0, qw16 = 0;
963 	u64 mac = 0;
964 
965 	iw = info->iwarp_info;
966 	tcp = info->tcp_info;
967 	dev = qp->dev;
968 	if (iw->rcv_mark_en) {
969 		qp->pfpdu.marker_len = 4;
970 		qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
971 	}
972 	qp->user_pri = info->user_pri;
973 	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
974 		push_mode_en = 0;
975 		push_idx = 0;
976 	} else {
977 		push_mode_en = 1;
978 		push_idx = qp->push_idx;
979 	}
980 	qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
981 	    FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
982 	    FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
983 	    FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
984 	    FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
985 	    FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
986 	    FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
987 
988 	set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa);
989 	set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa);
990 
991 	qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
992 	    FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
993 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
994 		qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
995 				  qp->src_mac_addr_idx);
996 	set_64bit_val(qp_ctx, IRDMA_BYTE_136,
997 		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
998 		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
999 	set_64bit_val(qp_ctx, IRDMA_BYTE_168,
1000 		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
1001 	set_64bit_val(qp_ctx, IRDMA_BYTE_176,
1002 		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
1003 		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
1004 		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
1005 		      FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
1006 	if (info->iwarp_info_valid) {
1007 		qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
1008 		    FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
1009 		    FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
1010 		    FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
1011 		    FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
1012 		    FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
1013 		    FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
1014 			       iw->err_rq_idx_valid);
1015 		qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
1016 		qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
1017 		    FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
1018 		set_64bit_val(qp_ctx, IRDMA_BYTE_144,
1019 			      FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
1020 			      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
1021 
1022 		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1023 			mac = LS_64_1(iw->mac_addr[5], 16) |
1024 			    LS_64_1(iw->mac_addr[4], 24) |
1025 			    LS_64_1(iw->mac_addr[3], 32) |
1026 			    LS_64_1(iw->mac_addr[2], 40) |
1027 			    LS_64_1(iw->mac_addr[1], 48) |
1028 			    LS_64_1(iw->mac_addr[0], 56);
1029 		}
1030 
1031 		set_64bit_val(qp_ctx, IRDMA_BYTE_152,
1032 			      mac | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
1033 		set_64bit_val(qp_ctx, IRDMA_BYTE_160,
1034 			      FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
1035 			      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
1036 			      FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
1037 			      FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
1038 			      FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
1039 			      FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
1040 			      FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
1041 			      FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
1042 			      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
1043 			      FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
1044 			      FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
1045 			      FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
1046 			      FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
1047 			      FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset) |
1048 			      FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset) |
1049 			      FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
1050 	}
1051 	if (info->tcp_info_valid) {
1052 		qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
1053 		    FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
1054 		    FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
1055 			       tcp->insert_vlan_tag) |
1056 		    FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
1057 		    FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
1058 		    FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
1059 		    FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
1060 
1061 		if (iw->ecn_en || iw->dctcp_en) {
1062 			tcp->tos &= ~ECN_CODE_PT_MASK;
1063 			tcp->tos |= ECN_CODE_PT_VAL;
1064 		}
1065 
1066 		qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
1067 		    FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
1068 		    FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
1069 		    FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
1070 		    FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
1071 		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
1072 			qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
1073 
1074 			qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
1075 		}
1076 		set_64bit_val(qp_ctx, IRDMA_BYTE_32,
1077 			      FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
1078 			      FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
1079 		set_64bit_val(qp_ctx, IRDMA_BYTE_40,
1080 			      FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
1081 			      FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
1082 		set_64bit_val(qp_ctx, IRDMA_BYTE_48,
1083 			      FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
1084 			      FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
1085 			      FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
1086 			      FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
1087 		qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
1088 		    FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
1089 		    FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
1090 			       tcp->ignore_tcp_opt) |
1091 		    FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
1092 			       tcp->ignore_tcp_uns_opt) |
1093 		    FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
1094 		    FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
1095 		    FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
1096 		set_64bit_val(qp_ctx, IRDMA_BYTE_72,
1097 			      FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
1098 			      FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
1099 		set_64bit_val(qp_ctx, IRDMA_BYTE_80,
1100 			      FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
1101 			      FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
1102 		set_64bit_val(qp_ctx, IRDMA_BYTE_88,
1103 			      FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
1104 			      FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
1105 		set_64bit_val(qp_ctx, IRDMA_BYTE_96,
1106 			      FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
1107 			      FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
1108 		set_64bit_val(qp_ctx, IRDMA_BYTE_104,
1109 			      FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
1110 			      FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
1111 		set_64bit_val(qp_ctx, IRDMA_BYTE_112,
1112 			      FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
1113 			      FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
1114 		set_64bit_val(qp_ctx, IRDMA_BYTE_120,
1115 			      FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
1116 			      FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
1117 		qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
1118 		    FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
1119 		set_64bit_val(qp_ctx, IRDMA_BYTE_184,
1120 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
1121 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
1122 		set_64bit_val(qp_ctx, IRDMA_BYTE_192,
1123 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
1124 			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
1125 		set_64bit_val(qp_ctx, IRDMA_BYTE_200,
1126 			      FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
1127 			      FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
1128 		set_64bit_val(qp_ctx, IRDMA_BYTE_208,
1129 			      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
1130 	}
1131 
1132 	set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0);
1133 	set_64bit_val(qp_ctx, IRDMA_BYTE_24, qw3);
1134 	set_64bit_val(qp_ctx, IRDMA_BYTE_56, qw7);
1135 	set_64bit_val(qp_ctx, IRDMA_BYTE_128, qw16);
1136 
1137 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX", qp_ctx,
1138 			IRDMA_QP_CTX_SIZE);
1139 }
1140 
1141 /**
1142  * irdma_sc_alloc_stag - mr stag alloc
1143  * @dev: sc device struct
1144  * @info: stag info
1145  * @scratch: u64 saved to be used during cqp completion
1146  * @post_sq: flag for cqp db to ring
1147  */
1148 static int
1149 irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
1150 		    struct irdma_allocate_stag_info *info,
1151 		    u64 scratch, bool post_sq)
1152 {
1153 	__le64 *wqe;
1154 	struct irdma_sc_cqp *cqp;
1155 	u64 hdr;
1156 	enum irdma_page_size page_size;
1157 
1158 	if (!info->total_len && !info->all_memory)
1159 		return -EINVAL;
1160 
1161 	if (info->page_size == 0x40000000)
1162 		page_size = IRDMA_PAGE_SIZE_1G;
1163 	else if (info->page_size == 0x200000)
1164 		page_size = IRDMA_PAGE_SIZE_2M;
1165 	else
1166 		page_size = IRDMA_PAGE_SIZE_4K;
1167 
1168 	cqp = dev->cqp;
1169 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1170 	if (!wqe)
1171 		return -ENOSPC;
1172 
1173 	set_64bit_val(wqe, IRDMA_BYTE_8,
1174 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
1175 		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
1176 	set_64bit_val(wqe, IRDMA_BYTE_16,
1177 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1178 	set_64bit_val(wqe, IRDMA_BYTE_40,
1179 		      FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
1180 
1181 	if (info->chunk_size)
1182 		set_64bit_val(wqe, IRDMA_BYTE_48,
1183 			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
1184 
1185 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1186 	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1187 	    FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1188 	    FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1189 	    FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1190 	    FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
1191 	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1192 	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1193 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1194 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1195 
1196 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1197 
1198 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "ALLOC_STAG WQE", wqe,
1199 			IRDMA_CQP_WQE_SIZE * 8);
1200 	if (post_sq)
1201 		irdma_sc_cqp_post_sq(cqp);
1202 
1203 	return 0;
1204 }
1205 
1206 /**
1207  * irdma_sc_mr_reg_non_shared - non-shared mr registration
1208  * @dev: sc device struct
1209  * @info: mr info
1210  * @scratch: u64 saved to be used during cqp completion
1211  * @post_sq: flag for cqp db to ring
1212  */
1213 static int
1214 irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
1215 			   struct irdma_reg_ns_stag_info *info,
1216 			   u64 scratch, bool post_sq)
1217 {
1218 	__le64 *wqe;
1219 	u64 fbo;
1220 	struct irdma_sc_cqp *cqp;
1221 	u64 hdr;
1222 	u32 pble_obj_cnt;
1223 	bool remote_access;
1224 	u8 addr_type;
1225 	enum irdma_page_size page_size;
1226 
1227 	if (!info->total_len && !info->all_memory)
1228 		return -EINVAL;
1229 
1230 	if (info->page_size == 0x40000000)
1231 		page_size = IRDMA_PAGE_SIZE_1G;
1232 	else if (info->page_size == 0x200000)
1233 		page_size = IRDMA_PAGE_SIZE_2M;
1234 	else if (info->page_size == 0x1000)
1235 		page_size = IRDMA_PAGE_SIZE_4K;
1236 	else
1237 		return -EINVAL;
1238 
1239 	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
1240 				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
1241 		remote_access = true;
1242 	else
1243 		remote_access = false;
1244 
1245 	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
1246 	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
1247 		return -EINVAL;
1248 
1249 	cqp = dev->cqp;
1250 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1251 	if (!wqe)
1252 		return -ENOSPC;
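	/* first byte offset of the buffer within its page, programmed for zero-based (non-VA) registrations */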
1253 	fbo = info->va & (info->page_size - 1);
1254 
1255 	set_64bit_val(wqe, IRDMA_BYTE_0,
1256 		      (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
1257 		       info->va : fbo));
1258 	set_64bit_val(wqe, IRDMA_BYTE_8,
1259 		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
1260 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1261 	set_64bit_val(wqe, IRDMA_BYTE_16,
1262 		      FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
1263 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1264 	if (!info->chunk_size)
1265 		set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa);
1266 	else
1267 		set_64bit_val(wqe, IRDMA_BYTE_48,
1268 			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
1269 
1270 	set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index);
1271 
1272 	addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
1273 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
1274 	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
1275 	    FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
1276 	    FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
1277 	    FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
1278 	    FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
1279 	    FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
1280 	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
1281 	    FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
1282 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1283 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1284 
1285 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1286 
1287 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MR_REG_NS WQE", wqe,
1288 			IRDMA_CQP_WQE_SIZE * 8);
1289 	if (post_sq)
1290 		irdma_sc_cqp_post_sq(cqp);
1291 
1292 	return 0;
1293 }
1294 
1295 /**
1296  * irdma_sc_dealloc_stag - deallocate stag
1297  * @dev: sc device struct
1298  * @info: dealloc stag info
1299  * @scratch: u64 saved to be used during cqp completion
1300  * @post_sq: flag for cqp db to ring
1301  */
1302 static int
1303 irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
1304 		      struct irdma_dealloc_stag_info *info,
1305 		      u64 scratch, bool post_sq)
1306 {
1307 	u64 hdr;
1308 	__le64 *wqe;
1309 	struct irdma_sc_cqp *cqp;
1310 
1311 	cqp = dev->cqp;
1312 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1313 	if (!wqe)
1314 		return -ENOSPC;
1315 
1316 	set_64bit_val(wqe, IRDMA_BYTE_8,
1317 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1318 	set_64bit_val(wqe, IRDMA_BYTE_16,
1319 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
1320 
1321 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
1322 	    FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
1323 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1324 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1325 
1326 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1327 
1328 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "DEALLOC_STAG WQE", wqe,
1329 			IRDMA_CQP_WQE_SIZE * 8);
1330 	if (post_sq)
1331 		irdma_sc_cqp_post_sq(cqp);
1332 
1333 	return 0;
1334 }
1335 
1336 /**
1337  * irdma_sc_mw_alloc - mw allocate
1338  * @dev: sc device struct
1339  * @info: memory window allocation information
1340  * @scratch: u64 saved to be used during cqp completion
1341  * @post_sq: flag for cqp db to ring
1342  */
1343 static int
1344 irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
1345 		  struct irdma_mw_alloc_info *info, u64 scratch,
1346 		  bool post_sq)
1347 {
1348 	u64 hdr;
1349 	struct irdma_sc_cqp *cqp;
1350 	__le64 *wqe;
1351 
1352 	cqp = dev->cqp;
1353 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
1354 	if (!wqe)
1355 		return -ENOSPC;
1356 
1357 	set_64bit_val(wqe, IRDMA_BYTE_8,
1358 		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
1359 	set_64bit_val(wqe, IRDMA_BYTE_16,
1360 		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
1361 
1362 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
1363 	    FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
1364 	    FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
1365 		       info->mw1_bind_dont_vldt_key) |
1366 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
1367 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1368 
1369 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1370 
1371 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MW_ALLOC WQE", wqe,
1372 			IRDMA_CQP_WQE_SIZE * 8);
1373 	if (post_sq)
1374 		irdma_sc_cqp_post_sq(cqp);
1375 
1376 	return 0;
1377 }
1378 
1379 /**
1380  * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
1381  * @qp: sc qp struct
1382  * @info: fast mr info
1383  * @post_sq: flag for cqp db to ring
1384  */
1385 int
1386 irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
1387 			  struct irdma_fast_reg_stag_info *info,
1388 			  bool post_sq)
1389 {
1390 	u64 temp, hdr;
1391 	__le64 *wqe;
1392 	u32 wqe_idx;
1393 	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
1394 	enum irdma_page_size page_size;
1395 	struct irdma_post_sq_info sq_info = {0};
1396 
1397 	if (info->page_size == 0x40000000)
1398 		page_size = IRDMA_PAGE_SIZE_1G;
1399 	else if (info->page_size == 0x200000)
1400 		page_size = IRDMA_PAGE_SIZE_2M;
1401 	else
1402 		page_size = IRDMA_PAGE_SIZE_4K;
1403 
1404 	sq_info.wr_id = info->wr_id;
1405 	sq_info.signaled = info->signaled;
1406 	sq_info.push_wqe = info->push_wqe;
1407 
1408 	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, &quanta, 0, &sq_info);
1409 	if (!wqe)
1410 		return -ENOSPC;
1411 
1412 	qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled;
1413 	irdma_debug(qp->dev, IRDMA_DEBUG_MR,
1414 		    "wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id,
1415 		    wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
1416 
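	/* VA-based MRs program the virtual address; zero-based MRs program the first byte offset */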
1417 	temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
1418 	    (uintptr_t)info->va : info->fbo;
1419 	set_64bit_val(wqe, IRDMA_BYTE_0, temp);
1420 
1421 	temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
1422 			 info->first_pm_pbl_index >> 16);
1423 	set_64bit_val(wqe, IRDMA_BYTE_8,
1424 		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
1425 		      FIELD_PREP(IRDMAQPSQ_PBLADDR, info->reg_addr_pa >> IRDMA_HW_PAGE_SHIFT));
1426 	set_64bit_val(wqe, IRDMA_BYTE_16,
1427 		      info->total_len |
1428 		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
1429 
1430 	hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
1431 	    FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
1432 	    FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
1433 	    FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
1434 	    FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
1435 	    FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
1436 	    FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
1437 	    FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
1438 	    FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
1439 	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
1440 	    FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
1441 	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1442 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1443 
1444 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1445 
1446 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe,
1447 			IRDMA_QP_WQE_MIN_SIZE);
1448 	if (sq_info.push_wqe)
1449 		irdma_qp_push_wqe(&qp->qp_uk, wqe, quanta, wqe_idx, post_sq);
1450 	else if (post_sq)
1451 		irdma_uk_qp_post_wr(&qp->qp_uk);
1452 
1453 	return 0;
1454 }
1455 
1456 /**
1457  * irdma_sc_gen_rts_ae - request AE generated after RTS
1458  * @qp: sc qp struct
1459  */
1460 static void
1461 irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
1462 {
1463 	__le64 *wqe;
1464 	u64 hdr;
1465 	struct irdma_qp_uk *qp_uk;
1466 
1467 	qp_uk = &qp->qp_uk;
1468 
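	/*
	 * SQ slot 0 carries the LSMM/RTT WQE posted at connection setup (see
	 * irdma_sc_send_lsmm/irdma_sc_send_rtt); the fenced NOP goes in slot 1
	 * and the GEN_RTS_AE WQE in slot 2
	 */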
1469 	wqe = qp_uk->sq_base[1].elem;
1470 
1471 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
1472 	    FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
1473 	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1474 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1475 
1476 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1477 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "NOP W/LOCAL FENCE WQE", wqe,
1478 			IRDMA_QP_WQE_MIN_SIZE);
1479 
1480 	wqe = qp_uk->sq_base[2].elem;
1481 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
1482 	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1483 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1484 
1485 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1486 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe,
1487 			IRDMA_QP_WQE_MIN_SIZE);
1488 }
1489 
1490 /**
1491  * irdma_sc_send_lsmm - send last streaming mode message
1492  * @qp: sc qp struct
1493  * @lsmm_buf: buffer with lsmm message
1494  * @size: size of lsmm buffer
1495  * @stag: stag of lsmm buffer
1496  */
1497 void
1498 irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
1499 		   irdma_stag stag)
1500 {
1501 	__le64 *wqe;
1502 	u64 hdr;
1503 	struct irdma_qp_uk *qp_uk;
1504 
1505 	qp_uk = &qp->qp_uk;
1506 	wqe = qp_uk->sq_base->elem;
1507 
1508 	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
1509 	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1510 		set_64bit_val(wqe, IRDMA_BYTE_8,
1511 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
1512 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
1513 	} else {
1514 		set_64bit_val(wqe, IRDMA_BYTE_8,
1515 			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
1516 			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
1517 			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1518 	}
1519 	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
1520 
1521 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
1522 	    FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
1523 	    FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
1524 	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1525 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1526 
1527 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1528 
1529 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM WQE", wqe,
1530 			IRDMA_QP_WQE_MIN_SIZE);
1531 
1532 	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1533 		irdma_sc_gen_rts_ae(qp);
1534 }
1535 
1536 /**
1537  * irdma_sc_send_lsmm_nostag - send lsmm for privileged qp
1538  * @qp: sc qp struct
1539  * @lsmm_buf: buffer with lsmm message
1540  * @size: size of lsmm buffer
1541  */
1542 void
1543 irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
1544 {
1545 	__le64 *wqe;
1546 	u64 hdr;
1547 	struct irdma_qp_uk *qp_uk;
1548 
1549 	qp_uk = &qp->qp_uk;
1550 	wqe = qp_uk->sq_base->elem;
1551 
1552 	set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf);
1553 
1554 	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
1555 		set_64bit_val(wqe, IRDMA_BYTE_8,
1556 			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
1557 	else
1558 		set_64bit_val(wqe, IRDMA_BYTE_8,
1559 			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
1560 			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1561 	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
1562 
1563 	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
1564 	    FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
1565 	    FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
1566 	    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1567 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1568 
1569 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1570 
1571 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe,
1572 			IRDMA_QP_WQE_MIN_SIZE);
1573 }
1574 
1575 /**
1576  * irdma_sc_send_rtt - send last read0 or write0
1577  * @qp: sc qp struct
1578  * @read: Do read0 or write0
1579  */
1580 void
1581 irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
1582 {
1583 	__le64 *wqe;
1584 	u64 hdr;
1585 	struct irdma_qp_uk *qp_uk;
1586 
1587 	qp_uk = &qp->qp_uk;
1588 	wqe = qp_uk->sq_base->elem;
1589 
1590 	set_64bit_val(wqe, IRDMA_BYTE_0, 0);
1591 	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
1592 	if (read) {
1593 		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1594 			set_64bit_val(wqe, IRDMA_BYTE_8,
1595 				      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
1596 		} else {
1597 			set_64bit_val(wqe, IRDMA_BYTE_8,
1598 				      (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID,
1599 								qp->qp_uk.swqe_polarity));
1600 		}
1601 		hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
1602 		    FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
1603 		    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1604 
1605 	} else {
1606 		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
1607 			set_64bit_val(wqe, IRDMA_BYTE_8, 0);
1608 		} else {
1609 			set_64bit_val(wqe, IRDMA_BYTE_8,
1610 				      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
1611 		}
1612 		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
1613 		    FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
1614 	}
1615 
1616 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
1617 
1618 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
1619 
1620 	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "RTR WQE", wqe,
1621 			IRDMA_QP_WQE_MIN_SIZE);
1622 
1623 	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
1624 		irdma_sc_gen_rts_ae(qp);
1625 }
1626 
1627 /**
1628  * irdma_iwarp_opcode - determine the iWARP opcode of the incoming packet
1629  * @info: aeq info for the packet
1630  * @pkt: offending packet to parse
1631  */
1632 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt){
1633 	BE16 *mpa;
1634 	u32 opcode = 0xffffffff;
1635 
1636 	if (info->q2_data_written) {
1637 		mpa = (BE16 *) pkt;
1638 		opcode = IRDMA_NTOHS(mpa[1]) & 0xf;
1639 	}
1640 
1641 	return opcode;
1642 }
1643 
1644 /**
1645  * irdma_locate_mpa - return pointer to mpa in the pkt
1646  * @pkt: packet with data
1647  */
1648 static u8 *irdma_locate_mpa(u8 *pkt) {
1649 	/* skip over ethernet header */
1650 	pkt += IRDMA_MAC_HLEN;
1651 
1652 	/* Skip over IP and TCP headers */
1653 	pkt += 4 * (pkt[0] & 0x0f);
1654 	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
1655 
1656 	return pkt;
1657 }
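/*
 * Illustrative note, derived only from the arithmetic above: pkt[0] & 0x0f is
 * the IPv4 IHL in 32-bit words and, once past the IP header, pkt[12] >> 4 is
 * the TCP data offset, also in 32-bit words.  For a packet with no IP or TCP
 * options (20 + 20 bytes of headers) the MPA header therefore begins
 * IRDMA_MAC_HLEN + 40 bytes into the frame.
 */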
1658 
1659 /**
1660  * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
1661  * @qp: sc qp ptr for pkt
1662  * @hdr: term hdr
1663  * @opcode: flush opcode for termhdr
1664  * @layer_etype: error layer + error type
1665  * @err: error code in the header
1666  */
1667 static void
1668 irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
1669 		       struct irdma_terminate_hdr *hdr,
1670 		       enum irdma_flush_opcode opcode,
1671 		       u8 layer_etype, u8 err)
1672 {
1673 	qp->flush_code = opcode;
1674 	hdr->layer_etype = layer_etype;
1675 	hdr->error_code = err;
1676 }
1677 
1678 /**
1679  * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
1680  * @pkt: ptr to mpa in offending pkt
1681  * @hdr: term hdr
1682  * @copy_len: offending pkt length to be copied to term hdr
1683  * @is_tagged: DDP tagged or untagged
1684  */
1685 static void
1686 irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
1687 			   int *copy_len, u8 *is_tagged)
1688 {
1689 	u16 ddp_seg_len;
1690 
1691 	ddp_seg_len = IRDMA_NTOHS(*(BE16 *) pkt);
1692 	if (ddp_seg_len) {
1693 		*copy_len = 2;
1694 		hdr->hdrct = DDP_LEN_FLAG;
1695 		if (pkt[2] & 0x80) {
1696 			*is_tagged = 1;
1697 			if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
1698 				*copy_len += TERM_DDP_LEN_TAGGED;
1699 				hdr->hdrct |= DDP_HDR_FLAG;
1700 			}
1701 		} else {
1702 			if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
1703 				*copy_len += TERM_DDP_LEN_UNTAGGED;
1704 				hdr->hdrct |= DDP_HDR_FLAG;
1705 			}
1706 			if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
1707 			    ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
1708 				*copy_len += TERM_RDMA_LEN;
1709 				hdr->hdrct |= RDMA_HDR_FLAG;
1710 			}
1711 		}
1712 	}
1713 }
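/*
 * Worked example (illustrative only), following the branches above: for an
 * untagged DDP segment that carries an RDMA read request and is long enough,
 * *copy_len ends up as 2 + TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN and
 * hdr->hdrct is DDP_LEN_FLAG | DDP_HDR_FLAG | RDMA_HDR_FLAG, i.e. the MPA
 * length, the DDP header and the RDMAP header are all copied into the
 * terminate message.
 */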
1714 
1715 /**
1716  * irdma_bld_terminate_hdr - build terminate message header
1717  * @qp: qp associated with received terminate AE
1718  * @info: the struct containing AE information
1719  */
1720 static int
1721 irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
1722 			struct irdma_aeqe_info *info)
1723 {
1724 	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1725 	int copy_len = 0;
1726 	u8 is_tagged = 0;
1727 	u32 opcode;
1728 	struct irdma_terminate_hdr *termhdr;
1729 
1730 	termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
1731 	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
1732 
1733 	if (info->q2_data_written) {
1734 		pkt = irdma_locate_mpa(pkt);
1735 		irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
1736 	}
1737 
1738 	opcode = irdma_iwarp_opcode(info, pkt);
1739 	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1740 	qp->sq_flush_code = info->sq;
1741 	qp->rq_flush_code = info->rq;
1742 
1743 	switch (info->ae_id) {
1744 	case IRDMA_AE_AMP_UNALLOCATED_STAG:
1745 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1746 		if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
1747 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1748 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1749 					       DDP_TAGGED_INV_STAG);
1750 		else
1751 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1752 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1753 					       RDMAP_INV_STAG);
1754 		break;
1755 	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
1756 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1757 		if (info->q2_data_written)
1758 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1759 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1760 					       DDP_TAGGED_BOUNDS);
1761 		else
1762 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1763 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1764 					       RDMAP_INV_BOUNDS);
1765 		break;
1766 	case IRDMA_AE_AMP_BAD_PD:
1767 		switch (opcode) {
1768 		case IRDMA_OP_TYPE_RDMA_WRITE:
1769 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
1770 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1771 					       DDP_TAGGED_UNASSOC_STAG);
1772 			break;
1773 		case IRDMA_OP_TYPE_SEND_INV:
1774 		case IRDMA_OP_TYPE_SEND_SOL_INV:
1775 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1776 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1777 					       RDMAP_CANT_INV_STAG);
1778 			break;
1779 		default:
1780 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1781 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1782 					       RDMAP_UNASSOC_STAG);
1783 		}
1784 		break;
1785 	case IRDMA_AE_AMP_INVALID_STAG:
1786 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1787 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1788 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1789 				       RDMAP_INV_STAG);
1790 		break;
1791 	case IRDMA_AE_AMP_BAD_QP:
1792 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
1793 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1794 				       DDP_UNTAGGED_INV_QN);
1795 		break;
1796 	case IRDMA_AE_AMP_BAD_STAG_KEY:
1797 	case IRDMA_AE_AMP_BAD_STAG_INDEX:
1798 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1799 		switch (opcode) {
1800 		case IRDMA_OP_TYPE_SEND_INV:
1801 		case IRDMA_OP_TYPE_SEND_SOL_INV:
1802 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1803 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1804 					       RDMAP_CANT_INV_STAG);
1805 			break;
1806 		default:
1807 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1808 					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1809 					       RDMAP_INV_STAG);
1810 		}
1811 		break;
1812 	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
1813 	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
1814 	case IRDMA_AE_PRIV_OPERATION_DENIED:
1815 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1816 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1817 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1818 				       RDMAP_ACCESS);
1819 		break;
1820 	case IRDMA_AE_AMP_TO_WRAP:
1821 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1822 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
1823 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
1824 				       RDMAP_TO_WRAP);
1825 		break;
1826 	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
1827 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1828 				       (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
1829 		break;
1830 	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
1831 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1832 				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1833 				       DDP_CATASTROPHIC_LOCAL);
1834 		break;
1835 	case IRDMA_AE_LCE_QP_CATASTROPHIC:
1836 	case IRDMA_AE_DDP_NO_L_BIT:
1837 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1838 				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
1839 				       DDP_CATASTROPHIC_LOCAL);
1840 		break;
1841 	case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
1842 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1843 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1844 				       DDP_UNTAGGED_INV_MSN_RANGE);
1845 		break;
1846 	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
1847 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
1848 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
1849 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1850 				       DDP_UNTAGGED_INV_TOO_LONG);
1851 		break;
1852 	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
1853 		if (is_tagged)
1854 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1855 					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
1856 					       DDP_TAGGED_INV_DDP_VER);
1857 		else
1858 			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1859 					       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1860 					       DDP_UNTAGGED_INV_DDP_VER);
1861 		break;
1862 	case IRDMA_AE_DDP_UBE_INVALID_MO:
1863 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1864 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1865 				       DDP_UNTAGGED_INV_MO);
1866 		break;
1867 	case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
1868 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
1869 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1870 				       DDP_UNTAGGED_INV_MSN_NO_BUF);
1871 		break;
1872 	case IRDMA_AE_DDP_UBE_INVALID_QN:
1873 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1874 				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
1875 				       DDP_UNTAGGED_INV_QN);
1876 		break;
1877 	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
1878 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
1879 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1880 				       RDMAP_INV_RDMAP_VER);
1881 		break;
1882 	default:
1883 		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
1884 				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
1885 				       RDMAP_UNSPECIFIED);
1886 		break;
1887 	}
1888 
1889 	if (copy_len)
1890 		irdma_memcpy(termhdr + 1, pkt, copy_len);
1891 
1892 	return sizeof(struct irdma_terminate_hdr) + copy_len;
1893 }
1894 
1895 /**
1896  * irdma_terminate_send_fin() - Send fin for terminate message
1897  * @qp: qp associated with received terminate AE
1898  */
1899 void
1900 irdma_terminate_send_fin(struct irdma_sc_qp *qp)
1901 {
1902 	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1903 			     IRDMAQP_TERM_SEND_FIN_ONLY, 0);
1904 }
1905 
1906 /**
1907  * irdma_terminate_connection() - Bad AE and send terminate to remote QP
1908  * @qp: qp associated with received terminate AE
1909  * @info: the struct containing AE information
1910  */
1911 void
1912 irdma_terminate_connection(struct irdma_sc_qp *qp,
1913 			   struct irdma_aeqe_info *info)
1914 {
1915 	u8 termlen = 0;
1916 
1917 	if (qp->term_flags & IRDMA_TERM_SENT)
1918 		return;
1919 
1920 	termlen = irdma_bld_terminate_hdr(qp, info);
1921 	irdma_terminate_start_timer(qp);
1922 	qp->term_flags |= IRDMA_TERM_SENT;
1923 	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
1924 			     IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
1925 }
1926 
1927 /**
1928  * irdma_terminate_received - handle terminate received AE
1929  * @qp: qp associated with received terminate AE
1930  * @info: the struct containing AE information
1931  */
1932 void
1933 irdma_terminate_received(struct irdma_sc_qp *qp,
1934 			 struct irdma_aeqe_info *info)
1935 {
1936 	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
1937 	BE32 *mpa;
1938 	u8 ddp_ctl;
1939 	u8 rdma_ctl;
1940 	u16 aeq_id = 0;
1941 	struct irdma_terminate_hdr *termhdr;
1942 
1943 	mpa = (BE32 *) irdma_locate_mpa(pkt);
1944 	if (info->q2_data_written) {
1945 		/* did not validate the frame - do it now */
1946 		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
1947 		rdma_ctl = ntohl(mpa[0]) & 0xff;
1948 		if ((ddp_ctl & 0xc0) != 0x40)
1949 			aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
1950 		else if ((ddp_ctl & 0x03) != 1)
1951 			aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
1952 		else if (ntohl(mpa[2]) != 2)
1953 			aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
1954 		else if (ntohl(mpa[3]) != 1)
1955 			aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
1956 		else if (ntohl(mpa[4]) != 0)
1957 			aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
1958 		else if ((rdma_ctl & 0xc0) != 0x40)
1959 			aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
1960 
1961 		info->ae_id = aeq_id;
1962 		if (info->ae_id) {
1963 			/* Bad terminate recvd - send back a terminate */
1964 			irdma_terminate_connection(qp, info);
1965 			return;
1966 		}
1967 	}
1968 
1969 	qp->term_flags |= IRDMA_TERM_RCVD;
1970 	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
1971 	termhdr = (struct irdma_terminate_hdr *)&mpa[5];
1972 	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
1973 	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
1974 		irdma_terminate_done(qp, 0);
1975 	} else {
1976 		irdma_terminate_start_timer(qp);
1977 		irdma_terminate_send_fin(qp);
1978 	}
1979 }
1980 
1981 static int
1982 irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
1983 {
1984 	return 0;
1985 }
1986 
1987 static void
1988 irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
1989 {
1990 	/* do nothing */
1991 }
1992 
1993 static void
1994 irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
1995 {
1996 	/* do nothing */
1997 }
1998 
1999 /**
2000  * irdma_sc_vsi_init - Init the vsi structure
2001  * @vsi: pointer to vsi structure to initialize
2002  * @info: the info used to initialize the vsi struct
2003  */
2004 void
2005 irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
2006 		  struct irdma_vsi_init_info *info)
2007 {
2008 	u8 i;
2009 
2010 	vsi->dev = info->dev;
2011 	vsi->back_vsi = info->back_vsi;
2012 	vsi->register_qset = info->register_qset;
2013 	vsi->unregister_qset = info->unregister_qset;
2014 	vsi->mtu = info->params->mtu;
2015 	vsi->exception_lan_q = info->exception_lan_q;
2016 	vsi->vsi_idx = info->pf_data_vsi_num;
2017 
2018 	irdma_set_qos_info(vsi, info->params);
2019 	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
2020 		mutex_init(&vsi->qos[i].qos_mutex);
2021 		INIT_LIST_HEAD(&vsi->qos[i].qplist);
2022 	}
2023 	if (vsi->register_qset) {
2024 		vsi->dev->ws_add = irdma_ws_add;
2025 		vsi->dev->ws_remove = irdma_ws_remove;
2026 		vsi->dev->ws_reset = irdma_ws_reset;
2027 	} else {
2028 		vsi->dev->ws_add = irdma_null_ws_add;
2029 		vsi->dev->ws_remove = irdma_null_ws_remove;
2030 		vsi->dev->ws_reset = irdma_null_ws_reset;
2031 	}
2032 }
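/*
 * Usage sketch, not taken from a real caller; iwdev, rf, l2params, vsi_num
 * and exception_q are placeholder names for whatever the driver uses to hold
 * the device handle and LAN parameters:
 *
 *	struct irdma_vsi_init_info vsi_info = {0};
 *
 *	vsi_info.dev = &rf->sc_dev;
 *	vsi_info.back_vsi = iwdev;
 *	vsi_info.params = &l2params;
 *	vsi_info.pf_data_vsi_num = vsi_num;
 *	vsi_info.exception_lan_q = exception_q;
 *	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
 *
 * With register_qset left NULL, the irdma_null_ws_* callbacks defined above
 * are installed in place of the real work-scheduler hooks.
 */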
2033 
2034 /**
2035  * irdma_get_stats_idx - Return stats index
2036  * @vsi: pointer to the vsi
2037  */
2038 static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi){
2039 	struct irdma_stats_inst_info stats_info = {0};
2040 	struct irdma_sc_dev *dev = vsi->dev;
2041 
2042 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
2043 		if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
2044 					      &stats_info))
2045 			return stats_info.stats_idx;
2046 	}
2047 
2048 	return IRDMA_INVALID_STATS_IDX;
2049 }
2050 
2051 /**
2052  * irdma_vsi_stats_init - Initialize the vsi statistics
2053  * @vsi: pointer to the vsi structure
2054  * @info: The info structure used for initialization
2055  */
2056 int
2057 irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
2058 		     struct irdma_vsi_stats_info *info)
2059 {
2060 	struct irdma_dma_mem *stats_buff_mem;
2061 
2062 	vsi->pestat = info->pestat;
2063 	vsi->pestat->hw = vsi->dev->hw;
2064 	vsi->pestat->vsi = vsi;
2065 
2066 	stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
2067 	stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2;
2068 	stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw,
2069 						    stats_buff_mem,
2070 						    stats_buff_mem->size, 1);
2071 	if (!stats_buff_mem->va)
2072 		return -ENOMEM;
2073 
2074 	vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
2075 	vsi->pestat->gather_info.last_gather_stats_va =
2076 	    (void *)((uintptr_t)stats_buff_mem->va +
2077 		     IRDMA_GATHER_STATS_BUF_SIZE);
2078 
2079 	irdma_hw_stats_start_timer(vsi);
2080 
2081 	/* when stats allocation is not required, default to fcn_id. */
2082 	vsi->stats_idx = info->fcn_id;
2083 	if (info->alloc_stats_inst) {
2084 		u16 stats_idx = irdma_get_stats_idx(vsi);
2085 
2086 		if (stats_idx != IRDMA_INVALID_STATS_IDX) {
2087 			vsi->stats_inst_alloc = true;
2088 			vsi->stats_idx = stats_idx;
2089 			vsi->pestat->gather_info.use_stats_inst = true;
2090 			vsi->pestat->gather_info.stats_inst_index = stats_idx;
2091 		}
2092 	}
2093 
2094 	return 0;
2095 }
2096 
2097 /**
2098  * irdma_vsi_stats_free - Free the vsi stats
2099  * @vsi: pointer to the vsi structure
2100  */
2101 void
2102 irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
2103 {
2104 	struct irdma_stats_inst_info stats_info = {0};
2105 	struct irdma_sc_dev *dev = vsi->dev;
2106 
2107 	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
2108 		if (vsi->stats_inst_alloc) {
2109 			stats_info.stats_idx = vsi->stats_idx;
2110 			irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
2111 						 &stats_info);
2112 		}
2113 	}
2114 
2115 	if (!vsi->pestat)
2116 		return;
2117 
2118 	irdma_hw_stats_stop_timer(vsi);
2119 	irdma_free_dma_mem(vsi->pestat->hw,
2120 			   &vsi->pestat->gather_info.stats_buff_mem);
2121 }
2122 
2123 /**
2124  * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
2125  * @wqsize: size of the wq (sq, rq) to be encoded
2126  * @queue_type: queue type selected for the calculation algorithm
2127  */
2128 u8
2129 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
2130 {
2131 	u8 encoded_size = 0;
2132 
2133 	/*
2134 	 * cqp sq's hw coded value starts from 1 for a size of 4 while it starts from 0 for qp wq's.
2135 	 */
2136 	if (queue_type == IRDMA_QUEUE_TYPE_CQP)
2137 		encoded_size = 1;
2138 	wqsize >>= 2;
2139 	while (wqsize >>= 1)
2140 		encoded_size++;
2141 
2142 	return encoded_size;
2143 }
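/*
 * Worked example (illustrative): the loop computes log2(wqsize) - 2, so a QP
 * work queue of 32 entries encodes to 3, while a CQP SQ of 2048 entries
 * encodes to 1 + (11 - 2) = 10 because of the CQP offset applied above.
 */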
2144 
2145 /**
2146  * irdma_sc_gather_stats - collect the statistics
2147  * @cqp: struct for cqp hw
2148  * @info: gather stats info structure
2149  * @scratch: u64 saved to be used during cqp completion
2150  */
2151 static int
2152 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
2153 		      struct irdma_stats_gather_info *info,
2154 		      u64 scratch)
2155 {
2156 	__le64 *wqe;
2157 	u64 temp;
2158 
2159 	if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
2160 		return -ENOSPC;
2161 
2162 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2163 	if (!wqe)
2164 		return -ENOSPC;
2165 
2166 	set_64bit_val(wqe, IRDMA_BYTE_40,
2167 		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
2168 	set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa);
2169 
2170 	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2171 	    FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
2172 	    FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
2173 		       info->stats_inst_index) |
2174 	    FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2175 		       info->use_hmc_fcn_index) |
2176 	    FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
2177 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2178 
2179 	set_64bit_val(wqe, IRDMA_BYTE_24, temp);
2180 
2181 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_STATS, "GATHER_STATS WQE", wqe,
2182 			IRDMA_CQP_WQE_SIZE * 8);
2183 
2184 	irdma_sc_cqp_post_sq(cqp);
2185 	irdma_debug(cqp->dev, IRDMA_DEBUG_STATS,
2186 		    "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
2187 		    cqp->sq_ring.tail, cqp->sq_ring.size);
2188 
2189 	return 0;
2190 }
2191 
2192 /**
2193  * irdma_sc_manage_stats_inst - allocate or free stats instance
2194  * @cqp: struct for cqp hw
2195  * @info: stats info structure
2196  * @alloc: true to allocate a stats instance, false to free it
2197  * @scratch: u64 saved to be used during cqp completion
2198  */
2199 static int
2200 irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
2201 			   struct irdma_stats_inst_info *info,
2202 			   bool alloc, u64 scratch)
2203 {
2204 	__le64 *wqe;
2205 	u64 temp;
2206 
2207 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2208 	if (!wqe)
2209 		return -ENOSPC;
2210 
2211 	set_64bit_val(wqe, IRDMA_BYTE_40,
2212 		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
2213 	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
2214 	    FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
2215 	    FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
2216 		       info->use_hmc_fcn_index) |
2217 	    FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
2218 	    FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
2219 
2220 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2221 
2222 	set_64bit_val(wqe, IRDMA_BYTE_24, temp);
2223 
2224 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_STATS WQE", wqe,
2225 			IRDMA_CQP_WQE_SIZE * 8);
2226 
2227 	irdma_sc_cqp_post_sq(cqp);
2228 	return 0;
2229 }
2230 
2231 /**
2232  * irdma_sc_set_up_map - set the up map table
2233  * @cqp: struct for cqp hw
2234  * @info: User priority map info
2235  * @scratch: u64 saved to be used during cqp completion
2236  */
2237 static int
2238 irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
2239 		    struct irdma_up_info *info, u64 scratch)
2240 {
2241 	__le64 *wqe;
2242 	u64 temp;
2243 
2244 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2245 	if (!wqe)
2246 		return -ENOSPC;
2247 
2248 	temp = info->map[0] | LS_64_1(info->map[1], 8) |
2249 	    LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) |
2250 	    LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) |
2251 	    LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56);
2252 
2253 	set_64bit_val(wqe, IRDMA_BYTE_0, temp);
2254 	set_64bit_val(wqe, IRDMA_BYTE_40,
2255 		      FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
2256 		      FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
2257 
2258 	temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
2259 	    FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
2260 	    FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
2261 		       info->use_cnp_up_override) |
2262 	    FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
2263 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2264 
2265 	set_64bit_val(wqe, IRDMA_BYTE_24, temp);
2266 
2267 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPMAP WQE", wqe,
2268 			IRDMA_CQP_WQE_SIZE * 8);
2269 	irdma_sc_cqp_post_sq(cqp);
2270 
2271 	return 0;
2272 }
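/*
 * Illustration of the map packing above: the eight per-UP bytes are packed
 * little-end first into one 64-bit word, so an identity map
 * info->map[] = {0, 1, 2, 3, 4, 5, 6, 7} is written as 0x0706050403020100.
 */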
2273 
2274 /**
2275  * irdma_sc_manage_ws_node - create/modify/destroy WS node
2276  * @cqp: struct for cqp hw
2277  * @info: node info structure
2278  * @node_op: 0 for add, 1 for modify, 2 for delete
2279  * @scratch: u64 saved to be used during cqp completion
2280  */
2281 static int
2282 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
2283 			struct irdma_ws_node_info *info,
2284 			enum irdma_ws_node_op node_op, u64 scratch)
2285 {
2286 	__le64 *wqe;
2287 	u64 temp = 0;
2288 
2289 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2290 	if (!wqe)
2291 		return -ENOSPC;
2292 
2293 	set_64bit_val(wqe, IRDMA_BYTE_32,
2294 		      FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
2295 		      FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
2296 
2297 	temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
2298 	    FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
2299 	    FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
2300 	    FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
2301 	    FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
2302 	    FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
2303 	    FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
2304 	    FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
2305 	    FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
2306 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2307 
2308 	set_64bit_val(wqe, IRDMA_BYTE_24, temp);
2309 
2310 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_WS WQE", wqe,
2311 			IRDMA_CQP_WQE_SIZE * 8);
2312 	irdma_sc_cqp_post_sq(cqp);
2313 
2314 	return 0;
2315 }
2316 
2317 /**
2318  * irdma_sc_qp_flush_wqes - flush qp's wqe
2319  * @qp: sc qp
2320  * @info: flush information
2321  * @scratch: u64 saved to be used during cqp completion
2322  * @post_sq: flag for cqp db to ring
2323  */
2324 int
2325 irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
2326 		       struct irdma_qp_flush_info *info, u64 scratch,
2327 		       bool post_sq)
2328 {
2329 	u64 temp = 0;
2330 	__le64 *wqe;
2331 	struct irdma_sc_cqp *cqp;
2332 	u64 hdr;
2333 	bool flush_sq = false, flush_rq = false;
2334 
2335 	if (info->rq && !qp->flush_rq)
2336 		flush_rq = true;
2337 	if (info->sq && !qp->flush_sq)
2338 		flush_sq = true;
2339 	qp->flush_sq |= flush_sq;
2340 	qp->flush_rq |= flush_rq;
2341 
2342 	if (!flush_sq && !flush_rq) {
2343 		irdma_debug(qp->dev, IRDMA_DEBUG_CQP,
2344 			    "Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id);
2345 		return -EALREADY;
2346 	}
2347 
2348 	cqp = qp->pd->dev->cqp;
2349 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2350 	if (!wqe)
2351 		return -ENOSPC;
2352 
2353 	if (info->userflushcode) {
2354 		if (flush_rq)
2355 			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
2356 					   info->rq_minor_code) |
2357 			    FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
2358 				       info->rq_major_code);
2359 		if (flush_sq)
2360 			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
2361 					   info->sq_minor_code) |
2362 			    FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
2363 				       info->sq_major_code);
2364 	}
2365 	set_64bit_val(wqe, IRDMA_BYTE_16, temp);
2366 
2367 	temp = (info->generate_ae) ?
2368 	    info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2369 				       info->ae_src) : 0;
2370 	set_64bit_val(wqe, IRDMA_BYTE_8, temp);
2371 	hdr = qp->qp_uk.qp_id |
2372 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
2373 	    FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
2374 	    FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
2375 	    FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
2376 	    FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
2377 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2378 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2379 
2380 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2381 
2382 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_FLUSH WQE", wqe,
2383 			IRDMA_CQP_WQE_SIZE * 8);
2384 	if (post_sq)
2385 		irdma_sc_cqp_post_sq(cqp);
2386 
2387 	return 0;
2388 }
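/*
 * Usage sketch with caller-chosen placeholder values (qp, scratch, sq_major
 * and sq_minor are supplied by the caller and are not defined here); a
 * typical flush of both queues with user flush codes looks like:
 *
 *	struct irdma_qp_flush_info flush_info = {0};
 *
 *	flush_info.sq = true;
 *	flush_info.rq = true;
 *	flush_info.userflushcode = true;
 *	flush_info.sq_major_code = sq_major;
 *	flush_info.sq_minor_code = sq_minor;
 *	irdma_sc_qp_flush_wqes(qp, &flush_info, scratch, true);
 *
 * A second request for a queue that is already flushed returns -EALREADY,
 * as handled at the top of the function.
 */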
2389 
2390 /**
2391  * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
2392  * @qp: sc qp
2393  * @info: gen ae information
2394  * @scratch: u64 saved to be used during cqp completion
2395  * @post_sq: flag for cqp db to ring
2396  */
2397 static int
2398 irdma_sc_gen_ae(struct irdma_sc_qp *qp,
2399 		struct irdma_gen_ae_info *info, u64 scratch,
2400 		bool post_sq)
2401 {
2402 	u64 temp;
2403 	__le64 *wqe;
2404 	struct irdma_sc_cqp *cqp;
2405 	u64 hdr;
2406 
2407 	cqp = qp->pd->dev->cqp;
2408 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2409 	if (!wqe)
2410 		return -ENOSPC;
2411 
2412 	temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
2413 					  info->ae_src);
2414 	set_64bit_val(wqe, IRDMA_BYTE_8, temp);
2415 
2416 	hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
2417 					   IRDMA_CQP_OP_GEN_AE) |
2418 	    FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
2419 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2420 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2421 
2422 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2423 
2424 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "GEN_AE WQE", wqe,
2425 			IRDMA_CQP_WQE_SIZE * 8);
2426 	if (post_sq)
2427 		irdma_sc_cqp_post_sq(cqp);
2428 
2429 	return 0;
2430 }
2431 
2432 /**
 * irdma_sc_qp_upload_context - upload qp's context
2433  * @dev: sc device struct
2434  * @info: upload context info ptr for return
2435  * @scratch: u64 saved to be used during cqp completion
2436  * @post_sq: flag for cqp db to ring
2437  */
2438 static int
2439 irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
2440 			   struct irdma_upload_context_info *info,
2441 			   u64 scratch, bool post_sq)
2442 {
2443 	__le64 *wqe;
2444 	struct irdma_sc_cqp *cqp;
2445 	u64 hdr;
2446 
2447 	cqp = dev->cqp;
2448 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2449 	if (!wqe)
2450 		return -ENOSPC;
2451 
2452 	set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa);
2453 
2454 	hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
2455 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
2456 	    FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
2457 	    FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
2458 	    FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
2459 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2460 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2461 
2462 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2463 
2464 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QP_UPLOAD_CTX WQE", wqe,
2465 			IRDMA_CQP_WQE_SIZE * 8);
2466 	if (post_sq)
2467 		irdma_sc_cqp_post_sq(cqp);
2468 
2469 	return 0;
2470 }
2471 
2472 /**
2473  * irdma_sc_manage_push_page - Handle push page
2474  * @cqp: struct for cqp hw
2475  * @info: push page info
2476  * @scratch: u64 saved to be used during cqp completion
2477  * @post_sq: flag for cqp db to ring
2478  */
2479 static int
2480 irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
2481 			  struct irdma_cqp_manage_push_page_info *info,
2482 			  u64 scratch, bool post_sq)
2483 {
2484 	__le64 *wqe;
2485 	u64 hdr;
2486 
2487 	if (info->free_page &&
2488 	    info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
2489 		return -EINVAL;
2490 
2491 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2492 	if (!wqe)
2493 		return -ENOSPC;
2494 
2495 	set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle);
2496 	hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
2497 	    FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
2498 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
2499 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
2500 	    FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
2501 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2502 
2503 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2504 
2505 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", wqe,
2506 			IRDMA_CQP_WQE_SIZE * 8);
2507 	if (post_sq)
2508 		irdma_sc_cqp_post_sq(cqp);
2509 
2510 	return 0;
2511 }
2512 
2513 /**
2514  * irdma_sc_suspend_qp - suspend qp for param change
2515  * @cqp: struct for cqp hw
2516  * @qp: sc qp struct
2517  * @scratch: u64 saved to be used during cqp completion
2518  */
2519 static int
2520 irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2521 		    u64 scratch)
2522 {
2523 	u64 hdr;
2524 	__le64 *wqe;
2525 
2526 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2527 	if (!wqe)
2528 		return -ENOSPC;
2529 
2530 	hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
2531 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
2532 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2533 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2534 
2535 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2536 
2537 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SUSPEND_QP WQE", wqe,
2538 			IRDMA_CQP_WQE_SIZE * 8);
2539 	irdma_sc_cqp_post_sq(cqp);
2540 
2541 	return 0;
2542 }
2543 
2544 /**
2545  * irdma_sc_resume_qp - resume qp after suspend
2546  * @cqp: struct for cqp hw
2547  * @qp: sc qp struct
2548  * @scratch: u64 saved to be used during cqp completion
2549  */
2550 static int
2551 irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
2552 		   u64 scratch)
2553 {
2554 	u64 hdr;
2555 	__le64 *wqe;
2556 
2557 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2558 	if (!wqe)
2559 		return -ENOSPC;
2560 
2561 	set_64bit_val(wqe, IRDMA_BYTE_16,
2562 		      FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
2563 
2564 	hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
2565 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
2566 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2567 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2568 
2569 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2570 
2571 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "RESUME_QP WQE", wqe,
2572 			IRDMA_CQP_WQE_SIZE * 8);
2573 	irdma_sc_cqp_post_sq(cqp);
2574 
2575 	return 0;
2576 }
2577 
2578 /**
2579  * irdma_sc_cq_ack - acknowledge completion q
2580  * @cq: cq struct
2581  */
2582 static inline void
2583 irdma_sc_cq_ack(struct irdma_sc_cq *cq)
2584 {
2585 	db_wr32(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
2586 }
2587 
2588 /**
2589  * irdma_sc_cq_init - initialize completion q
2590  * @cq: cq struct
2591  * @info: cq initialization info
2592  */
2593 int
2594 irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
2595 {
2596 	int ret_code;
2597 	u32 pble_obj_cnt;
2598 
2599 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2600 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
2601 		return -EINVAL;
2602 
2603 	cq->cq_pa = info->cq_base_pa;
2604 	cq->dev = info->dev;
2605 	cq->ceq_id = info->ceq_id;
2606 	info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
2607 	info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
2608 	ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
2609 	if (ret_code)
2610 		return ret_code;
2611 
2612 	cq->virtual_map = info->virtual_map;
2613 	cq->pbl_chunk_size = info->pbl_chunk_size;
2614 	cq->ceqe_mask = info->ceqe_mask;
2615 	cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
2616 	cq->shadow_area_pa = info->shadow_area_pa;
2617 	cq->shadow_read_threshold = info->shadow_read_threshold;
2618 	cq->ceq_id_valid = info->ceq_id_valid;
2619 	cq->tph_en = info->tph_en;
2620 	cq->tph_val = info->tph_val;
2621 	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2622 	cq->vsi = info->vsi;
2623 
2624 	return 0;
2625 }
2626 
2627 /**
2628  * irdma_sc_cq_create - create completion q
2629  * @cq: cq struct
2630  * @scratch: u64 saved to be used during cqp completion
2631  * @check_overflow: flag for overflow check
2632  * @post_sq: flag for cqp db to ring
2633  */
2634 static int
2635 irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
2636 		   bool check_overflow, bool post_sq)
2637 {
2638 	__le64 *wqe;
2639 	struct irdma_sc_cqp *cqp;
2640 	u64 hdr;
2641 	struct irdma_sc_ceq *ceq;
2642 	int ret_code = 0;
2643 
2644 	cqp = cq->dev->cqp;
2645 	if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1))
2646 		return -EINVAL;
2647 
2648 	if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1))
2649 		return -EINVAL;
2650 
2651 	ceq = cq->dev->ceq[cq->ceq_id];
2652 	if (ceq && ceq->reg_cq)
2653 		ret_code = irdma_sc_add_cq_ctx(ceq, cq);
2654 
2655 	if (ret_code)
2656 		return ret_code;
2657 
2658 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2659 	if (!wqe) {
2660 		if (ceq && ceq->reg_cq)
2661 			irdma_sc_remove_cq_ctx(ceq, cq);
2662 		return -ENOSPC;
2663 	}
2664 
2665 	set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
2666 	set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
2667 	set_64bit_val(wqe, IRDMA_BYTE_16,
2668 		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
2669 	set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa));
2670 	set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
2671 	set_64bit_val(wqe, IRDMA_BYTE_48,
2672 		      FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
2673 	set_64bit_val(wqe, IRDMA_BYTE_56,
2674 		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2675 		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2676 
2677 	hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
2678 	    FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2679 		      IRDMA_CQPSQ_CQ_CEQID) |
2680 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
2681 	    FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2682 	    FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
2683 	    FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2684 	    FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2685 	    FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2686 	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2687 	    FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2688 		       cq->cq_uk.avoid_mem_cflct) |
2689 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2690 
2691 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2692 
2693 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2694 
2695 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_CREATE WQE", wqe,
2696 			IRDMA_CQP_WQE_SIZE * 8);
2697 	if (post_sq)
2698 		irdma_sc_cqp_post_sq(cqp);
2699 
2700 	return 0;
2701 }
2702 
2703 /**
2704  * irdma_sc_cq_destroy - destroy completion q
2705  * @cq: cq struct
2706  * @scratch: u64 saved to be used during cqp completion
2707  * @post_sq: flag for cqp db to ring
2708  */
2709 int
2710 irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
2711 {
2712 	struct irdma_sc_cqp *cqp;
2713 	__le64 *wqe;
2714 	u64 hdr;
2715 	struct irdma_sc_ceq *ceq;
2716 
2717 	cqp = cq->dev->cqp;
2718 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2719 	if (!wqe)
2720 		return -ENOSPC;
2721 
2722 	ceq = cq->dev->ceq[cq->ceq_id];
2723 	if (ceq && ceq->reg_cq)
2724 		irdma_sc_remove_cq_ctx(ceq, cq);
2725 
2726 	set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
2727 	set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
2728 	set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
2729 	set_64bit_val(wqe, IRDMA_BYTE_48,
2730 		      (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2731 
2732 	hdr = cq->cq_uk.cq_id |
2733 	    FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
2734 		      IRDMA_CQPSQ_CQ_CEQID) |
2735 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
2736 	    FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
2737 	    FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
2738 	    FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2739 	    FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
2740 	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2741 	    FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
2742 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2743 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2744 
2745 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2746 
2747 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_DESTROY WQE", wqe,
2748 			IRDMA_CQP_WQE_SIZE * 8);
2749 	if (post_sq)
2750 		irdma_sc_cqp_post_sq(cqp);
2751 
2752 	return 0;
2753 }
2754 
2755 /**
2756  * irdma_sc_cq_resize - set resized cq buffer info
2757  * @cq: resized cq
2758  * @info: resized cq buffer info
2759  */
2760 void
2761 irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
2762 {
2763 	cq->virtual_map = info->virtual_map;
2764 	cq->cq_pa = info->cq_pa;
2765 	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2766 	cq->pbl_chunk_size = info->pbl_chunk_size;
2767 	irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
2768 }
2769 
2770 /**
2771  * irdma_sc_cq_modify - modify a Completion Queue
2772  * @cq: cq struct
2773  * @info: modification info struct
2774  * @scratch: u64 saved to be used during cqp completion
2775  * @post_sq: flag to post to sq
2776  */
2777 static int
2778 irdma_sc_cq_modify(struct irdma_sc_cq *cq,
2779 		   struct irdma_modify_cq_info *info, u64 scratch,
2780 		   bool post_sq)
2781 {
2782 	struct irdma_sc_cqp *cqp;
2783 	__le64 *wqe;
2784 	u64 hdr;
2785 	u32 pble_obj_cnt;
2786 
2787 	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
2788 	if (info->cq_resize && info->virtual_map &&
2789 	    info->first_pm_pbl_idx >= pble_obj_cnt)
2790 		return -EINVAL;
2791 
2792 	cqp = cq->dev->cqp;
2793 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
2794 	if (!wqe)
2795 		return -ENOSPC;
2796 
2797 	set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size);
2798 	set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
2799 	set_64bit_val(wqe, IRDMA_BYTE_16,
2800 		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
2801 	set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa);
2802 	set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
2803 	set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx);
2804 	set_64bit_val(wqe, IRDMA_BYTE_56,
2805 		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
2806 		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
2807 
2808 	hdr = cq->cq_uk.cq_id |
2809 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
2810 	    FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
2811 	    FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
2812 	    FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
2813 	    FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
2814 	    FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
2815 	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
2816 	    FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
2817 		       cq->cq_uk.avoid_mem_cflct) |
2818 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
2819 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
2820 
2821 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
2822 
2823 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_MODIFY WQE", wqe,
2824 			IRDMA_CQP_WQE_SIZE * 8);
2825 	if (post_sq)
2826 		irdma_sc_cqp_post_sq(cqp);
2827 
2828 	return 0;
2829 }
2830 
2831 /**
2832  * irdma_check_cqp_progress - check cqp processing progress
2833  * @timeout: timeout info struct
2834  * @dev: sc device struct
2835  */
2836 void
2837 irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout,
2838 			 struct irdma_sc_dev *dev)
2839 {
2840 	if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
2841 		timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
2842 		timeout->count = 0;
2843 	} else if (timeout->compl_cqp_cmds !=
2844 		   dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]) {
2845 		timeout->count++;
2846 	}
2847 }
2848 
2849 /**
2850  * irdma_get_cqp_reg_info - get head and tail for cqp using registers
2851  * @cqp: struct for cqp hw
2852  * @val: cqp tail register value
2853  * @tail: wqtail register value
2854  * @error: cqp processing err
2855  */
2856 static inline void
2857 irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
2858 		       u32 *tail, u32 *error)
2859 {
2860 	*val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
2861 	*tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
2862 	*error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
2863 }
2864 
2865 /**
2866  * irdma_cqp_poll_registers - poll cqp registers
2867  * @cqp: struct for cqp hw
2868  * @tail: wqtail register value
2869  * @count: how many times to try for completion
2870  */
2871 static int
2872 irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
2873 			 u32 count)
2874 {
2875 	u32 i = 0;
2876 	u32 newtail, error, val;
2877 
2878 	while (i++ < count) {
2879 		irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
2880 		if (error) {
2881 			error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
2882 			irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
2883 				    "CQPERRCODES error_code[x%08X]\n", error);
2884 			return -EIO;
2885 		}
2886 		if (newtail != tail) {
2887 			/* SUCCESS */
2888 			IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
2889 			cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
2890 			return 0;
2891 		}
2892 		irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
2893 	}
2894 
2895 	return -ETIMEDOUT;
2896 }
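/*
 * Behavioral note (summary of the loop above, not a hardware spec): each
 * iteration that does not observe a tail change delays for
 * hw_attrs.max_sleep_count microseconds, so the total wait is bounded by
 * roughly count * max_sleep_count usecs before -ETIMEDOUT is returned.
 */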
2897 
2898 /**
2899  * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
2900  * @dev: sc device struct
2901  * @buf: pointer to commit buffer
2902  * @buf_idx: buffer index
2903  * @obj_info: object info pointer
2904  * @rsrc_idx: index of the memory resource
2905  */
2906 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf,
2907 				      u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
2908 				      u32 rsrc_idx){
2909 	u64 temp;
2910 
2911 	get_64bit_val(buf, buf_idx, &temp);
2912 
2913 	switch (rsrc_idx) {
2914 	case IRDMA_HMC_IW_QP:
2915 		obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
2916 		break;
2917 	case IRDMA_HMC_IW_CQ:
2918 		obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
2919 		break;
2920 	case IRDMA_HMC_IW_APBVT_ENTRY:
2921 		if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
2922 			obj_info[rsrc_idx].cnt = 1;
2923 		else
2924 			obj_info[rsrc_idx].cnt = 0;
2925 		break;
2926 	default:
2927 		obj_info[rsrc_idx].cnt = (u32)temp;
2928 		break;
2929 	}
2930 
2931 	obj_info[rsrc_idx].base = (u64)RS_64_1(temp, IRDMA_COMMIT_FPM_BASE_S) * 512;
2932 
2933 	return temp;
2934 }
2935 
2936 /**
2937  * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
2938  * @dev: pointer to dev struct
2939  * @buf: ptr to fpm commit buffer
2940  * @info: ptr to irdma_hmc_obj_info struct
2941  * @sd: number of SDs for HMC objects
2942  *
2943  * parses fpm commit info and copies the base values
2944  * of hmc objects into hmc_info
2945  */
2946 static void
2947 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
2948 			      struct irdma_hmc_obj_info *info,
2949 			      u32 *sd)
2950 {
2951 	u64 size;
2952 	u32 i;
2953 	u64 max_base = 0;
2954 	u32 last_hmc_obj = 0;
2955 
2956 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_0, info,
2957 				   IRDMA_HMC_IW_QP);
2958 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_8, info,
2959 				   IRDMA_HMC_IW_CQ);
2960 	/* skipping RSRVD */
2961 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_24, info,
2962 				   IRDMA_HMC_IW_HTE);
2963 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_32, info,
2964 				   IRDMA_HMC_IW_ARP);
2965 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_40, info,
2966 				   IRDMA_HMC_IW_APBVT_ENTRY);
2967 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_48, info,
2968 				   IRDMA_HMC_IW_MR);
2969 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_56, info,
2970 				   IRDMA_HMC_IW_XF);
2971 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_64, info,
2972 				   IRDMA_HMC_IW_XFFL);
2973 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_72, info,
2974 				   IRDMA_HMC_IW_Q1);
2975 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_80, info,
2976 				   IRDMA_HMC_IW_Q1FL);
2977 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_88, info,
2978 				   IRDMA_HMC_IW_TIMER);
2979 	irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_112, info,
2980 				   IRDMA_HMC_IW_PBLE);
2981 	/* skipping RSVD. */
2982 	if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
2983 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_96, info,
2984 					   IRDMA_HMC_IW_FSIMC);
2985 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_104, info,
2986 					   IRDMA_HMC_IW_FSIAV);
2987 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_128, info,
2988 					   IRDMA_HMC_IW_RRF);
2989 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_136, info,
2990 					   IRDMA_HMC_IW_RRFFL);
2991 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_144, info,
2992 					   IRDMA_HMC_IW_HDR);
2993 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info,
2994 					   IRDMA_HMC_IW_MD);
2995 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
2996 					   IRDMA_HMC_IW_OOISC);
2997 		irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
2998 					   IRDMA_HMC_IW_OOISCFFL);
2999 	}
3000 
3001 	/* searching for the last object in HMC to find the size of the HMC area. */
3002 	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
3003 		if (info[i].base > max_base) {
3004 			max_base = info[i].base;
3005 			last_hmc_obj = i;
3006 		}
3007 	}
3008 
3009 	size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
3010 	    info[last_hmc_obj].base;
3011 
3012 	if (size & 0x1FFFFF)
3013 		*sd = (u32)((size >> 21) + 1);	/* add 1 for remainder */
3014 	else
3015 		*sd = (u32)(size >> 21);
3016 
3017 }
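/*
 * Worked example (illustrative) for the SD count above: each segment
 * descriptor covers 2MB (1 << 21), so an HMC area whose last object ends at
 * 5MB yields (5MB >> 21) = 2 full SDs plus one more for the 1MB remainder,
 * i.e. *sd = 3.
 */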
3018 
3019 /**
3020  * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
3021  * @buf: ptr to fpm query buffer
3022  * @buf_idx: index into buf
3023  * @obj_info: ptr to irdma_hmc_obj_info struct
3024  * @rsrc_idx: resource index into info
3025  *
3026  * Decode a 64 bit value from fpm query buffer into max count and size
3027  */
3028 static u64 irdma_sc_decode_fpm_query(__le64 * buf, u32 buf_idx,
3029 				     struct irdma_hmc_obj_info *obj_info,
3030 				     u32 rsrc_idx){
3031 	u64 temp;
3032 	u32 size;
3033 
3034 	get_64bit_val(buf, buf_idx, &temp);
3035 	obj_info[rsrc_idx].max_cnt = (u32)temp;
3036 	size = (u32)RS_64_1(temp, 32);
3037 	obj_info[rsrc_idx].size = LS_64_1(1, size);
3038 
3039 	return temp;
3040 }
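/*
 * Illustration: the low 32 bits of the query word are the maximum object
 * count and the high 32 bits are a power-of-two exponent for the object
 * size, so a value of 8 in the upper half decodes to a 256-byte object.
 */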
3041 
3042 /**
3043  * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
3044  * @dev: ptr to shared code device
3045  * @buf: ptr to fpm query buffer
3046  * @hmc_info: ptr to irdma_hmc_obj_info struct
3047  * @hmc_fpm_misc: ptr to fpm data
3048  *
3049  * parses the fpm query buffer and copies the max_cnt and
3050  * size values of hmc objects into hmc_info
3051  */
3052 static int
3053 irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,
3054 			     struct irdma_hmc_info *hmc_info,
3055 			     struct irdma_hmc_fpm_misc *hmc_fpm_misc)
3056 {
3057 	struct irdma_hmc_obj_info *obj_info;
3058 	u64 temp;
3059 	u32 size;
3060 	u16 max_pe_sds;
3061 
3062 	obj_info = hmc_info->hmc_obj;
3063 
3064 	get_64bit_val(buf, IRDMA_BYTE_0, &temp);
3065 	hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
3066 	max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
3067 
3068 	hmc_fpm_misc->max_sds = max_pe_sds;
3069 	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
3070 	get_64bit_val(buf, 8, &temp);
3071 	obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
3072 	size = (u32)RS_64_1(temp, 32);
3073 	obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size);
3074 
3075 	get_64bit_val(buf, 16, &temp);
3076 	obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
3077 	size = (u32)RS_64_1(temp, 32);
3078 	obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size);
3079 
3080 	irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
3081 	irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
3082 
3083 	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
3084 	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
3085 
3086 	irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
3087 	irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
3088 
3089 	get_64bit_val(buf, 64, &temp);
3090 	obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
3091 	obj_info[IRDMA_HMC_IW_XFFL].size = 4;
3092 	hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
3093 	if (!hmc_fpm_misc->xf_block_size)
3094 		return -EINVAL;
3095 
3096 	irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
3097 	get_64bit_val(buf, 80, &temp);
3098 	obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
3099 	obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
3100 
3101 	hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
3102 	if (!hmc_fpm_misc->q1_block_size)
3103 		return -EINVAL;
3104 
3105 	irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
3106 
3107 	get_64bit_val(buf, 112, &temp);
3108 	obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
3109 	obj_info[IRDMA_HMC_IW_PBLE].size = 8;
3110 
3111 	get_64bit_val(buf, 120, &temp);
3112 	hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
3113 	hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
3114 	hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
3115 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
3116 		return 0;
3117 	irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
3118 	irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
3119 	irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
3120 
3121 	get_64bit_val(buf, IRDMA_BYTE_136, &temp);
3122 	obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
3123 	obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
3124 	hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
3125 	if (!hmc_fpm_misc->rrf_block_size &&
3126 	    obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
3127 		return -EINVAL;
3128 
3129 	irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
3130 	irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
3131 	irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
3132 
3133 	get_64bit_val(buf, IRDMA_BYTE_168, &temp);
3134 	obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
3135 	obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
3136 	hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
3137 	if (!hmc_fpm_misc->ooiscf_block_size &&
3138 	    obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
3139 		return -EINVAL;
3140 
3141 	return 0;
3142 }
3143 
3144 /**
3145  * irdma_sc_find_reg_cq - find cq ctx index
3146  * @ceq: ceq sc structure
3147  * @cq: cq sc structure
3148  */
3149 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
3150 				struct irdma_sc_cq *cq){
3151 	u32 i;
3152 
3153 	for (i = 0; i < ceq->reg_cq_size; i++) {
3154 		if (cq == ceq->reg_cq[i])
3155 			return i;
3156 	}
3157 
3158 	return IRDMA_INVALID_CQ_IDX;
3159 }
3160 
3161 /**
3162  * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
3163  * @ceq: ceq sc structure
3164  * @cq: cq sc structure
3165  */
3166 int
3167 irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3168 {
3169 	unsigned long flags;
3170 
3171 	spin_lock_irqsave(&ceq->req_cq_lock, flags);
3172 
3173 	if (ceq->reg_cq_size == ceq->elem_cnt) {
3174 		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3175 		return -ENOSPC;
3176 	}
3177 
3178 	ceq->reg_cq[ceq->reg_cq_size++] = cq;
3179 
3180 	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3181 
3182 	return 0;
3183 }
3184 
3185 /**
3186  * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
3187  * @ceq: ceq sc structure
3188  * @cq: cq sc structure
3189  */
3190 void
3191 irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
3192 {
3193 	unsigned long flags;
3194 	u32 cq_ctx_idx;
3195 
3196 	spin_lock_irqsave(&ceq->req_cq_lock, flags);
3197 	cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
3198 	if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
3199 		goto exit;
3200 
3201 	ceq->reg_cq_size--;
3202 	if (cq_ctx_idx != ceq->reg_cq_size)
3203 		ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
3204 	ceq->reg_cq[ceq->reg_cq_size] = NULL;
3205 
3206 exit:
3207 	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3208 }
3209 
3210 /**
3211  * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
3212  * @cqp: IWARP control queue pair pointer
3213  * @info: IWARP control queue pair init info pointer
3214  *
3215  * Initializes the object and context buffers for a control Queue Pair.
3216  */
3217 int
3218 irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
3219 		  struct irdma_cqp_init_info *info)
3220 {
3221 	u8 hw_sq_size;
3222 
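	/*
	 * The CQP SQ size must be a power of two between IRDMA_CQP_SW_SQSIZE_4
	 * and IRDMA_CQP_SW_SQSIZE_2048; (x & (x - 1)) is non-zero for any
	 * value that is not a power of two.
	 */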
3223 	if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
3224 	    info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
3225 	    ((info->sq_size & (info->sq_size - 1))))
3226 		return -EINVAL;
3227 
3228 	hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
3229 						IRDMA_QUEUE_TYPE_CQP);
3230 	cqp->size = sizeof(*cqp);
3231 	cqp->sq_size = info->sq_size;
3232 	cqp->hw_sq_size = hw_sq_size;
3233 	cqp->sq_base = info->sq;
3234 	cqp->host_ctx = info->host_ctx;
3235 	cqp->sq_pa = info->sq_pa;
3236 	cqp->host_ctx_pa = info->host_ctx_pa;
3237 	cqp->dev = info->dev;
3238 	cqp->struct_ver = info->struct_ver;
3239 	cqp->hw_maj_ver = info->hw_maj_ver;
3240 	cqp->hw_min_ver = info->hw_min_ver;
3241 	cqp->scratch_array = info->scratch_array;
3242 	cqp->polarity = 0;
3243 	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
3244 	cqp->ena_vf_count = info->ena_vf_count;
3245 	cqp->hmc_profile = info->hmc_profile;
3246 	cqp->ceqs_per_vf = info->ceqs_per_vf;
3247 	cqp->disable_packed = info->disable_packed;
3248 	cqp->rocev2_rto_policy = info->rocev2_rto_policy;
3249 	cqp->protocol_used = info->protocol_used;
3250 	irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
3251 	cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk;
3252 	info->dev->cqp = cqp;
3253 
3254 	IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
3255 	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
3256 	cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
3257 	/* for the cqp commands backlog. */
3258 	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
3259 
3260 	writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
3261 	writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
3262 	writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3263 
3264 	irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
3265 		    "sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n",
3266 		    cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (unsigned long long)cqp->sq_pa, cqp,
3267 		    cqp->polarity);
3268 	return 0;
3269 }
3270 
3271 /**
3272  * irdma_sc_cqp_create - create cqp during bringup
3273  * @cqp: struct for cqp hw
3274  * @maj_err: If error, major err number
3275  * @min_err: If error, minor err number
3276  */
3277 int
3278 irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
3279 {
3280 	u64 temp;
3281 	u8 hw_rev;
3282 	u32 cnt = 0, p1, p2, val = 0, err_code;
3283 	int ret_code;
3284 
3285 	hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
3286 	cqp->sdbuf.size = IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size;
3287 	cqp->sdbuf.va = irdma_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf,
3288 					       cqp->sdbuf.size,
3289 					       IRDMA_SD_BUF_ALIGNMENT);
3290 	if (!cqp->sdbuf.va)
3291 		return -ENOMEM;
3292 
3293 	spin_lock_init(&cqp->dev->cqp_lock);
3294 
3295 	temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
3296 	    FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
3297 	    FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
3298 	    FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
3299 	if (hw_rev >= IRDMA_GEN_2) {
3300 		temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
3301 				   cqp->rocev2_rto_policy) |
3302 		    FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
3303 			       cqp->protocol_used);
3304 	}
3305 
3306 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp);
3307 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa);
3308 
3309 	temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
3310 	    FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
3311 	if (hw_rev >= IRDMA_GEN_2)
3312 		temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK,
3313 				   cqp->en_rem_endpoint_trk);
3314 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp);
3315 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp);
3316 	temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
3317 	    FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
3318 	if (hw_rev >= IRDMA_GEN_2) {
3319 		temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
3320 		    FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
3321 	}
3322 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp);
3323 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0);
3324 	temp = 0;
3325 	if (hw_rev >= IRDMA_GEN_2) {
3326 		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
3327 		    FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
3328 		    FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
3329 	}
3330 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp);
3331 	temp = 0;
3332 	if (hw_rev >= IRDMA_GEN_2) {
3333 		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
3334 		    FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
3335 		    FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
3336 		    FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
3337 	}
3338 	set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp);
3339 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE",
3340 			cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8);
3341 	p1 = RS_32_1(cqp->host_ctx_pa, 32);
3342 	p2 = (u32)cqp->host_ctx_pa;
3343 
3344 	writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3345 	writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3346 
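	/*
	 * Poll CCQPSTATUS until the hardware acknowledges the CQP host
	 * context, giving up after max_done_count iterations.
	 */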
3347 	do {
3348 		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3349 			ret_code = -ETIMEDOUT;
3350 			goto err;
3351 		}
3352 		irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
3353 		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3354 	} while (!val);
3355 
3356 	if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
3357 		ret_code = -EOPNOTSUPP;
3358 		goto err;
3359 	}
3360 
3361 	cqp->process_cqp_sds = irdma_update_sds_noccq;
3362 	return 0;
3363 
3364 err:
3365 	spin_lock_destroy(&cqp->dev->cqp_lock);
3366 	irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
3367 	err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3368 	*min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
3369 	*maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
3370 	return ret_code;
3371 }
3372 
3373 /**
3374  * irdma_sc_cqp_post_sq - post cqp's sq
3375  * @cqp: struct for cqp hw
3376  */
3377 void
3378 irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
3379 {
3380 	db_wr32(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
3381 
3382 	irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
3383 		    "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
3384 		    cqp->sq_ring.tail, cqp->sq_ring.size);
3385 }
3386 
3387 /**
3388  * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
3389  * and pass back index
3390  * @cqp: CQP HW structure
3391  * @scratch: private data for CQP WQE
3392  * @wqe_idx: WQE index of CQP SQ
3393  */
3394 __le64 *
3395 irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
3396 				   u32 *wqe_idx)
3397 {
3398 	__le64 *wqe = NULL;
3399 	int ret_code;
3400 
3401 	if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
3402 		irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
3403 			    "CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
3404 			    cqp->sq_ring.head, cqp->sq_ring.tail,
3405 			    cqp->sq_ring.size);
3406 		return NULL;
3407 	}
3408 	IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
3409 	if (ret_code)
3410 		return NULL;
3411 
3412 	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
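	/* Wrapping back to index 0 starts a new pass; flip the valid-bit polarity. */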
3413 	if (!*wqe_idx)
3414 		cqp->polarity = !cqp->polarity;
3415 	wqe = cqp->sq_base[*wqe_idx].elem;
3416 	cqp->scratch_array[*wqe_idx] = scratch;
3417 
3418 	memset(&wqe[0], 0, 24);
3419 	memset(&wqe[4], 0, 32);
3420 
3421 	return wqe;
3422 }
3423 
3424 /**
3425  * irdma_sc_cqp_destroy - destroy cqp during close
3426  * @cqp: struct for cqp hw
3427  * @free_hwcqp: true for regular cqp destroy; false for reset path
3428  */
3429 int
3430 irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp)
3431 {
3432 	u32 cnt = 0, val;
3433 	int ret_code = 0;
3434 
3435 	if (free_hwcqp) {
3436 		writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
3437 		writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
3438 		do {
3439 			if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
3440 				ret_code = -ETIMEDOUT;
3441 				break;
3442 			}
3443 			irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
3444 			val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
3445 		} while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
3446 	}
3447 	irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
3448 	spin_lock_destroy(&cqp->dev->cqp_lock);
3449 	return ret_code;
3450 }
3451 
3452 /**
3453  * irdma_sc_ccq_arm - enable intr for control cq
3454  * @ccq: ccq sc struct
3455  */
3456 void
3457 irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
3458 {
3459 	u64 temp_val;
3460 	u16 sw_cq_sel;
3461 	u8 arm_next_se;
3462 	u8 arm_seq_num;
3463 
3464 	get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val);
3465 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
3466 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
3467 	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
3468 	arm_seq_num++;
3469 	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
3470 	    FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
3471 	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
3472 	    FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
3473 	set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val);
3474 
3475 	irdma_wmb();		/* make sure shadow area is updated before arming */
3476 
3477 	db_wr32(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
3478 }
3479 
3480 /**
3481  * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
3482  * @ccq: ccq sc struct
3483  * @info: completion q entry to return
3484  */
3485 int
3486 irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
3487 			  struct irdma_ccq_cqe_info *info)
3488 {
3489 	u64 qp_ctx, temp, temp1;
3490 	__le64 *cqe;
3491 	struct irdma_sc_cqp *cqp;
3492 	u32 wqe_idx;
3493 	u32 error;
3494 	u8 polarity;
3495 	int ret_code = 0;
3496 
3497 	if (ccq->cq_uk.avoid_mem_cflct)
3498 		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
3499 	else
3500 		cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
3501 
3502 	get_64bit_val(cqe, IRDMA_BYTE_24, &temp);
3503 	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
3504 	if (polarity != ccq->cq_uk.polarity)
3505 		return -ENOENT;
3506 
3507 	get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx);
3508 	cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx;
3509 	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
3510 	info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
3511 	info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
3512 	if (info->error) {
3513 		info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
3514 		error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
3515 		irdma_debug(cqp->dev, IRDMA_DEBUG_CQP,
3516 			    "CQPERRCODES error_code[x%08X]\n", error);
3517 	}
3518 
3519 	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
3520 	info->scratch = cqp->scratch_array[wqe_idx];
3521 
3522 	get_64bit_val(cqe, IRDMA_BYTE_16, &temp1);
3523 	info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
3524 
3525 	get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1);
3526 	info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
3527 	info->cqp = cqp;
3528 
3529 	/* move the head for cq */
3530 	IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
3531 	if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
3532 		ccq->cq_uk.polarity ^= 1;
3533 
3534 	/* update cq tail in cq shadow memory also */
3535 	IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
3536 	set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_0,
3537 		      IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
3538 
3539 	irdma_wmb();		/* make sure shadow area is updated before moving tail */
3540 
3541 	IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
3542 	ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
3543 
3544 	return ret_code;
3545 }
3546 
3547 /**
3548  * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
3549  * @cqp: struct for cqp hw
3550  * @op_code: cqp opcode for completion
3551  * @compl_info: completion q entry to return
3552  */
3553 int
3554 irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
3555 			      struct irdma_ccq_cqe_info *compl_info)
3556 {
3557 	struct irdma_ccq_cqe_info info = {0};
3558 	struct irdma_sc_cq *ccq;
3559 	int ret_code = 0;
3560 	u32 cnt = 0;
3561 
3562 	ccq = cqp->dev->ccq;
3563 	while (1) {
3564 		if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
3565 			return -ETIMEDOUT;
3566 
3567 		if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
3568 			irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count);
3569 			continue;
3570 		}
3571 		if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
3572 			ret_code = -EIO;
3573 			break;
3574 		}
3575 		/* make sure op code matches */
3576 		if (op_code == info.op_code)
3577 			break;
3578 		irdma_debug(cqp->dev, IRDMA_DEBUG_WQE,
3579 			    "opcode mismatch for my op code 0x%x, returned opcode %x\n",
3580 			    op_code, info.op_code);
3581 	}
3582 
3583 	if (compl_info)
3584 		irdma_memcpy(compl_info, &info, sizeof(*compl_info));
3585 
3586 	return ret_code;
3587 }
3588 
3589 /**
3590  * irdma_sc_manage_hmc_pm_func_table - manage of function table
3591  * @cqp: struct for cqp hw
3592  * @scratch: u64 saved to be used during cqp completion
3593  * @info: info for the manage function table operation
3594  * @post_sq: flag for cqp db to ring
3595  */
3596 static int
3597 irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
3598 				  struct irdma_hmc_fcn_info *info,
3599 				  u64 scratch, bool post_sq)
3600 {
3601 	__le64 *wqe;
3602 	u64 hdr;
3603 
3604 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3605 	if (!wqe)
3606 		return -ENOSPC;
3607 
3608 	hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
3609 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE,
3610 		       IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
3611 	    FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
3612 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3613 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
3614 
3615 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
3616 
3617 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE,
3618 			"MANAGE_HMC_PM_FUNC_TABLE WQE", wqe,
3619 			IRDMA_CQP_WQE_SIZE * 8);
3620 	if (post_sq)
3621 		irdma_sc_cqp_post_sq(cqp);
3622 
3623 	return 0;
3624 }
3625 
3626 /**
3627  * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
3628  * for fpm commit
3629  * @cqp: struct for cqp hw
3630  */
3631 static int
3632 irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
3633 {
3634 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
3635 					     NULL);
3636 }
3637 
3638 /**
3639  * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
3640  * @cqp: struct for cqp hw
3641  * @scratch: u64 saved to be used during cqp completion
3642  * @hmc_fn_id: hmc function id
3643  * @commit_fpm_mem: Memory for fpm values
3644  * @post_sq: flag for cqp db to ring
3645  * @wait_type: poll ccq or cqp registers for cqp completion
3646  */
3647 static int
3648 irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3649 			u16 hmc_fn_id,
3650 			struct irdma_dma_mem *commit_fpm_mem,
3651 			bool post_sq, u8 wait_type)
3652 {
3653 	__le64 *wqe;
3654 	u64 hdr;
3655 	u32 tail, val, error;
3656 	int ret_code = 0;
3657 
3658 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3659 	if (!wqe)
3660 		return -ENOSPC;
3661 
3662 	set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
3663 	set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa);
3664 
3665 	hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
3666 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
3667 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3668 
3669 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
3670 
3671 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
3672 
3673 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "COMMIT_FPM_VAL WQE", wqe,
3674 			IRDMA_CQP_WQE_SIZE * 8);
3675 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3676 
3677 	if (post_sq) {
3678 		irdma_sc_cqp_post_sq(cqp);
3679 		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3680 			ret_code = irdma_cqp_poll_registers(cqp, tail,
3681 							    cqp->dev->hw_attrs.max_done_count);
3682 		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3683 			ret_code = irdma_sc_commit_fpm_val_done(cqp);
3684 	}
3685 
3686 	return ret_code;
3687 }
3688 
3689 /**
3690  * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
3691  * query fpm
3692  * @cqp: struct for cqp hw
3693  */
3694 static int
3695 irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
3696 {
3697 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
3698 					     NULL);
3699 }
3700 
3701 /**
3702  * irdma_sc_query_fpm_val - cqp wqe query fpm values
3703  * @cqp: struct for cqp hw
3704  * @scratch: u64 saved to be used during cqp completion
3705  * @hmc_fn_id: hmc function id
3706  * @query_fpm_mem: memory for return fpm values
3707  * @post_sq: flag for cqp db to ring
3708  * @wait_type: poll ccq or cqp registers for cqp completion
3709  */
3710 static int
3711 irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
3712 		       u16 hmc_fn_id,
3713 		       struct irdma_dma_mem *query_fpm_mem,
3714 		       bool post_sq, u8 wait_type)
3715 {
3716 	__le64 *wqe;
3717 	u64 hdr;
3718 	u32 tail, val, error;
3719 	int ret_code = 0;
3720 
3721 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3722 	if (!wqe)
3723 		return -ENOSPC;
3724 
3725 	set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id);
3726 	set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa);
3727 
3728 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
3729 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3730 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
3731 
3732 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
3733 
3734 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY_FPM WQE", wqe,
3735 			IRDMA_CQP_WQE_SIZE * 8);
3736 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
3737 
3738 	if (post_sq) {
3739 		irdma_sc_cqp_post_sq(cqp);
3740 		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
3741 			ret_code = irdma_cqp_poll_registers(cqp, tail,
3742 							    cqp->dev->hw_attrs.max_done_count);
3743 		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
3744 			ret_code = irdma_sc_query_fpm_val_done(cqp);
3745 	}
3746 
3747 	return ret_code;
3748 }
3749 
3750 /**
3751  * irdma_sc_ceq_init - initialize ceq
3752  * @ceq: ceq sc structure
3753  * @info: ceq initialization info
3754  */
3755 int
3756 irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
3757 		  struct irdma_ceq_init_info *info)
3758 {
3759 	u32 pble_obj_cnt;
3760 
3761 	if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
3762 	    info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
3763 		return -EINVAL;
3764 
3765 	if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
3766 		return -EINVAL;
3767 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
3768 
3769 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
3770 		return -EINVAL;
3771 
3772 	ceq->size = sizeof(*ceq);
3773 	ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
3774 	ceq->ceq_id = info->ceq_id;
3775 	ceq->dev = info->dev;
3776 	ceq->elem_cnt = info->elem_cnt;
3777 	ceq->ceq_elem_pa = info->ceqe_pa;
3778 	ceq->virtual_map = info->virtual_map;
3779 	ceq->itr_no_expire = info->itr_no_expire;
3780 	ceq->reg_cq = info->reg_cq;
3781 	ceq->reg_cq_size = 0;
3782 	spin_lock_init(&ceq->req_cq_lock);
3783 	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
3784 	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
3785 	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
3786 	ceq->tph_en = info->tph_en;
3787 	ceq->tph_val = info->tph_val;
3788 	ceq->vsi = info->vsi;
3789 	ceq->polarity = 1;
3790 	IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
3791 	ceq->dev->ceq[info->ceq_id] = ceq;
3792 
3793 	return 0;
3794 }
3795 
3796 /**
3797  * irdma_sc_ceq_create - create ceq wqe
3798  * @ceq: ceq sc structure
3799  * @scratch: u64 saved to be used during cqp completion
3800  * @post_sq: flag for cqp db to ring
3801  */
3802 static int
3803 irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
3804 		    bool post_sq)
3805 {
3806 	struct irdma_sc_cqp *cqp;
3807 	__le64 *wqe;
3808 	u64 hdr;
3809 
3810 	cqp = ceq->dev->cqp;
3811 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3812 	if (!wqe)
3813 		return -ENOSPC;
3814 	set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
3815 	set_64bit_val(wqe, IRDMA_BYTE_32,
3816 		      (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
3817 	set_64bit_val(wqe, IRDMA_BYTE_48,
3818 		      (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
3819 	set_64bit_val(wqe, IRDMA_BYTE_56,
3820 		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
3821 		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
3822 	hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
3823 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
3824 	    FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3825 	    FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3826 	    FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
3827 	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3828 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3829 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
3830 
3831 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
3832 
3833 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_CREATE WQE", wqe,
3834 			IRDMA_CQP_WQE_SIZE * 8);
3835 	if (post_sq)
3836 		irdma_sc_cqp_post_sq(cqp);
3837 
3838 	return 0;
3839 }
3840 
3841 /**
3842  * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
3843  * @ceq: ceq sc structure
3844  */
3845 static int
3846 irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
3847 {
3848 	struct irdma_sc_cqp *cqp;
3849 
3850 	cqp = ceq->dev->cqp;
3851 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
3852 					     NULL);
3853 }
3854 
3855 /**
3856  * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
3857  * @ceq: ceq sc structure
3858  */
3859 int
3860 irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
3861 {
3862 	struct irdma_sc_cqp *cqp;
3863 
3864 	if (ceq->reg_cq)
3865 		irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
3866 
3867 	cqp = ceq->dev->cqp;
3868 	cqp->process_cqp_sds = irdma_update_sds_noccq;
3869 
3870 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
3871 					     NULL);
3872 }
3873 
3874 /**
3875  * irdma_sc_cceq_create - create cceq
3876  * @ceq: ceq sc structure
3877  * @scratch: u64 saved to be used during cqp completion
3878  */
3879 int
3880 irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
3881 {
3882 	int ret_code;
3883 	struct irdma_sc_dev *dev = ceq->dev;
3884 
3885 	dev->ccq->vsi = ceq->vsi;
3886 	if (ceq->reg_cq) {
3887 		ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
3888 		if (ret_code)
3889 			return ret_code;
3890 	}
3891 
3892 	ret_code = irdma_sc_ceq_create(ceq, scratch, true);
3893 	if (!ret_code)
3894 		return irdma_sc_cceq_create_done(ceq);
3895 
3896 	return ret_code;
3897 }
3898 
3899 /**
3900  * irdma_sc_ceq_destroy - destroy ceq
3901  * @ceq: ceq sc structure
3902  * @scratch: u64 saved to be used during cqp completion
3903  * @post_sq: flag for cqp db to ring
3904  */
3905 int
3906 irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
3907 {
3908 	struct irdma_sc_cqp *cqp;
3909 	__le64 *wqe;
3910 	u64 hdr;
3911 
3912 	cqp = ceq->dev->cqp;
3913 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
3914 	if (!wqe)
3915 		return -ENOSPC;
3916 
3917 	set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt);
3918 	set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx);
3919 	hdr = ceq->ceq_id |
3920 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
3921 	    FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
3922 	    FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
3923 	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
3924 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
3925 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
3926 
3927 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
3928 
3929 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_DESTROY WQE", wqe,
3930 			IRDMA_CQP_WQE_SIZE * 8);
3931 	ceq->dev->ceq[ceq->ceq_id] = NULL;
3932 	if (post_sq)
3933 		irdma_sc_cqp_post_sq(cqp);
3934 
3935 	return 0;
3936 }
3937 
3938 /**
3939  * irdma_sc_process_ceq - process ceq
3940  * @dev: sc device struct
3941  * @ceq: ceq sc structure
3942  *
3943  * The caller is expected to serialize this function with cleanup_ceqes()
3944  * because both functions manipulate the same ceq.
3945  */
3946 void *
3947 irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
3948 {
3949 	u64 temp;
3950 	__le64 *ceqe;
3951 	struct irdma_sc_cq *cq = NULL;
3952 	struct irdma_sc_cq *temp_cq;
3953 	u8 polarity;
3954 	u32 cq_idx;
3955 	unsigned long flags;
3956 
3957 	do {
3958 		cq_idx = 0;
3959 		ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
3960 		get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
3961 		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
3962 		if (polarity != ceq->polarity)
3963 			return NULL;
3964 
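		/*
		 * The CEQE stores the CQ context shifted right by one bit;
		 * shift it back to recover the pointer. A NULL context marks
		 * an entry that was cleaned up and is skipped.
		 */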
3965 		temp_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
3966 		if (!temp_cq) {
3967 			cq_idx = IRDMA_INVALID_CQ_IDX;
3968 			IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3969 
3970 			if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3971 				ceq->polarity ^= 1;
3972 			continue;
3973 		}
3974 
3975 		cq = temp_cq;
3976 		if (ceq->reg_cq) {
3977 			spin_lock_irqsave(&ceq->req_cq_lock, flags);
3978 			cq_idx = irdma_sc_find_reg_cq(ceq, cq);
3979 			spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
3980 		}
3981 
3982 		IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
3983 		if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
3984 			ceq->polarity ^= 1;
3985 	} while (cq_idx == IRDMA_INVALID_CQ_IDX);
3986 
3987 	if (cq)
3988 		irdma_sc_cq_ack(cq);
3989 	return cq;
3990 }
3991 
3992 /**
3993  * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
3994  * @cq: cq for which the ceqes need to be cleaned up
3995  * @ceq: ceq ptr
3996  *
3997  * The function is called after the cq is destroyed to clean up
3998  * its pending ceqe entries. The caller is expected to serialize this
3999  * function with process_ceq() in interrupt context.
4000  */
4001 void
4002 irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
4003 {
4004 	struct irdma_sc_cq *next_cq;
4005 	u8 ceq_polarity = ceq->polarity;
4006 	__le64 *ceqe;
4007 	u8 polarity;
4008 	u64 temp;
4009 	int next;
4010 	u32 i;
4011 
4012 	next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
4013 
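	/*
	 * Walk the valid CEQEs from the current tail; entries that reference
	 * the destroyed cq are rewritten to keep only their valid bit so
	 * process_ceq() will skip them.
	 */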
4014 	for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
4015 		ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
4016 
4017 		get_64bit_val(ceqe, IRDMA_BYTE_0, &temp);
4018 		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
4019 		if (polarity != ceq_polarity)
4020 			return;
4021 
4022 		next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1);
4023 		if (cq == next_cq)
4024 			set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID);
4025 
4026 		next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
4027 		if (!next)
4028 			ceq_polarity ^= 1;
4029 	}
4030 }
4031 
4032 /**
4033  * irdma_sc_aeq_init - initialize aeq
4034  * @aeq: aeq structure ptr
4035  * @info: aeq initialization info
4036  */
4037 int
4038 irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
4039 		  struct irdma_aeq_init_info *info)
4040 {
4041 	u32 pble_obj_cnt;
4042 
4043 	if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
4044 	    info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
4045 		return -EINVAL;
4046 
4047 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
4048 
4049 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
4050 		return -EINVAL;
4051 
4052 	aeq->size = sizeof(*aeq);
4053 	aeq->polarity = 1;
4054 	aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
4055 	aeq->dev = info->dev;
4056 	aeq->elem_cnt = info->elem_cnt;
4057 	aeq->aeq_elem_pa = info->aeq_elem_pa;
4058 	IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
4059 	aeq->virtual_map = info->virtual_map;
4060 	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
4061 	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
4062 	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
4063 	aeq->msix_idx = info->msix_idx;
4064 	info->dev->aeq = aeq;
4065 
4066 	return 0;
4067 }
4068 
4069 /**
4070  * irdma_sc_aeq_create - create aeq
4071  * @aeq: aeq structure ptr
4072  * @scratch: u64 saved to be used during cqp completion
4073  * @post_sq: flag for cqp db to ring
4074  */
4075 static int
4076 irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
4077 		    bool post_sq)
4078 {
4079 	__le64 *wqe;
4080 	struct irdma_sc_cqp *cqp;
4081 	u64 hdr;
4082 
4083 	cqp = aeq->dev->cqp;
4084 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4085 	if (!wqe)
4086 		return -ENOSPC;
4087 	set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
4088 	set_64bit_val(wqe, IRDMA_BYTE_32,
4089 		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
4090 	set_64bit_val(wqe, IRDMA_BYTE_48,
4091 		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
4092 
4093 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
4094 	    FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
4095 	    FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
4096 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4097 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
4098 
4099 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
4100 
4101 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "AEQ_CREATE WQE", wqe,
4102 			IRDMA_CQP_WQE_SIZE * 8);
4103 	if (post_sq)
4104 		irdma_sc_cqp_post_sq(cqp);
4105 
4106 	return 0;
4107 }
4108 
4109 /**
4110  * irdma_sc_aeq_destroy - destroy aeq during close
4111  * @aeq: aeq structure ptr
4112  * @scratch: u64 saved to be used during cqp completion
4113  * @post_sq: flag for cqp db to ring
4114  */
4115 int
4116 irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, bool post_sq)
4117 {
4118 	__le64 *wqe;
4119 	struct irdma_sc_cqp *cqp;
4120 	struct irdma_sc_dev *dev;
4121 	u64 hdr;
4122 
4123 	dev = aeq->dev;
4124 	writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
4125 
4126 	cqp = dev->cqp;
4127 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4128 	if (!wqe)
4129 		return -ENOSPC;
4130 	set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt);
4131 	set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx);
4132 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
4133 	    FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
4134 	    FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
4135 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4136 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
4137 
4138 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
4139 
4140 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "AEQ_DESTROY WQE", wqe,
4141 			IRDMA_CQP_WQE_SIZE * 8);
4142 	if (post_sq)
4143 		irdma_sc_cqp_post_sq(cqp);
4144 	return 0;
4145 }
4146 
4147 /**
4148  * irdma_sc_get_next_aeqe - get next aeq entry
4149  * @aeq: aeq structure ptr
4150  * @info: aeqe info to be returned
4151  */
4152 int
4153 irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
4154 		       struct irdma_aeqe_info *info)
4155 {
4156 	u64 temp, compl_ctx;
4157 	__le64 *aeqe;
4158 	u8 ae_src;
4159 	u8 polarity;
4160 
4161 	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
4162 	get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx);
4163 	get_64bit_val(aeqe, IRDMA_BYTE_8, &temp);
4164 	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
4165 
4166 	if (aeq->polarity != polarity)
4167 		return -ENOENT;
4168 
4169 	irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16);
4170 
4171 	ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
4172 	info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
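	/*
	 * The QP/CQ id is split into low and high AEQE fields; recombine the
	 * low field with the high field shifted up by 18 bits.
	 */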
4173 	info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
4174 	    ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
4175 	info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
4176 	info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
4177 	info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
4178 	info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
4179 	info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
4180 
4181 	info->ae_src = ae_src;
4182 	switch (info->ae_id) {
4183 	case IRDMA_AE_PRIV_OPERATION_DENIED:
4184 	case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
4185 	case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
4186 	case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
4187 	case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
4188 	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
4189 	case IRDMA_AE_UDA_XMIT_BAD_PD:
4190 	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
4191 	case IRDMA_AE_BAD_CLOSE:
4192 	case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
4193 	case IRDMA_AE_STAG_ZERO_INVALID:
4194 	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
4195 	case IRDMA_AE_IB_INVALID_REQUEST:
4196 	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
4197 	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
4198 	case IRDMA_AE_IB_REMOTE_OP_ERROR:
4199 	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
4200 	case IRDMA_AE_DDP_UBE_INVALID_MO:
4201 	case IRDMA_AE_DDP_UBE_INVALID_QN:
4202 	case IRDMA_AE_DDP_NO_L_BIT:
4203 	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4204 	case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4205 	case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
4206 	case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
4207 	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
4208 	case IRDMA_AE_INVALID_ARP_ENTRY:
4209 	case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
4210 	case IRDMA_AE_STALE_ARP_ENTRY:
4211 	case IRDMA_AE_INVALID_AH_ENTRY:
4212 	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4213 	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
4214 	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
4215 	case IRDMA_AE_LCE_QP_CATASTROPHIC:
4216 	case IRDMA_AE_LLP_DOUBT_REACHABILITY:
4217 	case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
4218 	case IRDMA_AE_RESET_SENT:
4219 	case IRDMA_AE_TERMINATE_SENT:
4220 	case IRDMA_AE_RESET_NOT_SENT:
4221 	case IRDMA_AE_QP_SUSPEND_COMPLETE:
4222 	case IRDMA_AE_UDA_L4LEN_INVALID:
4223 		info->qp = true;
4224 		info->compl_ctx = compl_ctx;
4225 		break;
4226 	case IRDMA_AE_LCE_CQ_CATASTROPHIC:
4227 		info->cq = true;
4228 		info->compl_ctx = LS_64_1(compl_ctx, 1);
4229 		ae_src = IRDMA_AE_SOURCE_RSVD;
4230 		break;
4231 	case IRDMA_AE_ROCE_EMPTY_MCG:
4232 	case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
4233 	case IRDMA_AE_ROCE_BAD_MC_QPID:
4234 	case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
4235 		/* fallthrough */
4236 	case IRDMA_AE_LLP_CONNECTION_RESET:
4237 	case IRDMA_AE_LLP_SYN_RECEIVED:
4238 	case IRDMA_AE_LLP_FIN_RECEIVED:
4239 	case IRDMA_AE_LLP_CLOSE_COMPLETE:
4240 	case IRDMA_AE_LLP_TERMINATE_RECEIVED:
4241 	case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
4242 		ae_src = IRDMA_AE_SOURCE_RSVD;
4243 		info->qp = true;
4244 		info->compl_ctx = compl_ctx;
4245 		break;
4246 	case IRDMA_AE_RESOURCE_EXHAUSTION:
4247 		/*
4248 		 * ae_src contains the exhausted resource with a unique decoding. Set RSVD here to prevent matching
4249 		 * with a CQ or QP.
4250 		 */
4251 		ae_src = IRDMA_AE_SOURCE_RSVD;
4252 		break;
4253 	default:
4254 		break;
4255 	}
4256 
4257 	switch (ae_src) {
4258 	case IRDMA_AE_SOURCE_RQ:
4259 	case IRDMA_AE_SOURCE_RQ_0011:
4260 		info->qp = true;
4261 		info->rq = true;
4262 		info->compl_ctx = compl_ctx;
4263 		info->err_rq_idx_valid = true;
4264 		break;
4265 	case IRDMA_AE_SOURCE_CQ:
4266 	case IRDMA_AE_SOURCE_CQ_0110:
4267 	case IRDMA_AE_SOURCE_CQ_1010:
4268 	case IRDMA_AE_SOURCE_CQ_1110:
4269 		info->cq = true;
4270 		info->compl_ctx = LS_64_1(compl_ctx, 1);
4271 		break;
4272 	case IRDMA_AE_SOURCE_SQ:
4273 	case IRDMA_AE_SOURCE_SQ_0111:
4274 		info->qp = true;
4275 		info->sq = true;
4276 		info->compl_ctx = compl_ctx;
4277 		break;
4278 	case IRDMA_AE_SOURCE_IN_WR:
4279 		info->qp = true;
4280 		info->compl_ctx = compl_ctx;
4281 		info->in_rdrsp_wr = true;
4282 		break;
4283 	case IRDMA_AE_SOURCE_IN_RR:
4284 		info->qp = true;
4285 		info->compl_ctx = compl_ctx;
4286 		info->in_rdrsp_wr = true;
4287 		break;
4288 	case IRDMA_AE_SOURCE_OUT_RR:
4289 	case IRDMA_AE_SOURCE_OUT_RR_1111:
4290 		info->qp = true;
4291 		info->compl_ctx = compl_ctx;
4292 		info->out_rdrsp = true;
4293 		break;
4294 	case IRDMA_AE_SOURCE_RSVD:
4295 	default:
4296 		break;
4297 	}
4298 
4299 	IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
4300 	if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
4301 		aeq->polarity ^= 1;
4302 
4303 	return 0;
4304 }
4305 
4306 /**
4307  * irdma_sc_repost_aeq_entries - repost completed aeq entries
4308  * @dev: sc device struct
4309  * @count: allocate count
4310  */
4311 void
4312 irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
4313 {
4314 	db_wr32(count, dev->aeq_alloc_db);
4316 }
4317 
4318 /**
4319  * irdma_sc_ccq_init - initialize control cq
4320  * @cq: sc's cq struct
4321  * @info: info for control cq initialization
4322  */
4323 int
4324 irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
4325 {
4326 	u32 pble_obj_cnt;
4327 
4328 	if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
4329 	    info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
4330 		return -EINVAL;
4331 
4332 	if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1))
4333 		return -EINVAL;
4334 
4335 	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
4336 
4337 	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
4338 		return -EINVAL;
4339 
4340 	cq->cq_pa = info->cq_pa;
4341 	cq->cq_uk.cq_base = info->cq_base;
4342 	cq->shadow_area_pa = info->shadow_area_pa;
4343 	cq->cq_uk.shadow_area = info->shadow_area;
4344 	cq->shadow_read_threshold = info->shadow_read_threshold;
4345 	cq->dev = info->dev;
4346 	cq->ceq_id = info->ceq_id;
4347 	cq->cq_uk.cq_size = info->num_elem;
4348 	cq->cq_type = IRDMA_CQ_TYPE_CQP;
4349 	cq->ceqe_mask = info->ceqe_mask;
4350 	IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
4351 	cq->cq_uk.cq_id = 0;	/* control cq is id 0 always */
4352 	cq->ceq_id_valid = info->ceq_id_valid;
4353 	cq->tph_en = info->tph_en;
4354 	cq->tph_val = info->tph_val;
4355 	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
4356 	cq->pbl_list = info->pbl_list;
4357 	cq->virtual_map = info->virtual_map;
4358 	cq->pbl_chunk_size = info->pbl_chunk_size;
4359 	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
4360 	cq->cq_uk.polarity = true;
4361 	cq->vsi = info->vsi;
4362 	cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
4363 
4364 	/* Only applicable to CQs other than CCQ so initialize to zero */
4365 	cq->cq_uk.cqe_alloc_db = NULL;
4366 
4367 	info->dev->ccq = cq;
4368 	return 0;
4369 }
4370 
4371 /**
4372  * irdma_sc_ccq_create_done - poll cqp for ccq create
4373  * @ccq: ccq sc struct
4374  */
4375 static inline int
4376 irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
4377 {
4378 	struct irdma_sc_cqp *cqp;
4379 
4380 	cqp = ccq->dev->cqp;
4381 
4382 	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
4383 }
4384 
4385 /**
4386  * irdma_sc_ccq_create - create control cq
4387  * @ccq: ccq sc struct
4388  * @scratch: u64 saved to be used during cqp completion
4389  * @check_overflow: overflow flag for ccq
4390  * @post_sq: flag for cqp db to ring
4391  */
4392 int
4393 irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
4394 		    bool check_overflow, bool post_sq)
4395 {
4396 	int ret_code;
4397 
4398 	ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
4399 	if (ret_code)
4400 		return ret_code;
4401 
4402 	if (post_sq) {
4403 		ret_code = irdma_sc_ccq_create_done(ccq);
4404 		if (ret_code)
4405 			return ret_code;
4406 	}
4407 	ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
4408 
4409 	return 0;
4410 }
4411 
4412 /**
4413  * irdma_sc_ccq_destroy - destroy ccq during close
4414  * @ccq: ccq sc struct
4415  * @scratch: u64 saved to be used during cqp completion
4416  * @post_sq: flag for cqp db to ring
4417  */
4418 int
4419 irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
4420 {
4421 	struct irdma_sc_cqp *cqp;
4422 	__le64 *wqe;
4423 	u64 hdr;
4424 	int ret_code = 0;
4425 	u32 tail, val, error;
4426 
4427 	cqp = ccq->dev->cqp;
4428 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4429 	if (!wqe)
4430 		return -ENOSPC;
4431 
4432 	set_64bit_val(wqe, IRDMA_BYTE_0, ccq->cq_uk.cq_size);
4433 	set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(ccq, 1));
4434 	set_64bit_val(wqe, IRDMA_BYTE_40, ccq->shadow_area_pa);
4435 
4436 	hdr = ccq->cq_uk.cq_id |
4437 	    FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
4438 		      IRDMA_CQPSQ_CQ_CEQID) |
4439 	    FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
4440 	    FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
4441 	    FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
4442 	    FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
4443 	    FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
4444 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4445 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
4446 
4447 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
4448 
4449 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CCQ_DESTROY WQE", wqe,
4450 			IRDMA_CQP_WQE_SIZE * 8);
4451 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4452 
4453 	if (post_sq) {
4454 		irdma_sc_cqp_post_sq(cqp);
4455 		ret_code = irdma_cqp_poll_registers(cqp, tail,
4456 						    cqp->dev->hw_attrs.max_done_count);
4457 	}
4458 
4459 	cqp->process_cqp_sds = irdma_update_sds_noccq;
4460 
4461 	return ret_code;
4462 }
4463 
4464 /**
4465  * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
4466  * @dev: ptr to irdma_sc_dev struct
4467  * @hmc_fn_id: hmc function id
4468  */
4469 int
4470 irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u16 hmc_fn_id)
4471 {
4472 	struct irdma_hmc_info *hmc_info;
4473 	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4474 	struct irdma_dma_mem query_fpm_mem;
4475 	int ret_code = 0;
4476 	u8 wait_type;
4477 
4478 	hmc_info = dev->hmc_info;
4479 	hmc_fpm_misc = &dev->hmc_fpm_misc;
4480 	query_fpm_mem.pa = dev->fpm_query_buf_pa;
4481 	query_fpm_mem.va = dev->fpm_query_buf;
4482 	hmc_info->hmc_fn_id = hmc_fn_id;
4483 	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4484 
4485 	ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4486 					  &query_fpm_mem, true, wait_type);
4487 	if (ret_code)
4488 		return ret_code;
4489 
4490 	/* parse the fpm_query_buf and fill hmc obj info */
4491 	ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
4492 						hmc_fpm_misc);
4493 
4494 	irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "QUERY FPM BUFFER",
4495 			query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE);
4496 	return ret_code;
4497 }
4498 
4499 /**
4500  * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
4501  * command and populates fpm base address in hmc_info
4502  * @dev: ptr to irdma_sc_dev struct
4503  * @hmc_fn_id: hmc function id
4504  */
4505 static int
4506 irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u16 hmc_fn_id)
4507 {
4508 	struct irdma_hmc_obj_info *obj_info;
4509 	__le64 *buf;
4510 	struct irdma_hmc_info *hmc_info;
4511 	struct irdma_dma_mem commit_fpm_mem;
4512 	int ret_code = 0;
4513 	u8 wait_type;
4514 
4515 	hmc_info = dev->hmc_info;
4516 	obj_info = hmc_info->hmc_obj;
4517 	buf = dev->fpm_commit_buf;
4518 
4519 	set_64bit_val(buf, IRDMA_BYTE_0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
4520 	set_64bit_val(buf, IRDMA_BYTE_8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
4521 	set_64bit_val(buf, IRDMA_BYTE_16, (u64)0);	/* RSRVD */
4522 	set_64bit_val(buf, IRDMA_BYTE_24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
4523 	set_64bit_val(buf, IRDMA_BYTE_32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
4524 	set_64bit_val(buf, IRDMA_BYTE_40, (u64)0);	/* RSVD */
4525 	set_64bit_val(buf, IRDMA_BYTE_48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
4526 	set_64bit_val(buf, IRDMA_BYTE_56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
4527 	set_64bit_val(buf, IRDMA_BYTE_64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
4528 	set_64bit_val(buf, IRDMA_BYTE_72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
4529 	set_64bit_val(buf, IRDMA_BYTE_80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
4530 	set_64bit_val(buf, IRDMA_BYTE_88,
4531 		      (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
4532 	set_64bit_val(buf, IRDMA_BYTE_96,
4533 		      (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
4534 	set_64bit_val(buf, IRDMA_BYTE_104,
4535 		      (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
4536 	set_64bit_val(buf, IRDMA_BYTE_112,
4537 		      (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
4538 	set_64bit_val(buf, IRDMA_BYTE_120, (u64)0);	/* RSVD */
4539 	set_64bit_val(buf, IRDMA_BYTE_128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
4540 	set_64bit_val(buf, IRDMA_BYTE_136,
4541 		      (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
4542 	set_64bit_val(buf, IRDMA_BYTE_144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
4543 	set_64bit_val(buf, IRDMA_BYTE_152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
4544 	set_64bit_val(buf, IRDMA_BYTE_160,
4545 		      (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
4546 	set_64bit_val(buf, IRDMA_BYTE_168,
4547 		      (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
4548 	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
4549 	commit_fpm_mem.va = dev->fpm_commit_buf;
4550 
4551 	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
4552 	irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
4553 			commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);
4554 	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
4555 					   &commit_fpm_mem, true, wait_type);
4556 	if (!ret_code)
4557 		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
4558 					      hmc_info->hmc_obj,
4559 					      &hmc_info->sd_table.sd_cnt);
4560 	irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER",
4561 			commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE);
4562 
4563 	return ret_code;
4564 }
4565 
4566 /**
4567  * cqp_sds_wqe_fill - fill cqp wqe for sd
4568  * @cqp: struct for cqp hw
4569  * @info: sd info for wqe
4570  * @scratch: u64 saved to be used during cqp completion
4571  */
4572 static int
4573 cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
4574 		 struct irdma_update_sds_info *info, u64 scratch)
4575 {
4576 	u64 data;
4577 	u64 hdr;
4578 	__le64 *wqe;
4579 	int mem_entries, wqe_entries;
4580 	struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
4581 	u64 offset = 0;
4582 	u32 wqe_idx;
4583 
4584 	wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
4585 	if (!wqe)
4586 		return -ENOSPC;
4587 
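	/*
	 * Up to three SD entries travel inline in the WQE; any remaining
	 * entries are copied into the pre-allocated SD buffer and referenced
	 * by physical address.
	 */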
4588 	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
4589 	mem_entries = info->cnt - wqe_entries;
4590 
4591 	if (mem_entries) {
4592 		offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
4593 		irdma_memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
4594 
4595 		data = (u64)sdbuf->pa + offset;
4596 	} else {
4597 		data = 0;
4598 	}
4599 	data |= FLD_LS_64(cqp->dev, info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID);
4600 	set_64bit_val(wqe, IRDMA_BYTE_16, data);
4601 
4602 	switch (wqe_entries) {
4603 	case 3:
4604 		set_64bit_val(wqe, IRDMA_BYTE_48,
4605 			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
4606 			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4607 
4608 		set_64bit_val(wqe, IRDMA_BYTE_56, info->entry[2].data);
4609 		/* fallthrough */
4610 	case 2:
4611 		set_64bit_val(wqe, IRDMA_BYTE_32,
4612 			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
4613 			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
4614 
4615 		set_64bit_val(wqe, IRDMA_BYTE_40, info->entry[1].data);
4616 		/* fallthrough */
4617 	case 1:
4618 		set_64bit_val(wqe, IRDMA_BYTE_0,
4619 			      FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
4620 
4621 		set_64bit_val(wqe, IRDMA_BYTE_8, info->entry[0].data);
4622 		break;
4623 	default:
4624 		break;
4625 	}
4626 
4627 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
4628 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
4629 	    FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
4630 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
4631 
4632 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
4633 
4634 	if (mem_entries)
4635 		irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE Buffer",
4636 				(char *)sdbuf->va + offset, mem_entries << 4);
4637 
4638 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE", wqe,
4639 			IRDMA_CQP_WQE_SIZE * 8);
4640 
4641 	return 0;
4642 }
4643 
4644 /**
4645  * irdma_update_pe_sds - cqp wqe for sd
4646  * @dev: ptr to irdma_dev struct
4647  * @info: sd info for sd's
4648  * @scratch: u64 saved to be used during cqp completion
4649  */
4650 static int
4651 irdma_update_pe_sds(struct irdma_sc_dev *dev,
4652 		    struct irdma_update_sds_info *info, u64 scratch)
4653 {
4654 	struct irdma_sc_cqp *cqp = dev->cqp;
4655 	int ret_code;
4656 
4657 	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
4658 	if (!ret_code)
4659 		irdma_sc_cqp_post_sq(cqp);
4660 
4661 	return ret_code;
4662 }
4663 
4664 /**
4665  * irdma_update_sds_noccq - update sd before ccq created
4666  * @dev: sc device struct
4667  * @info: sd info for sd's
4668  */
4669 int
4670 irdma_update_sds_noccq(struct irdma_sc_dev *dev,
4671 		       struct irdma_update_sds_info *info)
4672 {
4673 	u32 error, val, tail;
4674 	struct irdma_sc_cqp *cqp = dev->cqp;
4675 	int ret_code;
4676 
4677 	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
4678 	if (ret_code)
4679 		return ret_code;
4680 
4681 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4682 
4683 	irdma_sc_cqp_post_sq(cqp);
4684 	return irdma_cqp_poll_registers(cqp, tail,
4685 					cqp->dev->hw_attrs.max_done_count);
4686 }
4687 
4688 /**
4689  * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
4690  * @cqp: struct for cqp hw
4691  * @scratch: u64 saved to be used during cqp completion
4692  * @hmc_fn_id: hmc function id
4693  * @post_sq: flag for cqp db to ring
4694  * @poll_registers: flag to poll register for cqp completion
4695  */
4696 int
4697 irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
4698 				    u16 hmc_fn_id, bool post_sq,
4699 				    bool poll_registers)
4700 {
4701 	u64 hdr;
4702 	__le64 *wqe;
4703 	u32 tail, val, error;
4704 
4705 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4706 	if (!wqe)
4707 		return -ENOSPC;
4708 
4709 	set_64bit_val(wqe, IRDMA_BYTE_16,
4710 		      FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
4711 
4712 	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
4713 			 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
4714 	    FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
4715 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
4716 
4717 	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
4718 
4719 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
4720 			wqe, IRDMA_CQP_WQE_SIZE * 8);
4721 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4722 
4723 	if (post_sq) {
4724 		irdma_sc_cqp_post_sq(cqp);
4725 		if (poll_registers)
4726 			/* check for cqp sq tail update */
4727 			return irdma_cqp_poll_registers(cqp, tail,
4728 							cqp->dev->hw_attrs.max_done_count);
4729 		else
4730 			return irdma_sc_poll_for_cqp_op_done(cqp,
4731 							     IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
4732 							     NULL);
4733 	}
4734 
4735 	return 0;
4736 }
4737 
4738 /**
4739  * irdma_cqp_ring_full - check if cqp ring is full
4740  * @cqp: struct for cqp hw
4741  */
4742 static bool
4743 irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
4744 {
4745 	return IRDMA_RING_FULL_ERR(cqp->sq_ring);
4746 }
4747 
4748 /**
4749  * irdma_est_sd - returns approximate number of SDs for HMC
4750  * @dev: sc device struct
4751  * @hmc_info: hmc structure, size and count for HMC objects
4752  */
4753 static u32 irdma_est_sd(struct irdma_sc_dev *dev,
4754 			struct irdma_hmc_info *hmc_info){
4755 	int i;
4756 	u64 size = 0;
4757 	u64 sd;
4758 
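	/*
	 * Sum the 512-byte-aligned footprint of every HMC object, then
	 * convert the total to 2 MB segment descriptors (one SD covers
	 * 2^21 bytes), rounding up any remainder.
	 */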
4759 	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4760 		if (i != IRDMA_HMC_IW_PBLE)
4761 			size += round_up(hmc_info->hmc_obj[i].cnt *
4762 					 hmc_info->hmc_obj[i].size, 512);
4763 	size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
4764 			 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
4765 	if (size & 0x1FFFFF)
4766 		sd = (size >> 21) + 1;	/* add 1 for remainder */
4767 	else
4768 		sd = size >> 21;
4769 	if (sd > 0xFFFFFFFF) {
4770 		irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd);
4771 		sd = 0xFFFFFFFF - 1;
4772 	}
4773 
4774 	return (u32)sd;
4775 }
4776 
4777 /**
4778  * irdma_sc_query_rdma_features - query RDMA features and FW ver
4779  * @cqp: struct for cqp hw
4780  * @buf: buffer to hold query info
4781  * @scratch: u64 saved to be used during cqp completion
4782  */
4783 static int
4784 irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
4785 			     struct irdma_dma_mem *buf, u64 scratch)
4786 {
4787 	__le64 *wqe;
4788 	u64 temp;
4789 	u32 tail, val, error;
4790 	int status;
4791 
4792 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
4793 	if (!wqe)
4794 		return -ENOSPC;
4795 
4796 	temp = buf->pa;
4797 	set_64bit_val(wqe, IRDMA_BYTE_32, temp);
4798 
4799 	temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
4800 			  cqp->polarity) |
4801 	    FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
4802 	    FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
4803 	irdma_wmb();		/* make sure WQE is written before valid bit is set */
4804 
4805 	set_64bit_val(wqe, IRDMA_BYTE_24, temp);
4806 
4807 	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", wqe,
4808 			IRDMA_CQP_WQE_SIZE * 8);
4809 	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
4810 
4811 	irdma_sc_cqp_post_sq(cqp);
4812 	status = irdma_cqp_poll_registers(cqp, tail,
4813 					  cqp->dev->hw_attrs.max_done_count);
4814 	if (error || status)
4815 		status = -EIO;
4816 
4817 	return status;
4818 }
4819 
4820 /**
4821  * irdma_get_rdma_features - get RDMA features
4822  * @dev: sc device struct
4823  */
4824 int
4825 irdma_get_rdma_features(struct irdma_sc_dev *dev)
4826 {
4827 	int ret_code;
4828 	struct irdma_dma_mem feat_buf;
4829 	u64 temp;
4830 	u16 byte_idx, feat_type, feat_cnt, feat_idx;
4831 
4832 	feat_buf.size = IRDMA_FEATURE_BUF_SIZE;
4833 	feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size,
4834 					     IRDMA_FEATURE_BUF_ALIGNMENT);
4835 	if (!feat_buf.va)
4836 		return -ENOMEM;
4837 
4838 	ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4839 	if (ret_code)
4840 		goto exit;
4841 
4842 	get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
4843 	feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4844 	if (feat_cnt < IRDMA_MIN_FEATURES) {
4845 		ret_code = -EINVAL;
4846 		goto exit;
4847 	} else if (feat_cnt > IRDMA_MAX_FEATURES) {
4848 		irdma_debug(dev, IRDMA_DEBUG_DEV,
4849 			    "feature buf size insufficient, "
4850 			    "retrying with larger buffer\n");
4851 		irdma_free_dma_mem(dev->hw, &feat_buf);
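		/* Each feature entry is 8 bytes; reallocate to fit all reported entries. */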
4852 		feat_buf.size = 8 * feat_cnt;
4853 		feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf,
4854 						     feat_buf.size,
4855 						     IRDMA_FEATURE_BUF_ALIGNMENT);
4856 		if (!feat_buf.va)
4857 			return -ENOMEM;
4858 
4859 		ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
4860 		if (ret_code)
4861 			goto exit;
4862 
4863 		get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp);
4864 		feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
4865 		if (feat_cnt < IRDMA_MIN_FEATURES) {
4866 			ret_code = -EINVAL;
4867 			goto exit;
4868 		}
4869 	}
4870 
4871 	irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va,
4872 			feat_cnt * 8);
4873 
4874 	for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
4875 	     feat_idx++, byte_idx += 8) {
4876 		get_64bit_val(feat_buf.va, byte_idx, &temp);
4877 		feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
4878 		dev->feature_info[feat_type] = temp;
4879 	}
4880 exit:
4881 	irdma_free_dma_mem(dev->hw, &feat_buf);
4882 	return ret_code;
4883 }
4884 
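/**
 * irdma_q1_cnt - compute Q1 object count needed for the requested QPs
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 * @qpwanted: number of QPs being provisioned
 */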
4885 static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
4886 			struct irdma_hmc_info *hmc_info, u32 qpwanted)
{
4887 	u32 q1_cnt;
4888 
4889 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
4890 		q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
4891 	} else {
4892 		if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
4893 			q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
4894 		else
4895 			q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
4896 	}
4897 
4898 	return q1_cnt;
4899 }
4900 
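/**
 * cfg_fpm_value_gen_1 - size XF objects for GEN_1 hardware
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 * @qpwanted: number of QPs being provisioned
 */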
4901 static void
4902 cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
4903 		    struct irdma_hmc_info *hmc_info, u32 qpwanted)
4904 {
4905 	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
4906 }
4907 
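/**
 * cfg_fpm_value_gen_2 - size per-QP HMC objects for GEN_2 hardware
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 * @qpwanted: number of QPs being provisioned
 */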
4908 static void
4909 cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
4910 		    struct irdma_hmc_info *hmc_info, u32 qpwanted)
4911 {
4912 	struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
4913 
4914 	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
4915 	    4 * hmc_fpm_misc->xf_block_size * qpwanted;
4916 
4917 	hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
4918 
4919 	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
4920 		hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
4921 	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
4922 		hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
4923 		    hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
4924 		    hmc_fpm_misc->rrf_block_size;
4925 	if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
4926 		if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
4927 			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
4928 		if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
4929 			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
4930 			    hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
4931 			    hmc_fpm_misc->ooiscf_block_size;
4932 	}
4933 }
4934 
4935 /**
4936  * irdma_cfg_fpm_val - configure HMC objects
4937  * @dev: sc device struct
4938  * @qp_count: desired qp count
4939  */
4940 int
4941 irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
4942 {
4943 	struct irdma_virt_mem virt_mem;
4944 	u32 i, mem_size;
4945 	u32 qpwanted, mrwanted, pblewanted;
4946 	u32 powerof2, hte;
4947 	u32 sd_needed;
4948 	u32 sd_diff;
4949 	u32 loop_count = 0;
4950 	struct irdma_hmc_info *hmc_info;
4951 	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
4952 	int ret_code = 0;
4953 	u32 max_sds;
4954 
4955 	hmc_info = dev->hmc_info;
4956 	hmc_fpm_misc = &dev->hmc_fpm_misc;
4957 	ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
4958 	if (ret_code) {
4959 		irdma_debug(dev, IRDMA_DEBUG_HMC,
4960 			    "irdma_sc_init_iw_hmc returned error_code = %d\n",
4961 			    ret_code);
4962 		return ret_code;
4963 	}
4964 
4965 	max_sds = hmc_fpm_misc->max_sds;
4966 
4967 	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
4968 		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
4969 
4970 	sd_needed = irdma_est_sd(dev, hmc_info);
4971 	irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %d where max sd is %d\n",
4972 		    hmc_info->sd_table.sd_cnt, max_sds);
4973 
4974 	qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
4975 
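	/* Round the requested QP count down to a power of two. */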
4976 	powerof2 = 1;
4977 	while (powerof2 <= qpwanted)
4978 		powerof2 *= 2;
4979 	powerof2 /= 2;
4980 	qpwanted = powerof2;
4981 
4982 	mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
4983 	pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
4984 
4985 	irdma_debug(dev, IRDMA_DEBUG_HMC,
4986 		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
4987 		    qp_count, max_sds,
4988 		    hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
4989 		    hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
4990 		    hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
4991 		    hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
4992 		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
4993 		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
4994 	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
4995 	    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
4996 	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
4997 	    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
4998 	hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
4999 	    hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
5000 	if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
5001 		hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
5002 
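	/* Halve qpwanted until the resulting Q1 count fits the Q1 object limit. */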
5003 	while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
5004 		qpwanted /= 2;
5005 
5006 	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
5007 		cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
5008 		while (hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt > hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt) {
5009 			qpwanted /= 2;
5010 			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
5011 		}
5012 	}
5013 
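	/*
	 * Iteratively shrink the QP, MR and PBLE counts until the estimated
	 * SD requirement fits within the SDs available to this function.
	 */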
5014 	do {
5015 		++loop_count;
5016 		hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
5017 		hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
5018 		    min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
5019 		hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0;	/* Reserved */
5020 		hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
5021 
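		/*
		 * Hash table entries: QPs plus MC entries, rounded up to a
		 * power of two and scaled by the HT multiplier.
		 */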
5022 		hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
5023 		powerof2 = 1;
5024 		while (powerof2 < hte)
5025 			powerof2 *= 2;
5026 		hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
5027 		    powerof2 * hmc_fpm_misc->ht_multiplier;
5028 		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
5029 			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
5030 		else
5031 			cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
5032 
5033 		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
5034 		hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
5035 		    hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
5036 		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
5037 		    hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
5038 		hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
5039 		    (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
5040 
5041 		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
5042 		sd_needed = irdma_est_sd(dev, hmc_info);
5043 		irdma_debug(dev, IRDMA_DEBUG_HMC,
5044 			    "sd_needed = %d, max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
5045 			    sd_needed, max_sds, mrwanted, pblewanted, qpwanted);
5046 
5047 		/* Do not reduce resources further. All objects fit with max SDs */
5048 		if (sd_needed <= max_sds)
5049 			break;
5050 
5051 		sd_diff = sd_needed - max_sds;
5052 		if (sd_diff > 128) {
5053 			if (!(loop_count % 2) && qpwanted > 128) {
5054 				qpwanted /= 2;
5055 			} else {
5056 				mrwanted /= 2;
5057 				pblewanted /= 2;
5058 			}
5059 			continue;
5060 		}
5061 		if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
5062 		    pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
5063 			pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
5064 			continue;
5065 		} else if (pblewanted > (100 * FPM_MULTIPLIER)) {
5066 			pblewanted -= 10 * FPM_MULTIPLIER;
5067 		} else if (pblewanted > FPM_MULTIPLIER) {
5068 			pblewanted -= FPM_MULTIPLIER;
5069 		} else if (qpwanted <= 128) {
5070 			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
5071 				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
5072 			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
5073 				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
5074 		}
5075 		if (mrwanted > FPM_MULTIPLIER)
5076 			mrwanted -= FPM_MULTIPLIER;
5077 		if (!(loop_count % 10) && qpwanted > 128) {
5078 			qpwanted /= 2;
5079 			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
5080 				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
5081 		}
5082 	} while (loop_count < 2000);
5083 
5084 	if (sd_needed > max_sds) {
5085 		irdma_debug(dev, IRDMA_DEBUG_HMC,
5086 			    "cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
5087 			    loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
5088 		return -EINVAL;
5089 	}
5090 
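	/* Hand any SD budget left over after downsizing back to PBLEs. */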
5091 	if (loop_count > 1 && sd_needed < max_sds) {
5092 		pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
5093 		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
5094 		sd_needed = irdma_est_sd(dev, hmc_info);
5095 	}
5096 
5097 	irdma_debug(dev, IRDMA_DEBUG_HMC,
5098 		    "loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
5099 		    loop_count, sd_needed,
5100 		    hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
5101 		    hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
5102 		    hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
5103 		    hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
5104 		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
5105 		    hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
5106 		    hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
5107 
5108 	ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
5109 	if (ret_code) {
5110 		irdma_debug(dev, IRDMA_DEBUG_HMC,
5111 			    "cfg_iw_fpm returned error_code[x%08X]\n",
5112 			    readl(dev->hw_regs[IRDMA_CQPERRCODES]));
5113 		return ret_code;
5114 	}
5115 
5116 	mem_size = sizeof(struct irdma_hmc_sd_entry) *
5117 	    (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
5118 	virt_mem.size = mem_size;
5119 	virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
5120 	if (!virt_mem.va) {
5121 		irdma_debug(dev, IRDMA_DEBUG_HMC,
5122 			    "failed to allocate memory for sd_entry buffer\n");
5123 		return -ENOMEM;
5124 	}
5125 	hmc_info->sd_table.sd_entry = virt_mem.va;
5126 
5127 	return ret_code;
5128 }
5129 
5130 /**
5131  * irdma_exec_cqp_cmd - execute a cqp command when a wqe is available
5132  * @dev: rdma device
5133  * @pcmdinfo: cqp command info
5134  */
5135 static int
5136 irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
5137 		   struct cqp_cmds_info *pcmdinfo)
5138 {
5139 	int status;
5140 	struct irdma_dma_mem val_mem;
5141 	bool alloc = false;
5142 
5143 	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
5144 	switch (pcmdinfo->cqp_cmd) {
5145 	case IRDMA_OP_CEQ_DESTROY:
5146 		status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
5147 					      pcmdinfo->in.u.ceq_destroy.scratch,
5148 					      pcmdinfo->post_sq);
5149 		break;
5150 	case IRDMA_OP_AEQ_DESTROY:
5151 		status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
5152 					      pcmdinfo->in.u.aeq_destroy.scratch,
5153 					      pcmdinfo->post_sq);
5154 		break;
5155 	case IRDMA_OP_CEQ_CREATE:
5156 		status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
5157 					     pcmdinfo->in.u.ceq_create.scratch,
5158 					     pcmdinfo->post_sq);
5159 		break;
5160 	case IRDMA_OP_AEQ_CREATE:
5161 		status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
5162 					     pcmdinfo->in.u.aeq_create.scratch,
5163 					     pcmdinfo->post_sq);
5164 		break;
5165 	case IRDMA_OP_QP_UPLOAD_CONTEXT:
5166 		status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
5167 						    &pcmdinfo->in.u.qp_upload_context.info,
5168 						    pcmdinfo->in.u.qp_upload_context.scratch,
5169 						    pcmdinfo->post_sq);
5170 		break;
5171 	case IRDMA_OP_CQ_CREATE:
5172 		status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
5173 					    pcmdinfo->in.u.cq_create.scratch,
5174 					    pcmdinfo->in.u.cq_create.check_overflow,
5175 					    pcmdinfo->post_sq);
5176 		break;
5177 	case IRDMA_OP_CQ_MODIFY:
5178 		status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
5179 					    &pcmdinfo->in.u.cq_modify.info,
5180 					    pcmdinfo->in.u.cq_modify.scratch,
5181 					    pcmdinfo->post_sq);
5182 		break;
5183 	case IRDMA_OP_CQ_DESTROY:
5184 		status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
5185 					     pcmdinfo->in.u.cq_destroy.scratch,
5186 					     pcmdinfo->post_sq);
5187 		break;
5188 	case IRDMA_OP_QP_FLUSH_WQES:
5189 		status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
5190 						&pcmdinfo->in.u.qp_flush_wqes.info,
5191 						pcmdinfo->in.u.qp_flush_wqes.scratch,
5192 						pcmdinfo->post_sq);
5193 		break;
5194 	case IRDMA_OP_GEN_AE:
5195 		status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
5196 					 &pcmdinfo->in.u.gen_ae.info,
5197 					 pcmdinfo->in.u.gen_ae.scratch,
5198 					 pcmdinfo->post_sq);
5199 		break;
5200 	case IRDMA_OP_MANAGE_PUSH_PAGE:
5201 		status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
5202 						   &pcmdinfo->in.u.manage_push_page.info,
5203 						   pcmdinfo->in.u.manage_push_page.scratch,
5204 						   pcmdinfo->post_sq);
5205 		break;
5206 	case IRDMA_OP_UPDATE_PE_SDS:
5207 		status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
5208 					     &pcmdinfo->in.u.update_pe_sds.info,
5209 					     pcmdinfo->in.u.update_pe_sds.scratch);
5210 		break;
5211 	case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
5212 		/* switch to calling through the call table */
5213 		status =
5214 		    irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
5215 						      &pcmdinfo->in.u.manage_hmc_pm.info,
5216 						      pcmdinfo->in.u.manage_hmc_pm.scratch,
5217 						      true);
5218 		break;
5219 	case IRDMA_OP_SUSPEND:
5220 		status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
5221 					     pcmdinfo->in.u.suspend_resume.qp,
5222 					     pcmdinfo->in.u.suspend_resume.scratch);
5223 		break;
5224 	case IRDMA_OP_RESUME:
5225 		status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
5226 					    pcmdinfo->in.u.suspend_resume.qp,
5227 					    pcmdinfo->in.u.suspend_resume.scratch);
5228 		break;
5229 	case IRDMA_OP_QUERY_FPM_VAL:
5230 		val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
5231 		val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
5232 		status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
5233 						pcmdinfo->in.u.query_fpm_val.scratch,
5234 						pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
5235 						&val_mem, true, IRDMA_CQP_WAIT_EVENT);
5236 		break;
5237 	case IRDMA_OP_COMMIT_FPM_VAL:
5238 		val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
5239 		val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
5240 		status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
5241 						 pcmdinfo->in.u.commit_fpm_val.scratch,
5242 						 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
5243 						 &val_mem,
5244 						 true,
5245 						 IRDMA_CQP_WAIT_EVENT);
5246 		break;
5247 	case IRDMA_OP_STATS_ALLOCATE:
5248 		alloc = true;
5249 		/* fallthrough */
5250 	case IRDMA_OP_STATS_FREE:
5251 		status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
5252 						    &pcmdinfo->in.u.stats_manage.info,
5253 						    alloc,
5254 						    pcmdinfo->in.u.stats_manage.scratch);
5255 		break;
5256 	case IRDMA_OP_STATS_GATHER:
5257 		status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
5258 					       &pcmdinfo->in.u.stats_gather.info,
5259 					       pcmdinfo->in.u.stats_gather.scratch);
5260 		break;
5261 	case IRDMA_OP_WS_MODIFY_NODE:
5262 		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5263 						 &pcmdinfo->in.u.ws_node.info,
5264 						 IRDMA_MODIFY_NODE,
5265 						 pcmdinfo->in.u.ws_node.scratch);
5266 		break;
5267 	case IRDMA_OP_WS_DELETE_NODE:
5268 		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5269 						 &pcmdinfo->in.u.ws_node.info,
5270 						 IRDMA_DEL_NODE,
5271 						 pcmdinfo->in.u.ws_node.scratch);
5272 		break;
5273 	case IRDMA_OP_WS_ADD_NODE:
5274 		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
5275 						 &pcmdinfo->in.u.ws_node.info,
5276 						 IRDMA_ADD_NODE,
5277 						 pcmdinfo->in.u.ws_node.scratch);
5278 		break;
5279 	case IRDMA_OP_SET_UP_MAP:
5280 		status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
5281 					     &pcmdinfo->in.u.up_map.info,
5282 					     pcmdinfo->in.u.up_map.scratch);
5283 		break;
5284 	case IRDMA_OP_QUERY_RDMA_FEATURES:
5285 		status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
5286 						      &pcmdinfo->in.u.query_rdma.query_buff_mem,
5287 						      pcmdinfo->in.u.query_rdma.scratch);
5288 		break;
5289 	case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
5290 		status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
5291 						      pcmdinfo->in.u.del_arp_cache_entry.scratch,
5292 						      pcmdinfo->in.u.del_arp_cache_entry.arp_index,
5293 						      pcmdinfo->post_sq);
5294 		break;
5295 	case IRDMA_OP_MANAGE_APBVT_ENTRY:
5296 		status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
5297 						     &pcmdinfo->in.u.manage_apbvt_entry.info,
5298 						     pcmdinfo->in.u.manage_apbvt_entry.scratch,
5299 						     pcmdinfo->post_sq);
5300 		break;
5301 	case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
5302 		status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
5303 							   &pcmdinfo->in.u.manage_qhash_table_entry.info,
5304 							   pcmdinfo->in.u.manage_qhash_table_entry.scratch,
5305 							   pcmdinfo->post_sq);
5306 		break;
5307 	case IRDMA_OP_QP_MODIFY:
5308 		status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
5309 					    &pcmdinfo->in.u.qp_modify.info,
5310 					    pcmdinfo->in.u.qp_modify.scratch,
5311 					    pcmdinfo->post_sq);
5312 		break;
5313 	case IRDMA_OP_QP_CREATE:
5314 		status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
5315 					    &pcmdinfo->in.u.qp_create.info,
5316 					    pcmdinfo->in.u.qp_create.scratch,
5317 					    pcmdinfo->post_sq);
5318 		break;
5319 	case IRDMA_OP_QP_DESTROY:
5320 		status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
5321 					     pcmdinfo->in.u.qp_destroy.scratch,
5322 					     pcmdinfo->in.u.qp_destroy.remove_hash_idx,
5323 					     pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
5324 					     pcmdinfo->post_sq);
5325 		break;
5326 	case IRDMA_OP_ALLOC_STAG:
5327 		status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
5328 					     &pcmdinfo->in.u.alloc_stag.info,
5329 					     pcmdinfo->in.u.alloc_stag.scratch,
5330 					     pcmdinfo->post_sq);
5331 		break;
5332 	case IRDMA_OP_MR_REG_NON_SHARED:
5333 		status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
5334 						    &pcmdinfo->in.u.mr_reg_non_shared.info,
5335 						    pcmdinfo->in.u.mr_reg_non_shared.scratch,
5336 						    pcmdinfo->post_sq);
5337 		break;
5338 	case IRDMA_OP_DEALLOC_STAG:
5339 		status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
5340 					       &pcmdinfo->in.u.dealloc_stag.info,
5341 					       pcmdinfo->in.u.dealloc_stag.scratch,
5342 					       pcmdinfo->post_sq);
5343 		break;
5344 	case IRDMA_OP_MW_ALLOC:
5345 		status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
5346 					   &pcmdinfo->in.u.mw_alloc.info,
5347 					   pcmdinfo->in.u.mw_alloc.scratch,
5348 					   pcmdinfo->post_sq);
5349 		break;
5350 	case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
5351 		status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
5352 						      &pcmdinfo->in.u.add_arp_cache_entry.info,
5353 						      pcmdinfo->in.u.add_arp_cache_entry.scratch,
5354 						      pcmdinfo->post_sq);
5355 		break;
5356 	case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
5357 		status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
5358 							pcmdinfo->in.u.alloc_local_mac_entry.scratch,
5359 							pcmdinfo->post_sq);
5360 		break;
5361 	case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
5362 		status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
5363 						      &pcmdinfo->in.u.add_local_mac_entry.info,
5364 						      pcmdinfo->in.u.add_local_mac_entry.scratch,
5365 						      pcmdinfo->post_sq);
5366 		break;
5367 	case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
5368 		status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
5369 						      pcmdinfo->in.u.del_local_mac_entry.scratch,
5370 						      pcmdinfo->in.u.del_local_mac_entry.entry_idx,
5371 						      pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
5372 						      pcmdinfo->post_sq);
5373 		break;
5374 	case IRDMA_OP_AH_CREATE:
5375 		status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
5376 					    &pcmdinfo->in.u.ah_create.info,
5377 					    pcmdinfo->in.u.ah_create.scratch);
5378 		break;
5379 	case IRDMA_OP_AH_DESTROY:
5380 		status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
5381 					     &pcmdinfo->in.u.ah_destroy.info,
5382 					     pcmdinfo->in.u.ah_destroy.scratch);
5383 		break;
5384 	case IRDMA_OP_MC_CREATE:
5385 		status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
5386 						   &pcmdinfo->in.u.mc_create.info,
5387 						   pcmdinfo->in.u.mc_create.scratch);
5388 		break;
5389 	case IRDMA_OP_MC_DESTROY:
5390 		status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
5391 						    &pcmdinfo->in.u.mc_destroy.info,
5392 						    pcmdinfo->in.u.mc_destroy.scratch);
5393 		break;
5394 	case IRDMA_OP_MC_MODIFY:
5395 		status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
5396 						   &pcmdinfo->in.u.mc_modify.info,
5397 						   pcmdinfo->in.u.mc_modify.scratch);
5398 		break;
5399 	default:
5400 		status = -EOPNOTSUPP;
5401 		break;
5402 	}
5403 
5404 	return status;
5405 }
5406 
5407 /**
5408  * irdma_process_cqp_cmd - process or queue a cqp command
5409  * @dev: sc device struct
5410  * @pcmdinfo: cqp command info
5411  */
5412 int
5413 irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
5414 		      struct cqp_cmds_info *pcmdinfo)
5415 {
5416 	int status = 0;
5417 	unsigned long flags;
5418 
5419 	spin_lock_irqsave(&dev->cqp_lock, flags);
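	/* Execute now if there is no backlog and the CQP SQ has room; otherwise queue it. */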
5420 	if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
5421 		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5422 	else
5423 		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
5424 	spin_unlock_irqrestore(&dev->cqp_lock, flags);
5425 	return status;
5426 }
5427 
5428 /**
5429  * irdma_process_bh - called from tasklet for cqp list
5430  * @dev: sc device struct
5431  */
5432 int
5433 irdma_process_bh(struct irdma_sc_dev *dev)
5434 {
5435 	int status = 0;
5436 	struct cqp_cmds_info *pcmdinfo;
5437 	unsigned long flags;
5438 
5439 	spin_lock_irqsave(&dev->cqp_lock, flags);
5440 	while (!list_empty(&dev->cqp_cmd_head) &&
5441 	       !irdma_cqp_ring_full(dev->cqp)) {
5442 		pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
5443 		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
5444 		if (status)
5445 			break;
5446 	}
5447 	spin_unlock_irqrestore(&dev->cqp_lock, flags);
5448 	return status;
5449 }
5450 
5451 /**
5452  * irdma_cfg_aeq - Configure AEQ interrupt
5453  * @dev: pointer to the device structure
5454  * @idx: vector index
5455  * @enable: true to enable, false to disable
5456  */
5457 void
5458 irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
5459 {
5460 	u32 reg_val;

5461 	reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
5462 	    FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
5463 	    FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);
5464 
5465 	writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
5466 }
5467 
5468 /**
5469  * sc_vsi_update_stats - Update statistics
5470  * @vsi: sc_vsi instance to update
5471  */
5472 void
5473 sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
5474 {
5475 	struct irdma_gather_stats *gather_stats;
5476 	struct irdma_gather_stats *last_gather_stats;
5477 
5478 	gather_stats = vsi->pestat->gather_info.gather_stats_va;
5479 	last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
5480 	irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
5481 			   last_gather_stats, vsi->dev->hw_stats_map,
5482 			   vsi->dev->hw_attrs.max_stat_idx);
5483 }
5484 
5485 /**
5486  * irdma_wait_pe_ready - Check if firmware is ready
5487  * @dev: provides access to registers
5488  */
5489 static int
5490 irdma_wait_pe_ready(struct irdma_sc_dev *dev)
5491 {
5492 	u32 statuscpu0;
5493 	u32 statuscpu1;
5494 	u32 statuscpu2;
5495 	u32 retrycount = 0;
5496 
5497 	do {
5498 		statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
5499 		statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
5500 		statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
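		/* All three PE CPUs must report ready (0x80). */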
5501 		if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
5502 		    statuscpu2 == 0x80)
5503 			return 0;
5504 		mdelay(1000);
5505 	} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
5506 	return -1;
5507 }
5508 
5509 static inline void
5510 irdma_sc_init_hw(struct irdma_sc_dev *dev)
5511 {
5512 	switch (dev->hw_attrs.uk_attrs.hw_rev) {
5513 	case IRDMA_GEN_2:
5514 		icrdma_init_hw(dev);
5515 		break;
5516 	}
5517 }
5518 
5519 /**
5520  * irdma_sc_dev_init - Initialize control part of device
5521  * @dev: Device pointer
5522  * @info: Device init info
5523  */
5524 int
5525 irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
5526 {
5527 	u32 val;
5528 	int ret_code = 0;
5529 	u8 db_size;
5530 
5531 	INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for CQP command backlog */
5532 	mutex_init(&dev->ws_mutex);
5533 	dev->debug_mask = info->debug_mask;
5534 	dev->hmc_fn_id = info->hmc_fn_id;
5535 	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5536 	dev->fpm_query_buf = info->fpm_query_buf;
5537 	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5538 	dev->fpm_commit_buf = info->fpm_commit_buf;
5539 	dev->hw = info->hw;
5540 	dev->hw->hw_addr = info->bar0;
5541 	/* Setup the hardware limits, hmc may limit further */
5542 	dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
5543 	dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
5544 	dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
5545 	dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
5546 	dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
5547 	dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
5548 	dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
5549 	dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
5550 	dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
5551 	dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
5552 	dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
5553 	dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
5554 	dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
5555 	dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
5556 
5557 	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
5558 	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
5559 	dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
5560 	dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
5561 
5562 	dev->hw_attrs.max_pe_ready_count = 14;
5563 	dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
5564 	dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
5565 	dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
5566 
5567 	irdma_sc_init_hw(dev);
5568 
5569 	if (irdma_wait_pe_ready(dev))
5570 		return -ETIMEDOUT;
5571 
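	/* The PE doorbell space must be enabled as 4M or 8M before the doorbell address is usable. */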
5572 	val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
5573 	db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
5574 	if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
5575 		irdma_debug(dev, IRDMA_DEBUG_DEV,
5576 			    "RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
5577 			    val, db_size);
5578 		return -ENODEV;
5579 	}
5580 	dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
5581 
5582 	return ret_code;
5583 }
5584 
5585 /**
5586  * irdma_stat_val - Extract HW counter value from statistics buffer
5587  * @stats_val: pointer to statistics buffer
5588  * @byteoff: byte offset of counter value in the buffer (8B-aligned)
5589  * @bitoff: bit offset of counter value within 8B entry
5590  * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
5591  */
5592 static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff,
5593 				 u8 bitoff, u64 bitmask)
{
5594 	u16 idx = byteoff / sizeof(*stats_val);
5595 
5596 	return (stats_val[idx] >> bitoff) & bitmask;
5597 }
5598 
5599 /**
5600  * irdma_stat_delta - Calculate counter delta
5601  * @new_val: updated counter value
5602  * @old_val: last counter value
5603  * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
5604  */
5605 static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
{
5606 	if (new_val >= old_val)
5607 		return new_val - old_val;
5608 	else
5609 		/* roll-over case */
5610 		return max_val - old_val + new_val + 1;
5611 }
5612 
5613 /**
5614  * irdma_update_stats - Update statistics
5615  * @hw_stats: hw_stats instance to update
5616  * @gather_stats: updated stat counters
5617  * @last_gather_stats: last stat counters
5618  * @map: HW stat map (hw_stats => gather_stats)
5619  * @max_stat_idx: number of HW stats
5620  */
5621 void
5622 irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
5623 		   struct irdma_gather_stats *gather_stats,
5624 		   struct irdma_gather_stats *last_gather_stats,
5625 		   const struct irdma_hw_stat_map *map,
5626 		   u16 max_stat_idx)
5627 {
5628 	u64 *stats_val = hw_stats->stats_val;
5629 	u16 i;
5630 
5631 	for (i = 0; i < max_stat_idx; i++) {
5632 		u64 new_val = irdma_stat_val(gather_stats->val,
5633 					     map[i].byteoff, map[i].bitoff,
5634 					     map[i].bitmask);
5635 		u64 last_val = irdma_stat_val(last_gather_stats->val,
5636 					      map[i].byteoff, map[i].bitoff,
5637 					      map[i].bitmask);
5638 
5639 		stats_val[i] += irdma_stat_delta(new_val, last_val,
5640 						 map[i].bitmask);
5641 	}
5642 
5643 	irdma_memcpy(last_gather_stats, gather_stats,
5644 		     sizeof(*last_gather_stats));
5645 }
5646